diff --git a/EyeTrackApp/eye_processor.py b/EyeTrackApp/eye_processor.py
index f142f62..0471284 100644
--- a/EyeTrackApp/eye_processor.py
+++ b/EyeTrackApp/eye_processor.py
@@ -239,7 +239,7 @@ class EyeProcessor:
 
     def HSRACM(self):
-        cx, cy, thresh = External_Run.HSRACE(self)
+        cx, cy, thresh = External_Run.HSRACS(self)
         if self.prev_x == None:
             self.prev_x = cx
             self.prev_y = cy
 
@@ -257,7 +257,7 @@ class EyeProcessor:
            #     print("EYE MOVED TOO FAST")
            #     self.output_images_and_update(thresh, EyeInformation(InformationOrigin.HSRAC, 0, 0, 0, False))
     def HSFM(self):
-        cx, cy, frame = HSF(self)
+        cx, cy, frame = External_Run_HSF.HSFS(self)
         out_x, out_y = cal_osc(self, cx, cy)
         if cx == 0:
             self.output_images_and_update(frame, EyeInformation(InformationOrigin.HSF, out_x, out_y, 0, True)) #update app
diff --git a/EyeTrackApp/haar_surround_feature.py b/EyeTrackApp/haar_surround_feature.py
index 6730b1b..2a5920a 100644
--- a/EyeTrackApp/haar_surround_feature.py
+++ b/EyeTrackApp/haar_surround_feature.py
@@ -1,35 +1,5 @@
-'''
-------------------------------------------------------------------------------------------------------
-
-                              ,@@@@@@
-                            @@@@@@@@@@@            @@@
-                          @@@@@@@@@@@@      @@@@@@@@@@@
-                        @@@@@@@@@@@@@   @@@@@@@@@@@@@@
-                      @@@@@@@/         ,@@@@@@@@@@@@@
-                         /@@@@@@@@@@@@@@@  @@@@@@@@
-                    @@@@@@@@@@@@@@@@@@@@@@@@ @@@@@
-                  @@@@@@@@                @@@@@
-                ,@@@                       @@@@&
-                                            @@@@@@.       @@@@
-                                  @@@      @@@@@@@@@/      @@@@@
-                              ,@@@.     @@@@@@((@ @@@@(
-                              //@@@        ,,  @@@@  @@@@@
-                             @@@(                @@@@@@@
-                             @@@  @          @@@@@@@@#
-                                 @@@@@@@@@@@@@@@@@
-                                @@@@@@@@@@@@@(
-
-HSR By: Sean.Denka (Optimization Wizard, Contributor), Summer#2406 (Main Algorithm Engineer)
-Algorithm App Implimentations By: Prohurtz#0001, qdot (Inital App Creator)
-
-Copyright (c) 2022 EyeTrackVR <3
------------------------------------------------------------------------------------------------------- 
-'''
-
-
 import functools
 import math
-import os
 import sys
 import timeit
 from functools import lru_cache
@@ -39,6 +9,8 @@ import numpy as np
 
 # from line_profiler_pycharm import profile
 
+video_path = "ezgif.com-gif-maker.avi"
+imshow_enable = True
 calc_print_enable = True
 save_video = False
 skip_autoradius = False
@@ -50,6 +22,7 @@ lru_maxsize_vs = 64
 # CV param
 default_radius = 20
 auto_radius_range = (default_radius - 10, default_radius + 10)  # (10,30)
+auto_radius_step = 1
 blink_init_frames = 60 * 3  # 60fps*3sec,Number of blink statistical frames
 # step==(x,y)
 default_step = (5, 5)  # bigger the steps,lower the processing time! ofc acc also takes an impact
@@ -86,7 +59,7 @@ def TimeitWrapper(*args, **kwargs):
 class TimeitResult(object):
     """
     from https://github.com/ipython/ipython/blob/339c0d510a1f3cb2158dd8c6e7f4ac89aa4c89d8/IPython/core/magics/execution.py#L55
-    
+
     Object returned by the timeit magic with info about the run.
     Contains the following attributes :
     loops: (int) number of loops done per measurement
@@ -278,7 +251,7 @@ class HaarSurroundFeature:
     def __init__(self, r_inner, r_outer=None, val=None):
         if r_outer is None:
             r_outer = r_inner * 3
-        
+        # print(r_outer)
         r_inner2 = r_inner * r_inner
         count_inner = r_inner2
         count_outer = r_outer * r_outer - r_inner2
@@ -309,10 +282,15 @@ class HaarSurroundFeature:
 
         return kernel
 
+def to_gray(frame):
+    # Faster because it skips checking whether the input image is already grayscale
+    # Perhaps it would be faster, with less overhead, to call cv2.cvtColor directly instead of going through this function
+    return cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
+
+
 @lru_cache(maxsize=lru_maxsize_vs)
 def frameint_get_xy_step(imageshape, xysteps, pad, start_offset=None, end_offset=None):
     """
-    
     :param imageshape: (height(row),width(col)). row==y,cal==x
     :param xysteps: (x,y)
     :param pad: int
@@ -361,7 +339,6 @@ def get_hsf_empty_array(len_syx, frameint_x, frame_int_dtype, fcshape):
 # @profile
 def conv_int(frame_int, kernel, xy_step, padding, xy_steps_list):
     """
-    
     :param frame_int:
     :param kernel: hsf
     :param step: (x,y)
@@ -442,183 +419,438 @@ def conv_int(frame_int, kernel, xy_step, padding, xy_steps_list):
 
     return frame_conv, min_response, center
 
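The integral-image trick that conv_int builds on can be seen in isolation below. This is a minimal sketch, not part of the patch: haar_response_at, its half-widths, and the two weights are illustrative stand-ins for what CvParameters and HaarSurroundFeature actually compute; only the 4-corner box-sum lookup is the real mechanism.

import cv2
import numpy as np

def haar_response_at(frame_int, x, y, r_inner, r_outer, val_inner, val_outer):
    # Sum of pixels over [y0, y1) x [x0, x1) via the standard 4-corner
    # lookup on cv2.integral's output (which prepends a zero row/column).
    def box_sum(ii, x0, y0, x1, y1):
        return ii[y1, x1] - ii[y0, x1] - ii[y1, x0] + ii[y0, x0]

    inner = box_sum(frame_int, x - r_inner, y - r_inner, x + r_inner, y + r_inner)
    outer = box_sum(frame_int, x - r_outer, y - r_outer, x + r_outer, y + r_outer)
    # Dark centre weighted against its surround ring (outer minus inner);
    # a pupil-centred window gives the most negative (minimal) response.
    return val_inner * inner + val_outer * (outer - inner)

gray = np.full((60, 60), 128, dtype=np.uint8)  # stands in for a padded eye frame
response = haar_response_at(cv2.integral(gray), 30, 30, 10, 30, -1.0, 0.11)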
-# @profile
-
+class Auto_Radius_Calc(object):
+    def __init__(self):
+        self.response_list = []
+        self.radius_cand_list = []
+        self.adj_comp_flag = False
+
+        self.radius_middle_index = None
+
+        self.left_item = None
+        self.right_item = None
+        self.left_index = None
+        self.right_index = None
-
-
-timedict = {"to_gray": [], "int_img": [], "conv_int": [], "crop": [], "total_cv": []}
-# I'd like to take into account things like print, end_time - start_time processing time, etc., but it's too much trouble.
-
-# For measuring total processing time
-main_start_time = timeit.default_timer()
-
-rng = np.random.default_rng()
-cvparam = CvParameters(default_radius, default_step)
-
-cv_mode = ["first_frame", "radius_adjust", "init", "normal"]
-now_mode = cv_mode[0]
-
-radius_cand_list = []
-
-# response_min=0
-response_max = None
-response_list = []
-
-def HSF(self):
-    #global now_mode
-    #global response_list
-    #global radius_cand_list
-    #global response_max
-    # default_radius = 15
-    #frame = self.current_image_gray
-
-    global now_mode
-    global response_list
-    global radius_cand_list
-    global response_max
-
-    global skip_autoradius
-    global default_radius
-
-    global prev_rany
-    global prev_ranx
-    global prev_hsfy
-    global prev_hsfx
-    skip_autoradius = self.settings.gui_skip_autoradius
-    default_radius = self.settings.gui_HSF_radius
-
-    frame = self.current_image_gray
-    if now_mode == cv_mode[1]:
-        prev_res_len = len(response_list)
+    def get_radius(self):
+        prev_res_len = len(self.response_list)
         # adjustment of radius
         if prev_res_len == 1:
             # len==1==response_list==[default_radius]
-            cvparam.radius = auto_radius_range[0]
+            self.adj_comp_flag = False
+            return auto_radius_range[0]
         elif prev_res_len == 2:
             # len==2==response_list==[default_radius, auto_radius_range[0]]
-            cvparam.radius = auto_radius_range[1]
+            self.adj_comp_flag = False
+            return auto_radius_range[1]
         elif prev_res_len == 3:
             # len==3==response_list==[default_radius,auto_radius_range[0],auto_radius_range[1]]
-            sort_res = sorted(response_list, key=lambda x: x[1])[0]
+            if self.response_list[1][1] < self.response_list[2][1]:
+                self.left_item = self.response_list[1]
+                self.right_item = self.response_list[0]
+            else:
+                self.left_item = self.response_list[0]
+                self.right_item = self.response_list[2]
+            self.radius_cand_list = [i for i in range(self.left_item[0], self.right_item[0] + auto_radius_step, auto_radius_step)]
+            self.left_index = 0
+            self.right_index = len(self.radius_cand_list) - 1
+            self.radius_middle_index = (self.left_index + self.right_index) // 2
+            self.adj_comp_flag = False
+            return self.radius_cand_list[self.radius_middle_index]
+        else:
+            if self.left_index <= self.right_index and self.left_index != self.radius_middle_index:
+                if (self.left_item[1] + self.response_list[-1][1]) < (self.right_item[1] + self.response_list[-1][1]):
+                    self.right_item = self.response_list[-1]
+                    self.right_index = self.radius_middle_index - 1
+                    self.radius_middle_index = (self.left_index + self.right_index) // 2
+                    self.adj_comp_flag = False
+                    return self.radius_cand_list[self.radius_middle_index]
+                if (self.left_item[1] + self.response_list[-1][1]) > (self.right_item[1] + self.response_list[-1][1]):
+                    self.left_item = self.response_list[-1]
+                    self.left_index = self.radius_middle_index + 1
+                    self.radius_middle_index = (self.left_index + self.right_index) // 2
+                    self.adj_comp_flag = False
+                    return self.radius_cand_list[self.radius_middle_index]
+            self.adj_comp_flag = True
+            return self.radius_cand_list[self.radius_middle_index]
+
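Since the comment further down explicitly asks for a binary search, here is the shape of the search the new get_radius implements, as a standalone sketch. search_min_radius is a hypothetical helper and assumes the HSF response is roughly unimodal over the candidate radii; the class version spreads the same steps across frames because each probe costs one processed frame.

def search_min_radius(candidates, response_of):
    # Binary search for the minimum of a unimodal response curve.
    lo, hi = 0, len(candidates) - 1
    while lo < hi:
        mid = (lo + hi) // 2
        if response_of(candidates[mid]) < response_of(candidates[mid + 1]):
            hi = mid       # minimum lies at mid or to its left
        else:
            lo = mid + 1   # minimum lies strictly to the right
    return candidates[lo]

# e.g. search_min_radius(list(range(10, 31)), run_hsf_at_radius)
# where run_hsf_at_radius is a hypothetical one-frame probe.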
+    def get_radius_base(self):
+        """
+        Use it when the new version doesn't work well.
+        :return:
+        """
+
+        prev_res_len = len(self.response_list)
+        # adjustment of radius
+        if prev_res_len == 1:
+            # len==1==response_list==[default_radius]
+            self.adj_comp_flag = False
+            return auto_radius_range[0]
+        elif prev_res_len == 2:
+            # len==2==response_list==[default_radius, auto_radius_range[0]]
+            self.adj_comp_flag = False
+            return auto_radius_range[1]
+        elif prev_res_len == 3:
+            # len==3==response_list==[default_radius,auto_radius_range[0],auto_radius_range[1]]
+            sort_res = sorted(self.response_list, key=lambda x: x[1])[0]
             # Extract the radius with the lowest response value
             if sort_res[0] == default_radius:
                 # If the default value is best, change now_mode to init after setting radius to the default value.
-                cvparam.radius = default_radius
-                now_mode = cv_mode[2] if not skip_blink_detect else cv_mode[3]
-                response_list = []
+                self.adj_comp_flag = True
+                return default_radius
             elif sort_res[0] == auto_radius_range[0]:
-                radius_cand_list = [i for i in range(auto_radius_range[0], default_radius, default_step[0])][1:]
-                # default_step is defined separately for xy, but radius is shared by xy, so it may be buggy
-                # It should be no problem to set it to anything other than default_step
-                cvparam.radius = radius_cand_list.pop()
+                self.radius_cand_list = [i for i in range(auto_radius_range[0], default_radius, auto_radius_step)][1:]
+                self.adj_comp_flag = False
+                return self.radius_cand_list.pop()
             else:
-                radius_cand_list = [i for i in range(default_radius, auto_radius_range[1], default_step[0])][1:]
-                # default_step is defined separately for xy, but radius is shared by xy, so it may be buggy
-                # It should be no problem to set it to anything other than default_step
-                cvparam.radius = radius_cand_list.pop()
+                self.radius_cand_list = [i for i in range(default_radius, auto_radius_range[1], auto_radius_step)][1:]
+                self.adj_comp_flag = False
+                return self.radius_cand_list.pop()
         else:
             # Try the contents of the radius_cand_list in order until the radius_cand_list runs out
             # Better make it a binary search.
-            if len(radius_cand_list) == 0:
-                sort_res = sorted(response_list, key=lambda x: x[1])[0]
-                cvparam.radius = sort_res[0]
-                now_mode = cv_mode[2] if not skip_blink_detect else cv_mode[3]
-                response_list = []
+            if len(self.radius_cand_list) == 0:
+                sort_res = sorted(self.response_list, key=lambda x: x[1])[0]
+                self.adj_comp_flag = True
+                return sort_res[0]
             else:
-                cvparam.radius = radius_cand_list.pop()
+                self.adj_comp_flag = False
+                return self.radius_cand_list.pop()
 
-    radius, pad, step, hsf = cvparam.get_rpsh()
-
-    # For measuring processing time of image processing
-    cv_start_time = timeit.default_timer()
-
-    gray_frame = frame
-    timedict["to_gray"].append(timeit.default_timer() - cv_start_time)
-
-    # Calculate the integral image of the frame
-    int_start_time = timeit.default_timer()
-    # BORDER_CONSTANT is faster than BORDER_REPLICATE There seems to be almost no negative impact when BORDER_CONSTANT is used.
-    frame_pad = cv2.copyMakeBorder(gray_frame, pad, pad, pad, pad, cv2.BORDER_CONSTANT)
-    frame_int = cv2.integral(frame_pad)
-    timedict["int_img"].append(timeit.default_timer() - int_start_time)
-
-    # Convolve the feature with the integral image
-    conv_int_start_time = timeit.default_timer()
-    xy_step = frameint_get_xy_step(frame_int.shape, step, pad, start_offset=None, end_offset=None)
-    frame_conv, response, center_xy = conv_int(frame_int, hsf, step, pad, xy_step)
-    timedict["conv_int"].append(timeit.default_timer() - conv_int_start_time)
-
-    crop_start_time = timeit.default_timer()
-    # Define the center point and radius
-    center_x, center_y = center_xy
-    upper_x = center_x + 20
-    lower_x = center_x - 20
-    upper_y = center_y + 20
-    lower_y = center_y - 20
-
-    # Crop the image using the calculated bounds
-    cropped_image = gray_frame[lower_y:upper_y, lower_x:upper_x]
-    if now_mode == cv_mode[0] or now_mode == cv_mode[1]:
-        # If mode is first_frame or radius_adjust, record current radius and response
-        response_list.append((radius, response))
-    elif now_mode == cv_mode[2]:
-        # Statistics for blink detection
-        if len(response_list) < blink_init_frames:
-            # Record the average value of cropped_image
-            response_list.append(cv2.mean(cropped_image)[0])
-        else:
-            # Calculate response_max by computing interquartile range, IQR
-            # Change cv_mode to normal
-            response_list = np.array(response_list)
-            # 25%,75%
-            # This value may need to be adjusted depending on the environment.
-            quartile_1, quartile_3 = np.percentile(response_list, [25, 75])
-            iqr = quartile_3 - quartile_1
-            # response_min = quartile_1 - (iqr * 1.5)
-            response_max = quartile_3 + (iqr * 1.5)
-            now_mode = cv_mode[3]
-    else:
-        if 0 in cropped_image.shape:
-            # If shape contains 0, it is not detected well.
-            print("Something's wrong.")
-        else:
-            # If the average value of cropped_image is greater than response_max
-            # (i.e., if the cropimage is whitish
-            if response_max is not None and cv2.mean(cropped_image)[0] > response_max:
-                # blink
-                cv2.circle(frame, (center_x, center_y), 10, (0, 0, 255), -1)
-
-    # If you want to update response_max. it may be more cost-effective to rewrite response_list in the following way
-    # https://stackoverflow.com/questions/42771110/fastest-way-to-left-cycle-a-numpy-array-like-pop-push-for-a-queue
-
-    cv_end_time = timeit.default_timer()
-    timedict["crop"].append(cv_end_time - crop_start_time)
-    timedict["total_cv"].append(cv_end_time - cv_start_time)
-
-    # the lower the response the better the likelyhood of there being a pupil. you can adujst the radius and steps accordingly
-    print('Kernel response:', response)
-    print('Pixel position:', center_xy)
+    def add_response(self, radius, response):
+        self.response_list.append((radius, response))
+        return None
 
-    if now_mode == cv_mode[0]:
-        # Moving from first_frame to the next mode
-        if skip_autoradius and skip_blink_detect:
-            now_mode = cv_mode[3]
-            response_list = []
-        elif skip_autoradius:
-            now_mode = cv_mode[2]
-            response_list = []
-        else:
-            now_mode = cv_mode[1]
-
-    try:
-        self.failed = 0
-        cv2.circle(frame, (center_x, center_y), 10, (0, 0, 255), -1)
-        return center_x, center_y, frame
+
+class Blink_Detector(object):
+    def __init__(self):
+        self.response_list = []
+        self.response_max = None
+        self.enable_detect_flg = False
+        self.quartile_1 = None
+
+    def calc_thresh(self):
+        # Calculate response_max by computing interquartile range, IQR
+        # self.response_listo = np.array(self.response_listo)
+        # 25%,75%
+        # This value may need to be adjusted depending on the environment.
+        # quartile_1, quartile_3 = np.percentile(self.response_listo, [25, 75])
+        # iqr = quartile_3 - quartile_1
+        # self.response_maxo = quartile_3 + (iqr * 1.5)
 
-    except:
-        self.failed = self.failed + 1
-        return 0, 0, frame
+        # quartile_1, quartile_3 = np.percentile(self.response_list, [25, 75])
+        # or
+        quartile_1, quartile_3 = np.percentile(np.array(self.response_list), [25, 75])
+        self.quartile_1 = quartile_1
+        iqr = quartile_3 - quartile_1
+        # response_min = quartile_1 - (iqr * 1.5)
+
+        self.response_max = float(quartile_3 + (iqr * 1.5))
+        # or
+        # self.response_max = quartile_3 + (iqr * 1.5)
+
+        self.enable_detect_flg = True
+        return None
+
+    def detect(self, now_response):
+        return now_response > self.response_max
+
+    def add_response(self, response):
+        self.response_list.append(response)
+        return None
+
+    def response_len(self):
+        return len(self.response_list)
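A worked sketch of the IQR rule calc_thresh applies (sample values are illustrative, not from the source): brightness means are collected while the eye is open, and detect() later flags any frame whose crop mean rises above the upper Tukey fence, Q3 + 1.5*IQR.

import numpy as np

samples = [60.0, 61.0, 62.5, 63.0, 64.0, 90.0]   # per-frame mean crop brightness
q1, q3 = np.percentile(np.array(samples), [25, 75])
response_max = float(q3 + (q3 - q1) * 1.5)       # upper Tukey fence
print(95.0 > response_max)                       # True -> treated as a blink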
+
+
+class CenterCorrection(object):
+    def __init__(self):
+        # Tunable parameters
+        kernel_size = 7  # 3 or 5 or 7
+        self.hist_thr = float(4)  # 4%
+        self.center_q1_radius = 20
+
+        self.setup_comp = False
+        self.quartile_1 = None
+        self.radius = None
+        self.frame_shape = None
+        self.frame_mask = None
+        self.frame_bin = None
+        self.frame_final = None
+        self.morph_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (kernel_size, kernel_size))
+        self.morph_kernel2 = np.ones((3, 3))
+        self.hist_index = np.arange(256)
+        self.hist = np.empty((256, 1))
+        self.hist_norm = np.empty((256, 1))
+
+    def init_array(self, gray_shape, quartile_1, radius):
+        self.frame_shape = gray_shape
+        self.frame_mask = np.empty(gray_shape, dtype=np.uint8)
+        self.frame_bin = np.empty(gray_shape, dtype=np.uint8)
+        self.frame_final = np.empty(gray_shape, dtype=np.uint8)
+        self.quartile_1 = quartile_1
+        self.radius = radius
+        self.setup_comp = True
+
+    # def reset_array(self):
+    #     self.frame_mask.fill(0)
+
+    def correction(self, gray_frame, orig_x, orig_y):
+        center_x, center_y = orig_x, orig_y
+        self.frame_mask.fill(0)
+
+        cv2.circle(self.frame_mask, center=(center_x, center_y), radius=int(self.radius * 2), color=255, thickness=-1)
+
+        # bottleneck
+        cv2.calcHist([gray_frame], [0], None, [256], [0, 256], hist=self.hist)
+
+        cv2.normalize(self.hist, self.hist_norm, alpha=100.0, norm_type=cv2.NORM_L1)
+        hist_per = self.hist_norm.cumsum()
+        hist_index_list = self.hist_index[hist_per >= self.hist_thr]
+        frame_thr = hist_index_list[0] if len(hist_index_list) else np.percentile(cv2.bitwise_or(255 - self.frame_mask, gray_frame), 4)
+
+        # bottleneck
+        self.frame_bin = cv2.threshold(gray_frame, frame_thr, 1, cv2.THRESH_BINARY_INV)[1]
+        cropped_x, cropped_y, cropped_w, cropped_h = cv2.boundingRect(self.frame_bin)
+
+        self.frame_final = cv2.bitwise_and(self.frame_bin, self.frame_mask)
+
+        # bottleneck
+        self.frame_final = cv2.morphologyEx(self.frame_final, cv2.MORPH_CLOSE, self.morph_kernel)
+        self.frame_final = cv2.morphologyEx(self.frame_final, cv2.MORPH_OPEN, self.morph_kernel)
+
+        if (cropped_h, cropped_w) == self.frame_shape:
+            # Not detected.
+            base_x, base_y = center_x, center_y
+        else:
+            base_x = cropped_x + cropped_w // 2
+            base_y = cropped_y + cropped_h // 2
+            if self.frame_final[base_y, base_x] != 1:
+                if self.frame_final[center_y, center_x] != 1:
+                    self.frame_final = cv2.morphologyEx(self.frame_final, cv2.MORPH_DILATE, self.morph_kernel2, iterations=3)
+                else:
+                    base_x, base_y = center_x, center_y
+
+        contours, _ = cv2.findContours(self.frame_final, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
+        contours_box = [cv2.boundingRect(cnt) for cnt in contours]
+        contours_dist = np.array(
+            [abs(base_x - (cnt_x + cnt_w / 2)) + abs(base_y - (cnt_y + cnt_h / 2)) for cnt_x, cnt_y, cnt_w, cnt_h in contours_box])
+
+        if len(contours_box):
+            cropped_x2, cropped_y2, cropped_w2, cropped_h2 = contours_box[contours_dist.argmin()]
+            x = cropped_x2 + cropped_w2 // 2
+            y = cropped_y2 + cropped_h2 // 2
+        else:
+            x = center_x
+            y = center_y
+
+        # if imshow_enable:
+        #     cv2.circle(frame, (orig_x, orig_y), 10, (255, 0, 0), -1)
+        #     cv2.circle(frame, (x, y), 7, (0, 0, 255), -1)
+
+        #
+        # out_x = center_x if abs(x - center_x) > radius else x
+        # out_y = center_y if abs(y - center_y) > radius else y
+        out_x, out_y = orig_x, orig_y
+        if gray_frame[int(max(y - 5, 0)):int(min(y + 5, self.frame_shape[0])),
+           int(max(x - 5, 0)):int(min(x + 5, self.frame_shape[1]))].min() < self.quartile_1:
+            out_x = x
+            out_y = y
+
+        # if imshow_enable:
+        #     cv2.circle(frame, (out_x, out_y), 5, (0, 255, 0), -1)
+        #
+        # cv2.imshow("frame_bin", self.frame_bin * 255)
+        # cv2.imshow("frame_final", self.frame_final * 255)
+        return out_x, out_y
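The histogram step inside correction() is easier to follow without the preallocated buffers. darkest_fraction_mask is a hypothetical standalone equivalent: it binarises roughly the darkest hist_thr percent of pixels, which is what isolates the pupil before the contour search.

import cv2
import numpy as np

def darkest_fraction_mask(gray, percent=4.0):
    hist = cv2.calcHist([gray], [0], None, [256], [0, 256])
    hist_norm = cv2.normalize(hist, None, alpha=100.0, norm_type=cv2.NORM_L1)  # sums to 100
    cumulative = hist_norm.cumsum()
    candidates = np.arange(256)[cumulative >= percent]
    thr = int(candidates[0]) if len(candidates) else int(np.percentile(gray, percent))
    # 0/1 mask of the darkest ~percent% of pixels, like self.frame_bin above.
    return cv2.threshold(gray, thr, 1, cv2.THRESH_BINARY_INV)[1]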
+
+
+class HSRAC_cls(object):
+    def __init__(self):
+        # I'd like to take into account things like print, end_time - start_time processing time, etc., but it's too much trouble.
+
+        # For measuring total processing time
+
+        self.main_start_time = timeit.default_timer()
+
+        self.rng = np.random.default_rng()
+        self.cvparam = CvParameters(default_radius, default_step)
+
+        self.cv_modeo = ["first_frame", "radius_adjust", "blink_adjust", "normal"]
+        self.now_modeo = self.cv_modeo[0]
+
+        self.auto_radius_calc = Auto_Radius_Calc()
+        self.blink_detector = Blink_Detector()
+        self.center_q1 = Blink_Detector()
+        self.center_correct = CenterCorrection()
+
+        self.cap = None
+
+        self.timedict = {"to_gray": [], "int_img": [], "conv_int": [], "crop": [], "total_cv": []}
+
+    def open_video(self, video_path):
+        # Temporary implementation to run
+        cap = cv2.VideoCapture(video_path)
+        if not cap.isOpened():
+            raise IOError("Error opening video stream or file")
+        self.cap = cap
+        return True
+
+    def read_frame(self):
+        # Temporary implementation to run
+        if not self.cap.isOpened():
+            return False
+        ret, frame = self.cap.read()
+        if ret:
+            # I have set it to grayscale (1ch) just in case, but if the frame is 1ch, this line can be commented out.
+            self.current_image_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
+            return True
+        return False
+
+    def single_run(self):
+        # Temporary implementation to run
+
+        ## default_radius = 14
+
+        frame = self.current_image_gray
+        if self.now_modeo == self.cv_modeo[1]:
+            # adjustment of radius
+
+            # debug print
+            # if calc_print_enable:
+            #     temp_radius = self.auto_radius_calc.get_radius()
+            #     print('Now radius:', temp_radius)
+            #     self.cvparam.radius = temp_radius
+
+            self.cvparam.radius = self.auto_radius_calc.get_radius()
+            if self.auto_radius_calc.adj_comp_flag:
+                self.now_modeo = self.cv_modeo[2] if not skip_blink_detect else self.cv_modeo[3]
+
+        radius, pad, step, hsf = self.cvparam.get_rpsh()
+
+        # For measuring processing time of image processing
+        cv_start_time = timeit.default_timer()
+
+        gray_frame = frame
+        self.timedict["to_gray"].append(timeit.default_timer() - cv_start_time)
+
+        # Calculate the integral image of the frame
+        int_start_time = timeit.default_timer()
+        # BORDER_CONSTANT is faster than BORDER_REPLICATE There seems to be almost no negative impact when BORDER_CONSTANT is used.
+        frame_pad = cv2.copyMakeBorder(gray_frame, pad, pad, pad, pad, cv2.BORDER_CONSTANT)
+        frame_int = cv2.integral(frame_pad)
+        self.timedict["int_img"].append(timeit.default_timer() - int_start_time)
+
+        # Convolve the feature with the integral image
+        conv_int_start_time = timeit.default_timer()
+        xy_step = frameint_get_xy_step(frame_int.shape, step, pad, start_offset=None, end_offset=None)
+        frame_conv, response, center_xy = conv_int(frame_int, hsf, step, pad, xy_step)
+        self.timedict["conv_int"].append(timeit.default_timer() - conv_int_start_time)
+
+        crop_start_time = timeit.default_timer()
+        # Define the center point and radius
+        center_x, center_y = center_xy
+        upper_x = center_x + radius
+        lower_x = center_x - radius
+        upper_y = center_y + radius
+        lower_y = center_y - radius
+
+        # Crop the image using the calculated bounds
+        cropped_image = gray_frame[lower_y:upper_y, lower_x:upper_x]
+
+        if self.now_modeo == self.cv_modeo[0] or self.now_modeo == self.cv_modeo[1]:
+            # If mode is first_frame or radius_adjust, record current radius and response
+            self.auto_radius_calc.add_response(radius, response)
+        elif self.now_modeo == self.cv_modeo[2]:
+            # Statistics for blink detection
+            if self.blink_detector.response_len() < blink_init_frames:
+                self.blink_detector.add_response(cv2.mean(cropped_image)[0])
+
+                upper_x = center_x + self.center_correct.center_q1_radius
+                lower_x = center_x - self.center_correct.center_q1_radius
+                upper_y = center_y + self.center_correct.center_q1_radius
+                lower_y = center_y - self.center_correct.center_q1_radius
+                self.center_q1.add_response(cv2.mean(gray_frame[lower_y:upper_y, lower_x:upper_x])[0])
+
+            else:
+
+                self.blink_detector.calc_thresh()
+                self.center_q1.calc_thresh()
+                self.now_modeo = self.cv_modeo[3]
+        else:
+            if 0 in cropped_image.shape:
+                # If shape contains 0, it is not detected well.
+                print("Something's wrong.")
+            else:
+                orig_x, orig_y = center_x, center_y
+                if self.blink_detector.enable_detect_flg:
+                    # If the average value of cropped_image is greater than response_max
+                    # (i.e., if the cropped image is whitish)
+                    if self.blink_detector.detect(cv2.mean(cropped_image)[0]):
+                        # blink
+                        pass
+                    else:
+                        # pass
+                        if not self.center_correct.setup_comp:
+                            self.center_correct.init_array(gray_frame.shape, self.center_q1.quartile_1, radius)
+
+                        center_x, center_y = self.center_correct.correction(gray_frame, center_x, center_y)
+                        # Define the center point and radius
+                        center_xy = (center_x, center_y)
+                        upper_x = center_x + radius
+                        lower_x = center_x - radius
+                        upper_y = center_y + radius
+                        lower_y = center_y - radius
+                        # Crop the image using the calculated bounds
+                        cropped_image = gray_frame[lower_y:upper_y, lower_x:upper_x]
+                if imshow_enable or save_video:
+                    cv2.circle(frame, (orig_x, orig_y), 6, (0, 0, 255), -1)
+                    cv2.circle(frame, (center_x, center_y), 3, (255, 0, 0), -1)
+            # If you want to update response_max, it may be more cost-effective to rewrite response_list in the following way
+            # https://stackoverflow.com/questions/42771110/fastest-way-to-left-cycle-a-numpy-array-like-pop-push-for-a-queue
+
+        cv_end_time = timeit.default_timer()
+        self.timedict["crop"].append(cv_end_time - crop_start_time)
+        self.timedict["total_cv"].append(cv_end_time - cv_start_time)
+
+        if calc_print_enable:
+            # the lower the response the better the likelihood of there being a pupil. you can adjust the radius and steps accordingly
+            print('Kernel response:', response)
+            print('Pixel position:', center_xy)
+
+        if imshow_enable:
+            if self.now_modeo != self.cv_modeo[0] and self.now_modeo != self.cv_modeo[1]:
+                if 0 in cropped_image.shape:
+                    # If shape contains 0, it is not detected well.
+                    pass
+                else:
+                    cv2.imshow("crop", cropped_image)
+            cv2.imshow("frame", frame)
+            if cv2.waitKey(1) & 0xFF == ord("q"):
+                pass
+
+        if self.now_modeo == self.cv_modeo[0]:
+            # Moving from first_frame to the next mode
+            if skip_autoradius and skip_blink_detect:
+                self.now_modeo = self.cv_modeo[3]
+            elif skip_autoradius:
+                self.now_modeo = self.cv_modeo[2]
+            else:
+                self.now_modeo = self.cv_modeo[1]
+
+        return center_x, center_y, frame
+
+
+class External_Run_HSF:
+
+    hsrac = HSRAC_cls()
+
+    def HSFS(self):
+        External_Run_HSF.hsrac.current_image_gray = self.current_image_gray
+        center_x, center_y, frame = External_Run_HSF.hsrac.single_run()
+        return center_x, center_y, frame
+
+
+if __name__ == '__main__':
+    hsrac = HSRAC_cls()
+    hsrac.open_video(video_path)
+    while hsrac.read_frame():
+        _ = hsrac.single_run()
\ No newline at end of file
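To tie this back to the eye_processor.py hunk at the top of the diff: HSFS() is deliberately written to be called with the processor instance itself, reading current_image_gray off it. A minimal stand-in caller (FakeProcessor and the file name are hypothetical, for illustration only):

import cv2

class FakeProcessor:
    def __init__(self, gray):
        self.current_image_gray = gray  # 1ch uint8 frame, as read_frame() produces

gray = cv2.imread("eye_sample.png", cv2.IMREAD_GRAYSCALE)  # hypothetical file
cx, cy, annotated = External_Run_HSF.HSFS(FakeProcessor(gray))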
diff --git a/EyeTrackApp/hsrac.py b/EyeTrackApp/hsrac.py
index 97bd416..5e08323 100644
--- a/EyeTrackApp/hsrac.py
+++ b/EyeTrackApp/hsrac.py
@@ -7,6 +7,274 @@ from functools import lru_cache
 import cv2
 import numpy as np
 
+
+
+#RANSAC
+
+video_path = "demo2.mp4"
+imshow_enable = False
+save_video = False
+
+thresh_add = 10
+
+
+class TimeitResult(object):
+    """
+    from https://github.com/ipython/ipython/blob/339c0d510a1f3cb2158dd8c6e7f4ac89aa4c89d8/IPython/core/magics/execution.py#L55
+
+    Object returned by the timeit magic with info about the run.
+    Contains the following attributes :
+    loops: (int) number of loops done per measurement
+    repeat: (int) number of times the measurement has been repeated
+    best: (float) best execution time / number
+    all_runs: (list of float) execution time of each run (in s)
+    """
+
+    def __init__(self, loops, repeat, best, worst, all_runs, precision):
+        self.loops = loops
+        self.repeat = repeat
+        self.best = best
+        self.worst = worst
+        self.all_runs = all_runs
+        self._precision = precision
+        self.timings = [dt / self.loops for dt in all_runs]
+
+    @property
+    def average(self):
+        return math.fsum(self.timings) / len(self.timings)
+
+    @property
+    def stdev(self):
+        mean = self.average
+        return (math.fsum([(x - mean) ** 2 for x in self.timings]) / len(self.timings)) ** 0.5
+
+    def __str__(self):
+        pm = '+-'
+        if hasattr(sys.stdout, 'encoding') and sys.stdout.encoding:
+            try:
+                u'\xb1'.encode(sys.stdout.encoding)
+                pm = u'\xb1'
+            except:
+                pass
+        return "min:{best} max:{worst} mean:{mean} {pm} {std} per loop (mean {pm} std. dev. of {runs} run{run_plural}, {loops:,} loop{loop_plural} each)".format(
+            pm=pm,
+            runs=self.repeat,
+            loops=self.loops,
+            loop_plural="" if self.loops == 1 else "s",
+            run_plural="" if self.repeat == 1 else "s",
+            mean=format_time(self.average, self._precision),
+            std=format_time(self.stdev, self._precision),
+            best=format_time(self.best, self._precision),
+            worst=format_time(self.worst, self._precision),
+        )
+
+    def _repr_pretty_(self, p, cycle):
+        unic = self.__str__()
+        p.text(u'<TimeitResult : ' + unic + u'>')
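If it helps to see the IPython-derived timing classes in use, the following sketch (mine, not in the patch; it assumes the TimeitResult class above is in scope) feeds timeit.repeat results into TimeitResult, whose __str__ formats them via format_time defined further down.

import timeit

loops, repeat = 1000, 5
all_runs = timeit.repeat("sum(range(100))", number=loops, repeat=repeat)
timings = [run / loops for run in all_runs]  # per-loop times, as TimeitResult expects
print(TimeitResult(loops, repeat, min(timings), max(timings), all_runs, 3))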
+
+
+class FPSResult(object):
+    """
+    base https://github.com/ipython/ipython/blob/339c0d510a1f3cb2158dd8c6e7f4ac89aa4c89d8/IPython/core/magics/execution.py#L55
+    """
+
+    def __init__(self, loops, repeat, best, worst, all_runs, precision):
+        self.loops = loops
+        self.repeat = repeat
+        self.best = 1 / best
+        self.worst = 1 / worst
+        self.all_runs = all_runs
+        self._precision = precision
+        self.fps = [1 / dt for dt in all_runs]
+        self.unit = "fps"
+
+    @property
+    def average(self):
+        return math.fsum(self.fps) / len(self.fps)
+
+    @property
+    def stdev(self):
+        mean = self.average
+        return (math.fsum([(x - mean) ** 2 for x in self.fps]) / len(self.fps)) ** 0.5
+
+    def __str__(self):
+        pm = '+-'
+        if hasattr(sys.stdout, 'encoding') and sys.stdout.encoding:
+            try:
+                u'\xb1'.encode(sys.stdout.encoding)
+                pm = u'\xb1'
+            except:
+                pass
+        return "min:{best} max:{worst} mean:{mean} {pm} {std} per loop (mean {pm} std. dev. of {runs} run{run_plural}, {loops:,} loop{loop_plural} each)".format(
+            pm=pm,
+            runs=self.repeat,
+            loops=self.loops,
+            loop_plural="" if self.loops == 1 else "s",
+            run_plural="" if self.repeat == 1 else "s",
+            mean="%.*g%s" % (self._precision, self.average, self.unit),
+            std="%.*g%s" % (self._precision, self.stdev, self.unit),
+            best="%.*g%s" % (self._precision, self.best, self.unit),
+            worst="%.*g%s" % (self._precision, self.worst, self.unit),
+        )
+
+    def _repr_pretty_(self, p, cycle):
+        unic = self.__str__()
+        p.text(u'<FPSResult : ' + unic + u'>')
+
+
+def format_time(timespan, precision=3):
+    """
+    https://github.com/ipython/ipython/blob/339c0d510a1f3cb2158dd8c6e7f4ac89aa4c89d8/IPython/core/magics/execution.py#L1473
+    Formats the timespan in a human readable form
+    """
+
+    if timespan >= 60.0:
+        # we have more than a minute, format that in a human readable form
+        # Idea from http://snipplr.com/view/5713/
+        parts = [("d", 60 * 60 * 24), ("h", 60 * 60), ("min", 60), ("s", 1)]
+        time = []
+        leftover = timespan
+        for suffix, length in parts:
+            value = int(leftover / length)
+            if value > 0:
+                leftover = leftover % length
+                time.append(u'%s%s' % (str(value), suffix))
+            if leftover < 1:
+                break
+        return " ".join(time)
+
+    # Unfortunately the unicode 'micro' symbol can cause problems in
+    # certain terminals.
+    # See bug: https://bugs.launchpad.net/ipython/+bug/348466
+    # Try to prevent crashes by being more secure than it needs to
+    # E.g. eclipse is able to print a µ, but has no sys.stdout.encoding set.
+    units = [u"s", u"ms", u'us', "ns"]  # the safe value
+    if hasattr(sys.stdout, 'encoding') and sys.stdout.encoding:
+        try:
+            u'\xb5'.encode(sys.stdout.encoding)
+            units = [u"s", u"ms", u'\xb5s', "ns"]
+        except:
+            pass
+    scaling = [1, 1e3, 1e6, 1e9]
+
+    if timespan > 0.0:
+        order = min(-int(math.floor(math.log10(timespan)) // 3), 3)
+    else:
+        order = 3
+    return u"%.*g %s" % (precision, timespan * scaling[order], units[order])
+
+
+def ellipse_model(data, y, f):
+    """
+    There is no need to make this process a function, since making the process a function will slow it down a little by calling it.
+    The results may be slightly different from the lambda version due to calculation errors derived from float types, but the calculation results are virtually the same.
+    a = 1.0,b = P[0],c = P[1],d = P[2],e = P[3],f = P[4]
+    :param data:
+    :param y: np.c_[d, e, a, c, b]
+    :param f: f == P[4, 0]
+    :return: this_return == np.array([ellipse_model(x, y) for (x, y) in data ])
+    """
+    return data.dot(y) + f
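Before the batched RANSAC code below, the least-squares core for a single sample may help. With a = 1.0 fixed, the conic x^2 + b*x*y + c*y^2 + d*x + e*y + f = 0 is linear in P = (b, c, d, e, f) with right-hand side -x^2; fit_conic_single is a hypothetical one-hypothesis reference whose column order mirrors datamod_rng_swap.

import numpy as np

def fit_conic_single(points):
    x = points[:, 0].astype(np.float64)
    y = points[:, 1].astype(np.float64)
    # Columns (x*y, y**2, x, y, 1) mirror datamod_rng_swap's [4, 3, 0, 1, 5] layout.
    A = np.stack([x * y, y ** 2, x, y, np.ones_like(x)], axis=1)
    P, *_ = np.linalg.lstsq(A, -x ** 2, rcond=None)
    return P  # (b, c, d, e, f), the layout fit_rotated_ellipse consumes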
+
+
+# @profile
+def fit_rotated_ellipse_ransac(data: np.ndarray, rng: np.random.Generator, iter=100, sample_num=10, offset=80  # 80.0, 10, 80
+                               ):  # before changing these values, please read up on the ransac algorithm
+    # However if you want to change any value just know that higher iterations will make processing frames slower
+    effective_sample = None
+
+    # The array contents do not change during the loop, so only one call is needed.
+    # They say len is faster than shape.
+    # Reference url: https://stackoverflow.com/questions/35547853/what-is-faster-python3s-len-or-numpys-shape
+    len_data = len(data)
+
+    if len_data < sample_num:
+        return None
+
+    # Type of calculation result
+    ret_dtype = np.float64
+
+    # Sorts a random number array of size (iter,len_data). After sorting, returns the index of sample_num random numbers before sorting.
+    # If the array size is less than about 100, this is faster than rng.choice.
+    rng_sample = rng.random((iter, len_data)).argsort()[:, :sample_num]
+    # or
+    # I don't see any advantage to doing this.
+    # rng_sample = np.asarray(rng.random((iter, len_data)).argsort()[:, :sample_num], dtype=np.int32)
+
+    # I don't think it looks beautiful.
+    # x,y,x**2,y**2,x*y,1,-1*x**2
+    datamod = np.concatenate(
+        [data, data ** 2, (data[:, 0] * data[:, 1])[:, np.newaxis], np.ones((len_data, 1), dtype=ret_dtype),
+         (-1 * data[:, 0] ** 2)[:, np.newaxis]], axis=1,
+        dtype=ret_dtype)
+
+    datamod_slim = np.array(datamod[:, :5], dtype=ret_dtype)
+
+    datamod_rng = datamod[rng_sample]
+    datamod_rng6 = datamod_rng[:, :, 6]
+    datamod_rng_swap = datamod_rng[:, :, [4, 3, 0, 1, 5]]
+    datamod_rng_swap_trans = datamod_rng_swap.transpose((0, 2, 1))
+
+    # These two lines are one of the bottlenecks
+    datamod_rng_5x5 = np.matmul(datamod_rng_swap_trans, datamod_rng_swap)
+    datamod_rng_p5smp = np.matmul(np.linalg.inv(datamod_rng_5x5), datamod_rng_swap_trans)
+
+    datamod_rng_p = np.matmul(datamod_rng_p5smp, datamod_rng6[:, :, np.newaxis]).reshape((-1, 5))
+
+    # I don't think it looks beautiful.
+    ellipse_y_arr = np.asarray(
+        [datamod_rng_p[:, 2], datamod_rng_p[:, 3], np.ones(len(datamod_rng_p)), datamod_rng_p[:, 1], datamod_rng_p[:, 0]], dtype=ret_dtype)
+
+    ellipse_data_arr = ellipse_model(datamod_slim, ellipse_y_arr, np.asarray(datamod_rng_p[:, 4])).transpose((1, 0))
+    ellipse_data_abs = np.abs(ellipse_data_arr)
+    ellipse_data_index = np.argmax(np.sum(ellipse_data_abs < offset, axis=1), axis=0)
+    effective_data_arr = ellipse_data_arr[ellipse_data_index]
+    effective_sample_p_arr = datamod_rng_p[ellipse_data_index]
+
+    return fit_rotated_ellipse(effective_data_arr, effective_sample_p_arr)
+
+
+# @profile
+def fit_rotated_ellipse(data, P):
+    a = 1.0
+    b = P[0]
+    c = P[1]
+    d = P[2]
+    e = P[3]
+    f = P[4]
+    # The cost of trigonometric functions is high.
+    theta = 0.5 * np.arctan(b / (a - c), dtype=np.float64)
+    theta_sin = np.sin(theta, dtype=np.float64)
+    theta_cos = np.cos(theta, dtype=np.float64)
+    tc2 = theta_cos ** 2
+    ts2 = theta_sin ** 2
+    b_tcs = b * theta_cos * theta_sin
+
+    # Do the calculation only once
+    cxy = b ** 2 - 4 * a * c
+    cx = (2 * c * d - b * e) / cxy
+    cy = (2 * a * e - b * d) / cxy
+
+    # I just want to clear things up around here.
+    cu = a * cx ** 2 + b * cx * cy + c * cy ** 2 - f
+    cu_r = np.array([(a * tc2 + b_tcs + c * ts2), (a * ts2 - b_tcs + c * tc2)])
+    wh = np.sqrt(cu / cu_r)
+
+    w, h = wh[0], wh[1]
+
+    error_sum = np.sum(data)
+    # print("fitting error = %.3f" % (error_sum))
+
+    return (cx, cy, w, h, theta)
+
+
+#HSF
+
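A short usage sketch for the (cx, cy, w, h, theta) tuple returned above (draw_fitted_ellipse is hypothetical): cv2.ellipse takes the semi-axes directly as its axes argument and expects the rotation in degrees rather than radians.

import cv2
import numpy as np

def draw_fitted_ellipse(img, cx, cy, w, h, theta, color=(0, 255, 0)):
    # w and h are semi-axis lengths; theta is the rotation in radians.
    cv2.ellipse(img, (int(cx), int(cy)), (int(w), int(h)),
                np.degrees(theta), 0, 360, color, 1)
    return img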
 # from line_profiler_pycharm import profile
 
 video_path = "ezgif.com-gif-maker.avi"
@@ -703,7 +971,7 @@ class HSRAC_cls(object):
         ret, frame = self.cap.read()
         if ret:
             # I have set it to grayscale (1ch) just in case, but if the frame is 1ch, this line can be commented out.
-            self.current_image_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
+            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
             return True
         return False
 
@@ -790,21 +1058,21 @@ class HSRAC_cls(object):
                         # blink
                         pass
                     else:
-                        pass
-                        # if not self.center_correct.setup_comp:
-                        #     self.center_correct.init_array(gray_frame.shape, self.center_q1.quartile_1, radius)
+                        #pass
+                        if not self.center_correct.setup_comp:
+                            self.center_correct.init_array(gray_frame.shape, self.center_q1.quartile_1, radius)
 
-                        # center_x, center_y = self.center_correct.correction(gray_frame, center_x, center_y)
-                        # # Define the center point and radius
-                        # center_xy = (center_x, center_y)
-                        # upper_x = center_x + radius
-                        # lower_x = center_x - radius
-                        # upper_y = center_y + radius
-                        # lower_y = center_y - radius
-                        # # Crop the image using the calculated bounds
-                        # cropped_image = gray_frame[lower_y:upper_y, lower_x:upper_x]
+                        center_x, center_y = self.center_correct.correction(gray_frame, center_x, center_y)
+                        # Define the center point and radius
+                        center_xy = (center_x, center_y)
+                        upper_x = center_x + radius
+                        lower_x = center_x - radius
+                        upper_y = center_y + radius
+                        lower_y = center_y - radius
+                        # Crop the image using the calculated bounds
+                        cropped_image = gray_frame[lower_y:upper_y, lower_x:upper_x]
                 if imshow_enable or save_video:
-                    cv2.circle(frame, (orig_x, orig_y), 10, (0, 0, 255), -1)
+                    cv2.circle(frame, (orig_x, orig_y), 6, (0, 255, 255), -1)
                     cv2.circle(frame, (center_x, center_y), 3, (255, 0, 0), -1)
             # If you want to update response_max. it may be more cost-effective to rewrite response_list in the following way
             # https://stackoverflow.com/questions/42771110/fastest-way-to-left-cycle-a-numpy-array-like-pop-push-for-a-queue
@@ -838,13 +1106,112 @@ class HSRAC_cls(object):
             else:
                 self.now_modeo = self.cv_modeo[1]
 
-        return center_x, center_y, frame
+        # run ransac on the HSF crop
+        try:
+            kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
+            thresh_add = 10
+            rng = np.random.default_rng()
+
+            f = False
+
+            # Convert the image to grayscale, and set up thresholding. Thresholds here are basically a
+            # low-pass filter that will set any pixel < the threshold value to 0. Thresholding is user
+            # configurable in this utility as we're dealing with variable lighting amounts/placement, as
+            # well as camera positioning and lensing. Therefore everyone's cutoff may be different.
+            #
+            # The goal of thresholding settings is to make sure we can ONLY see the pupil. This is why we
+            # crop the image earlier; it gives us less possible dark area to get confused about in the
+            # next step.
+            frame = cropped_image
+            # For measuring processing time of image processing
+            # Crop first to reduce the amount of data to process.
+            # frame = frame[0:len(frame) - 5, :]
+
+            # To reduce the processing data, first convert to 1-channel and then blur.
+            # The processing results were the same when I swapped the order of blurring and 1-channelization.
+            try:
+                frame = cv2.GaussianBlur(frame, (5, 5), 0)
+            except:
+                pass
+
+            # this will need to be adjusted every time hardware is changed (brightness of IR, camera position, etc.)
+            min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(frame)
+
+            maxloc0_hf, maxloc1_hf = int(0.5 * max_loc[0]), int(0.5 * max_loc[1])
+
+            # crop a 15% square around min_loc
+            # frame = frame[max_loc[1] - maxloc1_hf:max_loc[1] + maxloc1_hf,
+            #         max_loc[0] - maxloc0_hf:max_loc[0] + maxloc0_hf]
+
+            threshold_value = min_val + thresh_add
+            _, thresh = cv2.threshold(frame, threshold_value, 255, cv2.THRESH_BINARY)
+            try:
+                opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)
+                closing = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, kernel)
+                th_frame = 255 - closing
+            except:
+                # I want to eliminate try here because try tends to be slow in execution.
+                th_frame = 255 - frame
+
+            detect_start_time = timeit.default_timer()
+            contours, _ = cv2.findContours(th_frame, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
+            hull = []
+            # This way is faster than contours[i]
+            # But maybe this one is faster. hull = [cv2.convexHull(cnt, False) for cnt in contours]
+            for cnt in contours:
+                hull.append(cv2.convexHull(cnt, False))
+            if not hull:
+                # If empty, go to next loop
+                pass
+            try:
+
+                cnt = sorted(hull, key=cv2.contourArea)
+                maxcnt = cnt[-1]
+                # ellipse = cv2.fitEllipse(maxcnt)
+                ransac_data = fit_rotated_ellipse_ransac(maxcnt.reshape(-1, 2), rng)
+                if ransac_data is None:
+                    # ransac_data is None==maxcnt.shape[0]