From 7e1c138136774fb09ccf875acf9c3e6865f8d964 Mon Sep 17 00:00:00 2001
From: PallasNeko <124042774+PallasNeko@users.noreply.github.com>
Date: Thu, 16 Mar 2023 02:15:55 +0900
Subject: [PATCH] Improve hsrac using bench_hsrac

---
 EyeTrackApp/eyetrack_settings.backup |   1 -
 EyeTrackApp/haar_surround_feature.py |   2 +-
 EyeTrackApp/hsrac.py                 | 599 ++++++++++++++------------
 3 files changed, 312 insertions(+), 290 deletions(-)
 delete mode 100644 EyeTrackApp/eyetrack_settings.backup

diff --git a/EyeTrackApp/eyetrack_settings.backup b/EyeTrackApp/eyetrack_settings.backup
deleted file mode 100644
index 5282ce9..0000000
--- a/EyeTrackApp/eyetrack_settings.backup
+++ /dev/null
@@ -1 +0,0 @@
-{"version": 1, "right_eye": {"rotation_angle": 168, "roi_window_x": 225, "roi_window_y": 161, "roi_window_w": 101, "roi_window_h": 68, "focal_length": 30, "capture_source": "http://192.168.1.43:4747/video", "gui_circular_crop": false}, "left_eye": {"rotation_angle": 37, "roi_window_x": 146, "roi_window_y": 108, "roi_window_w": 115, "roi_window_h": 110, "focal_length": 30, "capture_source": "http://192.168.0.62", "gui_circular_crop": false}, "settings": {"gui_flip_x_axis_left": false, "gui_flip_x_axis_right": false, "gui_flip_y_axis": false, "gui_RANSAC3D": true, "gui_HSF": true, "gui_BLOB": false, "gui_BLINK": true, "gui_HSRAC": true, "gui_HSF_radius": 15, "gui_min_cutoff": "0.0004", "gui_speed_coefficient": "0.9", "gui_osc_address": "127.0.0.1", "gui_osc_port": 9000, "gui_osc_receiver_port": 9001, "gui_osc_recenter_address": "/avatar/parameters/etvr_recenter", "gui_osc_recalibrate_address": "/avatar/parameters/etvr_recalibrate", "gui_blob_maxsize": 25.0, "gui_blob_minsize": 10.0, "gui_recenter_eyes": false, "gui_eye_falloff": false, "tracker_single_eye": 2, "gui_blink_sync": false, "gui_threshold": 65, "gui_HSRACP": 1, "gui_RANSAC3DP": 2, "gui_HSFP": 3, "gui_BLOBP": 4, "gui_skip_autoradius": true, "gui_thresh_add": 20, "gui_update_check": true}, "eye_display_id": 0}
\ No newline at end of file

diff --git a/EyeTrackApp/haar_surround_feature.py b/EyeTrackApp/haar_surround_feature.py
index 750ebae..73c8150 100644
--- a/EyeTrackApp/haar_surround_feature.py
+++ b/EyeTrackApp/haar_surround_feature.py
@@ -13,7 +13,7 @@ from utils.img_utils import safe_crop
 video_path = "ezgif.com-gif-maker.avi"
 imshow_enable = False
-calc_print_enable = True
+calc_print_enable = False
 save_video = False
 skip_autoradius = False
 skip_blink_detect = False

diff --git a/EyeTrackApp/hsrac.py b/EyeTrackApp/hsrac.py
index 307ab3c..968adee 100644
--- a/EyeTrackApp/hsrac.py
+++ b/EyeTrackApp/hsrac.py
@@ -1,26 +1,27 @@
+import math
 import timeit
+from functools import lru_cache
 
 import cv2
 import numpy as np
+from numpy.linalg import _umath_linalg
 
 from haar_surround_feature import (
     AutoRadiusCalc,
     BlinkDetector,
-    CvParameters, conv_int,
-    frameint_get_xy_step,
+    CvParameters, conv_int,
+    get_frameint_empty_array,
     get_hsf_center,
 )
 from utils.img_utils import safe_crop
 from utils.misc_utils import clamp
-
 # from line_profiler_pycharm import profile
 
 #RANSAC
 thresh_add = 10
-imshow_enable = True
-calc_print_enable = True
+imshow_enable = False
+calc_print_enable = False
 save_video = False
 skip_autoradius = False
 skip_blink_detect = False
@@ -28,6 +29,7 @@ skip_blink_detect = False
 # cache param
 lru_maxsize_vvs = 16
 lru_maxsize_vs = 64
+lru_maxsize_s = 128
 # CV param
 default_radius = 20
 auto_radius_range = (default_radius - 10, default_radius + 10)  # (10,30)
@@ -37,24 +39,48 @@ blink_init_frames = 60 * 3  # 60fps*3sec,Number of blink statistical frames
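A note on the new `lru_maxsize_s = 128` bound: this commit leans on `functools.lru_cache` as a buffer pool keyed on array shapes, so per-frame scratch arrays are allocated once and then recycled. A minimal sketch of the pattern follows; the function name and shapes are illustrative, not from the patch:

    import numpy as np
    from functools import lru_cache

    @lru_cache(maxsize=128)  # mirrors lru_maxsize_s; sized to the number of distinct shapes seen
    def get_work_buffers(h, w):
        # Allocated once per distinct (h, w); every later call returns the same arrays.
        # Callers must treat the contents as scratch that is overwritten each frame.
        return np.empty((h, w), dtype=np.uint8), np.empty((h, w), dtype=np.float64)

    buf_u8, buf_f64 = get_work_buffers(480, 640)
    assert get_work_buffers(480, 640)[0] is buf_u8  # cache hit: no new allocation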
 default_step = (5, 5)  # bigger the steps,lower the processing time! ofc acc also takes an impact
 
 
-def ellipse_model(data, y, f):
-    """
-    There is no need to make this process a function, since making the process a function will slow it down a little by calling it.
-    The results may be slightly different from the lambda version due to calculation errors derived from float types, but the calculation results are virtually the same.
-    a = 1.0,b = P[0],c = P[1],d = P[2],e = P[3],f = P[4]
-    :param data:
-    :param y: np.c_[d, e, a, c, b]
-    :param f: f == P[4, 0]
-    :return: this_return == np.array([ellipse_model(x, y) for (x, y) in data ])
-    """
-    return data.dot(y) + f
+@lru_cache(maxsize=lru_maxsize_s)
+def get_ransac_empty_array_new(iter_num, sample_num, len_data):
+    # Reduce per-call array allocation: hand out preallocated empty arrays and recycle them via lru_cache.
+    use_dtype = np.float64
+    dm_rng = np.empty((iter_num, sample_num, 7), dtype=use_dtype)
+    dm_rng_swap = np.empty((iter_num, sample_num, 5), dtype=use_dtype)
+    dm_rng_swap_trans = dm_rng_swap.transpose((0, 2, 1))
+    # dm_rng_swap_trans = np.empty((iter_num, 5, sample_num), dtype=use_dtype)
+    dm_rng_5x5 = np.empty((iter_num, 5, 5), dtype=use_dtype)
+    dm_rng_p5smp = np.empty((iter_num, 5, sample_num), dtype=use_dtype)
+    dm_rng_p = np.empty((iter_num, 5), dtype=use_dtype)
+    dm_rng_p_npaxis = dm_rng_p[:, :, np.newaxis]
+    ellipse_y_arr = np.empty((iter_num, 5), dtype=use_dtype)
+    ellipse_y_arr[:, 2] = 1
+    swap_index = np.array([4, 3, 0, 1, 5], dtype=np.uint8)
+    dm_brod = np.broadcast_to(dm_rng_p[:, 4, np.newaxis], (iter_num, len_data))
+    dm_rng_six = dm_rng[:, :, 6, np.newaxis]
+    dm_rng_p_24 = dm_rng_p[:, 2:4]
+    dm_rng_p_10 = dm_rng_p[:, 1::-1]
+    el_y_arr_2 = ellipse_y_arr[:, :2]
+    el_y_arr_3 = ellipse_y_arr[:, 3:]
+    datamod = np.empty((len_data, 7), dtype=use_dtype)  # np.empty((len(data), 7), dtype=ret_dtype)
+    datamod[:, 5] = 1
+    datamod_b = datamod[:, :5]  # .T
+    rdm_index_init_arr = np.empty((iter_num, len_data), dtype=np.uint16)
+    rdm_index_init_arr[:, :] = np.arange(len_data, dtype=np.uint16)
+    rdm_index = np.empty((iter_num, len_data), dtype=np.uint16)
+    rdm_index_smpnum = rdm_index[:, :sample_num]
+    ellipse_data_arr = np.empty((iter_num, len_data), dtype=use_dtype)
+    th_abs = np.empty((iter_num, len_data), dtype=use_dtype)
+    dm_data = datamod[:, :2]  # = data
+    dm_p2 = datamod[:, 2:4]  # = data * data
+    dm_mul = datamod[:, 4]  # = data[:, 0] * data[:, 1]
+    dm_neg = datamod[:, 6]  # = -datamod[:, 2]
+    inv_ext = np.linalg.linalg.get_linalg_error_extobj(np.linalg.linalg._raise_linalgerror_singular)
+    return dm_rng, dm_rng_swap, dm_rng_swap_trans, dm_rng_5x5, dm_rng_p5smp, dm_rng_p, dm_rng_p_npaxis, ellipse_y_arr, swap_index, dm_brod, dm_rng_six, dm_rng_p_24, dm_rng_p_10, el_y_arr_2, el_y_arr_3, datamod, datamod_b, dm_data, dm_p2, dm_mul, dm_neg, rdm_index_init_arr, rdm_index, rdm_index_smpnum, ellipse_data_arr, th_abs, inv_ext
 
 
 # @profile
-def fit_rotated_ellipse_ransac(data: np.ndarray, rng: np.random.Generator, iter=100, sample_num=10, offset=80  # 80.0, 10, 80
-                               ):  # before changing these values, please read up on the ransac algorithm
+def fit_rotated_ellipse_ransac(data: np.ndarray, sfc: np.random.Generator, iter_num=100, sample_num=10, offset=80):
+    # Before changing these values, please read up on the RANSAC algorithm.
     # However if you want to change any value just know that higher iterations will make processing frames slower
-    effective_sample = None
 
     # The array contents do not change during the loop, so only one call is needed.
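For readers following the RANSAC rewrite: the buffers returned above feed a single `cv2.gemm` call (next hunk) that scores every candidate conic against every data point at once. A standalone sketch of that scoring trick, with made-up sizes and fresh arrays instead of the recycled buffers:

    import cv2
    import numpy as np

    iter_num, len_data = 100, 50                    # candidates x data points (illustrative)
    y = np.random.rand(iter_num, 5)                 # one candidate's conic coefficients per row
    d = np.random.rand(len_data, 5)                 # one data point's monomials per row
    f = np.broadcast_to(np.random.rand(iter_num, 1), (iter_num, len_data)).copy()

    # dst = 1.0 * (y @ d.T) + 1.0 * f; GEMM_2_T transposes the second operand,
    # so residuals[i, j] is candidate i's algebraic residual at point j.
    residuals = cv2.gemm(y, d, 1.0, f, 1.0, flags=cv2.GEMM_2_T)
    assert np.allclose(residuals, y @ d.T + f)

    # Inlier counting as in the patch: |residual| < offset, summed per candidate.
    inliers = (np.abs(residuals) < 80).sum(axis=1)
    best = int(np.argmax(inliers))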
     # They say len is faster than shape.
@@ -64,81 +90,95 @@ def fit_rotated_ellipse_ransac(data: np.ndarray, rng: np.random.Generator, iter=
     if len_data < sample_num:
         return None
 
-    # Type of calculation result
-    ret_dtype = np.float64
+    dm_rng, dm_rng_swap, dm_rng_swap_trans, dm_rng_5x5, dm_rng_p5smp, dm_rng_p, dm_rng_p_npaxis, ellipse_y_arr, swap_index, dm_brod, dm_rng_six, dm_rng_p_24, dm_rng_p_10, el_y_arr_2, el_y_arr_3, datamod, datamod_b, dm_data, dm_p2, dm_mul, dm_neg, rdm_index_init_arr, rdm_index, rdm_index_smpnum, ellipse_data_arr, th_abs, inv_ext = get_ransac_empty_array_new(
+        iter_num, sample_num, len_data)
 
-    # Sorts a random number array of size (iter,len_data). After sorting, returns the index of sample_num random numbers before sorting.
-    # If the array size is less than about 100, this is faster than rng.choice.
-    rng_sample = rng.random((iter, len_data)).argsort()[:, :sample_num]
+    dm_data[:, :] = data  # [:]
+    dm_p2[:, :] = data * data
+    dm_mul[:] = data[:, 0] * data[:, 1]
+    dm_neg[:] = -dm_p2[:, 0]  # -1 * data[:, 0] ** 2#
+
+    sfc.permuted(rdm_index_init_arr, axis=1, out=rdm_index)
+
+    # np.take replaces a[ind,:] and is 3-4 times faster, https://gist.github.com/rossant/4645217
+    # a.take() is faster than np.take(a)
+    datamod.take(rdm_index_smpnum, axis=0, mode="clip", out=dm_rng)
+
+    dm_rng_swap[:, :, :] = dm_rng[:, :, swap_index]
     # or
-    # I don't see any advantage to doing this.
-    # rng_sample = np.asarray(rng.random((iter, len_data)).argsort()[:, :sample_num], dtype=np.int32)
+    # dm_rng.take(swap_index, axis=2, mode="clip", out=dm_rng_swap)
+    # or
+    # dm_rng_swap = np.take(dm_rng,[4, 3, 0, 1, 5],axis=2)
 
-    # I don't think it looks beautiful.
-    # x,y,x**2,y**2,x*y,1,-1*x**2
-    datamod = np.concatenate(
-        [data, data ** 2, (data[:, 0] * data[:, 1])[:, np.newaxis], np.ones((len_data, 1), dtype=ret_dtype),
-         (-1 * data[:, 0] ** 2)[:, np.newaxis]], axis=1,
-        dtype=ret_dtype)
+    np.matmul(dm_rng_swap_trans, dm_rng_swap, out=dm_rng_5x5)
+    # np.linalg.solve(np.matmul(dm_rng_swap_trans, dm_rng_swap), dm_rng_swap_trans)  # solve is slow https://github.com/bogovicj/JaneliaMLCourse/issues/1
+    _umath_linalg.inv(dm_rng_5x5, signature='d->d',
+                      extobj=inv_ext, out=dm_rng_5x5)
+    np.matmul(dm_rng_5x5, dm_rng_swap_trans, out=dm_rng_p5smp)
 
-    datamod_slim = np.array(datamod[:, :5], dtype=ret_dtype)
+    np.matmul(dm_rng_p5smp, dm_rng_six, out=dm_rng_p_npaxis)
 
-    datamod_rng = datamod[rng_sample]
-    datamod_rng6 = datamod_rng[:, :, 6]
-    datamod_rng_swap = datamod_rng[:, :, [4, 3, 0, 1, 5]]
-    datamod_rng_swap_trans = datamod_rng_swap.transpose((0, 2, 1))
+    el_y_arr_2[:, :] = dm_rng_p_24
+    el_y_arr_3[:, :] = dm_rng_p_10
 
-    # These two lines are one of the bottlenecks
-    datamod_rng_5x5 = np.matmul(datamod_rng_swap_trans, datamod_rng_swap)
-    datamod_rng_p5smp = np.matmul(np.linalg.inv(datamod_rng_5x5), datamod_rng_swap_trans)
+    cv2.gemm(ellipse_y_arr, datamod_b, 1.0, dm_brod, 1.0, dst=ellipse_data_arr, flags=cv2.GEMM_2_T)
 
-    datamod_rng_p = np.matmul(datamod_rng_p5smp, datamod_rng6[:, :, np.newaxis]).reshape((-1, 5))
+    np.abs(ellipse_data_arr, out=th_abs)
+    cv2.threshold(th_abs, offset, 1.0, cv2.THRESH_BINARY_INV, dst=th_abs)
+    ellipse_data_index = \
+        cv2.minMaxLoc(cv2.reduce(th_abs, 1, cv2.REDUCE_SUM))[3][1]
 
-    # I don't think it looks beautiful.
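The `fit_rotated_ellipse` rewrite in the hunk that follows extracts the ellipse center, axes, and rotation from the conic coefficients in closed form with scalar `math` calls instead of NumPy. A quick numeric sanity check of those exact formulas against a hand-built conic (this is a check, not code from the patch):

    import math

    # 4*(x - 1)^2 + (y - 2)^2 = 4  ->  x^2 + 0.25*y^2 - 2x - y + 1 = 0 (a normalized to 1)
    a, b, c, d, e, f = 1.0, 0.0, 0.25, -2.0, -1.0, 1.0
    theta = 0.5 * math.atan(b / (a - c))
    ts, tc = math.sin(theta), math.cos(theta)
    tc2, ts2, b_tcs = tc * tc, ts * ts, b * tc * ts
    cxy = b * b - 4 * a * c
    cx = (2 * c * d - b * e) / cxy                   # -> 1.0
    cy = (2 * a * e - b * d) / cxy                   # -> 2.0
    cu = c * cy * cy + cx * (a * cx + b * cy) - f
    w = math.sqrt(cu / (a * tc2 + b_tcs + c * ts2))  # -> 1.0 (x semi-axis)
    h = math.sqrt(cu / (a * ts2 - b_tcs + c * tc2))  # -> 2.0 (y semi-axis)
    print(cx, cy, w, h, theta)                       # 1.0 2.0 1.0 2.0 0.0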
-    ellipse_y_arr = np.asarray(
-        [datamod_rng_p[:, 2], datamod_rng_p[:, 3], np.ones(len(datamod_rng_p)), datamod_rng_p[:, 1], datamod_rng_p[:, 0]], dtype=ret_dtype)
+    # error_num = ellipse_data_arr[ellipse_data_index].sum()
+    error_num = cv2.sumElems(ellipse_data_arr[ellipse_data_index])[0]
+    effective_sample_p_arr = dm_rng_p[ellipse_data_index].tolist()
 
-    ellipse_data_arr = ellipse_model(datamod_slim, ellipse_y_arr, np.asarray(datamod_rng_p[:, 4])).transpose((1, 0))
-    ellipse_data_abs = np.abs(ellipse_data_arr)
-    ellipse_data_index = np.argmax(np.sum(ellipse_data_abs < offset, axis=1), axis=0)
-    effective_data_arr = ellipse_data_arr[ellipse_data_index]
-    effective_sample_p_arr = datamod_rng_p[ellipse_data_index]
-
-    return fit_rotated_ellipse(effective_data_arr, effective_sample_p_arr)
+    return fit_rotated_ellipse(error_num, effective_sample_p_arr)
 
 
 # @profile
 def fit_rotated_ellipse(data, P):
     a = 1.0
-    b = P[0]
-    c = P[1]
-    d = P[2]
-    e = P[3]
-    f = P[4]
-    # The cost of trigonometric functions is high.
-    theta = 0.5 * np.arctan(b / (a - c), dtype=np.float64)
-    theta_sin = np.sin(theta, dtype=np.float64)
-    theta_cos = np.cos(theta, dtype=np.float64)
-    tc2 = theta_cos ** 2
-    ts2 = theta_sin ** 2
+    # b, c, d, e, f = P[0], P[1], P[2], P[3], P[4]
+    b, c, d, e = P[0], P[1], P[2], P[3]
+    theta = 0.5 * math.atan(b / (a - c))  # math.atan2(b, a - c)
+    theta_sin, theta_cos = math.sin(theta), math.cos(theta)
+    tc2 = theta_cos * theta_cos
+    ts2 = theta_sin * theta_sin
     b_tcs = b * theta_cos * theta_sin
-
-    # Do the calculation only once
-    cxy = b ** 2 - 4 * a * c
+    cxy = b * b - 4 * a * c
     cx = (2 * c * d - b * e) / cxy
     cy = (2 * a * e - b * d) / cxy
-
-    # I just want to clear things up around here.
-    cu = a * cx ** 2 + b * cx * cy + c * cy ** 2 - f
-    cu_r = np.array([(a * tc2 + b_tcs + c * ts2), (a * ts2 - b_tcs + c * tc2)])
-    wh = np.sqrt(cu / cu_r)
-
-    w, h = wh[0], wh[1]
-
-    error_sum = np.sum(data)
+    # cu = a * cx * cx + b * cx * cy + c * cy * cy - P[4]
+    cu = c * cy * cy + cx * (a * cx + b * cy) - P[4]
+    # here: https://stackoverflow.com/questions/327002/which-is-faster-in-python-x-5-or-math-sqrtx
+    # and : https://gist.github.com/zed/783011
+    w = math.sqrt(cu / (a * tc2 + b_tcs + c * ts2))
+    h = math.sqrt(cu / (a * ts2 - b_tcs + c * tc2))
+    error_sum = data  # sum(data)
     # print("fitting error = %.3f" % (error_sum))
 
-    return (cx, cy, w, h, theta)
+    return cx, cy, w, h, theta
+
+
+@lru_cache(lru_maxsize_vvs)
+def get_ransac_frame(frame_shape):
+    return np.empty(frame_shape, dtype=np.uint8), np.empty(frame_shape, dtype=np.uint8)  # np.float64)
+
+
+@lru_cache(lru_maxsize_s)
+def get_center_noclamp(center_xy, radius):
+    center_x, center_y = center_xy
+    upper_x = center_x + radius
+    lower_x = center_x - radius
+    upper_y = center_y + radius
+    lower_y = center_y - radius
+
+    ransac_upper_x = center_x + max(20, radius)
+    ransac_lower_x = center_x - max(20, radius)
+    ransac_upper_y = center_y + max(20, radius)
+    ransac_lower_y = center_y - max(20, radius)
+    ransac_xy_offset = (ransac_lower_x, ransac_lower_y)
+    return center_x, center_y, upper_x, lower_x, upper_y, lower_y, ransac_lower_x, ransac_lower_y, ransac_upper_x, ransac_upper_y, ransac_xy_offset
 
 
 class HSRAC_cls(object):
@@ -165,11 +205,17 @@ class HSRAC_cls(object):
         self.timedict = {"to_gray": [], "int_img": [], "conv_int": [], "crop": [], "total_cv": []}
 
         # ransac
-        self.rng = np.random.default_rng()
-
+        self.sfc = np.random.default_rng(np.random.SFC64())
+
         # self.kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
         # or
-        self.kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3,3))
+        # https://stackoverflow.com/questions/31025368/erode-is-too-slow-opencv
+        self.kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
+
+        self.gauss_k = cv2.getGaussianKernel(5, 2)
+        # cv2.getGaussianKernel(kernel size, sigma)
+        # Increasing the kernel size improves accuracy but slows down performance.
+        # Increasing sigma improves accuracy a little, but has less effect than kernel size.
 
     def open_video(self, video_path):
         # Temporary implementation to run
@@ -193,123 +239,113 @@ class HSRAC_cls(object):
 
     def single_run(self):
         # Temporary implementation to run
-        ## default_radius = 14
-
-        # ori_frame = self.current_image.copy()# debug code
-        # cropbox=[] # debug code
-
+        if imshow_enable:
+            ori_frame = self.current_image_gray.copy()  # debug code
+
         blink_bd = False
-        frame = self.current_image_gray
         if self.now_modeo == self.cv_modeo[1]:  # adjustment of radius
-
+
             # debug print
             # if calc_print_enable:
             #     temp_radius = self.auto_radius_calc.get_radius()
             #     print('Now radius:', temp_radius)
             #     self.cvparam.radius = temp_radius
-
            self.cvparam.radius = self.auto_radius_calc.get_radius()
            if self.auto_radius_calc.adj_comp_flag:
                self.now_modeo = self.cv_modeo[2] if not skip_blink_detect else self.cv_modeo[3]
-
        radius, pad, step, hsf = self.cvparam.get_rpsh()
-
        # For measuring processing time of image processing
-        cv_start_time = timeit.default_timer()
-
+        # cv_start_time = timeit.default_timer()
+        frame = self.current_image_gray
        gray_frame = frame
-        self.timedict["to_gray"].append(timeit.default_timer() - cv_start_time)
-
+        # self.timedict["to_gray"].append(timeit.default_timer() - cv_start_time)
+
        # Calculate the integral image of the frame
-        int_start_time = timeit.default_timer()
-        # BORDER_CONSTANT is faster than BORDER_REPLICATE There seems to be almost no negative impact when BORDER_CONSTANT is used.
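The hunk below swaps the old `cv2.integral` call for a preallocated, in-place version. The reason the integral image is worth this trouble: once it exists, any box sum costs only four lookups, which is what makes the Haar-surround response cheap at every grid position. A self-contained illustration (synthetic image, not the patch's frame data):

    import cv2
    import numpy as np

    img = np.random.randint(0, 256, (60, 60), dtype=np.uint8)  # stand-in frame
    ii = cv2.integral(img, sdepth=cv2.CV_32S)                   # (61, 61); ii[y, x] = img[:y, :x].sum()
    y0, y1, x0, x1 = 10, 30, 15, 40
    box = int(ii[y1, x1]) - int(ii[y0, x1]) - int(ii[y1, x0]) + int(ii[y0, x0])
    assert box == int(img[y0:y1, x0:x1].sum())                  # O(1) box sum, any box size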
-        frame_pad = cv2.copyMakeBorder(gray_frame, pad, pad, pad, pad, cv2.BORDER_CONSTANT)
-        frame_int = cv2.integral(frame_pad)
-        self.timedict["int_img"].append(timeit.default_timer() - int_start_time)
-
+        # int_start_time = timeit.default_timer()
+        frame_pad, frame_int, inner_sum, in_p00, in_p11, in_p01, in_p10, y_ro_m, x_ro_m, y_ro_p, x_ro_p, outer_sum, out_p_temp, out_p00, out_p11, out_p01, out_p10, response_list, frame_conv, frame_conv_stride = get_frameint_empty_array(
+            gray_frame.shape, pad, step[0], step[1], hsf.r_in, hsf.r_out)
+        cv2.copyMakeBorder(gray_frame, pad, pad, pad, pad, cv2.BORDER_CONSTANT, dst=frame_pad)
+        cv2.integral(frame_pad, sum=frame_int, sdepth=cv2.CV_32S)
+
+        # self.timedict["int_img"].append(timeit.default_timer() - int_start_time)
+
         # Convolve the feature with the integral image
-        conv_int_start_time = timeit.default_timer()
-        xy_step = frameint_get_xy_step(frame_int.shape, step, pad, start_offset=None, end_offset=None)
-        frame_conv, response, center_xy = conv_int(frame_int, hsf, step, pad, xy_step)
-        self.timedict["conv_int"].append(timeit.default_timer() - conv_int_start_time)
-
-        crop_start_time = timeit.default_timer()
+        # conv_int_start_time = timeit.default_timer()
+        response, hsf_min_loc = conv_int(frame_int, hsf, inner_sum, in_p00, in_p11, in_p01, in_p10, y_ro_m, x_ro_m, y_ro_p, x_ro_p,
+                                         outer_sum, out_p_temp, out_p00, out_p11, out_p01, out_p10, response_list,
+                                         frame_conv_stride)
+        center_xy = get_hsf_center(pad, step[0], step[1], hsf_min_loc)
+        # visualization of HSF
+        # cv2.normalize(cv2.filter2D(cv2.filter2D(frame_pad, cv2.CV_64F, hsf.get_kernel()[hsf.get_kernel().shape[0]//2,:].reshape(1,-1), borderType=cv2.BORDER_CONSTANT), cv2.CV_64F, hsf.get_kernel()[:,hsf.get_kernel().shape[1]//2].reshape(-1,1), borderType=cv2.BORDER_CONSTANT),None,0,255,cv2.NORM_MINMAX,dtype=cv2.CV_8U))
+
+
+        # self.timedict["conv_int"].append(timeit.default_timer() - conv_int_start_time)
+
+        # crop_start_time = timeit.default_timer()
         # Define the center point and radius
-        center_x, center_y = center_xy
-        upper_x = center_x + radius
-        lower_x = center_x - radius
-        upper_y = center_y + radius
-        lower_y = center_y - radius
-
-        # Crop the image using the calculated bounds
-        cropped_image = safe_crop(gray_frame, lower_x, lower_y, upper_x, upper_y)
-
-        # cropbox=[clamp(val, 0, gray_frame.shape[i]) for i,val in zip([1,0,1,0],[lower_x,lower_y,upper_x,upper_y])] # debug code
-
+
+        center_x, center_y, upper_x, lower_x, upper_y, lower_y, ransac_lower_x, ransac_lower_y, ransac_upper_x, ransac_upper_y, ransac_xy_offset = get_center_noclamp(
+            center_xy, radius)
+
         if self.now_modeo == self.cv_modeo[0] or self.now_modeo == self.cv_modeo[1]:
             # If mode is first_frame or radius_adjust, record current radius and response
             self.auto_radius_calc.add_response(radius, response)
         elif self.now_modeo == self.cv_modeo[2]:
             # Statistics for blink detection
             if self.blink_detector.response_len() < blink_init_frames:
-                self.blink_detector.add_response(cv2.mean(cropped_image)[0])
-
-                upper_x = center_x + max(20,radius)
-                lower_x = center_x - max(20,radius)
-                upper_y = center_y + max(20,radius)
-                lower_y = center_y - max(20,radius)
+                self.blink_detector.add_response(cv2.mean(safe_crop(gray_frame, lower_x, lower_y, upper_x, upper_y, 1))[0])
 
                 self.center_q1.add_response(
-                    cv2.mean(safe_crop(gray_frame, lower_x, lower_y, upper_x, upper_y,keepsize=False))[
+                    cv2.mean(safe_crop(gray_frame, center_x - max(20, radius), center_y - max(20, radius), center_x + max(20, radius),
+                                       center_y + max(20, radius),
+                                       keepsize=False))[
                         0
                     ]
                 )
-
+
             else:
-
+
                 self.blink_detector.calc_thresh()
                 self.center_q1.calc_thresh()
                 self.now_modeo = self.cv_modeo[3]
         else:
-            if 0 in cropped_image.shape:  # This line may not be needed. The image will be cropped using safecrop.
-                # If shape contains 0, it is not detected well.
-                print("Something's wrong.")
-            else:
-                orig_x, orig_y = center_x, center_y
-                if self.blink_detector.enable_detect_flg:
-                    # If the average value of cropped_image is greater than response_max
-                    # (i.e., if the cropimage is whitish
-                    if self.blink_detector.detect(cv2.mean(cropped_image)[0]):
-                        # blink
-                        print("BLINK BD")
-                        blink_bd=True
-            # if imshow_enable or save_video:
-            #     cv2.circle(frame, (orig_x, orig_y), 6, (0, 0, 255), -1)
-            #     cv2.circle(ori_frame, (center_x, center_y), 7, (255, 0, 0), -1)
+            if self.blink_detector.enable_detect_flg and self.blink_detector.detect(
+                    cv2.mean(safe_crop(gray_frame, lower_x, lower_y, upper_x, upper_y, 1))[0]):
+                # If the average value of the cropped image is greater than response_max
+                # (i.e., if the cropped image is whitish), treat it as a blink.
+                print("BLINK BD")
+                blink_bd = True
+
+            # if imshow_enable or save_video:
+            #     cv2.circle(frame, (orig_x, orig_y), 6, (0, 0, 255), -1)
+            #     cv2.circle(ori_frame, (center_x, center_y), 7, (255, 0, 0), -1)
+            # If you want to update response_max, it may be more cost-effective to rewrite response_list in the following way:
+            # https://stackoverflow.com/questions/42771110/fastest-way-to-left-cycle-a-numpy-array-like-pop-push-for-a-queue
-
-        cv_end_time = timeit.default_timer()
-        self.timedict["crop"].append(cv_end_time - crop_start_time)
-        self.timedict["total_cv"].append(cv_end_time - cv_start_time)
-
-        # if calc_print_enable:
-        #     the lower the response the better the likelyhood of there being a pupil. you can adujst the radius and steps accordingly
-        #     print('Kernel response:', response)
-        #     print('Pixel position:', center_xy)
-
-        #
-        # if imshow_enable:
-        #     if self.now_modeo != self.cv_modeo[0] and self.now_modeo != self.cv_modeo[1]:
-        #         if 0 in cropped_image.shape:
-        #             # If shape contains 0, it is not detected well.
-        #             pass
-        #         else:
-        #             cv2.imshow("crop", cropped_image)
-        #     cv2.imshow("frame", frame)
-        # if cv2.waitKey(1) & 0xFF == ord("q"):
-        #     pass
+
+        # cv_end_time = timeit.default_timer()
+        # self.timedict["crop"].append(timeit.default_timer() - crop_start_time)
+        # self.timedict["total_cv"].append(cv_end_time - cv_start_time)
+
+        # if calc_print_enable:
+        #     the lower the response, the better the likelihood of there being a pupil; you can adjust the radius and steps accordingly
+        #     print('Kernel response:', response)
+        #     print('Pixel position:', center_xy)
+
+        #
+        # if imshow_enable:
+        #     if self.now_modeo != self.cv_modeo[0] and self.now_modeo != self.cv_modeo[1]:
+        #         if 0 in cropped_image.shape:
+        #             # If shape contains 0, it is not detected well.
+        #             pass
+        #         else:
+        #             cv2.imshow("crop", cropped_image)
+        #     cv2.imshow("frame", frame)
+        # if cv2.waitKey(1) & 0xFF == ord("q"):
+        #     pass
 
         if self.now_modeo == self.cv_modeo[0]:
             # Moving from first_frame to the next mode
@@ -319,150 +355,137 @@ class HSRAC_cls(object):
                 self.now_modeo = self.cv_modeo[2]
             else:
                 self.now_modeo = self.cv_modeo[1]
-        # For measuring processing time of image processing
-        cv_start_time = timeit.default_timer()
-        # Crop first to reduce the amount of data to process.
-        # frame = cropped_image[0:len(cropped_image) - 10, :]
-        # To reduce the processing data, first convert to 1-channel and then blur.
-        # The processing results were the same when I swapped the order of blurring and 1-channelization.
-        frame_gray = cv2.GaussianBlur(frame, (5, 5), 0)
-        hsf_center_x, hsf_center_y = center_x.copy(), center_y.copy()
-        # ransac_xy_offset = (hsf_center_x-20, hsf_center_y-20)
-        upper_x = hsf_center_x + max(20, radius)
-        lower_x = hsf_center_x - max(20, radius)
-        upper_y = hsf_center_y + max(20, radius)
-        lower_y = hsf_center_y - max(20, radius)
-        ransac_xy_offset = (lower_x, lower_y)
-
-        # Crop the image using the calculated bounds
+        ransac_start_time = timeit.default_timer()
-        frame_gray_crop = safe_crop(frame_gray, lower_x, lower_y, upper_x, upper_y)
-        frame = frame_gray_crop
-        # this will need to be adjusted everytime hardware is changed (brightness of IR, Camera postion, etc)m
-        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(frame_gray_crop)
-
-        threshold_value = min_val + thresh_add
-        _, thresh = cv2.threshold(frame_gray_crop, threshold_value, 255, cv2.THRESH_BINARY)
-        # print(thresh.shape, frame_gray.shape)
-        try:
-
-            opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, self.kernel)
-            closing = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, self.kernel)
-            th_frame = 255 - closing
-        except:
-            # I want to eliminate try here because try tends to be slow in execution.
-            th_frame = 255 - frame_gray_crop
-
-        contours, _ = cv2.findContours(th_frame, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
+        # frame_gray = cv2.GaussianBlur(frame, (5, 5), 0)
+        # cv2.GaussianBlur is slow (uses 10% of the time of all this script)
+        # use cv2.blur()
         # or
-        # contours, _=cv2.findContours(th_frame, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
-
+        # frame_gray =cv2.boxFilter(frame, -1,(5, 5))# https://github.com/bfraboni/FastGaussianBlur
+        # cv2.boxFilter(frame_gray, -1,(5, 5),dst=frame_gray)
+        # cv2.boxFilter(frame_gray, -1,(5, 5),dst=frame_gray)
+        # or
+        frame_gray = cv2.sepFilter2D(frame, -1, self.gauss_k, self.gauss_k)
+
+
+        # Crop the image using the calculated bounds
+        # todo:safecrop tune
+        frame_gray_crop = safe_crop(frame_gray, ransac_lower_x, ransac_lower_y, ransac_upper_x, ransac_upper_y, 1)
+        th_frame, fic_frame = get_ransac_frame(frame_gray_crop.shape)
+        frame = frame_gray_crop  # todo: It can cause bugs.
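On the blur swap just above: a 2D Gaussian kernel is separable, so filtering with the same 1D kernel horizontally and then vertically produces the same blur with fewer multiply-adds per pixel. A quick check that `sepFilter2D` with the patch's `getGaussianKernel(5, 2)` matches `GaussianBlur` given an explicit sigma of 2 (note the old code used sigma 0, i.e. auto-derived, so the patch also changes the effective sigma):

    import cv2
    import numpy as np

    img = np.random.randint(0, 256, (120, 160), dtype=np.uint8)
    gauss_k = cv2.getGaussianKernel(5, 2)             # same kernel as self.gauss_k
    sep = cv2.sepFilter2D(img, -1, gauss_k, gauss_k)  # two 1D passes
    ref = cv2.GaussianBlur(img, (5, 5), 2)            # one 2D call, same kernel
    print(int(cv2.absdiff(sep, ref).max()))           # expect 0 (or 1 from rounding)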
+
+        # this will need to be adjusted every time hardware is changed (brightness of IR, camera position, etc.)
+        # min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(frame_gray_crop)
+        min_val = cv2.minMaxLoc(frame_gray_crop)[0]
+        # threshold_value = min_val + thresh_add
+
+        cv2.threshold(frame_gray_crop, min_val + thresh_add, 255, cv2.THRESH_BINARY_INV, dst=th_frame)
+        # print(thresh.shape, frame_gray.shape)
+
+        # cv2.morphologyEx(th_frame, cv2.MORPH_OPEN, self.kernel, dst=fic_frame)
+        # cv2.morphologyEx(fic_frame, cv2.MORPH_CLOSE, self.kernel, dst=fic_frame)
+        # cv2.bitwise_not(fic_frame, fic_frame)
+        # https://stackoverflow.com/questions/23062572/why-multiple-openings-closing-with-a-same-kernel-does-not-have-effect
+        # try (cv2.absdiff(cv2.morphologyEx(th_frame, cv2.MORPH_OPEN, self.kernel),cv2.morphologyEx( cv2.morphologyEx(th_frame, cv2.MORPH_OPEN, self.kernel), cv2.MORPH_CLOSE, self.kernel))>1).sum()
+        cv2.morphologyEx(th_frame, cv2.MORPH_OPEN, self.kernel, dst=fic_frame)  # or cv2.MORPH_CLOSE
+
+        contours = cv2.findContours(fic_frame, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)[0]
+        # or
+        # contours = cv2.findContours(fic_frame, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)[0]
 
         if not blink_bd and self.blink_detector.enable_detect_flg:
             threshold_value = self.center_q1.quartile_1
-            if threshold_value < min_val + thresh_add:
-                [old thresholding / contour-refit branch lost in extraction]
-            if w >= 2.1 * h:  #new blink detection algo lmao this works pretty good actually
-                print("RAN BLINK")
-                #return center_x, center_y, frame, frame, True
-
-        # cx = center_x - (csx - cx)  # we find the difference between the crop size and ransac point, and subtract from the center point from HSF
-        # cy = center_y - (csy - cy)
-
-        # csy = frame.shape[0]
-        # csx = frame.shape[1]
-        csy = gray_frame.shape[0]
-        csx = gray_frame.shape[1]
-
-        # cx = clamp((cx - 20) + center_x, 0, csx)
-        # cy = clamp((cy - 20) + center_y, 0, csy)
-        cx = int(clamp(cx + ransac_xy_offset[0], 0, csx))
-        cy = int(clamp(cy + ransac_xy_offset[1], 0, csy))
+            cv2.threshold(frame_gray_crop, threshold_value, 255, cv2.THRESH_BINARY_INV, dst=th_frame)
+            # cv2.morphologyEx(th_frame, cv2.MORPH_OPEN, self.kernel, dst=fic_frame)
+            # cv2.morphologyEx(fic_frame, cv2.MORPH_CLOSE, self.kernel, dst=fic_frame)
+            # cv2.bitwise_not(fic_frame, fic_frame)
+            # https://stackoverflow.com/questions/23062572/why-multiple-openings-closing-with-a-same-kernel-does-not-have-effect
+            # try (cv2.absdiff(cv2.morphologyEx(th_frame, cv2.MORPH_OPEN, self.kernel),cv2.morphologyEx( cv2.morphologyEx(th_frame, cv2.MORPH_OPEN, self.kernel), cv2.MORPH_CLOSE, self.kernel))>1).sum()
+            cv2.morphologyEx(th_frame, cv2.MORPH_OPEN, self.kernel, dst=fic_frame)  # or cv2.MORPH_CLOSE
+            contours = (*contours, *cv2.findContours(fic_frame, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)[0])
+            # or
+            # contours = (*contours, *cv2.findContours(fic_frame, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)[0])
-
+        if not contours:
+            # If empty, go to next loop
+            return int(center_x), int(center_y), th_frame, frame, gray_frame
+
+        cnt_ind = None
+        max_area = -1
+        for i, cnt in enumerate(contours):
+            now_area = cv2.contourArea(cnt)
+            if max_area < now_area:
+                max_area = now_area
+                cnt_ind = i
+        hull = cv2.convexHull(contours[cnt_ind], False)
+        # if not hull:
+        #     # If empty, go to next loop
+        #     return int(center_x), int(center_y), th_frame, frame, gray_frame
+        ransac_data = fit_rotated_ellipse_ransac(hull.reshape(-1, 2).astype(np.float64), self.sfc)
+        if ransac_data is None:
+            # ransac_data is None == maxcnt.shape[0] < sample_num
+            # If there are too few samples, go to the next loop
+            return int(center_x), int(center_y), th_frame, frame, gray_frame
+
+        cx, cy, w, h, theta = ransac_data
+        if w >= 2.1 * h:  # new blink detection heuristic; works well in practice
+            print("RAN BLINK")
+            # return center_x, center_y, frame, frame, True
-        except Exception as e:
-            # print(e)
-            pass
 
-        # debug code
-        # try:
-        #     if any([isinstance(val, float) for val in [cx, cy]]):
-        #         print()
-        #     return int(cx), int(cy),cropbox, ori_frame,thresh, frame, gray_frame
-        # except:
-        #     if any([isinstance(val, float) for val in [center_x, center_y]]):
-        #         print()
-        #     return center_x, center_y,cropbox, ori_frame,thresh, frame, gray_frame
-        # print(frame_gray.shape, thresh.shape)
+        # cx = center_x - (csx - cx)  # we find the difference between the crop size and ransac point, and subtract from the center point from HSF
+        # cy = center_y - (csy - cy)
+
+        # csy = frame.shape[0]
+        # csx = frame.shape[1]
+        csy = gray_frame.shape[0]
+        csx = gray_frame.shape[1]
+
+        # cx = clamp((cx - 20) + center_x, 0, csx)
+        # cy = clamp((cy - 20) + center_y, 0, csy)
+        cx = int(clamp(cx + ransac_xy_offset[0], 0, csx))
+        cy = int(clamp(cy + ransac_xy_offset[1], 0, csy))
+
+        # cv_end_time = timeit.default_timer()
+        if imshow_enable:  # imsave_flg:
+
+            cv2.circle(ori_frame, (int(center_x), int(center_y)), 3, (128, 0, 0), -1)
+            cv2.drawContours(ori_frame, contours, -1, (255, 0, 0), 1)
+            cv2.circle(ori_frame, (int(cx), int(cy)), 2, (255, 0, 0), -1)
+            # cx1, cy1, w1, h1, theta1 = fit_rotated_ellipse(maxcnt.reshape(-1, 2))
+            # cv2.ellipse(
+            #     ori_frame,
+            #     (cx, cy),
+            #     (int(w), int(h)),
+            #     theta * 180.0 / np.pi,
+            #     0.0,
+            #     360.0,
+            #     (50, 250, 200),
+            #     1,
+            # )
+            # cv2.imshow("crop", cropped_image)
+            # cv2.imshow("frame", frame)
+            if imshow_enable:
+                cv2.imshow("ori_frame", ori_frame)
+                if cv2.waitKey(1) & 0xFF == ord("q"):
+                    pass
+
+        # cv_end_time = timeit.default_timer()
+        # self.timedict["ransac"].append(cv_end_time - ransac_start_time)
+        # self.timedict["total_cv"].append(cv_end_time - cv_start_time)
 
         try:
-            return int(cx), int(cy), thresh, frame, gray_frame
+            return int(cx), int(cy), th_frame, frame, gray_frame
         except:
-            return int(center_x), int(center_y), thresh, frame, gray_frame
+            return int(center_x), int(center_y), th_frame, frame, gray_frame
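As a footnote to the contour-selection loop added above: the "largest contour, convex hull, then RANSAC input" step can be written more compactly with `max(..., key=cv2.contourArea)`. A small self-contained sketch on a synthetic binary image (illustrative only, not the patch's frames):

    import cv2
    import numpy as np

    binary = np.zeros((64, 64), dtype=np.uint8)
    cv2.circle(binary, (32, 32), 12, 255, -1)          # fake pupil blob
    contours = cv2.findContours(binary, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)[0]
    if contours:
        max_cnt = max(contours, key=cv2.contourArea)   # replaces the index-tracking loop
        hull = cv2.convexHull(max_cnt)
        points = hull.reshape(-1, 2).astype(np.float64)  # shape (N, 2), ready for the fitter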