From dd23dcf83827053c7a8d0bcdad219962f3bd5c82 Mon Sep 17 00:00:00 2001
From: PallasNeko <124042774+PallasNeko@users.noreply.github.com>
Date: Wed, 1 Feb 2023 23:50:08 +0900
Subject: [PATCH] fix and move

---
 EyeTrackApp/haar_surround_feature.py          |  34 ++-
 EyeTrackApp/hsrac.py                          | 250 ++++++++++++------
 EyeTrackApp/utils/__init__.py                 |   0
 EyeTrackApp/{ => utils}/img_utils.py          |   6 +-
 EyeTrackApp/{utils.py => utils/misc_utils.py} |   0
 EyeTrackApp/{ => utils}/time_utils.py         |   0
 6 files changed, 197 insertions(+), 93 deletions(-)
 create mode 100644 EyeTrackApp/utils/__init__.py
 rename EyeTrackApp/{ => utils}/img_utils.py (72%)
 rename EyeTrackApp/{utils.py => utils/misc_utils.py} (100%)
 rename EyeTrackApp/{ => utils}/time_utils.py (100%)

diff --git a/EyeTrackApp/haar_surround_feature.py b/EyeTrackApp/haar_surround_feature.py
index 16bec9b..be0cc7e 100644
--- a/EyeTrackApp/haar_surround_feature.py
+++ b/EyeTrackApp/haar_surround_feature.py
@@ -7,12 +7,13 @@ from functools import lru_cache

 import cv2
 import numpy as np
-from img_utils import safe_crop
+from utils.misc_utils import clamp
+from utils.img_utils import safe_crop

 # from line_profiler_pycharm import profile

 video_path = "ezgif.com-gif-maker.avi"
-imshow_enable = True
+imshow_enable = False
 calc_print_enable = True
 save_video = False
 skip_autoradius = False
@@ -619,8 +620,10 @@ class HSF_cls(object):

     def single_run(self):
         # Temporary implementation to run
-        ## default_radius = 14
-
+        # default_radius = 14
+
+        # cropbox = []  # debug code
+
         frame = self.current_image_gray
         if self.now_modeo == self.cv_modeo[1]:
             # adjustment of radius
@@ -673,6 +676,9 @@ class HSF_cls(object):
         # Crop the image using the calculated bounds
         cropped_image = safe_crop(gray_frame, lower_x, lower_y, upper_x, upper_y)

+        # cropbox = [clamp(val, 0, gray_frame.shape[i]) for i, val in
+        #            zip([1, 0, 1, 0], [lower_x, lower_y, upper_x, upper_y])]  # debug code
+
         if self.now_modeo == self.cv_modeo[0] or self.now_modeo == self.cv_modeo[1]:
             # If mode is first_frame or radius_adjust, record current radius and response
             self.auto_radius_calc.add_response(radius, response)
@@ -686,7 +692,7 @@ class HSF_cls(object):
                 upper_y = center_y + self.center_correct.center_q1_radius
                 lower_y = center_y - self.center_correct.center_q1_radius
                 self.center_q1.add_response(
-                    cv2.mean(safe_crop(gray_frame, lower_x, lower_y, upper_x, upper_y))[
+                    cv2.mean(safe_crop(gray_frame, lower_x, lower_y, upper_x, upper_y, keepsize=False))[
                         0
                     ]
                 )
@@ -714,6 +720,12 @@ class HSF_cls(object):
                         self.center_correct.init_array(
                             gray_frame.shape, self.center_q1.quartile_1, radius
                         )
+                    elif self.center_correct.frame_shape != gray_frame.shape:
+                        """If the resolution has changed, the statistics have changed as well and essentially need to be
+                        recalculated, but implementation is postponed because viability is the highest priority."""
+                        self.center_correct.init_array(
+                            gray_frame.shape, self.center_q1.quartile_1, radius
+                        )

                     center_x, center_y = self.center_correct.correction(
                         gray_frame, center_x, center_y
@@ -728,6 +740,9 @@ class HSF_cls(object):
                     cropped_image = safe_crop(
                         gray_frame, lower_x, lower_y, upper_x, upper_y
                     )
+                    # cropbox = [clamp(val, 0, gray_frame.shape[i]) for i, val in
+                    #            zip([1, 0, 1, 0], [lower_x, lower_y, upper_x, upper_y])]  # debug code
+
                 # if imshow_enable or save_video:
                 #     cv2.circle(frame, (orig_x, orig_y), 6, (0, 0, 255), -1)
                 #     cv2.circle(frame, (center_x, center_y), 3, (255, 0, 0), -1)
@@ -765,20 +780,27 @@ class HSF_cls(object):
                 self.now_modeo = self.cv_modeo[2]
             else:
                 self.now_modeo = self.cv_modeo[1]
-
+
+        # debug code
+        # return center_x, center_y, cropbox, frame
         return center_x, center_y, frame

+
 class External_Run_HSF(object):
     def __init__(self):
         self.algo = HSF_cls()

     def run(self, current_image_gray):
         self.algo.current_image_gray = current_image_gray
+        # debug code
+        # center_x, center_y, cropbox, frame = self.algo.single_run()
+        # return center_x, center_y, cropbox, frame
         center_x, center_y, frame = self.algo.single_run()
         return center_x, center_y, frame

+
 if __name__ == "__main__":
     hsf = HSF_cls()
     hsf.open_video(video_path)
diff --git a/EyeTrackApp/hsrac.py b/EyeTrackApp/hsrac.py
index 521d896..6d8ab57 100644
--- a/EyeTrackApp/hsrac.py
+++ b/EyeTrackApp/hsrac.py
@@ -1,7 +1,4 @@
-import math
-import sys
 import timeit
-from functools import lru_cache

 import cv2
 import numpy as np
@@ -9,13 +6,12 @@ import numpy as np
 from haar_surround_feature import (
     AutoRadiusCalc,
     BlinkDetector,
-    CenterCorrection,
-    CvParameters,
-    conv_int,
+    CvParameters, conv_int,
     frameint_get_xy_step,
 )
-from img_utils import safe_crop
-from utils import clamp
+from utils.img_utils import safe_crop
+from utils.misc_utils import clamp
+
 # from line_profiler_pycharm import profile

@@ -190,7 +186,6 @@ class HSRAC_cls(object):
         self.auto_radius_calc = AutoRadiusCalc()
         self.blink_detector = BlinkDetector()
         self.center_q1 = BlinkDetector()
-        self.center_correct = CenterCorrection()

         self.cap = None

@@ -204,7 +199,10 @@ class HSRAC_cls(object):

         # ransac
         self.rng = np.random.default_rng()
-        self.kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
+
+        # self.kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
+        # or
+        self.kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))

     def open_video(self, video_path):
         # Temporary implementation to run
@@ -221,17 +219,20 @@ class HSRAC_cls(object):
         ret, frame = self.cap.read()
         if ret:
             # I have set it to grayscale (1ch) just in case, but if the frame is 1ch, this line can be commented out.
+            # self.current_image = frame  # debug code
             self.current_image_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
             return True
         return False

     def single_run(self):
         # Temporary implementation to run
-
-        ## default_radius = 14
-
+        # default_radius = 14
+
+        # ori_frame = self.current_image.copy()  # debug code
+        # cropbox = []  # debug code
+
+        blink_bd = False
         frame = self.current_image_gray
-
         if self.now_modeo == self.cv_modeo[1]:
             # adjustment of radius
@@ -282,7 +283,9 @@ class HSRAC_cls(object):

         # Crop the image using the calculated bounds
         cropped_image = safe_crop(gray_frame, lower_x, lower_y, upper_x, upper_y)
-
+
+        # cropbox = [clamp(val, 0, gray_frame.shape[i]) for i, val in zip([1, 0, 1, 0], [lower_x, lower_y, upper_x, upper_y])]  # debug code
+
         if self.now_modeo == self.cv_modeo[0] or self.now_modeo == self.cv_modeo[1]:
             # If mode is first_frame or radius_adjust, record current radius and response
             self.auto_radius_calc.add_response(radius, response)
@@ -291,12 +294,12 @@ class HSRAC_cls(object):
             if self.blink_detector.response_len() < blink_init_frames:
                 self.blink_detector.add_response(cv2.mean(cropped_image)[0])

-                upper_x = center_x + self.center_correct.center_q1_radius
-                lower_x = center_x - self.center_correct.center_q1_radius
-                upper_y = center_y + self.center_correct.center_q1_radius
-                lower_y = center_y - self.center_correct.center_q1_radius
+                upper_x = center_x + 20
+                lower_x = center_x - 20
+                upper_y = center_y + 20
+                lower_y = center_y - 20
                 self.center_q1.add_response(
-                    cv2.mean(safe_crop(gray_frame, lower_x, lower_y, upper_x, upper_y))[
+                    cv2.mean(safe_crop(gray_frame, lower_x, lower_y, upper_x, upper_y, keepsize=False))[
                         0
                     ]
                 )
@@ -307,7 +310,7 @@ class HSRAC_cls(object):
                 self.center_q1.calc_thresh()
                 self.now_modeo = self.cv_modeo[3]
         else:
-            if 0 in cropped_image.shape:
+            if 0 in cropped_image.shape:  # This line may not be needed. The image will be cropped using safe_crop.
                 # If shape contains 0, it is not detected well.
                 print("Something's wrong.")
             else:
@@ -317,30 +320,11 @@ class HSRAC_cls(object):
                     # (i.e., if the cropimage is whitish
                     if self.blink_detector.detect(cv2.mean(cropped_image)[0]):
                         # blink
-                        pass
-                    else:
-                        # pass
-                        if not self.center_correct.setup_comp:
-                            self.center_correct.init_array(
-                                gray_frame.shape, self.center_q1.quartile_1, radius
-                            )
-
-                        center_x, center_y = self.center_correct.correction(
-                            gray_frame, center_x, center_y
-                        )
-                        # Define the center point and radius
-                        center_xy = (center_x, center_y)
-                        upper_x = center_x + radius
-                        lower_x = center_x - radius
-                        upper_y = center_y + radius
-                        lower_y = center_y - radius
-                        # Crop the image using the calculated bounds
-                        cropped_image = safe_crop(
-                            gray_frame, lower_x, lower_y, upper_x, upper_y
-                        )
+                        print("BLINK BD")
+                        blink_bd = True
             # if imshow_enable or save_video:
             #     cv2.circle(frame, (orig_x, orig_y), 6, (0, 0, 255), -1)
-            #     cv2.circle(frame, (center_x, center_y), 3, (255, 0, 0), -1)
+            #     cv2.circle(ori_frame, (center_x, center_y), 7, (255, 0, 0), -1)

         # If you want to update response_max. it may be more cost-effective to rewrite response_list in the following way
         # https://stackoverflow.com/questions/42771110/fastest-way-to-left-cycle-a-numpy-array-like-pop-push-for-a-queue
@@ -362,7 +346,6 @@ class HSRAC_cls(object):
                 # If shape contains 0, it is not detected well.
                 pass
             else:
-                cv2.imshow("crop", cropped_image)
                 cv2.imshow("frame", frame)
             if cv2.waitKey(1) & 0xFF == ord("q"):
@@ -377,8 +360,6 @@ class HSRAC_cls(object):
             else:
                 self.now_modeo = self.cv_modeo[1]

-        newFrame2 = frame.copy()
-        # frame = cropped_image
         # For measuring processing time of image processing
         cv_start_time = timeit.default_timer()
         # Crop first to reduce the amount of data to process.
@@ -386,37 +367,56 @@ class HSRAC_cls(object):
         # To reduce the processing data, first convert to 1-channel and then blur.
         # The processing results were the same when I swapped the order of blurring and 1-channelization.
         frame_gray = cv2.GaussianBlur(frame, (5, 5), 0)
-
-        upper_x = center_x + 20
-        lower_x = center_x - 20
-        upper_y = center_y + 20
-        lower_y = center_y - 20
+        hsf_center_x, hsf_center_y = center_x.copy(), center_y.copy()
+        ransac_xy_offset = (hsf_center_x - 20, hsf_center_y - 20)
+        upper_x = hsf_center_x + 20
+        lower_x = hsf_center_x - 20
+        upper_y = hsf_center_y + 20
+        lower_y = hsf_center_y - 20

         # Crop the image using the calculated bounds
-        frame_gray = safe_crop(frame_gray, lower_x, lower_y, upper_x, upper_y)
-        frame = frame_gray
+        frame_gray_crop = safe_crop(frame_gray, lower_x, lower_y, upper_x, upper_y)
+        frame = frame_gray_crop

         # this will need to be adjusted everytime hardware is changed (brightness of IR, Camera postion, etc)m
-        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(frame_gray)
-
-        maxloc0_hf, maxloc1_hf = int(0.5 * max_loc[0]), int(0.5 * max_loc[1])
-
-        # crop 15% sqare around min_loc
-        # frame_gray = frame_gray[max_loc[1] - maxloc1_hf:max_loc[1] + maxloc1_hf,
-        #              max_loc[0] - maxloc0_hf:max_loc[0] + maxloc0_hf]
+        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(frame_gray_crop)

         threshold_value = min_val + thresh_add
-        _, thresh = cv2.threshold(frame_gray, threshold_value, 255, cv2.THRESH_BINARY)
+        _, thresh = cv2.threshold(frame_gray_crop, threshold_value, 255, cv2.THRESH_BINARY)
         # print(thresh.shape, frame_gray.shape)

         try:
             opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, self.kernel)
             closing = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, self.kernel)
             th_frame = 255 - closing
         except:
             # I want to eliminate try here because try tends to be slow in execution.
-            th_frame = 255 - frame_gray
+            th_frame = 255 - frame_gray_crop

         contours, _ = cv2.findContours(th_frame, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
+        # or
+        # contours, _ = cv2.findContours(th_frame, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
+
+        if not blink_bd and self.blink_detector.enable_detect_flg:
+            threshold_value = self.center_q1.quartile_1
+            if threshold_value