From 1c7c6d964b4cf0af2e2e7931cce1e346a3e9c079 Mon Sep 17 00:00:00 2001
From: Prohurtz <48768484+RedHawk989@users.noreply.github.com>
Date: Thu, 1 Jun 2023 12:03:04 -0500
Subject: [PATCH] IBO fix, HSRAC fix

---
 EyeTrackApp/algo_settings_widget.py     |  1 +
 EyeTrackApp/blink.py                    |  5 +-
 EyeTrackApp/camera_widget.py            |  4 +-
 EyeTrackApp/eye_processor.py            | 55 ++++++++++++++---
 EyeTrackApp/haar_surround_feature.py    | 22 +++++--
 EyeTrackApp/intensity_based_openness.py | 53 ++++++++++-------
 EyeTrackApp/ransac.py                   | 78 ++++++++++---------------
 EyeTrackApp/utils/img_utils.py          | 33 ++++++++++-
 8 files changed, 164 insertions(+), 87 deletions(-)

diff --git a/EyeTrackApp/algo_settings_widget.py b/EyeTrackApp/algo_settings_widget.py
index 1c055e3..3bc1d8f 100644
--- a/EyeTrackApp/algo_settings_widget.py
+++ b/EyeTrackApp/algo_settings_widget.py
@@ -368,4 +368,5 @@ class AlgoSettingsWidget:
         if changed:
             self.main_config.save()
+            #print(self.main_config)
         self.osc_queue.put(EyeId.ALGOSETTINGS)
diff --git a/EyeTrackApp/blink.py b/EyeTrackApp/blink.py
index d00303a..6fa83a3 100644
--- a/EyeTrackApp/blink.py
+++ b/EyeTrackApp/blink.py
@@ -8,10 +8,13 @@ def BLINK(self):
     intensity = np.sum(self.current_image_gray_clean)
     self.frames = self.frames + 1
+    if len(str(intensity)) >= 8: # filter abnormally high values
+        print('filter, assume blink')
+        intensity = self.max_int + 1
     if intensity > self.max_int:
         self.max_int = intensity
-        if self.frames > 400: #TODO: test this number more (make it a setting??)
+        if self.frames > 200: #TODO: test this number more (make it a setting??)
             self.max_ints.append(self.max_int)
     if intensity < self.min_int:
         self.min_int = intensity
diff --git a/EyeTrackApp/camera_widget.py b/EyeTrackApp/camera_widget.py
index 088676f..582116c 100644
--- a/EyeTrackApp/camera_widget.py
+++ b/EyeTrackApp/camera_widget.py
@@ -352,9 +352,9 @@ class CameraWidget:
                     line_color="white",
                 )
                 if not np.isnan(eye_info.blink):
-                    graph.draw_line((-100,eye_info.blink * 100), (-100,100), color="black", width=10)
+                    graph.draw_line((-100,eye_info.blink * 100), (-100,100), color="#6f4ca1", width=10)
                 else:
-                    graph.draw_line((-100, 0.0 * 100), (-100, 100), color="black", width=10)
+                    graph.draw_line((-100, 0.0 * 200), (-100, 100), color="black", width=10)
                 if eye_info.blink <= 0.0:
                     graph.update(background_color="#6f4ca1")
diff --git a/EyeTrackApp/eye_processor.py b/EyeTrackApp/eye_processor.py
index 056c851..ae8b761 100644
--- a/EyeTrackApp/eye_processor.py
+++ b/EyeTrackApp/eye_processor.py
@@ -57,7 +57,7 @@ from haar_surround_feature import External_Run_HSF
 from blob import *
 from ransac import *
 from blink import *
-
+from utils.img_utils import circle_crop
 from eye import EyeInfo, EyeInfoOrigin
 from intensity_based_openness import *
@@ -89,6 +89,7 @@ class EyeProcessor:
         image_queue_outgoing: "queue.Queue",
         eye_id,
     ):
+        self.main_config = EyeTrackSettingsConfig
         self.config = config
         self.settings = settings
         self.eye_id = eye_id
@@ -101,8 +102,9 @@ class EyeProcessor:

         # Cross algo state
         self.lkg_projected_sphere = None
-        self.xc = None
-        self.yc = None
+        self.xc = 20
+        self.yc = 20
+        self.cc_radius = 40

         # Image state
         self.previous_image = None
@@ -124,7 +126,7 @@ class EyeProcessor:
         self.ymax = -69420
         self.ymin = 69420
         self.blink_clear = False
-        self.cct = 300
+        self.cct = 200
         self.cccs = False
         self.ts = 10
         self.previous_rotation = self.config.rotation_angle
@@ -230,7 +232,14 @@ class EyeProcessor:
                 rotation_matrix,
                 (cols, rows),
                 borderMode=cv2.BORDER_CONSTANT,
-                borderValue=(ar + 10, ag + 10, ab + 10),#(255, 255, 255),
+                borderValue=(ar, ag, ab),#(255, 255, 255),
             )
+            self.current_image_white = cv2.warpAffine(
+                self.current_image,
+                rotation_matrix,
+                (cols, rows),
+                borderMode=cv2.BORDER_CONSTANT,
+                borderValue=(255, 255, 255),
+            )
             return True
         except:
@@ -243,7 +252,7 @@ class EyeProcessor:
         self.eyeopen = BLINK(self)
         if self.settings.gui_IBO:
-            self.eyeopen = self.ibo.intense(self.rawx, self.rawy, self.current_image)
+            self.eyeopen = self.ibo.intense(self.rawx, self.rawy, self.current_image_white)
             if self.eyeopen < 0.35: #threshold so the eye fully closes #todo: make this a setting?
                 self.eyeopen = 0.0
             if self.bd_blink == True:
@@ -251,7 +260,7 @@ class EyeProcessor:
        # self.eyeopen = 0.0
         if self.settings.gui_IBO and self.settings.gui_BLINK:
-            ibo = self.ibo.intense(self.rawx, self.rawy, self.current_image)
+            ibo = self.ibo.intense(self.rawx, self.rawy, self.current_image_white)
             blink = BLINK(self)
             if blink == 0.0:
@@ -274,10 +283,19 @@ class EyeProcessor:
         self.current_algorithm = EyeInfoOrigin.DADDY

     def HSRACM(self):
+        if self.eye_id in [EyeId.LEFT] and self.settings.gui_circular_crop_left:
+            self.current_image_gray, self.cct = circle_crop(self.current_image_gray, self.xc, self.yc, self.cc_radius, self.cct)
+        else:
+            pass
+        if self.eye_id in [EyeId.RIGHT] and self.settings.gui_circular_crop_right:
+            self.current_image_gray, self.cct = circle_crop(self.current_image_gray, self.xc, self.yc, self.cc_radius, self.cct)
+        else:
+            pass
+
         self.hasrac_en = True
         # todo: add process to initialise er_hsrac when resolution changes
         self.rawx, self.rawy, self.thresh, self.radius = self.er_hsf.run(self.current_image_gray)
-        self.rawx, self.rawy, self.thresh = RANSAC3D(self)
+        self.rawx, self.rawy, self.thresh = RANSAC3D(self, True)
        #if self.prev_x is None:
        #    self.prev_x = self.rawx
@@ -285,19 +303,38 @@ class EyeProcessor:
         self.out_x, self.out_y = cal.cal_osc(self, self.rawx, self.rawy)
         self.current_algorithm = EyeInfoOrigin.HSRAC

+
     def HSFM(self):
+        if self.eye_id in [EyeId.LEFT] and self.settings.gui_circular_crop_left:
+            self.current_image_gray, self.cct = circle_crop(self.current_image_gray, self.xc, self.yc, self.cc_radius, self.cct)
+        else:
+            pass
+        if self.eye_id in [EyeId.RIGHT] and self.settings.gui_circular_crop_right:
+            self.current_image_gray, self.cct = circle_crop(self.current_image_gray, self.xc, self.yc, self.cc_radius, self.cct)
+        else:
+            pass
         # todo: add process to initialise er_hsf when resolution changes
         self.rawx, self.rawy, self.thresh = self.er_hsf.run(self.current_image_gray)
         self.out_x, self.out_y = cal.cal_osc(self, self.rawx, self.rawy)
         self.current_algorithm = EyeInfoOrigin.HSF

     def RANSAC3DM(self):
+        if self.eye_id in [EyeId.LEFT] and self.settings.gui_circular_crop_left:
+            self.current_image_gray, self.cct = circle_crop(self.current_image_gray, self.xc, self.yc, self.cc_radius, self.cct)
+        else:
+            pass
+        if self.eye_id in [EyeId.RIGHT] and self.settings.gui_circular_crop_right:
+            self.current_image_gray, self.cct = circle_crop(self.current_image_gray, self.xc, self.yc, self.cc_radius, self.cct)
+        else:
+            pass
+
         self.hasrac_en = False
         current_image_gray_copy = self.current_image_gray.copy()  # Duplicate before overwriting in RANSAC3D.
-        self.rawx, self.rawy, self.thresh = RANSAC3D(self)
+        self.rawx, self.rawy, self.thresh = RANSAC3D(self, False)
         self.out_x, self.out_y = cal.cal_osc(self, self.rawx, self.rawy)
         self.current_algorithm = EyeInfoOrigin.RANSAC

     def BLOBM(self):
+        print("LSKDGFHL")
         self.rawx, self.rawy, self.thresh = BLOB(self)
diff --git a/EyeTrackApp/haar_surround_feature.py b/EyeTrackApp/haar_surround_feature.py
index 3a0aa74..85d97f2 100644
--- a/EyeTrackApp/haar_surround_feature.py
+++ b/EyeTrackApp/haar_surround_feature.py
@@ -3,12 +3,17 @@ from functools import lru_cache
 import cv2
 import numpy as np
-
-
 from utils.misc_utils import clamp
 from utils.img_utils import safe_crop
+from enum import IntEnum
+class EyeId(IntEnum):
+    RIGHT = 0
+    LEFT = 1
+    BOTH = 2
+    SETTINGS = 3
+
 # from line_profiler_pycharm import profile

 video_path = "ezgif.com-gif-maker.avi"
@@ -476,7 +481,7 @@ class HSF_cls(object):
         self.blink_detector = BlinkDetector()
         self.center_q1 = BlinkDetector()
         self.center_correct = CenterCorrection()
-
+
         self.cap = None

         self.timedict = {"to_gray": [], "int_img": [], "conv_int": [], "crop": [], "total_cv": []}
@@ -499,11 +504,16 @@ class HSF_cls(object):
             self.current_image_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
             return True
         return False
-
+
+    cct = 300
+    ransac_lower_x = 100
+    ransac_lower_y = 100
+    cx = 0
+    cy = 0
+
     def single_run(self):
         # Temporary implementation to run
-
         ## default_radius = 14

         # cropbox=[]  # debug code
@@ -669,7 +679,7 @@ class HSF_cls(object):
             else:
                 self.now_modeo = self.cv_modeo[1]
-
+
         # debug code
         # return center_x,center_y,cropbox,frame
         return center_x, center_y, frame, radius
diff --git a/EyeTrackApp/intensity_based_openness.py b/EyeTrackApp/intensity_based_openness.py
index 01baed4..ed47f70 100644
--- a/EyeTrackApp/intensity_based_openness.py
+++ b/EyeTrackApp/intensity_based_openness.py
@@ -122,6 +122,9 @@ class IntensityBasedOpeness:
         self.color = []
         self.x = []
         self.fc = 0
+        self.filterlist = []
+
+        self.maxinten = 0
         self.tri_filter = []
         # try:
@@ -202,24 +205,38 @@ class IntensityBasedOpeness:
         int_x, int_y = int(x), int(y)
         if int_x < 0 or int_y < 0:
             return self.prev_val
-        upper_x = min(int_x + 15, frame.shape[1]-1) #TODO make this a setting
-        lower_x = max(int_x - 15, 0)
-        upper_y = min(int_y + 15, frame.shape[0]-1)
-        lower_y = max(int_y - 15, 0)
+        upper_x = min(int_x + 25, frame.shape[1]-1) #TODO make this a setting
+        lower_x = max(int_x - 25, 0)
+        upper_y = min(int_y + 25, frame.shape[0]-1)
+        lower_y = max(int_y - 25, 0)
        # frame_crop = frame[lower_y:upper_y, lower_x:upper_x]
-        img = safe_crop(frame, lower_x, lower_y, upper_x, upper_y, 1)
+        #frame = safe_crop(frame, lower_x, lower_y, upper_x, upper_y, False)
        #ret_, th = cv2.threshold(frame_crop, 80, 1.0, cv2.THRESH_BINARY_INV, dst=frame_crop)
         frame_crop = frame
+        #ret, f = cv2.threshold(frame, 80, 255, cv2.THRESH_BINARY)
        # ret, frame_crop = cv2.threshold(frame_crop, 80, 255, cv2.THRESH_BINARY)
        # The same can be done with cv2.integral, but since there is only one area of the rectangle for which we want to know the total value, there is no advantage in terms of computational complexity.
         intensity = frame_crop.sum() + 1
-        self.tri_filter.append(intensity)
-        if len(self.tri_filter) > 3:
-            self.tri_filter.pop(0)
-        intensity = sum(self.tri_filter) / 3
+        #cv2.imshow('e', frame)
+        # if cv2.waitKey(10) == 27:
+        #     exit()
+        if len(self.filterlist) < 800:
+            self.filterlist.append(intensity)
+        else:
+            self.filterlist.pop()
+            self.filterlist.append(intensity)
+        print(len(str(intensity)))
+        if len(str(intensity)) >= 8: #filter abnormally high values
+            print('filter, assume blink')
+            intensity = self.maxval
+
+        #self.tri_filter.append(intensity)
+        #if len(self.tri_filter) > 3:
+        #    self.tri_filter.pop(0)
+        #    intensity = sum(self.tri_filter) / 3
        #avg_color_per_row = np.average(frame_crop, axis=0)
        #avg_color = np.average(avg_color_per_row, axis=0)
        # ar, ag, ab = avg_color
@@ -229,11 +246,6 @@ class IntensityBasedOpeness:
        #if cv2.waitKey(1) & 0xFF == ord("q"):
        #    pass
-        #print(intensity)
-        # if our blob width/height are within suitable (yet arbitrary) boundaries, call that good.
-        #
-        # TODO This should be scaled based on camera resolution.
-        # numpy:np.sum(),ndarray.sum()
         # opencv:cv2.sumElems()
         # I don't know which is faster.
@@ -301,7 +313,8 @@ class IntensityBasedOpeness:
         eyeopen = ((intensity - maxp) / (minp - maxp))
         #for whatever reason when input and maxp are too close it outputs high
-
+        # print(eyeopen, maxp, minp)
+        # eyeopen = ((eyeopen - 0.3) / (1.0 - 0.3))
         eyeopen = 1 - eyeopen
        # print(eyeopen, intensity, maxp, minp, x, y)
@@ -325,10 +338,10 @@ class IntensityBasedOpeness:
                 print('BLINK')
        #intensityold = img.sum() + 1
-        avg_color_per_row = np.average(img, axis=0)
-        avg_color = np.average(avg_color_per_row, axis=0)
-        ar, ag, ab = avg_color
-        intensity = int(ar * 8) # higher = closed
+        # avg_color_per_row = np.average(img, axis=0)
+        # avg_color = np.average(avg_color_per_row, axis=0)
+        #ar, ag, ab = avg_color
+        # intensity = int(ar * 8) # higher = closed
        #self.old.append(intensity)
        #self.color.append(intensity)
        # self.x.append(self.fc)
@@ -352,5 +365,5 @@ class IntensityBasedOpeness:
        #print(eyevec)
         if eyevec > 0.4:
             print("BLINK LCOK")
-        # print(eyeopen)
+       # print(eyeopen)
         return eyeopen
diff --git a/EyeTrackApp/ransac.py b/EyeTrackApp/ransac.py
index 05333c2..645fb45 100644
--- a/EyeTrackApp/ransac.py
+++ b/EyeTrackApp/ransac.py
@@ -29,6 +29,7 @@ import cv2
 import numpy as np
 from enum import IntEnum
 from utils.img_utils import safe_crop
+from utils.misc_utils import clamp
 class EyeId(IntEnum):
     RIGHT = 0
     LEFT = 1
@@ -142,62 +143,28 @@ def fit_rotated_ellipse(data, P):
     return (cx, cy, w, h, theta)

 cct = 300
-def circle_crop(self):
-    global cct
-    avg_color_per_row = np.average(self.current_image, axis=0)
-    avg_color = np.average(avg_color_per_row, axis=0)
-    ar, ag, ab = avg_color
-    if cct == 0:
-        try:
-            ht, wd = self.current_image_gray.shape[:2]
-            radius = int(float(self.lkg_projected_sphere["axes"][0]))
-            self.xc = int(float(self.lkg_projected_sphere["center"][0]))
-            self.yc = int(float(self.lkg_projected_sphere["center"][1]))
-            if radius < 10: #minimum size
-                radius = 10
-            # draw filled circle in white on black background as mask
-            mask = np.zeros((ht, wd), dtype=np.uint8)
-            mask = cv2.circle(mask, (self.xc, self.yc), radius, 255, -1)
-            # create white colored background
-            color = np.full_like(self.current_image_gray, (ar))
-            # apply mask to image
-            masked_img = cv2.bitwise_and(self.current_image_gray, self.current_image_gray, mask=mask)
-            # apply inverse mask to colored image
-            masked_color = cv2.bitwise_and(color, color, mask=255 - mask)
-            # combine the two masked images
-            self.current_image_gray = cv2.add(masked_img, masked_color)
-            return self.current_image_gray
-        except:
-            return self.current_image_gray
-            pass
-    else:
-        cct = cct - 1
-        return self.current_image_gray
+ransac_lower_x = 100
+ransac_lower_y = 100
+cx = 0
+cy = 0

-def RANSAC3D(self):
+def RANSAC3D(self, hsrac_en):
     f = False
-    if self.eye_id in [EyeId.LEFT] and self.settings.gui_circular_crop_left:
-        self.current_image_gray = circle_crop(self)
-    else:
-        pass
+    global cct, ransac_lower_y, ransac_lower_x, cx, cy
-    if self.eye_id in [EyeId.RIGHT] and self.settings.gui_circular_crop_right:
-        self.current_image_gray = circle_crop(self)
-    else:
-        pass
-    if self.hasrac_en:
-        ransac_lower_x = self.rawx - self.radius
-        ransac_upper_x = self.rawx + self.radius
-        ransac_lower_y = self.rawy - self.radius
-        ransac_upper_y = self.rawy + self.radius
+    if hsrac_en:
+        ransac_upper_x = self.rawx + max(15, self.radius)
+        ransac_lower_x = self.rawx - max(15, self.radius)
+        ransac_upper_y = self.rawy + max(15, self.radius)
+        ransac_lower_y = self.rawy - max(15, self.radius)
         frame = safe_crop(self.current_image_gray_clean, ransac_lower_x, ransac_lower_y, ransac_upper_x, ransac_upper_y, 1)
     else:
         frame = self.current_image_gray_clean
-    global cct
+
     kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
     thresh_add = 10
     rng = np.random.default_rng()
@@ -315,10 +282,24 @@ def RANSAC3D(self):
             eym = ellipse_3d["center"][1]
             d = result_3d["diameter_3d"]
+            self.cc_radius = int(float(self.lkg_projected_sphere["axes"][0]))
+            self.xc = int(float(self.lkg_projected_sphere["center"][0]))
+            self.yc = int(float(self.lkg_projected_sphere["center"][1]))
     except:
         f = True

     # Draw our image and stack it for visual output
+    if hsrac_en:
+        csy = newFrame2.shape[0]
+        csx = newFrame2.shape[1]
+
+        ransac_xy_offset = (ransac_lower_x, ransac_lower_y)
+        # cx = clamp((cx - 20) + center_x, 0, csx)
+        # cy = clamp((cy - 20) + center_y, 0, csy)
+        cx = int(clamp(cx + ransac_xy_offset[0], 0, csx))
+        cy = int(clamp(cy + ransac_xy_offset[1], 0, csy))
+
+
     try:
         cv2.drawContours(self.current_image_gray, contours, -1, (255, 0, 0), 1)  # TODO: fix visualizations with HSRAC
         cv2.circle(self.current_image_gray, (int(cx), int(cy)), 2, (0, 0, 255), -1)
@@ -359,9 +340,12 @@ def RANSAC3D(self):
        #     tuple(int(v) for v in ellipse_3d["center"]),
        #     (0, 255, 0),  # color (BGR): red
        # )
-
+
     except:
         pass

+
+    self.current_image_gray = newFrame2
     y, x = self.current_image_gray.shape
     thresh = cv2.resize(thresh, (x,y))
diff --git a/EyeTrackApp/utils/img_utils.py b/EyeTrackApp/utils/img_utils.py
index b93d4b1..84af99b 100644
--- a/EyeTrackApp/utils/img_utils.py
+++ b/EyeTrackApp/utils/img_utils.py
@@ -1,7 +1,7 @@
 import cv2
+import numpy as np

-
-def safe_crop(img, x, y, x2, y2, keepsize=True):
+def safe_crop(img, x, y, x2, y2, keepsize=False):
     # The order of the arguments can be reconsidered.
     img_h, img_w = img.shape[:2]
     outimg = img[max(0, y) : min(img_h, y2), max(0, x) : min(img_w, x2)].copy()
@@ -10,3 +10,32 @@ def safe_crop(img, x, y, x2, y2, keepsize=True):
         # If the size is different from the expected size (smaller by the amount that is out of range)
         outimg = cv2.resize(outimg, (reqsize_x, reqsize_y))
     return outimg
+
+def circle_crop(img, xc, yc, radius, cct):
+
+    avg_color_per_row = np.average(img, axis=0)
+    avg_color = np.average(avg_color_per_row, axis=0)
+    if cct <= 0:
+        try:
+            ht, wd = img.shape[:2]
+
+            if radius < 10: #minimum size
+                radius = 10
+            # draw filled circle in white on black background as mask
+            mask = np.zeros((ht, wd), dtype=np.uint8)
+            mask = cv2.circle(mask, (xc, yc), radius, 255, -1)
+            # create white colored background
+            color = np.full_like(img, (avg_color))
+            # apply mask to image
+            masked_img = cv2.bitwise_and(img, img, mask=mask)
+            # apply inverse mask to colored image
+            masked_color = cv2.bitwise_and(color, color, mask=255 - mask)
+            # combine the two masked images
+            outimg = cv2.add(masked_img, masked_color)
+            return outimg, cct
+        except:
+            return img, cct
+            pass
+    else:
+        cct = cct - 1
+        return img, cct
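
Illustrative sketch (not part of the patch): the new utils/img_utils.circle_crop replaces the circle_crop that this patch removes from ransac.py. Instead of reading state off the eye-processor object, it takes the image, circle centre, radius, and the cct countdown explicitly and returns an (image, cct) pair, which is why HSRACM, HSFM, and RANSAC3DM above unpack two values. The sketch below only illustrates that behaviour under those assumptions; circle_crop_sketch and the synthetic frame are placeholder names, not code from the repository.

import cv2
import numpy as np

def circle_crop_sketch(img, xc, yc, radius, cct):
    # While the countdown is still positive, skip cropping and decrement it,
    # mirroring the "else" branch of the patched helper.
    if cct > 0:
        return img, cct - 1
    radius = max(radius, 10)  # minimum size, as in the patch
    avg = np.average(np.average(img, axis=0), axis=0)  # mean intensity used as the fill colour
    mask = np.zeros(img.shape[:2], dtype=np.uint8)
    cv2.circle(mask, (xc, yc), radius, 255, -1)        # filled circle marks the region to keep
    keep = cv2.bitwise_and(img, img, mask=mask)
    fill_img = np.full_like(img, avg)
    fill = cv2.bitwise_and(fill_img, fill_img, mask=255 - mask)
    return cv2.add(keep, fill), cct

# Hypothetical usage, analogous to the calls added in eye_processor.py:
frame = np.random.randint(0, 256, (240, 240), dtype=np.uint8)  # stand-in grayscale eye frame
frame, cct = circle_crop_sketch(frame, xc=120, yc=120, radius=60, cct=0)
print(frame.shape, cct)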
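
A note on the new intensity filter (illustration only, not from the patch): both blink.py and intensity_based_openness.py now check len(str(intensity)) >= 8, print 'filter, assume blink', and replace the summed pixel intensity instead of using it. For a non-negative integer sum, that digit-count test is equivalent to comparing against 10,000,000, as the sketch below shows; the name is_outlier and the 240x240 frame are assumptions made for the example.

import numpy as np

def is_outlier(intensity: int) -> bool:
    # Numerically equivalent to the patch's len(str(intensity)) >= 8 check
    # for non-negative integer sums.
    return intensity >= 10_000_000

frame = np.full((240, 240), 255, dtype=np.uint8)  # a fully washed-out frame
intensity = int(np.sum(frame))                    # 240 * 240 * 255 = 14,688,000
print(len(str(intensity)) >= 8, is_outlier(intensity))  # True True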