From 582255a698c93eabd9d601c43551b582137226fc Mon Sep 17 00:00:00 2001 From: PallasNeko <124042774+PallasNeko@users.noreply.github.com> Date: Wed, 1 Feb 2023 02:08:30 +0900 Subject: [PATCH 1/2] Cleaning Reduction of duplicate functions Appropriate function calls Use of safe cropping --- EyeTrackApp/eye_processor.py | 241 +++--- EyeTrackApp/haar_surround_feature.py | 580 ++++++-------- EyeTrackApp/hsrac.py | 1110 +++++--------------------- EyeTrackApp/img_utils.py | 12 + EyeTrackApp/ransac.py | 2 +- EyeTrackApp/time_utils.py | 171 ++++ EyeTrackApp/utils.py | 2 + 7 files changed, 747 insertions(+), 1371 deletions(-) create mode 100644 EyeTrackApp/img_utils.py create mode 100644 EyeTrackApp/time_utils.py create mode 100644 EyeTrackApp/utils.py diff --git a/EyeTrackApp/eye_processor.py b/EyeTrackApp/eye_processor.py index ac6cef5..9eb0731 100644 --- a/EyeTrackApp/eye_processor.py +++ b/EyeTrackApp/eye_processor.py @@ -19,8 +19,8 @@ @@@@@@@@@@@@@@@@@ @@@@@@@@@@@@@( -HSR By: Sean.Denka (Optimization Wizard, Contributor), Summer#2406 (Main Algorithm Engineer) -RANSAC 3D By: Summer#2406 (Main Algorithm Engineer), Pupil Labs (pye3d), Sean.Denka (Optimization) +HSR By: PallasNeko (Optimization Wizard, Contributor), Summer#2406 (Main Algorithm Engineer) +RANSAC 3D By: Summer#2406 (Main Algorithm Engineer), Pupil Labs (pye3d), PallasNeko (Optimization) BLOB By: Prohurtz#0001 (Main App Developer) Algorithm App Implimentations By: Prohurtz#0001, qdot (Inital App Creator) @@ -28,7 +28,7 @@ Additional Contributors: [Assassin], Summer404NotFound, lorow, ZanzyTHEbar Copyright (c) 2022 EyeTrackVR <3 ------------------------------------------------------------------------------------------------------ -''' +''' from operator import truth from dataclasses import dataclass @@ -46,20 +46,21 @@ import numpy as np import cv2 from enum import Enum from one_euro_filter import OneEuroFilter + if sys.platform.startswith("win"): from winsound import PlaySound, SND_FILENAME, SND_ASYNC import importlib from osc_calibrate_filter import * -from haar_surround_feature import * +from haar_surround_feature import External_Run_HSF from blob import * from ransac import * -from hsrac import * +from hsrac import External_Run_HSRACS from blink import * - from intensity_eye_open import * + class InformationOrigin(Enum): RANSAC = 1 BLOB = 2 @@ -67,7 +68,10 @@ class InformationOrigin(Enum): HSF = 4 HSRAC = 5 + bbb = 0 + + @dataclass class EyeInformation: info_type: InformationOrigin @@ -85,7 +89,7 @@ def run_once(f): if not wrapper.has_run: wrapper.has_run = True return f(*args, **kwargs) - + wrapper.has_run = False return wrapper @@ -97,33 +101,32 @@ async def delayed_setting_change(setting, value): PlaySound('Audio/compleated.wav', SND_FILENAME | SND_ASYNC) - class EyeProcessor: def __init__( - self, - config: "EyeTrackCameraConfig", - settings: "EyeTrackSettingsConfig", - cancellation_event: "threading.Event", - capture_event: "threading.Event", - capture_queue_incoming: "queue.Queue", - image_queue_outgoing: "queue.Queue", - eye_id, + self, + config: "EyeTrackCameraConfig", + settings: "EyeTrackSettingsConfig", + cancellation_event: "threading.Event", + capture_event: "threading.Event", + capture_queue_incoming: "queue.Queue", + image_queue_outgoing: "queue.Queue", + eye_id, ): self.config = config self.settings = settings - + # Cross-thread communication management self.capture_queue_incoming = capture_queue_incoming self.image_queue_outgoing = image_queue_outgoing self.cancellation_event = cancellation_event self.capture_event = 
capture_event self.eye_id = eye_id - + # Cross algo state self.lkg_projected_sphere = None self.xc = None self.yc = None - + # Image state self.previous_image = None self.current_image = None @@ -131,14 +134,14 @@ class EyeProcessor: self.current_frame_number = None self.current_fps = None self.threshold_image = None - + # Calibration Values self.xoff = 1 self.yoff = 1 # Keep large in order to recenter correctly self.calibration_frame_counter = None self.eyeoffx = 1 - + self.xmax = -69420 self.xmin = 69420 self.ymax = -69420 @@ -150,26 +153,24 @@ class EyeProcessor: self.calibration_frame_counter self.camera_model = None self.detector_3d = None - + self.camera_model = None self.detector_3d = None - + self.failed = 0 - + self.skip_blink_detect = False - - #blink + + # blink self.max_ints = [] self.max_int = 0 self.min_int = 4000000000000 - self.frames = 0 + self.frames = 0 self.blinkvalue = False self.prev_x = None self.prev_y = None - - try: min_cutoff = float(self.settings.gui_min_cutoff) # 0.0004 beta = float(self.settings.gui_speed_coefficient) # 0.9 @@ -183,7 +184,7 @@ class EyeProcessor: min_cutoff=min_cutoff, beta=beta ) - + def output_images_and_update(self, threshold_image, output_information: EyeInformation): try: image_stack = np.concatenate( @@ -196,29 +197,30 @@ class EyeProcessor: self.image_queue_outgoing.put((image_stack, output_information)) self.previous_image = self.current_image self.previous_rotation = self.config.rotation_angle - except: # If this fails it likely means that the images are not the same size for some reason. + except: # If this fails it likely means that the images are not the same size for some reason. print('\033[91m[ERROR] Size of frames to display are of unequal sizes.\033[0m') - + pass + def capture_crop_rotate_image(self): # Get our current frame try: # Get frame from capture source, crop to ROI self.current_image = self.current_image[ - int(self.config.roi_window_y): int( - self.config.roi_window_y + self.config.roi_window_h - ), - int(self.config.roi_window_x): int( - self.config.roi_window_x + self.config.roi_window_w - ), - ] - + int(self.config.roi_window_y): int( + self.config.roi_window_y + self.config.roi_window_h + ), + int(self.config.roi_window_x): int( + self.config.roi_window_x + self.config.roi_window_w + ), + ] + except: # Failure to process frame, reuse previous frame. self.current_image = self.previous_image print("\033[91m[ERROR] Frame capture issue detected.\033[0m") - + try: # Apply rotation to cropped area. For any rotation area outside of the bounds of the image, # fill with white. 
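[Note on the cropping change above: later hunks in this patch route all cropping through the new img_utils.safe_crop helper ("Use of safe cropping" in the commit message). The img_utils.py hunk itself is not part of this excerpt, so the body below is only a minimal sketch inferred from the call sites safe_crop(gray_frame, lower_x, lower_y, upper_x, upper_y); the helper name and signature are from the patch, the clamping body is an assumption.

import numpy as np

def safe_crop(img: np.ndarray, x1: int, y1: int, x2: int, y2: int) -> np.ndarray:
    # Hypothetical body: clamp the requested bounds to the frame so
    # out-of-range coordinates yield a (possibly empty) valid view
    # instead of raising, unlike the raw ROI slicing replaced above.
    h, w = img.shape[:2]
    x1, x2 = max(0, x1), min(w, max(0, x2))
    y1, y2 = max(0, y1), min(h, max(0, y2))
    return img[y1:y2, x1:x2]

An empty result is still handled downstream: the HSF code in a later hunk guards with "0 in cropped_image.shape" before displaying the crop.]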
@@ -240,96 +242,93 @@ class EyeProcessor: return True except: pass - + def BLINKM(self): self.blinkvalue = BLINK(self) - - + def HSRACM(self): - cx, cy, thresh, gray_frame, uncropframe = External_Run_HSRACS.HSRACS(self) + # temporary implementation + cx, cy, thresh, gray_frame, uncropframe = External_Run_HSRACS().run(self.current_image_gray) self.current_image_gray = gray_frame if self.prev_x == None: self.prev_x = cx self.prev_y = cy - #print(self.prev_x, self.prev_y, cx, cy) + # print(self.prev_x, self.prev_y, cx, cy) # #filter values with too much movement - # if (cx - self.prev_x) <= 45 and (cy - self.prev_y) <= 45 : - # self.prev_x = cx - # self.prev_y = cy + # if (cx - self.prev_x) <= 45 and (cy - self.prev_y) <= 45 : + # self.prev_x = cx + # self.prev_y = cy eyeopen = intense(cx, cy, uncropframe) out_x, out_y = cal_osc(self, cx, cy) - + if cx == 0: - self.output_images_and_update(thresh, EyeInformation(InformationOrigin.HSRAC, out_x, out_y, 0, eyeopen)) #update app + self.output_images_and_update(thresh, EyeInformation(InformationOrigin.HSRAC, out_x, out_y, 0, eyeopen)) # update app else: - + self.output_images_and_update(thresh, EyeInformation(InformationOrigin.HSRAC, out_x, out_y, 0, eyeopen)) - # else: - # print("EYE MOVED TOO FAST") - # self.output_images_and_update(thresh, EyeInformation(InformationOrigin.HSRAC, 0, 0, 0, False)) + + # else: + # print("EYE MOVED TOO FAST") + # self.output_images_and_update(thresh, EyeInformation(InformationOrigin.HSRAC, 0, 0, 0, False)) def HSFM(self): - cx, cy, frame = External_Run_HSF.HSFS(self) + # temporary implementation + cx, cy, frame = External_Run_HSF().run(self.current_image_gray) eyeopen = intense(cx, cy, self.current_image_gray) out_x, out_y = cal_osc(self, cx, cy) if cx == 0: - self.output_images_and_update(frame, EyeInformation(InformationOrigin.HSF, out_x, out_y, 0, eyeopen)) #update app + self.output_images_and_update(frame, EyeInformation(InformationOrigin.HSF, out_x, out_y, 0, eyeopen)) # update app else: self.output_images_and_update(frame, EyeInformation(InformationOrigin.HSF, out_x, out_y, 0, eyeopen)) - + def RANSAC3DM(self): cx, cy, thresh = RANSAC3D(self) eyeopen = intense(cx, cy, self.current_image_gray) out_x, out_y = cal_osc(self, cx, cy) if cx == 0: - self.output_images_and_update(thresh, EyeInformation(InformationOrigin.RANSAC, out_x, out_y, 0, eyeopen)) #update app + self.output_images_and_update(thresh, EyeInformation(InformationOrigin.RANSAC, out_x, out_y, 0, eyeopen)) # update app else: self.output_images_and_update(thresh, EyeInformation(InformationOrigin.RANSAC, out_x, out_y, 0, eyeopen)) - + def BLOBM(self): cx, cy, thresh = BLOB(self) eyeopen = intense(cx, cy, self.current_image_gray) out_x, out_y = cal_osc(self, cx, cy) if cx == 0: - self.output_images_and_update(thresh, EyeInformation(InformationOrigin.HSRAC, out_x, out_y, 0, eyeopen)) #update app + self.output_images_and_update(thresh, EyeInformation(InformationOrigin.HSRAC, out_x, out_y, 0, eyeopen)) # update app else: self.output_images_and_update(thresh, EyeInformation(InformationOrigin.HSRAC, out_x, out_y, 0, eyeopen)) + + def ALGOSELECT(self): - - - def ALGOSELECT(self): - - if self.failed == 0 and self.firstalgo != None: + if self.failed == 0 and self.firstalgo != None: self.firstalgo() else: self.failed = self.failed + 1 - - if self.failed == 1 and self.secondalgo != None: #send the tracking algos previous fail number, in algo if we pass set to 0, if fail, + 1 + + if self.failed == 1 and self.secondalgo != None: # send the tracking algos previous 
fail number, in algo if we pass set to 0, if fail, + 1 self.secondalgo() else: self.failed = self.failed + 1 - + if self.failed == 2 and self.thirdalgo != None: self.thirdalgo() else: self.failed = self.failed + 1 - + if self.failed == 3 and self.fourthalgo != None: self.fourthalgo() else: - self.failed = 0 # we have reached last possible algo and it is disabled, move to first algo - - - - + self.failed = 0 # we have reached last possible algo and it is disabled, move to first algo + def run(self): - + self.firstalgo = None self.secondalgo = None self.thirdalgo = None self.fourthalgo = None - #set algo priorities - - if self.settings.gui_HSF and self.settings.gui_HSFP == 1: #I feel like this is super innefficient though it only runs at startup and no solution is coming to me atm + # set algo priorities + + if self.settings.gui_HSF and self.settings.gui_HSFP == 1: # I feel like this is super innefficient though it only runs at startup and no solution is coming to me atm self.firstalgo = self.HSFM elif self.settings.gui_HSF and self.settings.gui_HSFP == 2: self.secondalgo = self.HSFM @@ -337,7 +336,7 @@ class EyeProcessor: self.thirdalgo = self.HSFM elif self.settings.gui_HSF and self.settings.gui_HSFP == 4: self.fourthalgo = self.HSFM - + if self.settings.gui_RANSAC3D and self.settings.gui_RANSAC3DP == 1: self.firstalgo = self.RANSAC3DM elif self.settings.gui_RANSAC3D and self.settings.gui_RANSAC3DP == 2: @@ -346,7 +345,7 @@ class EyeProcessor: self.thirdalgo = self.RANSAC3DM elif self.settings.gui_RANSAC3D and self.settings.gui_RANSAC3DP == 4: self.fourthalgo = self.RANSAC3DM - + if self.settings.gui_HSRAC == True and self.settings.gui_HSRACP == 1: self.firstalgo = self.HSRACM elif self.settings.gui_HSRAC and self.settings.gui_HSRACP == 2: @@ -355,7 +354,7 @@ class EyeProcessor: self.thirdalgo = self.HSRACM elif self.settings.gui_HSRAC and self.settings.gui_HSRACP == 4: self.fourthalgo = self.HSRACM - + if self.settings.gui_BLOB and self.settings.gui_BLOBP == 1: self.firstalgo = self.BLOBM elif self.settings.gui_BLOB and self.settings.gui_BLOBP == 2: @@ -364,31 +363,29 @@ class EyeProcessor: self.thirdalgo = self.BLOBM elif self.settings.gui_BLOB and self.settings.gui_BLOBP == 4: self.fourthalgo = self.BLOBM - + f = True while True: - # f = True - # Check to make sure we haven't been requested to close + # f = True + # Check to make sure we haven't been requested to close if self.cancellation_event.is_set(): print("\033[94m[INFO] Exiting Tracking thread\033[0m") return - + if self.config.roi_window_w <= 0 or self.config.roi_window_h <= 0: # At this point, we're waiting for the user to set up the ROI window in the GUI. # Sleep a bit while we wait. 
if self.cancellation_event.wait(0.1): return continue - - # If our ROI configuration has changed, reset our model and detector if (self.camera_model is None - or self.detector_3d is None - or self.camera_model.resolution != ( - self.config.roi_window_w, - self.config.roi_window_h, - ) + or self.detector_3d is None + or self.camera_model.resolution != ( + self.config.roi_window_w, + self.config.roi_window_h, + ) ): self.camera_model = CameraModel( focal_length=self.config.focal_length, @@ -397,7 +394,7 @@ class EyeProcessor: self.detector_3d = Detector3D( camera=self.camera_model, long_term_mode=DetectorMode.blocking ) - + try: if self.capture_queue_incoming.empty(): self.capture_event.set() @@ -413,41 +410,33 @@ class EyeProcessor: if not self.capture_crop_rotate_image(): continue - self.current_image_gray = cv2.cvtColor( - self.current_image, cv2.COLOR_BGR2GRAY + self.current_image, cv2.COLOR_BGR2GRAY ) - self.current_image_gray_clean = self.current_image_gray.copy() #copy this frame to have a clean image for blink algo - # print(self.settings.gui_RANSAC3D) - - # BLINK(self) - - # cx, cy, thresh = HSRAC(self) - # out_x, out_y = cal_osc(self, cx, cy) - # if cx == 0: - # self.output_images_and_update(thresh, EyeInformation(InformationOrigin.HSRAC, out_x, out_y, 0, True)) #update app - # else: - # self.output_images_and_update(thresh, EyeInformation(InformationOrigin.HSRAC, out_x, out_y, 0, self.blinkvalue)) + self.current_image_gray_clean = self.current_image_gray.copy() # copy this frame to have a clean image for blink algo + # print(self.settings.gui_RANSAC3D) - - # cx, cy, thresh = RANSAC3D(self) - # out_x, out_y = cal_osc(self, cx, cy) - # self.output_images_and_update(thresh, EyeInformation(InformationOrigin.RANSAC, out_x, out_y, 0, False)) #update app - - - # cx, cy, larger_threshold = BLOB(self) - # out_x, out_y = cal_osc(self, cx, cy) - # self.output_images_and_update(larger_threshold, EyeInformation(InformationOrigin.BLOB, out_x, out_y, 0, False)) #update app - - #center_x, center_y, frame = HSF(self) #run algo - #out_x, out_y = cal_osc(self, center_x, center_y) #filter and calibrate - #self.output_images_and_update(frame, EyeInformation(InformationOrigin.HSF, out_x, out_y, 0, False)) #update app + # BLINK(self) - self.ALGOSELECT() #run our algos in priority order set in settings + # cx, cy, thresh = HSRAC(self) + # out_x, out_y = cal_osc(self, cx, cy) + # if cx == 0: + # self.output_images_and_update(thresh, EyeInformation(InformationOrigin.HSRAC, out_x, out_y, 0, True)) #update app + # else: + # self.output_images_and_update(thresh, EyeInformation(InformationOrigin.HSRAC, out_x, out_y, 0, self.blinkvalue)) + + # cx, cy, thresh = RANSAC3D(self) + # out_x, out_y = cal_osc(self, cx, cy) + # self.output_images_and_update(thresh, EyeInformation(InformationOrigin.RANSAC, out_x, out_y, 0, False)) #update app + + # cx, cy, larger_threshold = BLOB(self) + # out_x, out_y = cal_osc(self, cx, cy) + # self.output_images_and_update(larger_threshold, EyeInformation(InformationOrigin.BLOB, out_x, out_y, 0, False)) #update app + + # center_x, center_y, frame = HSF(self) #run algo + # out_x, out_y = cal_osc(self, center_x, center_y) #filter and calibrate + # self.output_images_and_update(frame, EyeInformation(InformationOrigin.HSF, out_x, out_y, 0, False)) #update app + + self.ALGOSELECT() # run our algos in priority order set in settings self.BLINKM() - - - - - diff --git a/EyeTrackApp/haar_surround_feature.py b/EyeTrackApp/haar_surround_feature.py index bbbe36b..e07496b 100644 --- 
a/EyeTrackApp/haar_surround_feature.py +++ b/EyeTrackApp/haar_surround_feature.py @@ -7,6 +7,8 @@ from functools import lru_cache import cv2 import numpy as np +from EyeTrackApp.img_utils import safe_crop + # from line_profiler_pycharm import profile video_path = "ezgif.com-gif-maker.avi" @@ -27,180 +29,6 @@ blink_init_frames = 60 * 3 # 60fps*3sec,Number of blink statistical frames # step==(x,y) default_step = (5, 5) # bigger the steps,lower the processing time! ofc acc also takes an impact -""" -Attention. -If using cv2.filter2D in this code, be careful with the kernel -https://stackoverflow.com/questions/39457468/convolution-without-any-padding-opencv-python -""" - - -def TimeitWrapper(*args, **kwargs): - """ - This decorator @TimeitWrapper() prints the function name and execution time in seconds. - :param args: - :param kwargs: - :return: - """ - - def decorator(function): - @functools.wraps(function) - def wrapper(*args, **kwargs): - start = timeit.default_timer() - results = function(*args, **kwargs) - end = timeit.default_timer() - print('{} execution time: {:.10f} s'.format(function.__name__, end - start)) - return results - - return wrapper - - return decorator - - -class TimeitResult(object): - """ - from https://github.com/ipython/ipython/blob/339c0d510a1f3cb2158dd8c6e7f4ac89aa4c89d8/IPython/core/magics/execution.py#L55 - - Object returned by the timeit magic with info about the run. - Contains the following attributes : - loops: (int) number of loops done per measurement - repeat: (int) number of times the measurement has been repeated - best: (float) best execution time / number - all_runs: (list of float) execution time of each run (in s) - """ - - def __init__(self, loops, repeat, best, worst, all_runs, precision): - self.loops = loops - self.repeat = repeat - self.best = best - self.worst = worst - self.all_runs = all_runs - self._precision = precision - self.timings = [dt / self.loops for dt in all_runs] - - @property - def average(self): - return math.fsum(self.timings) / len(self.timings) - - @property - def stdev(self): - mean = self.average - return (math.fsum([(x - mean) ** 2 for x in self.timings]) / len(self.timings)) ** 0.5 - - def __str__(self): - pm = '+-' - if hasattr(sys.stdout, 'encoding') and sys.stdout.encoding: - try: - u'\xb1'.encode(sys.stdout.encoding) - pm = u'\xb1' - except: - pass - return "min:{best} max:{worst} mean:{mean} {pm} {std} per loop (mean {pm} std. dev. 
of {runs} run{run_plural}, {loops:,} loop{loop_plural} each)".format( - pm=pm, - runs=self.repeat, - loops=self.loops, - loop_plural="" if self.loops == 1 else "s", - run_plural="" if self.repeat == 1 else "s", - mean=format_time(self.average, self._precision), - std=format_time(self.stdev, self._precision), - best=format_time(self.best, self._precision), - worst=format_time(self.worst, self._precision), - ) - - def _repr_pretty_(self, p, cycle): - unic = self.__str__() - p.text(u'') - - -class FPSResult(object): - """ - base https://github.com/ipython/ipython/blob/339c0d510a1f3cb2158dd8c6e7f4ac89aa4c89d8/IPython/core/magics/execution.py#L55 - """ - - def __init__(self, loops, repeat, best, worst, all_runs, precision): - self.loops = loops - self.repeat = repeat - self.best = 1 / best - self.worst = 1 / worst - self.all_runs = all_runs - self._precision = precision - self.fps = [1 / dt for dt in all_runs] - self.unit = "fps" - - @property - def average(self): - return math.fsum(self.fps) / len(self.fps) - - @property - def stdev(self): - mean = self.average - return (math.fsum([(x - mean) ** 2 for x in self.fps]) / len(self.fps)) ** 0.5 - - def __str__(self): - pm = '+-' - if hasattr(sys.stdout, 'encoding') and sys.stdout.encoding: - try: - u'\xb1'.encode(sys.stdout.encoding) - pm = u'\xb1' - except: - pass - return "min:{best} max:{worst} mean:{mean} {pm} {std} per loop (mean {pm} std. dev. of {runs} run{run_plural}, {loops:,} loop{loop_plural} each)".format( - pm=pm, - runs=self.repeat, - loops=self.loops, - loop_plural="" if self.loops == 1 else "s", - run_plural="" if self.repeat == 1 else "s", - mean="%.*g%s" % (self._precision, self.average, self.unit), - std="%.*g%s" % (self._precision, self.stdev, self.unit), - best="%.*g%s" % (self._precision, self.best, self.unit), - worst="%.*g%s" % (self._precision, self.worst, self.unit), - ) - - def _repr_pretty_(self, p, cycle): - unic = self.__str__() - p.text(u'') - - -def format_time(timespan, precision=3): - """ - https://github.com/ipython/ipython/blob/339c0d510a1f3cb2158dd8c6e7f4ac89aa4c89d8/IPython/core/magics/execution.py#L1473 - Formats the timespan in a human readable form - """ - - if timespan >= 60.0: - # we have more than a minute, format that in a human readable form - # Idea from http://snipplr.com/view/5713/ - parts = [("d", 60 * 60 * 24), ("h", 60 * 60), ("min", 60), ("s", 1)] - time = [] - leftover = timespan - for suffix, length in parts: - value = int(leftover / length) - if value > 0: - leftover = leftover % length - time.append(u'%s%s' % (str(value), suffix)) - if leftover < 1: - break - return " ".join(time) - - # Unfortunately the unicode 'micro' symbol can cause problems in - # certain terminals. - # See bug: https://bugs.launchpad.net/ipython/+bug/348466 - # Try to prevent crashes by being more secure than it needs to - # E.g. eclipse is able to print a µ, but has no sys.stdout.encoding set. - units = [u"s", u"ms", u'us', "ns"] # the save value - if hasattr(sys.stdout, 'encoding') and sys.stdout.encoding: - try: - u'\xb5'.encode(sys.stdout.encoding) - units = [u"s", u"ms", u'\xb5s', "ns"] - except: - pass - scaling = [1, 1e3, 1e6, 1e9] - - if timespan > 0.0: - order = min(-int(math.floor(math.log10(timespan)) // 3), 3) - else: - order = 3 - return u"%.*g %s" % (precision, timespan * scaling[order], units[order]) - class CvParameters: # It may be a little slower because a dict named "self" is read for each function call. 
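[For reference on the CvParameters class kept by the next hunk: assigning .radius also recomputes .pad (2 * radius) and rebuilds the HaarSurroundFeature kernel through the hsf setter, so the auto-radius search only has to assign one attribute per iteration. A short usage sketch, using only names defined in this file:

# cvparam is constructed once in HSF_cls.__init__ with the module-level
# default_radius and default_step, then retuned per frame:
cvparam = CvParameters(default_radius, default_step)
cvparam.radius = 20                       # pad becomes 40; kernel rebuilt for r=20
radius, pad, step, hsf = cvparam.get_rpsh()  # unpack everything in one call
]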
@@ -211,43 +39,42 @@ class CvParameters: # self.prev_step=step self._step = step self._hsf = HaarSurroundFeature(radius) - + def get_rpsh(self): return self._radius, self.pad, self._step, self._hsf # Essentially, the following would be preferable, but it would take twice as long to call. # return self.radius, self.pad, self.step, self.hsf - + @property def radius(self): return self._radius - + @radius.setter def radius(self, now_radius): # self.prev_radius=self._radius self._radius = now_radius self.pad = 2 * now_radius self.hsf = now_radius - + @property def step(self): return self._step - + @step.setter def step(self, now_step): # self.prev_step=self.step self._step = now_step - + @property def hsf(self): return self._hsf - + @hsf.setter def hsf(self, now_radius): self._hsf = HaarSurroundFeature(now_radius) class HaarSurroundFeature: - def __init__(self, r_inner, r_outer=None, val=None): if r_outer is None: r_outer = r_inner * 3 @@ -255,30 +82,33 @@ class HaarSurroundFeature: r_inner2 = r_inner * r_inner count_inner = r_inner2 count_outer = r_outer * r_outer - r_inner2 - + if val is None: val_inner = 1.0 / r_inner2 val_outer = -val_inner * count_inner / count_outer - + else: val_inner = val[0] val_outer = val[1] - + self.val_in = np.array(val_inner, dtype=np.float64) self.val_out = np.array(val_outer, dtype=np.float64) self.r_in = r_inner self.r_out = r_outer - + def get_kernel(self): # Defined here, but not yet used? # Create a kernel filled with the value of self.val_out - kernel = np.ones(shape=(2 * self.r_out - 1, 2 * self.r_out - 1), dtype=np.float64) * self.val_out - + kernel = ( + np.ones(shape=(2 * self.r_out - 1, 2 * self.r_out - 1), dtype=np.float64) + * self.val_out + ) + # Set the values of the inner area of the kernel using array slicing - start = (self.r_out - self.r_in) - end = (self.r_out + self.r_in - 1) + start = self.r_out - self.r_in + end = self.r_out + self.r_in - 1 kernel[start:end, start:end] = self.val_in - + return kernel @@ -302,10 +132,10 @@ def frameint_get_xy_step(imageshape, xysteps, pad, start_offset=None, end_offset row -= 1 col -= 1 x_step, y_step = xysteps - + # This is not beautiful. 
start_pad_x = start_pad_y = end_pad_x = end_pad_y = pad - + if start_offset is not None: start_pad_x += start_offset[0] start_pad_y += start_offset[1] @@ -314,9 +144,9 @@ def frameint_get_xy_step(imageshape, xysteps, pad, start_offset=None, end_offset end_pad_y += end_offset[1] y_np = np.arange(start_pad_y, row - end_pad_y, y_step) x_np = np.arange(start_pad_x, col - end_pad_x, x_step) - + xy_np = (x_np, y_np) - + return xy_np @@ -332,8 +162,14 @@ def get_hsf_empty_array(len_syx, frameint_x, frame_int_dtype, fcshape): p10 = np.empty(len_syx, dtype=frame_int_dtype) response_list = np.empty(len_syx, dtype=np.float64) frame_conv = np.zeros(shape=fcshape[0], dtype=np.uint8) - frame_conv_stride = frame_conv[::fcshape[1], ::fcshape[2]] - return (inner_sum, outer_sum), p_temp, (p00, p11, p01, p10), response_list, (frame_conv, frame_conv_stride) + frame_conv_stride = frame_conv[:: fcshape[1], :: fcshape[2]] + return ( + (inner_sum, outer_sum), + p_temp, + (p00, p11, p01, p10), + response_list, + (frame_conv, frame_conv_stride), + ) # @profile @@ -352,30 +188,39 @@ def conv_int(frame_int, kernel, xy_step, padding, xy_steps_list): # padding2 = 2 * padding f_shape = row - 2 * padding, col - 2 * padding r_in = kernel.r_in - + len_sx, len_sy = len(xy_steps_list[0]), len(xy_steps_list[1]) - inout_sum, p_temp, p_list, response_list, frameconvlist = get_hsf_empty_array((len_sy, len_sx), col + 1, - frame_int.dtype, (f_shape, y_step, x_step)) + inout_sum, p_temp, p_list, response_list, frameconvlist = get_hsf_empty_array( + (len_sy, len_sx), col + 1, frame_int.dtype, (f_shape, y_step, x_step) + ) inner_sum, outer_sum = inout_sum p00, p11, p01, p10 = p_list frame_conv, frame_conv_stride = frameconvlist - + y_rin_m = xy_steps_list[1] - r_in x_rin_m = xy_steps_list[0] - r_in y_rin_p = xy_steps_list[1] + r_in x_rin_p = xy_steps_list[0] + r_in # xx==(y,x),m==MINUS,p==PLUS, ex: mm==(y-,x-) - inarr_mm = frame_int[y_rin_m[0]:y_rin_m[-1] + 1:y_step, x_rin_m[0]:x_rin_m[-1] + 1:x_step] - inarr_mp = frame_int[y_rin_m[0]:y_rin_m[-1] + 1:y_step, x_rin_p[0]:x_rin_p[-1] + 1:x_step] - inarr_pm = frame_int[y_rin_p[0]:y_rin_p[-1] + 1:y_step, x_rin_m[0]:x_rin_m[-1] + 1:x_step] - inarr_pp = frame_int[y_rin_p[0]:y_rin_p[-1] + 1:y_step, x_rin_p[0]:x_rin_p[-1] + 1:x_step] - + inarr_mm = frame_int[ + y_rin_m[0] : y_rin_m[-1] + 1 : y_step, x_rin_m[0] : x_rin_m[-1] + 1 : x_step + ] + inarr_mp = frame_int[ + y_rin_m[0] : y_rin_m[-1] + 1 : y_step, x_rin_p[0] : x_rin_p[-1] + 1 : x_step + ] + inarr_pm = frame_int[ + y_rin_p[0] : y_rin_p[-1] + 1 : y_step, x_rin_m[0] : x_rin_m[-1] + 1 : x_step + ] + inarr_pp = frame_int[ + y_rin_p[0] : y_rin_p[-1] + 1 : y_step, x_rin_p[0] : x_rin_p[-1] + 1 : x_step + ] + # == inarr_mm + inarr_pp - inarr_mp - inarr_pm inner_sum[:, :] = inarr_mm inner_sum += inarr_pp inner_sum -= inarr_mp inner_sum -= inarr_pm - + # Bottleneck here, I want to make it smarter. Someone do it. 
# (y,x) # p00=max(y_ro_m,0),max(x_ro_m,0) @@ -401,37 +246,40 @@ def conv_int(frame_int, kernel, xy_step, padding, xy_steps_list): # p11=np.take(np.take(frame_int, y_ro_p, axis=0, mode="clip"), x_ro_p, axis=1, mode="clip") # p01=np.take(np.take(frame_int, y_ro_m, axis=0, mode="clip"), x_ro_p, axis=1, mode="clip") # p10=np.take(np.take(frame_int, y_ro_p, axis=0, mode="clip"), x_ro_m, axis=1, mode="clip") - + outer_sum[:, :] = p00 + p11 - p01 - p10 - inner_sum - + np.multiply(kernel.val_in, inner_sum, dtype=np.float64, out=response_list) response_list += kernel.val_out * outer_sum - + # min_response, max_val, min_loc, max_loc = cv2.minMaxLoc(response_list) min_response, _, min_loc, _ = cv2.minMaxLoc(response_list) - - center = ((xy_steps_list[0][min_loc[0]] - padding), (xy_steps_list[1][min_loc[1]] - padding)) - + + center = ( + (xy_steps_list[0][min_loc[0]] - padding), + (xy_steps_list[1][min_loc[1]] - padding), + ) + frame_conv_stride[:, :] = response_list # or # frame_conv_stride[:, :] = response_list.astype(np.uint8) - + return frame_conv, min_response, center -class Auto_Radius_Calc(object): +class AutoRadiusCalc(object): def __init__(self): self.response_list = [] self.radius_cand_list = [] self.adj_comp_flag = False - + self.radius_middle_index = None - + self.left_item = None self.right_item = None self.left_index = None self.right_index = None - + def get_radius(self): prev_res_len = len(self.response_list) # adjustment of radius @@ -451,21 +299,35 @@ class Auto_Radius_Calc(object): else: self.left_item = self.response_list[0] self.right_item = self.response_list[2] - self.radius_cand_list = [i for i in range(self.left_item[0], self.right_item[0] + auto_radius_step, auto_radius_step)] + self.radius_cand_list = [ + i + for i in range( + self.left_item[0], + self.right_item[0] + auto_radius_step, + auto_radius_step, + ) + ] self.left_index = 0 self.right_index = len(self.radius_cand_list) - 1 self.radius_middle_index = (self.left_index + self.right_index) // 2 self.adj_comp_flag = False return self.radius_cand_list[self.radius_middle_index] else: - if self.left_index <= self.right_index and self.left_index != self.radius_middle_index: - if (self.left_item[1] + self.response_list[-1][1]) < (self.right_item[1] + self.response_list[-1][1]): + if ( + self.left_index <= self.right_index + and self.left_index != self.radius_middle_index + ): + if (self.left_item[1] + self.response_list[-1][1]) < ( + self.right_item[1] + self.response_list[-1][1] + ): self.right_item = self.response_list[-1] self.right_index = self.radius_middle_index - 1 self.radius_middle_index = (self.left_index + self.right_index) // 2 self.adj_comp_flag = False return self.radius_cand_list[self.radius_middle_index] - if (self.left_item[1] + self.response_list[-1][1]) > (self.right_item[1] + self.response_list[-1][1]): + if (self.left_item[1] + self.response_list[-1][1]) > ( + self.right_item[1] + self.response_list[-1][1] + ): self.left_item = self.response_list[-1] self.left_index = self.radius_middle_index + 1 self.radius_middle_index = (self.left_index + self.right_index) // 2 @@ -473,13 +335,13 @@ class Auto_Radius_Calc(object): return self.radius_cand_list[self.radius_middle_index] self.adj_comp_flag = True return self.radius_cand_list[self.radius_middle_index] - + def get_radius_base(self): """ Use it when the new version doesn't work well. 
:return: """ - + prev_res_len = len(self.response_list) # adjustment of radius if prev_res_len == 1: @@ -499,11 +361,21 @@ class Auto_Radius_Calc(object): self.adj_comp_flag = True return default_radius elif sort_res[0] == auto_radius_range[0]: - self.radius_cand_list = [i for i in range(auto_radius_range[0], default_radius, auto_radius_step)][1:] + self.radius_cand_list = [ + i + for i in range( + auto_radius_range[0], default_radius, auto_radius_step + ) + ][1:] self.adj_comp_flag = False return self.radius_cand_list.pop() else: - self.radius_cand_list = [i for i in range(default_radius, auto_radius_range[1], auto_radius_step)][1:] + self.radius_cand_list = [ + i + for i in range( + default_radius, auto_radius_range[1], auto_radius_step + ) + ][1:] self.adj_comp_flag = False return self.radius_cand_list.pop() else: @@ -516,19 +388,19 @@ class Auto_Radius_Calc(object): else: self.adj_comp_flag = False return self.radius_cand_list.pop() - + def add_response(self, radius, response): self.response_list.append((radius, response)) return None -class Blink_Detector(object): +class BlinkDetector(object): def __init__(self): self.response_list = [] self.response_max = None self.enable_detect_flg = False self.quartile_1 = None - + def calc_thresh(self): # Calculate response_max by computing interquartile range, IQR # self.response_listo = np.array(self.response_listo) @@ -537,28 +409,28 @@ class Blink_Detector(object): # quartile_1, quartile_3 = np.percentile(self.response_listo, [25, 75]) # iqr = quartile_3 - quartile_1 # self.response_maxo = quartile_3 + (iqr * 1.5) - + # quartile_1, quartile_3 = np.percentile(self.response_list, [25, 75]) # or quartile_1, quartile_3 = np.percentile(np.array(self.response_list), [25, 75]) self.quartile_1 = quartile_1 iqr = quartile_3 - quartile_1 # response_min = quartile_1 - (iqr * 1.5) - + self.response_max = float(quartile_3 + (iqr * 1.5)) # or # self.response_max = quartile_3 + (iqr * 1.5) - + self.enable_detect_flg = True return None - + def detect(self, now_response): return now_response > self.response_max - + def add_response(self, response): self.response_list.append(response) return None - + def response_len(self): return len(self.response_list) @@ -569,7 +441,7 @@ class CenterCorrection(object): kernel_size = 7 # 3 or 5 or 7 self.hist_thr = float(4) # 4% self.center_q1_radius = 20 - + self.setup_comp = False self.quartile_1 = None self.radius = None @@ -577,12 +449,14 @@ class CenterCorrection(object): self.frame_mask = None self.frame_bin = None self.frame_final = None - self.morph_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (kernel_size, kernel_size)) + self.morph_kernel = cv2.getStructuringElement( + cv2.MORPH_RECT, (kernel_size, kernel_size) + ) self.morph_kernel2 = np.ones((3, 3)) self.hist_index = np.arange(256) self.hist = np.empty((256, 1)) self.hist_norm = np.empty((256, 1)) - + def init_array(self, gray_shape, quartile_1, radius): self.frame_shape = gray_shape self.frame_mask = np.empty(gray_shape, dtype=np.uint8) @@ -591,34 +465,44 @@ class CenterCorrection(object): self.quartile_1 = quartile_1 self.radius = radius self.setup_comp = True - + # def reset_array(self): # self.frame_mask.fill(0) - + def correction(self, gray_frame, orig_x, orig_y): center_x, center_y = orig_x, orig_y self.frame_mask.fill(0) - - # cv2.circle(self.frame_mask, center=(center_x, center_y), radius=int(self.radius * 2), color=255, thickness=-1) - + + # cv2.circle(self.frame_mask, center=(center_x, center_y), radius=int(self.radius * 2), color=255, 
thickness=-1) + # bottleneck cv2.calcHist([gray_frame], [0], None, [256], [0, 256], hist=self.hist) - + cv2.normalize(self.hist, self.hist_norm, alpha=100.0, norm_type=cv2.NORM_L1) hist_per = self.hist_norm.cumsum() hist_index_list = self.hist_index[hist_per >= self.hist_thr] - frame_thr = hist_index_list[0] if len(hist_index_list) else np.percentile(cv2.bitwise_or(255 - self.frame_mask, gray_frame), 4) - + frame_thr = ( + hist_index_list[0] + if len(hist_index_list) + else np.percentile(cv2.bitwise_or(255 - self.frame_mask, gray_frame), 4) + ) + # bottleneck - self.frame_bin = cv2.threshold(gray_frame, frame_thr, 1, cv2.THRESH_BINARY_INV)[1] + self.frame_bin = cv2.threshold(gray_frame, frame_thr, 1, cv2.THRESH_BINARY_INV)[ + 1 + ] cropped_x, cropped_y, cropped_w, cropped_h = cv2.boundingRect(self.frame_bin) - + self.frame_final = cv2.bitwise_and(self.frame_bin, self.frame_mask) - + # bottleneck - self.frame_final = cv2.morphologyEx(self.frame_final, cv2.MORPH_CLOSE, self.morph_kernel) - self.frame_final = cv2.morphologyEx(self.frame_final, cv2.MORPH_OPEN, self.morph_kernel) - + self.frame_final = cv2.morphologyEx( + self.frame_final, cv2.MORPH_CLOSE, self.morph_kernel + ) + self.frame_final = cv2.morphologyEx( + self.frame_final, cv2.MORPH_OPEN, self.morph_kernel + ) + if (cropped_h, cropped_w) == self.frame_shape: # Not detected. base_x, base_y = center_x, center_y @@ -627,36 +511,54 @@ class CenterCorrection(object): base_y = cropped_y + cropped_h // 2 if self.frame_final[base_y, base_x] != 1: if self.frame_final[center_y, center_x] != 1: - self.frame_final = cv2.morphologyEx(self.frame_final, cv2.MORPH_DILATE, self.morph_kernel2, iterations=3) + self.frame_final = cv2.morphologyEx( + self.frame_final, + cv2.MORPH_DILATE, + self.morph_kernel2, + iterations=3, + ) else: base_x, base_y = center_x, center_y - - contours, _ = cv2.findContours(self.frame_final, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE) + + contours, _ = cv2.findContours( + self.frame_final, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE + ) contours_box = [cv2.boundingRect(cnt) for cnt in contours] contours_dist = np.array( - [abs(base_x - (cnt_x + cnt_w / 2)) + abs(base_y - (cnt_y + cnt_h / 2)) for cnt_x, cnt_y, cnt_w, cnt_h in contours_box]) - + [ + abs(base_x - (cnt_x + cnt_w / 2)) + abs(base_y - (cnt_y + cnt_h / 2)) + for cnt_x, cnt_y, cnt_w, cnt_h in contours_box + ] + ) + if len(contours_box): - cropped_x2, cropped_y2, cropped_w2, cropped_h2 = contours_box[contours_dist.argmin()] + cropped_x2, cropped_y2, cropped_w2, cropped_h2 = contours_box[ + contours_dist.argmin() + ] x = cropped_x2 + cropped_w2 // 2 y = cropped_y2 + cropped_h2 // 2 else: x = center_x y = center_y - + # if imshow_enable: # cv2.circle(frame, (orig_x, orig_y), 10, (255, 0, 0), -1) # cv2.circle(frame, (x, y), 7, (0, 0, 255), -1) - + # # out_x = center_x if abs(x - center_x) > radius else x # out_y = center_y if abs(y - center_y) > radius else y out_x, out_y = orig_x, orig_y - if gray_frame[int(max(y - 5, 0)):int(min(y + 5, self.frame_shape[0])), - int(max(x - 5, 0)):int(min(x + 5, self.frame_shape[1]))].min() < self.quartile_1: + if ( + gray_frame[ + int(max(y - 5, 0)) : int(min(y + 5, self.frame_shape[0])), + int(max(x - 5, 0)) : int(min(x + 5, self.frame_shape[1])), + ].min() + < self.quartile_1 + ): out_x = x out_y = y - + # if imshow_enable: # cv2.circle(frame, (out_x, out_y), 5, (0, 255, 0), -1) # @@ -665,29 +567,36 @@ class CenterCorrection(object): return out_x, out_y -class HSRAC_cls(object): +# temporary name +class HSF_cls(object): def 
__init__(self): # I'd like to take into account things like print, end_time - start_time processing time, etc., but it's too much trouble. - + # For measuring total processing time - + self.main_start_time = timeit.default_timer() - + self.rng = np.random.default_rng() self.cvparam = CvParameters(default_radius, default_step) - + self.cv_modeo = ["first_frame", "radius_adjust", "blink_adjust", "normal"] self.now_modeo = self.cv_modeo[0] - - self.auto_radius_calc = Auto_Radius_Calc() - self.blink_detector = Blink_Detector() - self.center_q1 = Blink_Detector() + + self.auto_radius_calc = AutoRadiusCalc() + self.blink_detector = BlinkDetector() + self.center_q1 = BlinkDetector() self.center_correct = CenterCorrection() - + self.cap = None - - self.timedict = {"to_gray": [], "int_img": [], "conv_int": [], "crop": [], "total_cv": []} - + + self.timedict = { + "to_gray": [], + "int_img": [], + "conv_int": [], + "crop": [], + "total_cv": [], + } + def open_video(self, video_path): # Temporary implementation to run cap = cv2.VideoCapture(video_path) @@ -695,7 +604,7 @@ class HSRAC_cls(object): raise IOError("Error opening video stream or file") self.cap = cap return True - + def read_frame(self): # Temporary implementation to run if not self.cap.isOpened(): @@ -706,47 +615,53 @@ class HSRAC_cls(object): self.current_image_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) return True return False - + def single_run(self): # Temporary implementation to run - + ## default_radius = 14 - + frame = self.current_image_gray if self.now_modeo == self.cv_modeo[1]: # adjustment of radius - + # debug print # if calc_print_enable: # temp_radius = self.auto_radius_calc.get_radius() # print('Now radius:', temp_radius) # self.cvparam.radius = temp_radius - + self.cvparam.radius = self.auto_radius_calc.get_radius() if self.auto_radius_calc.adj_comp_flag: - self.now_modeo = self.cv_modeo[2] if not skip_blink_detect else self.cv_modeo[3] - + self.now_modeo = ( + self.cv_modeo[2] if not skip_blink_detect else self.cv_modeo[3] + ) + radius, pad, step, hsf = self.cvparam.get_rpsh() - + # For measuring processing time of image processing cv_start_time = timeit.default_timer() - + gray_frame = frame self.timedict["to_gray"].append(timeit.default_timer() - cv_start_time) - + # Calculate the integral image of the frame int_start_time = timeit.default_timer() # BORDER_CONSTANT is faster than BORDER_REPLICATE There seems to be almost no negative impact when BORDER_CONSTANT is used. 
- frame_pad = cv2.copyMakeBorder(gray_frame, pad, pad, pad, pad, cv2.BORDER_CONSTANT) + frame_pad = cv2.copyMakeBorder( + gray_frame, pad, pad, pad, pad, cv2.BORDER_CONSTANT + ) frame_int = cv2.integral(frame_pad) self.timedict["int_img"].append(timeit.default_timer() - int_start_time) - + # Convolve the feature with the integral image conv_int_start_time = timeit.default_timer() - xy_step = frameint_get_xy_step(frame_int.shape, step, pad, start_offset=None, end_offset=None) + xy_step = frameint_get_xy_step( + frame_int.shape, step, pad, start_offset=None, end_offset=None + ) frame_conv, response, center_xy = conv_int(frame_int, hsf, step, pad, xy_step) self.timedict["conv_int"].append(timeit.default_timer() - conv_int_start_time) - + crop_start_time = timeit.default_timer() # Define the center point and radius center_x, center_y = center_xy @@ -754,10 +669,10 @@ class HSRAC_cls(object): lower_x = center_x - radius upper_y = center_y + radius lower_y = center_y - radius - + # Crop the image using the calculated bounds - cropped_image = gray_frame[lower_y:upper_y, lower_x:upper_x] - + cropped_image = safe_crop(gray_frame, lower_x, lower_y, upper_x, upper_y) + if self.now_modeo == self.cv_modeo[0] or self.now_modeo == self.cv_modeo[1]: # If mode is first_frame or radius_adjust, record current radius and response self.auto_radius_calc.add_response(radius, response) @@ -765,15 +680,19 @@ class HSRAC_cls(object): # Statistics for blink detection if self.blink_detector.response_len() < blink_init_frames: self.blink_detector.add_response(cv2.mean(cropped_image)[0]) - + upper_x = center_x + self.center_correct.center_q1_radius lower_x = center_x - self.center_correct.center_q1_radius upper_y = center_y + self.center_correct.center_q1_radius lower_y = center_y - self.center_correct.center_q1_radius - self.center_q1.add_response(cv2.mean(gray_frame[lower_y:upper_y, lower_x:upper_x])[0]) - + self.center_q1.add_response( + cv2.mean(safe_crop(gray_frame, lower_x, lower_y, upper_x, upper_y))[ + 0 + ] + ) + else: - + self.blink_detector.calc_thresh() self.center_q1.calc_thresh() self.now_modeo = self.cv_modeo[3] @@ -790,11 +709,15 @@ class HSRAC_cls(object): # blink pass else: - # pass + # pass if not self.center_correct.setup_comp: - self.center_correct.init_array(gray_frame.shape, self.center_q1.quartile_1, radius) - - center_x, center_y = self.center_correct.correction(gray_frame, center_x, center_y) + self.center_correct.init_array( + gray_frame.shape, self.center_q1.quartile_1, radius + ) + + center_x, center_y = self.center_correct.correction( + gray_frame, center_x, center_y + ) # Define the center point and radius center_xy = (center_x, center_y) upper_x = center_x + radius @@ -802,24 +725,29 @@ class HSRAC_cls(object): upper_y = center_y + radius lower_y = center_y - radius # Crop the image using the calculated bounds - cropped_image = gray_frame[lower_y:upper_y, lower_x:upper_x] - # if imshow_enable or save_video: - # cv2.circle(frame, (orig_x, orig_y), 6, (0, 0, 255), -1) - # cv2.circle(frame, (center_x, center_y), 3, (255, 0, 0), -1) + cropped_image = safe_crop( + gray_frame, lower_x, lower_y, upper_x, upper_y + ) + # if imshow_enable or save_video: + # cv2.circle(frame, (orig_x, orig_y), 6, (0, 0, 255), -1) + # cv2.circle(frame, (center_x, center_y), 3, (255, 0, 0), -1) # If you want to update response_max. 
it may be more cost-effective to rewrite response_list in the following way # https://stackoverflow.com/questions/42771110/fastest-way-to-left-cycle-a-numpy-array-like-pop-push-for-a-queue - + cv_end_time = timeit.default_timer() self.timedict["crop"].append(cv_end_time - crop_start_time) self.timedict["total_cv"].append(cv_end_time - cv_start_time) - - # if calc_print_enable: - # the lower the response the better the likelyhood of there being a pupil. you can adujst the radius and steps accordingly + + # if calc_print_enable: + # the lower the response the better the likelyhood of there being a pupil. you can adujst the radius and steps accordingly # print('Kernel response:', response) - # print('Pixel position:', center_xy) - + # print('Pixel position:', center_xy) + if imshow_enable: - if self.now_modeo != self.cv_modeo[0] and self.now_modeo != self.cv_modeo[1]: + if ( + self.now_modeo != self.cv_modeo[0] + and self.now_modeo != self.cv_modeo[1] + ): if 0 in cropped_image.shape: # If shape contains 0, it is not detected well. pass @@ -828,7 +756,7 @@ class HSRAC_cls(object): cv2.imshow("frame", frame) if cv2.waitKey(1) & 0xFF == ord("q"): pass - + if self.now_modeo == self.cv_modeo[0]: # Moving from first_frame to the next mode if skip_autoradius and skip_blink_detect: @@ -837,20 +765,22 @@ class HSRAC_cls(object): self.now_modeo = self.cv_modeo[2] else: self.now_modeo = self.cv_modeo[1] - + return center_x, center_y, frame -class External_Run_HSF: - hsrac = HSRAC_cls() +class External_Run_HSF(object): + def __init__(self): + self.algo = HSF_cls() - def HSFS(self): - External_Run_HSF.hsrac.current_image_gray = self.current_image_gray - center_x, center_y, frame = External_Run_HSF.hsrac.single_run() + def run(self, current_image_gray): + self.algo.current_image_gray = current_image_gray + center_x, center_y, frame = self.algo.single_run() return center_x, center_y, frame -if __name__ == '__main__': - hsrac = HSRAC_cls() - hsrac.open_video(video_path) - while hsrac.read_frame(): - _ = hsrac.single_run() \ No newline at end of file + +if __name__ == "__main__": + hsf = HSF_cls() + hsf.open_video(video_path) + while hsf.read_frame(): + _ = hsf.single_run() diff --git a/EyeTrackApp/hsrac.py b/EyeTrackApp/hsrac.py index e5826f8..5bb344d 100644 --- a/EyeTrackApp/hsrac.py +++ b/EyeTrackApp/hsrac.py @@ -1,4 +1,3 @@ -import functools import math import sys import timeit @@ -7,8 +6,23 @@ from functools import lru_cache import cv2 import numpy as np +from EyeTrackApp.haar_surround_feature import ( + AutoRadiusCalc, + BlinkDetector, + CenterCorrection, + CvParameters, + conv_int, + frameint_get_xy_step, +) +from EyeTrackApp.img_utils import safe_crop +from EyeTrackApp.utils import clamp + # from line_profiler_pycharm import profile +# RANSAC + +thresh_add = 10 + video_path = "ezgif.com-gif-maker.avi" imshow_enable = True calc_print_enable = True @@ -27,799 +41,6 @@ blink_init_frames = 60 * 3 # 60fps*3sec,Number of blink statistical frames # step==(x,y) default_step = (5, 5) # bigger the steps,lower the processing time! ofc acc also takes an impact -""" -Attention. -If using cv2.filter2D in this code, be careful with the kernel -https://stackoverflow.com/questions/39457468/convolution-without-any-padding-opencv-python -""" - - -def TimeitWrapper(*args, **kwargs): - """ - This decorator @TimeitWrapper() prints the function name and execution time in seconds. 
- :param args: - :param kwargs: - :return: - """ - - def decorator(function): - @functools.wraps(function) - def wrapper(*args, **kwargs): - start = timeit.default_timer() - results = function(*args, **kwargs) - end = timeit.default_timer() - print('{} execution time: {:.10f} s'.format(function.__name__, end - start)) - return results - - return wrapper - - return decorator - - -class TimeitResult(object): - """ - from https://github.com/ipython/ipython/blob/339c0d510a1f3cb2158dd8c6e7f4ac89aa4c89d8/IPython/core/magics/execution.py#L55 - - Object returned by the timeit magic with info about the run. - Contains the following attributes : - loops: (int) number of loops done per measurement - repeat: (int) number of times the measurement has been repeated - best: (float) best execution time / number - all_runs: (list of float) execution time of each run (in s) - """ - - def __init__(self, loops, repeat, best, worst, all_runs, precision): - self.loops = loops - self.repeat = repeat - self.best = best - self.worst = worst - self.all_runs = all_runs - self._precision = precision - self.timings = [dt / self.loops for dt in all_runs] - - @property - def average(self): - return math.fsum(self.timings) / len(self.timings) - - @property - def stdev(self): - mean = self.average - return (math.fsum([(x - mean) ** 2 for x in self.timings]) / len(self.timings)) ** 0.5 - - def __str__(self): - pm = '+-' - if hasattr(sys.stdout, 'encoding') and sys.stdout.encoding: - try: - u'\xb1'.encode(sys.stdout.encoding) - pm = u'\xb1' - except: - pass - return "min:{best} max:{worst} mean:{mean} {pm} {std} per loop (mean {pm} std. dev. of {runs} run{run_plural}, {loops:,} loop{loop_plural} each)".format( - pm=pm, - runs=self.repeat, - loops=self.loops, - loop_plural="" if self.loops == 1 else "s", - run_plural="" if self.repeat == 1 else "s", - mean=format_time(self.average, self._precision), - std=format_time(self.stdev, self._precision), - best=format_time(self.best, self._precision), - worst=format_time(self.worst, self._precision), - ) - - def _repr_pretty_(self, p, cycle): - unic = self.__str__() - p.text(u'') - - -class FPSResult(object): - """ - base https://github.com/ipython/ipython/blob/339c0d510a1f3cb2158dd8c6e7f4ac89aa4c89d8/IPython/core/magics/execution.py#L55 - """ - - def __init__(self, loops, repeat, best, worst, all_runs, precision): - self.loops = loops - self.repeat = repeat - self.best = 1 / best - self.worst = 1 / worst - self.all_runs = all_runs - self._precision = precision - self.fps = [1 / dt for dt in all_runs] - self.unit = "fps" - - @property - def average(self): - return math.fsum(self.fps) / len(self.fps) - - @property - def stdev(self): - mean = self.average - return (math.fsum([(x - mean) ** 2 for x in self.fps]) / len(self.fps)) ** 0.5 - - def __str__(self): - pm = '+-' - if hasattr(sys.stdout, 'encoding') and sys.stdout.encoding: - try: - u'\xb1'.encode(sys.stdout.encoding) - pm = u'\xb1' - except: - pass - return "min:{best} max:{worst} mean:{mean} {pm} {std} per loop (mean {pm} std. dev. 
of {runs} run{run_plural}, {loops:,} loop{loop_plural} each)".format( - pm=pm, - runs=self.repeat, - loops=self.loops, - loop_plural="" if self.loops == 1 else "s", - run_plural="" if self.repeat == 1 else "s", - mean="%.*g%s" % (self._precision, self.average, self.unit), - std="%.*g%s" % (self._precision, self.stdev, self.unit), - best="%.*g%s" % (self._precision, self.best, self.unit), - worst="%.*g%s" % (self._precision, self.worst, self.unit), - ) - - def _repr_pretty_(self, p, cycle): - unic = self.__str__() - p.text(u'') - - -def format_time(timespan, precision=3): - """ - https://github.com/ipython/ipython/blob/339c0d510a1f3cb2158dd8c6e7f4ac89aa4c89d8/IPython/core/magics/execution.py#L1473 - Formats the timespan in a human readable form - """ - - if timespan >= 60.0: - # we have more than a minute, format that in a human readable form - # Idea from http://snipplr.com/view/5713/ - parts = [("d", 60 * 60 * 24), ("h", 60 * 60), ("min", 60), ("s", 1)] - time = [] - leftover = timespan - for suffix, length in parts: - value = int(leftover / length) - if value > 0: - leftover = leftover % length - time.append(u'%s%s' % (str(value), suffix)) - if leftover < 1: - break - return " ".join(time) - - # Unfortunately the unicode 'micro' symbol can cause problems in - # certain terminals. - # See bug: https://bugs.launchpad.net/ipython/+bug/348466 - # Try to prevent crashes by being more secure than it needs to - # E.g. eclipse is able to print a µ, but has no sys.stdout.encoding set. - units = [u"s", u"ms", u'us', "ns"] # the save value - if hasattr(sys.stdout, 'encoding') and sys.stdout.encoding: - try: - u'\xb5'.encode(sys.stdout.encoding) - units = [u"s", u"ms", u'\xb5s', "ns"] - except: - pass - scaling = [1, 1e3, 1e6, 1e9] - - if timespan > 0.0: - order = min(-int(math.floor(math.log10(timespan)) // 3), 3) - else: - order = 3 - return u"%.*g %s" % (precision, timespan * scaling[order], units[order]) - - -class CvParameters: - # It may be a little slower because a dict named "self" is read for each function call. - def __init__(self, radius, step): - # self.prev_radius=radius - self._radius = radius - self.pad = 2 * radius - # self.prev_step=step - self._step = step - self._hsf = HaarSurroundFeature(radius) - - def get_rpsh(self): - return self._radius, self.pad, self._step, self._hsf - # Essentially, the following would be preferable, but it would take twice as long to call. 
- # return self.radius, self.pad, self.step, self.hsf - - @property - def radius(self): - return self._radius - - @radius.setter - def radius(self, now_radius): - # self.prev_radius=self._radius - self._radius = now_radius - self.pad = 2 * now_radius - self.hsf = now_radius - - @property - def step(self): - return self._step - - @step.setter - def step(self, now_step): - # self.prev_step=self.step - self._step = now_step - - @property - def hsf(self): - return self._hsf - - @hsf.setter - def hsf(self, now_radius): - self._hsf = HaarSurroundFeature(now_radius) - - -class HaarSurroundFeature: - - def __init__(self, r_inner, r_outer=None, val=None): - if r_outer is None: - r_outer = r_inner * 3 - # print(r_outer) - r_inner2 = r_inner * r_inner - count_inner = r_inner2 - count_outer = r_outer * r_outer - r_inner2 - - if val is None: - val_inner = 1.0 / r_inner2 - val_outer = -val_inner * count_inner / count_outer - - else: - val_inner = val[0] - val_outer = val[1] - - self.val_in = np.array(val_inner, dtype=np.float64) - self.val_out = np.array(val_outer, dtype=np.float64) - self.r_in = r_inner - self.r_out = r_outer - - def get_kernel(self): - # Defined here, but not yet used? - # Create a kernel filled with the value of self.val_out - kernel = np.ones(shape=(2 * self.r_out - 1, 2 * self.r_out - 1), dtype=np.float64) * self.val_out - - # Set the values of the inner area of the kernel using array slicing - start = (self.r_out - self.r_in) - end = (self.r_out + self.r_in - 1) - kernel[start:end, start:end] = self.val_in - - return kernel - - -def to_gray(frame): - # Faster by quitting checking if the input image is already grayscale - # Perhaps it would be faster with less overhead to call cv2.cvtColor directly instead of using this function - return cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) - - -@lru_cache(maxsize=lru_maxsize_vs) -def frameint_get_xy_step(imageshape, xysteps, pad, start_offset=None, end_offset=None): - """ - :param imageshape: (height(row),width(col)). row==y,cal==x - :param xysteps: (x,y) - :param pad: int - :param start_offset: (x,y) or None - :param end_offset: (x,y) or None - :return: xy_np:tuple(x,y) - """ - row, col = imageshape - row -= 1 - col -= 1 - x_step, y_step = xysteps - - # This is not beautiful. 
- start_pad_x = start_pad_y = end_pad_x = end_pad_y = pad - - if start_offset is not None: - start_pad_x += start_offset[0] - start_pad_y += start_offset[1] - if end_offset is not None: - end_pad_x += end_offset[0] - end_pad_y += end_offset[1] - y_np = np.arange(start_pad_y, row - end_pad_y, y_step) - x_np = np.arange(start_pad_x, col - end_pad_x, x_step) - - xy_np = (x_np, y_np) - - return xy_np - - -@lru_cache(maxsize=lru_maxsize_vvs) -def get_hsf_empty_array(len_syx, frameint_x, frame_int_dtype, fcshape): - # Function to reduce array allocation by providing an empty array first and recycling it with lru - inner_sum = np.empty(len_syx, dtype=frame_int_dtype) - outer_sum = np.empty(len_syx, dtype=frame_int_dtype) - p_temp = np.empty((len_syx[0], frameint_x), dtype=frame_int_dtype) - p00 = np.empty(len_syx, dtype=frame_int_dtype) - p11 = np.empty(len_syx, dtype=frame_int_dtype) - p01 = np.empty(len_syx, dtype=frame_int_dtype) - p10 = np.empty(len_syx, dtype=frame_int_dtype) - response_list = np.empty(len_syx, dtype=np.float64) - frame_conv = np.zeros(shape=fcshape[0], dtype=np.uint8) - frame_conv_stride = frame_conv[::fcshape[1], ::fcshape[2]] - return (inner_sum, outer_sum), p_temp, (p00, p11, p01, p10), response_list, (frame_conv, frame_conv_stride) - - -# @profile -def conv_int(frame_int, kernel, xy_step, padding, xy_steps_list): - """ - :param frame_int: - :param kernel: hsf - :param step: (x,y) - :param padding: int - :return: - """ - row, col = frame_int.shape - row -= 1 - col -= 1 - x_step, y_step = xy_step - # padding2 = 2 * padding - f_shape = row - 2 * padding, col - 2 * padding - r_in = kernel.r_in - - len_sx, len_sy = len(xy_steps_list[0]), len(xy_steps_list[1]) - inout_sum, p_temp, p_list, response_list, frameconvlist = get_hsf_empty_array((len_sy, len_sx), col + 1, - frame_int.dtype, (f_shape, y_step, x_step)) - inner_sum, outer_sum = inout_sum - p00, p11, p01, p10 = p_list - frame_conv, frame_conv_stride = frameconvlist - - y_rin_m = xy_steps_list[1] - r_in - x_rin_m = xy_steps_list[0] - r_in - y_rin_p = xy_steps_list[1] + r_in - x_rin_p = xy_steps_list[0] + r_in - # xx==(y,x),m==MINUS,p==PLUS, ex: mm==(y-,x-) - inarr_mm = frame_int[y_rin_m[0]:y_rin_m[-1] + 1:y_step, x_rin_m[0]:x_rin_m[-1] + 1:x_step] - inarr_mp = frame_int[y_rin_m[0]:y_rin_m[-1] + 1:y_step, x_rin_p[0]:x_rin_p[-1] + 1:x_step] - inarr_pm = frame_int[y_rin_p[0]:y_rin_p[-1] + 1:y_step, x_rin_m[0]:x_rin_m[-1] + 1:x_step] - inarr_pp = frame_int[y_rin_p[0]:y_rin_p[-1] + 1:y_step, x_rin_p[0]:x_rin_p[-1] + 1:x_step] - - # == inarr_mm + inarr_pp - inarr_mp - inarr_pm - inner_sum[:, :] = inarr_mm - inner_sum += inarr_pp - inner_sum -= inarr_mp - inner_sum -= inarr_pm - - # Bottleneck here, I want to make it smarter. Someone do it. 
- # (y,x) - # p00=max(y_ro_m,0),max(x_ro_m,0) - # p11=min(y_ro_p,ylim),min(x_ro_p,xlim) - # p01=max(y_ro_m,0),min(x_ro_p,xlim) - # p10=min(y_ro_p,ylim),max(x_ro_m,0) - y_ro_m = xy_steps_list[1] - kernel.r_out - x_ro_m = xy_steps_list[0] - kernel.r_out - y_ro_p = xy_steps_list[1] + kernel.r_out - x_ro_p = xy_steps_list[0] + kernel.r_out - # p00 calc - np.take(frame_int, y_ro_m, axis=0, mode="clip", out=p_temp) - np.take(p_temp, x_ro_m, axis=1, mode="clip", out=p00) - # p01 calc - np.take(p_temp, x_ro_p, axis=1, mode="clip", out=p01) - # p11 calc - np.take(frame_int, y_ro_p, axis=0, mode="clip", out=p_temp) - np.take(p_temp, x_ro_p, axis=1, mode="clip", out=p11) - # p10 calc - np.take(p_temp, x_ro_m, axis=1, mode="clip", out=p10) - # the point is this - # p00=np.take(np.take(frame_int, y_ro_m, axis=0, mode="clip"), x_ro_m, axis=1, mode="clip") - # p11=np.take(np.take(frame_int, y_ro_p, axis=0, mode="clip"), x_ro_p, axis=1, mode="clip") - # p01=np.take(np.take(frame_int, y_ro_m, axis=0, mode="clip"), x_ro_p, axis=1, mode="clip") - # p10=np.take(np.take(frame_int, y_ro_p, axis=0, mode="clip"), x_ro_m, axis=1, mode="clip") - - outer_sum[:, :] = p00 + p11 - p01 - p10 - inner_sum - - np.multiply(kernel.val_in, inner_sum, dtype=np.float64, out=response_list) - response_list += kernel.val_out * outer_sum - - # min_response, max_val, min_loc, max_loc = cv2.minMaxLoc(response_list) - min_response, _, min_loc, _ = cv2.minMaxLoc(response_list) - - center = ((xy_steps_list[0][min_loc[0]] - padding), (xy_steps_list[1][min_loc[1]] - padding)) - - frame_conv_stride[:, :] = response_list - # or - # frame_conv_stride[:, :] = response_list.astype(np.uint8) - - return frame_conv, min_response, center - - -class Auto_Radius_Calc(object): - def __init__(self): - self.response_list = [] - self.radius_cand_list = [] - self.adj_comp_flag = False - - self.radius_middle_index = None - - self.left_item = None - self.right_item = None - self.left_index = None - self.right_index = None - - def get_radius(self): - prev_res_len = len(self.response_list) - # adjustment of radius - if prev_res_len == 1: - # len==1==response_list==[default_radius] - self.adj_comp_flag = False - return auto_radius_range[0] - elif prev_res_len == 2: - # len==2==response_list==[default_radius, auto_radius_range[0]] - self.adj_comp_flag = False - return auto_radius_range[1] - elif prev_res_len == 3: - # len==3==response_list==[default_radius,auto_radius_range[0],auto_radius_range[1]] - if self.response_list[1][1] < self.response_list[2][1]: - self.left_item = self.response_list[1] - self.right_item = self.response_list[0] - else: - self.left_item = self.response_list[0] - self.right_item = self.response_list[2] - self.radius_cand_list = [i for i in range(self.left_item[0], self.right_item[0] + auto_radius_step, auto_radius_step)] - self.left_index = 0 - self.right_index = len(self.radius_cand_list) - 1 - self.radius_middle_index = (self.left_index + self.right_index) // 2 - self.adj_comp_flag = False - return self.radius_cand_list[self.radius_middle_index] - else: - if self.left_index <= self.right_index and self.left_index != self.radius_middle_index: - if (self.left_item[1] + self.response_list[-1][1]) < (self.right_item[1] + self.response_list[-1][1]): - self.right_item = self.response_list[-1] - self.right_index = self.radius_middle_index - 1 - self.radius_middle_index = (self.left_index + self.right_index) // 2 - self.adj_comp_flag = False - return self.radius_cand_list[self.radius_middle_index] - if (self.left_item[1] + 
self.response_list[-1][1]) > (self.right_item[1] + self.response_list[-1][1]): - self.left_item = self.response_list[-1] - self.left_index = self.radius_middle_index + 1 - self.radius_middle_index = (self.left_index + self.right_index) // 2 - self.adj_comp_flag = False - return self.radius_cand_list[self.radius_middle_index] - self.adj_comp_flag = True - return self.radius_cand_list[self.radius_middle_index] - - def get_radius_base(self): - """ - Use it when the new version doesn't work well. - :return: - """ - - prev_res_len = len(self.response_list) - # adjustment of radius - if prev_res_len == 1: - # len==1==response_list==[default_radius] - self.adj_comp_flag = False - return auto_radius_range[0] - elif prev_res_len == 2: - # len==2==response_list==[default_radius, auto_radius_range[0]] - self.adj_comp_flag = False - return auto_radius_range[1] - elif prev_res_len == 3: - # len==3==response_list==[default_radius,auto_radius_range[0],auto_radius_range[1]] - sort_res = sorted(self.response_list, key=lambda x: x[1])[0] - # Extract the radius with the lowest response value - if sort_res[0] == default_radius: - # If the default value is best, change now_mode to init after setting radius to the default value. - self.adj_comp_flag = True - return default_radius - elif sort_res[0] == auto_radius_range[0]: - self.radius_cand_list = [i for i in range(auto_radius_range[0], default_radius, auto_radius_step)][1:] - self.adj_comp_flag = False - return self.radius_cand_list.pop() - else: - self.radius_cand_list = [i for i in range(default_radius, auto_radius_range[1], auto_radius_step)][1:] - self.adj_comp_flag = False - return self.radius_cand_list.pop() - else: - # Try the contents of the radius_cand_list in order until the radius_cand_list runs out - # Better make it a binary search. - if len(self.radius_cand_list) == 0: - sort_res = sorted(self.response_list, key=lambda x: x[1])[0] - self.adj_comp_flag = True - return sort_res[0] - else: - self.adj_comp_flag = False - return self.radius_cand_list.pop() - - def add_response(self, radius, response): - self.response_list.append((radius, response)) - return None - - -class Blink_Detector(object): - def __init__(self): - self.response_list = [] - self.response_max = None - self.enable_detect_flg = False - self.quartile_1 = None - - def calc_thresh(self): - # Calculate response_max by computing interquartile range, IQR - # self.response_listo = np.array(self.response_listo) - # 25%,75% - # This value may need to be adjusted depending on the environment. 
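The commented-out lines above and the code that follows compute a standard interquartile-range outlier threshold: responses above Q3 + 1.5 * IQR are treated as blinks. Restated standalone (the sample values are made up for illustration; 1.5 is the conventional multiplier, which, as the comment says, may need tuning per environment):

import numpy as np

responses = np.array([3.1, 3.3, 3.0, 3.2, 3.4, 9.8])  # one blink-like outlier
q1, q3 = np.percentile(responses, [25, 75])
iqr = q3 - q1
response_max = q3 + 1.5 * iqr
print(responses > response_max)  # only the 9.8 sample is flagged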
- # quartile_1, quartile_3 = np.percentile(self.response_listo, [25, 75]) - # iqr = quartile_3 - quartile_1 - # self.response_maxo = quartile_3 + (iqr * 1.5) - - # quartile_1, quartile_3 = np.percentile(self.response_list, [25, 75]) - # or - quartile_1, quartile_3 = np.percentile(np.array(self.response_list), [25, 75]) - self.quartile_1 = quartile_1 - iqr = quartile_3 - quartile_1 - # response_min = quartile_1 - (iqr * 1.5) - - self.response_max = float(quartile_3 + (iqr * 1.5)) - # or - # self.response_max = quartile_3 + (iqr * 1.5) - - self.enable_detect_flg = True - return None - - def detect(self, now_response): - return now_response > self.response_max - - def add_response(self, response): - self.response_list.append(response) - return None - - def response_len(self): - return len(self.response_list) - - -class CenterCorrection(object): - def __init__(self): - # Tunable parameters - kernel_size = 7 # 3 or 5 or 7 - self.hist_thr = float(4) # 4% - self.center_q1_radius = 20 - - self.setup_comp = False - self.quartile_1 = None - self.radius = None - self.frame_shape = None - self.frame_mask = None - self.frame_bin = None - self.frame_final = None - self.morph_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (kernel_size, kernel_size)) - self.morph_kernel2 = np.ones((3, 3)) - self.hist_index = np.arange(256) - self.hist = np.empty((256, 1)) - self.hist_norm = np.empty((256, 1)) - - def init_array(self, gray_shape, quartile_1, radius): - self.frame_shape = gray_shape - self.frame_mask = np.empty(gray_shape, dtype=np.uint8) - self.frame_bin = np.empty(gray_shape, dtype=np.uint8) - self.frame_final = np.empty(gray_shape, dtype=np.uint8) - self.quartile_1 = quartile_1 - self.radius = radius - self.setup_comp = True - - # def reset_array(self): - # self.frame_mask.fill(0) - - def correction(self, gray_frame, orig_x, orig_y): - center_x, center_y = orig_x, orig_y - self.frame_mask.fill(0) - - # cv2.circle(self.frame_mask, center=(center_x, center_y), radius=int(self.radius * 2), color=255, thickness=-1) - - # bottleneck - cv2.calcHist([gray_frame], [0], None, [256], [0, 256], hist=self.hist) - - cv2.normalize(self.hist, self.hist_norm, alpha=100.0, norm_type=cv2.NORM_L1) - hist_per = self.hist_norm.cumsum() - hist_index_list = self.hist_index[hist_per >= self.hist_thr] - frame_thr = hist_index_list[0] if len(hist_index_list) else np.percentile(cv2.bitwise_or(255 - self.frame_mask, gray_frame), 4) - - # bottleneck - self.frame_bin = cv2.threshold(gray_frame, frame_thr, 1, cv2.THRESH_BINARY_INV)[1] - cropped_x, cropped_y, cropped_w, cropped_h = cv2.boundingRect(self.frame_bin) - - self.frame_final = cv2.bitwise_and(self.frame_bin, self.frame_mask) - - # bottleneck - self.frame_final = cv2.morphologyEx(self.frame_final, cv2.MORPH_CLOSE, self.morph_kernel) - self.frame_final = cv2.morphologyEx(self.frame_final, cv2.MORPH_OPEN, self.morph_kernel) - - if (cropped_h, cropped_w) == self.frame_shape: - # Not detected. 
- base_x, base_y = center_x, center_y - else: - base_x = cropped_x + cropped_w // 2 - base_y = cropped_y + cropped_h // 2 - if self.frame_final[base_y, base_x] != 1: - if self.frame_final[center_y, center_x] != 1: - self.frame_final = cv2.morphologyEx(self.frame_final, cv2.MORPH_DILATE, self.morph_kernel2, iterations=3) - else: - base_x, base_y = center_x, center_y - - contours, _ = cv2.findContours(self.frame_final, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE) - contours_box = [cv2.boundingRect(cnt) for cnt in contours] - contours_dist = np.array( - [abs(base_x - (cnt_x + cnt_w / 2)) + abs(base_y - (cnt_y + cnt_h / 2)) for cnt_x, cnt_y, cnt_w, cnt_h in contours_box]) - - if len(contours_box): - cropped_x2, cropped_y2, cropped_w2, cropped_h2 = contours_box[contours_dist.argmin()] - x = cropped_x2 + cropped_w2 // 2 - y = cropped_y2 + cropped_h2 // 2 - else: - x = center_x - y = center_y - - # if imshow_enable: - # cv2.circle(frame, (orig_x, orig_y), 10, (255, 0, 0), -1) - # cv2.circle(frame, (x, y), 7, (0, 0, 255), -1) - - # - # out_x = center_x if abs(x - center_x) > radius else x - # out_y = center_y if abs(y - center_y) > radius else y - out_x, out_y = orig_x, orig_y - if gray_frame[int(max(y - 5, 0)):int(min(y + 5, self.frame_shape[0])), - int(max(x - 5, 0)):int(min(x + 5, self.frame_shape[1]))].min() < self.quartile_1: - out_x = x - out_y = y - - # if imshow_enable: - # cv2.circle(frame, (out_x, out_y), 5, (0, 255, 0), -1) - # - # cv2.imshow("frame_bin", self.frame_bin * 255) - # cv2.imshow("frame_final", self.frame_final * 255) - return out_x, out_y - - -#RANSAC - - -video_path = "demo2.mp4" -imshow_enable = True -save_video = False - -thresh_add = 10 - - -class TimeitResult(object): - """ - from https://github.com/ipython/ipython/blob/339c0d510a1f3cb2158dd8c6e7f4ac89aa4c89d8/IPython/core/magics/execution.py#L55 - - Object returned by the timeit magic with info about the run. - Contains the following attributes : - loops: (int) number of loops done per measurement - repeat: (int) number of times the measurement has been repeated - best: (float) best execution time / number - all_runs: (list of float) execution time of each run (in s) - """ - - def __init__(self, loops, repeat, best, worst, all_runs, precision): - self.loops = loops - self.repeat = repeat - self.best = best - self.worst = worst - self.all_runs = all_runs - self._precision = precision - self.timings = [dt / self.loops for dt in all_runs] - - @property - def average(self): - return math.fsum(self.timings) / len(self.timings) - - @property - def stdev(self): - mean = self.average - return (math.fsum([(x - mean) ** 2 for x in self.timings]) / len(self.timings)) ** 0.5 - - def __str__(self): - pm = '+-' - if hasattr(sys.stdout, 'encoding') and sys.stdout.encoding: - try: - u'\xb1'.encode(sys.stdout.encoding) - pm = u'\xb1' - except: - pass - return "min:{best} max:{worst} mean:{mean} {pm} {std} per loop (mean {pm} std. dev. 
of {runs} run{run_plural}, {loops:,} loop{loop_plural} each)".format( - pm=pm, - runs=self.repeat, - loops=self.loops, - loop_plural="" if self.loops == 1 else "s", - run_plural="" if self.repeat == 1 else "s", - mean=format_time(self.average, self._precision), - std=format_time(self.stdev, self._precision), - best=format_time(self.best, self._precision), - worst=format_time(self.worst, self._precision), - ) - - def _repr_pretty_(self, p, cycle): - unic = self.__str__() - p.text(u'') - - -class FPSResult(object): - """ - base https://github.com/ipython/ipython/blob/339c0d510a1f3cb2158dd8c6e7f4ac89aa4c89d8/IPython/core/magics/execution.py#L55 - """ - - def __init__(self, loops, repeat, best, worst, all_runs, precision): - self.loops = loops - self.repeat = repeat - self.best = 1 / best - self.worst = 1 / worst - self.all_runs = all_runs - self._precision = precision - self.fps = [1 / dt for dt in all_runs] - self.unit = "fps" - - @property - def average(self): - return math.fsum(self.fps) / len(self.fps) - - @property - def stdev(self): - mean = self.average - return (math.fsum([(x - mean) ** 2 for x in self.fps]) / len(self.fps)) ** 0.5 - - def __str__(self): - pm = '+-' - if hasattr(sys.stdout, 'encoding') and sys.stdout.encoding: - try: - u'\xb1'.encode(sys.stdout.encoding) - pm = u'\xb1' - except: - pass - return "min:{best} max:{worst} mean:{mean} {pm} {std} per loop (mean {pm} std. dev. of {runs} run{run_plural}, {loops:,} loop{loop_plural} each)".format( - pm=pm, - runs=self.repeat, - loops=self.loops, - loop_plural="" if self.loops == 1 else "s", - run_plural="" if self.repeat == 1 else "s", - mean="%.*g%s" % (self._precision, self.average, self.unit), - std="%.*g%s" % (self._precision, self.stdev, self.unit), - best="%.*g%s" % (self._precision, self.best, self.unit), - worst="%.*g%s" % (self._precision, self.worst, self.unit), - ) - - def _repr_pretty_(self, p, cycle): - unic = self.__str__() - p.text(u'') - - -def format_time(timespan, precision=3): - """ - https://github.com/ipython/ipython/blob/339c0d510a1f3cb2158dd8c6e7f4ac89aa4c89d8/IPython/core/magics/execution.py#L1473 - Formats the timespan in a human readable form - """ - - if timespan >= 60.0: - # we have more than a minute, format that in a human readable form - # Idea from http://snipplr.com/view/5713/ - parts = [("d", 60 * 60 * 24), ("h", 60 * 60), ("min", 60), ("s", 1)] - time = [] - leftover = timespan - for suffix, length in parts: - value = int(leftover / length) - if value > 0: - leftover = leftover % length - time.append(u'%s%s' % (str(value), suffix)) - if leftover < 1: - break - return " ".join(time) - - # Unfortunately the unicode 'micro' symbol can cause problems in - # certain terminals. - # See bug: https://bugs.launchpad.net/ipython/+bug/348466 - # Try to prevent crashes by being more secure than it needs to - # E.g. eclipse is able to print a µ, but has no sys.stdout.encoding set. 
- units = [u"s", u"ms", u'us', "ns"] # the save value - if hasattr(sys.stdout, 'encoding') and sys.stdout.encoding: - try: - u'\xb5'.encode(sys.stdout.encoding) - units = [u"s", u"ms", u'\xb5s', "ns"] - except: - pass - scaling = [1, 1e3, 1e6, 1e9] - - if timespan > 0.0: - order = min(-int(math.floor(math.log10(timespan)) // 3), 3) - else: - order = 3 - return u"%.*g %s" % (precision, timespan * scaling[order], units[order]) - def ellipse_model(data, y, f): """ @@ -835,59 +56,85 @@ def ellipse_model(data, y, f): # @profile -def fit_rotated_ellipse_ransac(data: np.ndarray, rng: np.random.Generator, iter=100, sample_num=10, offset=80 # 80.0, 10, 80 - ): # before changing these values, please read up on the ransac algorithm +def fit_rotated_ellipse_ransac( + data: np.ndarray, + rng: np.random.Generator, + iter=100, + sample_num=10, + offset=80, # 80.0, 10, 80 +): # before changing these values, please read up on the ransac algorithm # However if you want to change any value just know that higher iterations will make processing frames slower effective_sample = None - + # The array contents do not change during the loop, so only one call is needed. # They say len is faster than shape. # Reference url: https://stackoverflow.com/questions/35547853/what-is-faster-python3s-len-or-numpys-shape len_data = len(data) - + if len_data < sample_num: return None - + # Type of calculation result ret_dtype = np.float64 - + # Sorts a random number array of size (iter,len_data). After sorting, returns the index of sample_num random numbers before sorting. # If the array size is less than about 100, this is faster than rng.choice. rng_sample = rng.random((iter, len_data)).argsort()[:, :sample_num] # or # I don't see any advantage to doing this. # rng_sample = np.asarray(rng.random((iter, len_data)).argsort()[:, :sample_num], dtype=np.int32) - + # I don't think it looks beautiful. # x,y,x**2,y**2,x*y,1,-1*x**2 datamod = np.concatenate( - [data, data ** 2, (data[:, 0] * data[:, 1])[:, np.newaxis], np.ones((len_data, 1), dtype=ret_dtype), - (-1 * data[:, 0] ** 2)[:, np.newaxis]], axis=1, - dtype=ret_dtype) - + [ + data, + data**2, + (data[:, 0] * data[:, 1])[:, np.newaxis], + np.ones((len_data, 1), dtype=ret_dtype), + (-1 * data[:, 0] ** 2)[:, np.newaxis], + ], + axis=1, + dtype=ret_dtype, + ) + datamod_slim = np.array(datamod[:, :5], dtype=ret_dtype) - + datamod_rng = datamod[rng_sample] datamod_rng6 = datamod_rng[:, :, 6] datamod_rng_swap = datamod_rng[:, :, [4, 3, 0, 1, 5]] datamod_rng_swap_trans = datamod_rng_swap.transpose((0, 2, 1)) - + # These two lines are one of the bottlenecks datamod_rng_5x5 = np.matmul(datamod_rng_swap_trans, datamod_rng_swap) - datamod_rng_p5smp = np.matmul(np.linalg.inv(datamod_rng_5x5), datamod_rng_swap_trans) - - datamod_rng_p = np.matmul(datamod_rng_p5smp, datamod_rng6[:, :, np.newaxis]).reshape((-1, 5)) - + datamod_rng_p5smp = np.matmul( + np.linalg.inv(datamod_rng_5x5), datamod_rng_swap_trans + ) + + datamod_rng_p = np.matmul( + datamod_rng_p5smp, datamod_rng6[:, :, np.newaxis] + ).reshape((-1, 5)) + # I don't think it looks beautiful. 
ellipse_y_arr = np.asarray( - [datamod_rng_p[:, 2], datamod_rng_p[:, 3], np.ones(len(datamod_rng_p)), datamod_rng_p[:, 1], datamod_rng_p[:, 0]], dtype=ret_dtype) - - ellipse_data_arr = ellipse_model(datamod_slim, ellipse_y_arr, np.asarray(datamod_rng_p[:, 4])).transpose((1, 0)) + [ + datamod_rng_p[:, 2], + datamod_rng_p[:, 3], + np.ones(len(datamod_rng_p)), + datamod_rng_p[:, 1], + datamod_rng_p[:, 0], + ], + dtype=ret_dtype, + ) + + ellipse_data_arr = ellipse_model( + datamod_slim, ellipse_y_arr, np.asarray(datamod_rng_p[:, 4]) + ).transpose((1, 0)) ellipse_data_abs = np.abs(ellipse_data_arr) ellipse_data_index = np.argmax(np.sum(ellipse_data_abs < offset, axis=1), axis=0) effective_data_arr = ellipse_data_arr[ellipse_data_index] effective_sample_p_arr = datamod_rng_p[ellipse_data_index] - + return fit_rotated_ellipse(effective_data_arr, effective_sample_p_arr) @@ -903,60 +150,62 @@ def fit_rotated_ellipse(data, P): theta = 0.5 * np.arctan(b / (a - c), dtype=np.float64) theta_sin = np.sin(theta, dtype=np.float64) theta_cos = np.cos(theta, dtype=np.float64) - tc2 = theta_cos ** 2 - ts2 = theta_sin ** 2 + tc2 = theta_cos**2 + ts2 = theta_sin**2 b_tcs = b * theta_cos * theta_sin - + # Do the calculation only once - cxy = b ** 2 - 4 * a * c + cxy = b**2 - 4 * a * c cx = (2 * c * d - b * e) / cxy cy = (2 * a * e - b * d) / cxy - + # I just want to clear things up around here. - cu = a * cx ** 2 + b * cx * cy + c * cy ** 2 - f + cu = a * cx**2 + b * cx * cy + c * cy**2 - f cu_r = np.array([(a * tc2 + b_tcs + c * ts2), (a * ts2 - b_tcs + c * tc2)]) wh = np.sqrt(cu / cu_r) - + w, h = wh[0], wh[1] - + error_sum = np.sum(data) # print("fitting error = %.3f" % (error_sum)) - + return (cx, cy, w, h, theta) - - - - - +# temporary name class HSRAC_cls(object): def __init__(self): # I'd like to take into account things like print, end_time - start_time processing time, etc., but it's too much trouble. 
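A quick numeric check of the center formulas in fit_rotated_ellipse above: for a conic with known geometry, here the circle (x - 1)**2 + (y - 2)**2 = 4 written as x**2 + y**2 - 2x - 4y + 1 = 0, they recover the expected center and radius:

a, b, c, d, e, f = 1.0, 0.0, 1.0, -2.0, -4.0, 1.0
cxy = b**2 - 4 * a * c                        # -4
cx = (2 * c * d - b * e) / cxy                # 1.0
cy = (2 * a * e - b * d) / cxy                # 2.0
cu = a * cx**2 + b * cx * cy + c * cy**2 - f  # 4.0, the squared radius for a circle
print(cx, cy, cu**0.5)                        # 1.0 2.0 2.0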
- + # For measuring total processing time - + self.main_start_time = timeit.default_timer() - + self.rng = np.random.default_rng() self.cvparam = CvParameters(default_radius, default_step) - + self.cv_modeo = ["first_frame", "radius_adjust", "blink_adjust", "normal"] self.now_modeo = self.cv_modeo[0] - - self.auto_radius_calc = Auto_Radius_Calc() - self.blink_detector = Blink_Detector() - self.center_q1 = Blink_Detector() + + self.auto_radius_calc = AutoRadiusCalc() + self.blink_detector = BlinkDetector() + self.center_q1 = BlinkDetector() self.center_correct = CenterCorrection() - + self.cap = None - - self.timedict = {"to_gray": [], "int_img": [], "conv_int": [], "crop": [], "total_cv": []} + + self.timedict = { + "to_gray": [], + "int_img": [], + "conv_int": [], + "crop": [], + "total_cv": [], + } # ransac self.rng = np.random.default_rng() self.kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)) - + def open_video(self, video_path): # Temporary implementation to run cap = cv2.VideoCapture(video_path) @@ -964,7 +213,7 @@ class HSRAC_cls(object): raise IOError("Error opening video stream or file") self.cap = cap return True - + def read_frame(self): # Temporary implementation to run if not self.cap.isOpened(): @@ -975,49 +224,54 @@ class HSRAC_cls(object): self.current_image_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) return True return False - + def single_run(self): # Temporary implementation to run - + ## default_radius = 14 - + frame = self.current_image_gray - frame = cv2.copyMakeBorder(frame, 21, 21, 21, 21, cv2.BORDER_CONSTANT, value=[255, 255, 255]) # add a border to prevent overcropping the image. - + if self.now_modeo == self.cv_modeo[1]: # adjustment of radius - + # debug print # if calc_print_enable: # temp_radius = self.auto_radius_calc.get_radius() # print('Now radius:', temp_radius) # self.cvparam.radius = temp_radius - + self.cvparam.radius = self.auto_radius_calc.get_radius() if self.auto_radius_calc.adj_comp_flag: - self.now_modeo = self.cv_modeo[2] if not skip_blink_detect else self.cv_modeo[3] - + self.now_modeo = ( + self.cv_modeo[2] if not skip_blink_detect else self.cv_modeo[3] + ) + radius, pad, step, hsf = self.cvparam.get_rpsh() - + # For measuring processing time of image processing cv_start_time = timeit.default_timer() - + gray_frame = frame self.timedict["to_gray"].append(timeit.default_timer() - cv_start_time) - + # Calculate the integral image of the frame int_start_time = timeit.default_timer() # BORDER_CONSTANT is faster than BORDER_REPLICATE There seems to be almost no negative impact when BORDER_CONSTANT is used. 
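For reference, the two OpenCV calls this comment refers to: cv2.copyMakeBorder pads the frame so the kernel can be evaluated near the edges, and cv2.integral returns an array one row and one column larger than its input. A minimal sketch with a dummy frame:

import cv2
import numpy as np

gray_frame = np.random.randint(0, 256, (60, 80), dtype=np.uint8)
pad = 10  # 2 * radius, matching how CvParameters sets self.pad above

frame_pad = cv2.copyMakeBorder(gray_frame, pad, pad, pad, pad, cv2.BORDER_CONSTANT)
frame_int = cv2.integral(frame_pad)
print(frame_pad.shape)  # (80, 100)
print(frame_int.shape)  # (81, 101): integral images gain one row and one column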
- frame_pad = cv2.copyMakeBorder(gray_frame, pad, pad, pad, pad, cv2.BORDER_CONSTANT) + frame_pad = cv2.copyMakeBorder( + gray_frame, pad, pad, pad, pad, cv2.BORDER_CONSTANT + ) frame_int = cv2.integral(frame_pad) self.timedict["int_img"].append(timeit.default_timer() - int_start_time) - + # Convolve the feature with the integral image conv_int_start_time = timeit.default_timer() - xy_step = frameint_get_xy_step(frame_int.shape, step, pad, start_offset=None, end_offset=None) + xy_step = frameint_get_xy_step( + frame_int.shape, step, pad, start_offset=None, end_offset=None + ) frame_conv, response, center_xy = conv_int(frame_int, hsf, step, pad, xy_step) self.timedict["conv_int"].append(timeit.default_timer() - conv_int_start_time) - + crop_start_time = timeit.default_timer() # Define the center point and radius center_x, center_y = center_xy @@ -1025,10 +279,10 @@ class HSRAC_cls(object): lower_x = center_x - radius upper_y = center_y + radius lower_y = center_y - radius - + # Crop the image using the calculated bounds - cropped_image = gray_frame[lower_y:upper_y, lower_x:upper_x] - + cropped_image = safe_crop(gray_frame, lower_x, lower_y, upper_x, upper_y) + if self.now_modeo == self.cv_modeo[0] or self.now_modeo == self.cv_modeo[1]: # If mode is first_frame or radius_adjust, record current radius and response self.auto_radius_calc.add_response(radius, response) @@ -1036,15 +290,19 @@ class HSRAC_cls(object): # Statistics for blink detection if self.blink_detector.response_len() < blink_init_frames: self.blink_detector.add_response(cv2.mean(cropped_image)[0]) - + upper_x = center_x + self.center_correct.center_q1_radius lower_x = center_x - self.center_correct.center_q1_radius upper_y = center_y + self.center_correct.center_q1_radius lower_y = center_y - self.center_correct.center_q1_radius - self.center_q1.add_response(cv2.mean(gray_frame[lower_y:upper_y, lower_x:upper_x])[0]) - + self.center_q1.add_response( + cv2.mean(safe_crop(gray_frame, lower_x, lower_y, upper_x, upper_y))[ + 0 + ] + ) + else: - + self.blink_detector.calc_thresh() self.center_q1.calc_thresh() self.now_modeo = self.cv_modeo[3] @@ -1061,11 +319,15 @@ class HSRAC_cls(object): # blink pass else: - # pass + # pass if not self.center_correct.setup_comp: - self.center_correct.init_array(gray_frame.shape, self.center_q1.quartile_1, radius) - - center_x, center_y = self.center_correct.correction(gray_frame, center_x, center_y) + self.center_correct.init_array( + gray_frame.shape, self.center_q1.quartile_1, radius + ) + + center_x, center_y = self.center_correct.correction( + gray_frame, center_x, center_y + ) # Define the center point and radius center_xy = (center_x, center_y) upper_x = center_x + radius @@ -1073,35 +335,39 @@ class HSRAC_cls(object): upper_y = center_y + radius lower_y = center_y - radius # Crop the image using the calculated bounds - cropped_image = gray_frame[lower_y:upper_y, lower_x:upper_x] - # if imshow_enable or save_video: - # cv2.circle(frame, (orig_x, orig_y), 6, (0, 0, 255), -1) - # cv2.circle(frame, (center_x, center_y), 3, (255, 0, 0), -1) + cropped_image = safe_crop( + gray_frame, lower_x, lower_y, upper_x, upper_y + ) + # if imshow_enable or save_video: + # cv2.circle(frame, (orig_x, orig_y), 6, (0, 0, 255), -1) + # cv2.circle(frame, (center_x, center_y), 3, (255, 0, 0), -1) # If you want to update response_max. 
it may be more cost-effective to rewrite response_list in the following way # https://stackoverflow.com/questions/42771110/fastest-way-to-left-cycle-a-numpy-array-like-pop-push-for-a-queue - + cv_end_time = timeit.default_timer() self.timedict["crop"].append(cv_end_time - crop_start_time) self.timedict["total_cv"].append(cv_end_time - cv_start_time) - - # if calc_print_enable: - # the lower the response the better the likelyhood of there being a pupil. you can adujst the radius and steps accordingly + + # if calc_print_enable: + # the lower the response the better the likelyhood of there being a pupil. you can adujst the radius and steps accordingly # print('Kernel response:', response) - # print('Pixel position:', center_xy) - - + # print('Pixel position:', center_xy) + if imshow_enable: - if self.now_modeo != self.cv_modeo[0] and self.now_modeo != self.cv_modeo[1]: + if ( + self.now_modeo != self.cv_modeo[0] + and self.now_modeo != self.cv_modeo[1] + ): if 0 in cropped_image.shape: # If shape contains 0, it is not detected well. pass else: - + cv2.imshow("crop", cropped_image) cv2.imshow("frame", frame) if cv2.waitKey(1) & 0xFF == ord("q"): pass - + if self.now_modeo == self.cv_modeo[0]: # Moving from first_frame to the next mode if skip_autoradius and skip_blink_detect: @@ -1110,14 +376,13 @@ class HSRAC_cls(object): self.now_modeo = self.cv_modeo[2] else: self.now_modeo = self.cv_modeo[1] - newFrame2 = frame.copy() - #frame = cropped_image + # frame = cropped_image # For measuring processing time of image processing cv_start_time = timeit.default_timer() # Crop first to reduce the amount of data to process. - # frame = cropped_image[0:len(cropped_image) - 10, :] + # frame = cropped_image[0:len(cropped_image) - 10, :] # To reduce the processing data, first convert to 1-channel and then blur. # The processing results were the same when I swapped the order of blurring and 1-channelization. 
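The Stack Overflow link a few lines up describes the cheapest way to treat a fixed-length numpy array as a queue: shift it left in place and overwrite the last slot. A sketch of that left cycle (push_response is a hypothetical helper, not part of this codebase):

import numpy as np

response_buf = np.zeros(8, dtype=np.float64)  # fixed-length response history

def push_response(buf, value):
    # "Left cycle": drop the oldest sample, append the newest, no reallocation.
    buf[:-1] = buf[1:]
    buf[-1] = value

for v in (0.5, 0.7, 0.6):
    push_response(response_buf, v)
print(response_buf)  # last three slots hold 0.5, 0.7, 0.6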
frame_gray = cv2.GaussianBlur(frame, (5, 5), 0) @@ -1128,21 +393,21 @@ class HSRAC_cls(object): lower_y = center_y - 20 # Crop the image using the calculated bounds - - frame_gray = frame_gray[lower_y:upper_y, lower_x:upper_x] + + frame_gray = safe_crop(frame_gray, lower_x, lower_y, upper_x, upper_y) frame = frame_gray # this will need to be adjusted everytime hardware is changed (brightness of IR, Camera postion, etc)m min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(frame_gray) - + maxloc0_hf, maxloc1_hf = int(0.5 * max_loc[0]), int(0.5 * max_loc[1]) - + # crop 15% sqare around min_loc - # frame_gray = frame_gray[max_loc[1] - maxloc1_hf:max_loc[1] + maxloc1_hf, + # frame_gray = frame_gray[max_loc[1] - maxloc1_hf:max_loc[1] + maxloc1_hf, # max_loc[0] - maxloc0_hf:max_loc[0] + maxloc0_hf] - + threshold_value = min_val + thresh_add _, thresh = cv2.threshold(frame_gray, threshold_value, 255, cv2.THRESH_BINARY) - # print(thresh.shape, frame_gray.shape) + # print(thresh.shape, frame_gray.shape) try: opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, self.kernel) closing = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, self.kernel) @@ -1161,10 +426,10 @@ class HSRAC_cls(object): # If empty, go to next loop pass try: - + cnt = sorted(hull, key=cv2.contourArea) maxcnt = cnt[-1] - + # ellipse = cv2.fitEllipse(maxcnt) ransac_data = fit_rotated_ellipse_ransac(maxcnt.reshape(-1, 2), self.rng) if ransac_data is None: @@ -1172,55 +437,62 @@ class HSRAC_cls(object): # go to next loop pass - crop_start_time = timeit.default_timer() cx, cy, w, h, theta = ransac_data print(cx, cy) - if w >= 2.1 * h: #new blink detection algo lmao this works pretty good actually + if ( + w >= 2.1 * h + ): # new blink detection algo lmao this works pretty good actually print("RAN BLINK") - #return center_x, center_y, frame, frame, True + # return center_x, center_y, frame, frame, True - csx = frame.shape[0] - csy = frame.shape[1] - - #cx = center_x - (csx - cx) # we find the difference between the crop size and ransac point, and subtract from the center point from HSF - # cy = center_y - (csy - cy) - - cx = (cx - 20) + center_x - cy = (cy - 20) + center_y + csy = frame.shape[0] + csx = frame.shape[1] + # cx = center_x - (csx - cx) # we find the difference between the crop size and ransac point, and subtract from the center point from HSF + # cy = center_y - (csy - cy) + cx = clamp((cx - 20) + center_x, 0, csx) + cy = clamp((cy - 20) + center_y, 0, csy) cv_end_time = timeit.default_timer() if imshow_enable or save_video: cv2.drawContours(frame_gray, contours, -1, (255, 0, 0), 1) cv2.circle(frame_gray, (cx, cy), 2, (0, 0, 255), -1) # cx1, cy1, w1, h1, theta1 = fit_rotated_ellipse(maxcnt.reshape(-1, 2)) - cv2.ellipse(frame_gray, (cx, cy), (w, h), theta * 180.0 / np.pi, 0.0, 360.0, (50, 250, 200), 1, ) - + cv2.ellipse( + frame_gray, + (cx, cy), + (w, h), + theta * 180.0 / np.pi, + 0.0, + 360.0, + (50, 250, 200), + 1, + ) + except: pass - - - # print(frame_gray.shape, thresh.shape) + + # print(frame_gray.shape, thresh.shape) try: return cx, cy, thresh, frame, gray_frame except: return center_x, center_y, thresh, frame, gray_frame -class External_Run_HSRACS: +class External_Run_HSRACS(object): + def __init__(self): + self.algo = HSRAC_cls() - hsrac = HSRAC_cls() - - def HSRACS(self): - External_Run_HSRACS.hsrac.current_image_gray = self.current_image_gray - center_x, center_y, thresh, frame, gray_frame = External_Run_HSRACS.hsrac.single_run() + def run(self, current_image_gray): + self.algo.current_image_gray = current_image_gray + 
center_x, center_y, thresh, frame, gray_frame = self.algo.single_run()
         return center_x, center_y, thresh, frame, gray_frame
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     hsrac = HSRAC_cls()
     hsrac.open_video(video_path)
     while hsrac.read_frame():
-        _ = hsrac.single_run()
\ No newline at end of file
+        _ = hsrac.single_run()
diff --git a/EyeTrackApp/img_utils.py b/EyeTrackApp/img_utils.py
new file mode 100644
index 0000000..288f04d
--- /dev/null
+++ b/EyeTrackApp/img_utils.py
@@ -0,0 +1,12 @@
+import cv2
+
+
+def safe_crop(img, x, y, x2, y2):
+    # The order of the arguments can be reconsidered.
+    img_h, img_w = img.shape[:2]
+    outimg = img[max(0, y) : min(img_h, y2), max(0, x) : min(img_w, x2)].copy()
+    reqsize_x, reqsize_y = abs(x2 - x), abs(y2 - y)
+    if outimg.shape[:2] != (reqsize_y, reqsize_x):
+        # The crop was clipped at the image border and is smaller than requested; stretch it back.
+        outimg = cv2.resize(outimg, (reqsize_x, reqsize_y))
+    return outimg
diff --git a/EyeTrackApp/ransac.py b/EyeTrackApp/ransac.py
index 355600a..06d7c40 100644
--- a/EyeTrackApp/ransac.py
+++ b/EyeTrackApp/ransac.py
@@ -19,7 +19,7 @@
 @@@@@@@@@@@@@@@@@                                           @@@@@@@@@@@@@(
 
-RANSAC 3D By: Summer#2406 (Main Algorithm Engineer), Pupil Labs (pye3d), Sean.Denka (Optimization)
+RANSAC 3D By: Summer#2406 (Main Algorithm Engineer), Pupil Labs (pye3d), PallasNeko (Optimization)
 Algorithm App Implimentations By: Prohurtz#0001, qdot (Inital App Creator)
 
 Copyright (c) 2022 EyeTrackVR <3
diff --git a/EyeTrackApp/time_utils.py b/EyeTrackApp/time_utils.py
new file mode 100644
index 0000000..ec46710
--- /dev/null
+++ b/EyeTrackApp/time_utils.py
@@ -0,0 +1,171 @@
+import functools
+import math
+import sys
+import timeit
+
+def TimeitWrapper(*args, **kwargs):
+    """
+    This decorator @TimeitWrapper() prints the function name and execution time in seconds.
+    :param args:
+    :param kwargs:
+    :return:
+    """
+
+    def decorator(function):
+        @functools.wraps(function)
+        def wrapper(*args, **kwargs):
+            start = timeit.default_timer()
+            results = function(*args, **kwargs)
+            end = timeit.default_timer()
+            print('{} execution time: {:.10f} s'.format(function.__name__, end - start))
+            return results
+
+        return wrapper
+
+    return decorator
+
+
+class TimeitResult(object):
+    """
+    from https://github.com/ipython/ipython/blob/339c0d510a1f3cb2158dd8c6e7f4ac89aa4c89d8/IPython/core/magics/execution.py#L55
+
+    Object returned by the timeit magic with info about the run.
+    Contains the following attributes :
+    loops: (int) number of loops done per measurement
+    repeat: (int) number of times the measurement has been repeated
+    best: (float) best execution time / number
+    all_runs: (list of float) execution time of each run (in s)
+    """
+
+    def __init__(self, loops, repeat, best, worst, all_runs, precision):
+        self.loops = loops
+        self.repeat = repeat
+        self.best = best
+        self.worst = worst
+        self.all_runs = all_runs
+        self._precision = precision
+        self.timings = [dt / self.loops for dt in all_runs]
+
+    @property
+    def average(self):
+        return math.fsum(self.timings) / len(self.timings)
+
+    @property
+    def stdev(self):
+        mean = self.average
+        return (math.fsum([(x - mean) ** 2 for x in self.timings]) / len(self.timings)) ** 0.5
+
+    def __str__(self):
+        pm = '+-'
+        if hasattr(sys.stdout, 'encoding') and sys.stdout.encoding:
+            try:
+                u'\xb1'.encode(sys.stdout.encoding)
+                pm = u'\xb1'
+            except:
+                pass
+        return "min:{best} max:{worst} mean:{mean} {pm} {std} per loop (mean {pm} std. dev.
of {runs} run{run_plural}, {loops:,} loop{loop_plural} each)".format( + pm=pm, + runs=self.repeat, + loops=self.loops, + loop_plural="" if self.loops == 1 else "s", + run_plural="" if self.repeat == 1 else "s", + mean=format_time(self.average, self._precision), + std=format_time(self.stdev, self._precision), + best=format_time(self.best, self._precision), + worst=format_time(self.worst, self._precision), + ) + + def _repr_pretty_(self, p, cycle): + unic = self.__str__() + p.text(u'') + + +class FPSResult(object): + """ + base https://github.com/ipython/ipython/blob/339c0d510a1f3cb2158dd8c6e7f4ac89aa4c89d8/IPython/core/magics/execution.py#L55 + """ + + def __init__(self, loops, repeat, best, worst, all_runs, precision): + self.loops = loops + self.repeat = repeat + self.best = 1 / best + self.worst = 1 / worst + self.all_runs = all_runs + self._precision = precision + self.fps = [1 / dt for dt in all_runs] + self.unit = "fps" + + @property + def average(self): + return math.fsum(self.fps) / len(self.fps) + + @property + def stdev(self): + mean = self.average + return (math.fsum([(x - mean) ** 2 for x in self.fps]) / len(self.fps)) ** 0.5 + + def __str__(self): + pm = '+-' + if hasattr(sys.stdout, 'encoding') and sys.stdout.encoding: + try: + u'\xb1'.encode(sys.stdout.encoding) + pm = u'\xb1' + except: + pass + return "min:{best} max:{worst} mean:{mean} {pm} {std} per loop (mean {pm} std. dev. of {runs} run{run_plural}, {loops:,} loop{loop_plural} each)".format( + pm=pm, + runs=self.repeat, + loops=self.loops, + loop_plural="" if self.loops == 1 else "s", + run_plural="" if self.repeat == 1 else "s", + mean="%.*g%s" % (self._precision, self.average, self.unit), + std="%.*g%s" % (self._precision, self.stdev, self.unit), + best="%.*g%s" % (self._precision, self.best, self.unit), + worst="%.*g%s" % (self._precision, self.worst, self.unit), + ) + + def _repr_pretty_(self, p, cycle): + unic = self.__str__() + p.text(u'') + + +def format_time(timespan, precision=3): + """ + https://github.com/ipython/ipython/blob/339c0d510a1f3cb2158dd8c6e7f4ac89aa4c89d8/IPython/core/magics/execution.py#L1473 + Formats the timespan in a human readable form + """ + + if timespan >= 60.0: + # we have more than a minute, format that in a human readable form + # Idea from http://snipplr.com/view/5713/ + parts = [("d", 60 * 60 * 24), ("h", 60 * 60), ("min", 60), ("s", 1)] + time = [] + leftover = timespan + for suffix, length in parts: + value = int(leftover / length) + if value > 0: + leftover = leftover % length + time.append(u'%s%s' % (str(value), suffix)) + if leftover < 1: + break + return " ".join(time) + + # Unfortunately the unicode 'micro' symbol can cause problems in + # certain terminals. + # See bug: https://bugs.launchpad.net/ipython/+bug/348466 + # Try to prevent crashes by being more secure than it needs to + # E.g. eclipse is able to print a µ, but has no sys.stdout.encoding set. 
+ units = [u"s", u"ms", u'us', "ns"] # the save value + if hasattr(sys.stdout, 'encoding') and sys.stdout.encoding: + try: + u'\xb5'.encode(sys.stdout.encoding) + units = [u"s", u"ms", u'\xb5s', "ns"] + except: + pass + scaling = [1, 1e3, 1e6, 1e9] + + if timespan > 0.0: + order = min(-int(math.floor(math.log10(timespan)) // 3), 3) + else: + order = 3 + return u"%.*g %s" % (precision, timespan * scaling[order], units[order]) \ No newline at end of file diff --git a/EyeTrackApp/utils.py b/EyeTrackApp/utils.py new file mode 100644 index 0000000..13931a7 --- /dev/null +++ b/EyeTrackApp/utils.py @@ -0,0 +1,2 @@ +def clamp(x, low, high): + return max(low, min(x, high)) \ No newline at end of file From b736fa7f8ddb1cc69aaf7185f59dc7e57b9155aa Mon Sep 17 00:00:00 2001 From: PallasNeko <124042774+PallasNeko@users.noreply.github.com> Date: Wed, 1 Feb 2023 02:36:41 +0900 Subject: [PATCH 2/2] Cleaning == None as None ! = None as not None --- EyeTrackApp/eye_processor.py | 95 ++++++++++++++++++++++++------------ 1 file changed, 63 insertions(+), 32 deletions(-) diff --git a/EyeTrackApp/eye_processor.py b/EyeTrackApp/eye_processor.py index 9eb0731..c7d8903 100644 --- a/EyeTrackApp/eye_processor.py +++ b/EyeTrackApp/eye_processor.py @@ -1,4 +1,4 @@ -''' +""" ------------------------------------------------------------------------------------------------------ ,@@@@@@ @@ -28,7 +28,7 @@ Additional Contributors: [Assassin], Summer404NotFound, lorow, ZanzyTHEbar Copyright (c) 2022 EyeTrackVR <3 ------------------------------------------------------------------------------------------------------ -''' +""" from operator import truth from dataclasses import dataclass @@ -98,7 +98,7 @@ async def delayed_setting_change(setting, value): await asyncio.sleep(5) setting = value if sys.platform.startswith("win"): - PlaySound('Audio/compleated.wav', SND_FILENAME | SND_ASYNC) + PlaySound("Audio/compleated.wav", SND_FILENAME | SND_ASYNC) class EyeProcessor: @@ -150,7 +150,6 @@ class EyeProcessor: self.cccs = False self.ts = 10 self.previous_rotation = self.config.rotation_angle - self.calibration_frame_counter self.camera_model = None self.detector_3d = None @@ -175,17 +174,17 @@ class EyeProcessor: min_cutoff = float(self.settings.gui_min_cutoff) # 0.0004 beta = float(self.settings.gui_speed_coefficient) # 0.9 except: - print('\033[93m[WARN] OneEuroFilter values must be a legal number.\033[0m') + print("\033[93m[WARN] OneEuroFilter values must be a legal number.\033[0m") min_cutoff = 0.0004 beta = 0.9 noisy_point = np.array([1, 1]) self.one_euro_filter = OneEuroFilter( - noisy_point, - min_cutoff=min_cutoff, - beta=beta + noisy_point, min_cutoff=min_cutoff, beta=beta ) - def output_images_and_update(self, threshold_image, output_information: EyeInformation): + def output_images_and_update( + self, threshold_image, output_information: EyeInformation + ): try: image_stack = np.concatenate( ( @@ -198,7 +197,9 @@ class EyeProcessor: self.previous_image = self.current_image self.previous_rotation = self.config.rotation_angle except: # If this fails it likely means that the images are not the same size for some reason. 
- print('\033[91m[ERROR] Size of frames to display are of unequal sizes.\033[0m') + print( + "\033[91m[ERROR] Size of frames to display are of unequal sizes.\033[0m" + ) pass @@ -248,9 +249,11 @@ class EyeProcessor: def HSRACM(self): # temporary implementation - cx, cy, thresh, gray_frame, uncropframe = External_Run_HSRACS().run(self.current_image_gray) + cx, cy, thresh, gray_frame, uncropframe = External_Run_HSRACS().run( + self.current_image_gray + ) self.current_image_gray = gray_frame - if self.prev_x == None: + if self.prev_x is None: self.prev_x = cx self.prev_y = cy # print(self.prev_x, self.prev_y, cx, cy) @@ -262,10 +265,16 @@ class EyeProcessor: out_x, out_y = cal_osc(self, cx, cy) if cx == 0: - self.output_images_and_update(thresh, EyeInformation(InformationOrigin.HSRAC, out_x, out_y, 0, eyeopen)) # update app + self.output_images_and_update( + thresh, + EyeInformation(InformationOrigin.HSRAC, out_x, out_y, 0, eyeopen), + ) # update app else: - self.output_images_and_update(thresh, EyeInformation(InformationOrigin.HSRAC, out_x, out_y, 0, eyeopen)) + self.output_images_and_update( + thresh, + EyeInformation(InformationOrigin.HSRAC, out_x, out_y, 0, eyeopen), + ) # else: # print("EYE MOVED TOO FAST") @@ -276,46 +285,64 @@ class EyeProcessor: eyeopen = intense(cx, cy, self.current_image_gray) out_x, out_y = cal_osc(self, cx, cy) if cx == 0: - self.output_images_and_update(frame, EyeInformation(InformationOrigin.HSF, out_x, out_y, 0, eyeopen)) # update app + self.output_images_and_update( + frame, EyeInformation(InformationOrigin.HSF, out_x, out_y, 0, eyeopen) + ) # update app else: - self.output_images_and_update(frame, EyeInformation(InformationOrigin.HSF, out_x, out_y, 0, eyeopen)) + self.output_images_and_update( + frame, EyeInformation(InformationOrigin.HSF, out_x, out_y, 0, eyeopen) + ) def RANSAC3DM(self): cx, cy, thresh = RANSAC3D(self) eyeopen = intense(cx, cy, self.current_image_gray) out_x, out_y = cal_osc(self, cx, cy) if cx == 0: - self.output_images_and_update(thresh, EyeInformation(InformationOrigin.RANSAC, out_x, out_y, 0, eyeopen)) # update app + self.output_images_and_update( + thresh, + EyeInformation(InformationOrigin.RANSAC, out_x, out_y, 0, eyeopen), + ) # update app else: - self.output_images_and_update(thresh, EyeInformation(InformationOrigin.RANSAC, out_x, out_y, 0, eyeopen)) + self.output_images_and_update( + thresh, + EyeInformation(InformationOrigin.RANSAC, out_x, out_y, 0, eyeopen), + ) def BLOBM(self): cx, cy, thresh = BLOB(self) eyeopen = intense(cx, cy, self.current_image_gray) out_x, out_y = cal_osc(self, cx, cy) if cx == 0: - self.output_images_and_update(thresh, EyeInformation(InformationOrigin.HSRAC, out_x, out_y, 0, eyeopen)) # update app + self.output_images_and_update( + thresh, + EyeInformation(InformationOrigin.HSRAC, out_x, out_y, 0, eyeopen), + ) # update app else: - self.output_images_and_update(thresh, EyeInformation(InformationOrigin.HSRAC, out_x, out_y, 0, eyeopen)) + self.output_images_and_update( + thresh, + EyeInformation(InformationOrigin.HSRAC, out_x, out_y, 0, eyeopen), + ) def ALGOSELECT(self): - if self.failed == 0 and self.firstalgo != None: + if self.failed == 0 and self.firstalgo is not None: self.firstalgo() else: self.failed = self.failed + 1 - if self.failed == 1 and self.secondalgo != None: # send the tracking algos previous fail number, in algo if we pass set to 0, if fail, + 1 + if ( + self.failed == 1 and self.secondalgo is not None + ): # send the tracking algos previous fail number, in algo if we pass set to 0, if 
fail, + 1 self.secondalgo() else: self.failed = self.failed + 1 - if self.failed == 2 and self.thirdalgo != None: + if self.failed == 2 and self.thirdalgo is not None: self.thirdalgo() else: self.failed = self.failed + 1 - if self.failed == 3 and self.fourthalgo != None: + if self.failed == 3 and self.fourthalgo is not None: self.fourthalgo() else: self.failed = 0 # we have reached last possible algo and it is disabled, move to first algo @@ -328,7 +355,7 @@ class EyeProcessor: self.fourthalgo = None # set algo priorities - if self.settings.gui_HSF and self.settings.gui_HSFP == 1: # I feel like this is super innefficient though it only runs at startup and no solution is coming to me atm + if (self.settings.gui_HSF and self.settings.gui_HSFP == 1): # I feel like this is super innefficient though it only runs at startup and no solution is coming to me atm self.firstalgo = self.HSFM elif self.settings.gui_HSF and self.settings.gui_HSFP == 2: self.secondalgo = self.HSFM @@ -346,7 +373,7 @@ class EyeProcessor: elif self.settings.gui_RANSAC3D and self.settings.gui_RANSAC3DP == 4: self.fourthalgo = self.RANSAC3DM - if self.settings.gui_HSRAC == True and self.settings.gui_HSRACP == 1: + if self.settings.gui_HSRAC and self.settings.gui_HSRACP == 1: self.firstalgo = self.HSRACM elif self.settings.gui_HSRAC and self.settings.gui_HSRACP == 2: self.secondalgo = self.HSRACM @@ -380,12 +407,14 @@ class EyeProcessor: continue # If our ROI configuration has changed, reset our model and detector - if (self.camera_model is None + if ( + self.camera_model is None or self.detector_3d is None - or self.camera_model.resolution != ( - self.config.roi_window_w, - self.config.roi_window_h, - ) + or self.camera_model.resolution + != ( + self.config.roi_window_w, + self.config.roi_window_h, + ) ): self.camera_model = CameraModel( focal_length=self.config.focal_length, @@ -414,7 +443,9 @@ class EyeProcessor: self.current_image_gray = cv2.cvtColor( self.current_image, cv2.COLOR_BGR2GRAY ) - self.current_image_gray_clean = self.current_image_gray.copy() # copy this frame to have a clean image for blink algo + self.current_image_gray_clean = ( + self.current_image_gray.copy() + ) # copy this frame to have a clean image for blink algo # print(self.settings.gui_RANSAC3D) # BLINK(self)
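One last note on the priority ladders above (the comment itself calls them inefficient): they only map an (enabled, priority) pair to one of four slots, so they could collapse into a single loop. A hypothetical sketch, not what this PR does; the BLOB setting names are assumed by analogy with the HSF/RANSAC3D/HSRAC ones:

def build_algo_slots(settings, hsf, ransac3d, hsrac, blob):
    # Each enabled algorithm lands in the slot named by its gui_*P priority (1-4).
    slots = [None, None, None, None]
    candidates = [
        (settings.gui_HSF, settings.gui_HSFP, hsf),
        (settings.gui_RANSAC3D, settings.gui_RANSAC3DP, ransac3d),
        (settings.gui_HSRAC, settings.gui_HSRACP, hsrac),
        (settings.gui_BLOB, settings.gui_BLOBP, blob),  # assumed field names
    ]
    for enabled, priority, func in candidates:
        if enabled and 1 <= priority <= 4:
            slots[priority - 1] = func
    return slots  # firstalgo, secondalgo, thirdalgo, fourthalgo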