diff --git a/EyeTrackApp/eye_processor.py b/EyeTrackApp/eye_processor.py
index 2a4439b..dc1fe30 100644
--- a/EyeTrackApp/eye_processor.py
+++ b/EyeTrackApp/eye_processor.py
@@ -246,8 +246,9 @@ class EyeProcessor:
     def HSRACM(self):
-        cx, cy, thresh, gray_frame, self.blinkvalue = External_Run.HSRACS(self)
+        cx, cy, thresh, gray_frame = External_Run_HSRACS.HSRACS(self)
         self.current_image_gray = gray_frame
+        # thresh = gray_frame
         if self.prev_x == None:
             self.prev_x = cx
diff --git a/EyeTrackApp/hsrac.py b/EyeTrackApp/hsrac.py
index 7213007..d07c17c 100644
--- a/EyeTrackApp/hsrac.py
+++ b/EyeTrackApp/hsrac.py
@@ -7,274 +7,6 @@ from functools import lru_cache
 import cv2
 import numpy as np
-
-
-# RANSAC
-
-video_path = "demo2.mp4"
-imshow_enable = False
-save_video = False
-
-thresh_add = 10
-
-
-class TimeitResult(object):
-    """
-    from https://github.com/ipython/ipython/blob/339c0d510a1f3cb2158dd8c6e7f4ac89aa4c89d8/IPython/core/magics/execution.py#L55
-
-    Object returned by the timeit magic with info about the run.
-    Contains the following attributes:
-    loops: (int) number of loops done per measurement
-    repeat: (int) number of times the measurement has been repeated
-    best: (float) best execution time / number
-    all_runs: (list of float) execution time of each run (in s)
-    """
-
-    def __init__(self, loops, repeat, best, worst, all_runs, precision):
-        self.loops = loops
-        self.repeat = repeat
-        self.best = best
-        self.worst = worst
-        self.all_runs = all_runs
-        self._precision = precision
-        self.timings = [dt / self.loops for dt in all_runs]
-
-    @property
-    def average(self):
-        return math.fsum(self.timings) / len(self.timings)
-
-    @property
-    def stdev(self):
-        mean = self.average
-        return (math.fsum([(x - mean) ** 2 for x in self.timings]) / len(self.timings)) ** 0.5
-
-    def __str__(self):
-        pm = '+-'
-        if hasattr(sys.stdout, 'encoding') and sys.stdout.encoding:
-            try:
-                u'\xb1'.encode(sys.stdout.encoding)
-                pm = u'\xb1'
-            except:
-                pass
-        return "min:{best} max:{worst} mean:{mean} {pm} {std} per loop (mean {pm} std. dev. of {runs} run{run_plural}, {loops:,} loop{loop_plural} each)".format(
-            pm=pm,
-            runs=self.repeat,
-            loops=self.loops,
-            loop_plural="" if self.loops == 1 else "s",
-            run_plural="" if self.repeat == 1 else "s",
-            mean=format_time(self.average, self._precision),
-            std=format_time(self.stdev, self._precision),
-            best=format_time(self.best, self._precision),
-            worst=format_time(self.worst, self._precision),
-        )
-
-    def _repr_pretty_(self, p, cycle):
-        unic = self.__str__()
-        p.text(u'')
-
-
-class FPSResult(object):
-    """
-    base https://github.com/ipython/ipython/blob/339c0d510a1f3cb2158dd8c6e7f4ac89aa4c89d8/IPython/core/magics/execution.py#L55
-    """
-
-    def __init__(self, loops, repeat, best, worst, all_runs, precision):
-        self.loops = loops
-        self.repeat = repeat
-        self.best = 1 / best
-        self.worst = 1 / worst
-        self.all_runs = all_runs
-        self._precision = precision
-        self.fps = [1 / dt for dt in all_runs]
-        self.unit = "fps"
-
-    @property
-    def average(self):
-        return math.fsum(self.fps) / len(self.fps)
-
-    @property
-    def stdev(self):
-        mean = self.average
-        return (math.fsum([(x - mean) ** 2 for x in self.fps]) / len(self.fps)) ** 0.5
-
-    def __str__(self):
-        pm = '+-'
-        if hasattr(sys.stdout, 'encoding') and sys.stdout.encoding:
-            try:
-                u'\xb1'.encode(sys.stdout.encoding)
-                pm = u'\xb1'
-            except:
-                pass
-        return "min:{best} max:{worst} mean:{mean} {pm} {std} per loop (mean {pm} std. dev. of {runs} run{run_plural}, {loops:,} loop{loop_plural} each)".format(
-            pm=pm,
-            runs=self.repeat,
-            loops=self.loops,
-            loop_plural="" if self.loops == 1 else "s",
-            run_plural="" if self.repeat == 1 else "s",
-            mean="%.*g%s" % (self._precision, self.average, self.unit),
-            std="%.*g%s" % (self._precision, self.stdev, self.unit),
-            best="%.*g%s" % (self._precision, self.best, self.unit),
-            worst="%.*g%s" % (self._precision, self.worst, self.unit),
-        )
-
-    def _repr_pretty_(self, p, cycle):
-        unic = self.__str__()
-        p.text(u'')
-
-
-def format_time(timespan, precision=3):
-    """
-    https://github.com/ipython/ipython/blob/339c0d510a1f3cb2158dd8c6e7f4ac89aa4c89d8/IPython/core/magics/execution.py#L1473
-    Formats the timespan in a human readable form
-    """
-
-    if timespan >= 60.0:
-        # we have more than a minute, format that in a human readable form
-        # Idea from http://snipplr.com/view/5713/
-        parts = [("d", 60 * 60 * 24), ("h", 60 * 60), ("min", 60), ("s", 1)]
-        time = []
-        leftover = timespan
-        for suffix, length in parts:
-            value = int(leftover / length)
-            if value > 0:
-                leftover = leftover % length
-                time.append(u'%s%s' % (str(value), suffix))
-            if leftover < 1:
-                break
-        return " ".join(time)
-
-    # Unfortunately the unicode 'micro' symbol can cause problems in
-    # certain terminals.
-    # See bug: https://bugs.launchpad.net/ipython/+bug/348466
-    # Try to prevent crashes by being more secure than it needs to
-    # E.g. eclipse is able to print a µ, but has no sys.stdout.encoding set.
-    units = [u"s", u"ms", u'us', "ns"]  # the safe value
-    if hasattr(sys.stdout, 'encoding') and sys.stdout.encoding:
-        try:
-            u'\xb5'.encode(sys.stdout.encoding)
-            units = [u"s", u"ms", u'\xb5s', "ns"]
-        except:
-            pass
-    scaling = [1, 1e3, 1e6, 1e9]
-
-    if timespan > 0.0:
-        order = min(-int(math.floor(math.log10(timespan)) // 3), 3)
-    else:
-        order = 3
-    return u"%.*g %s" % (precision, timespan * scaling[order], units[order])
-
-
-def ellipse_model(data, y, f):
-    """
-    There is no need to make this process a function, since making the process a function will slow it down a little by calling it.
-    The results may be slightly different from the lambda version due to calculation errors derived from float types, but the calculation results are virtually the same.
-    a = 1.0, b = P[0], c = P[1], d = P[2], e = P[3], f = P[4]
-    :param data:
-    :param y: np.c_[d, e, a, c, b]
-    :param f: f == P[4, 0]
-    :return: this_return == np.array([ellipse_model(x, y) for (x, y) in data ])
-    """
-    return data.dot(y) + f
-
-
-# @profile
-def fit_rotated_ellipse_ransac(data: np.ndarray, rng: np.random.Generator, iter=100, sample_num=10, offset=80  # 80.0, 10, 80
-                               ):  # before changing these values, please read up on the RANSAC algorithm
-    # However, if you want to change any value, just know that higher iterations will make processing frames slower
-    effective_sample = None
-
-    # The array contents do not change during the loop, so only one call is needed.
-    # They say len is faster than shape.
-    # Reference url: https://stackoverflow.com/questions/35547853/what-is-faster-python3s-len-or-numpys-shape
-    len_data = len(data)
-
-    if len_data < sample_num:
-        return None
-
-    # Type of calculation result
-    ret_dtype = np.float64
-
-    # Sorts a random number array of size (iter, len_data). After sorting, returns the index of sample_num random numbers before sorting.
-    # If the array size is less than about 100, this is faster than rng.choice.
-    rng_sample = rng.random((iter, len_data)).argsort()[:, :sample_num]
-    # or
-    # I don't see any advantage to doing this.
-    # rng_sample = np.asarray(rng.random((iter, len_data)).argsort()[:, :sample_num], dtype=np.int32)
-
-    # I don't think it looks beautiful.
-    # x,y,x**2,y**2,x*y,1,-1*x**2
-    datamod = np.concatenate(
-        [data, data ** 2, (data[:, 0] * data[:, 1])[:, np.newaxis], np.ones((len_data, 1), dtype=ret_dtype),
-         (-1 * data[:, 0] ** 2)[:, np.newaxis]], axis=1,
-        dtype=ret_dtype)
-
-    datamod_slim = np.array(datamod[:, :5], dtype=ret_dtype)
-
-    datamod_rng = datamod[rng_sample]
-    datamod_rng6 = datamod_rng[:, :, 6]
-    datamod_rng_swap = datamod_rng[:, :, [4, 3, 0, 1, 5]]
-    datamod_rng_swap_trans = datamod_rng_swap.transpose((0, 2, 1))
-
-    # These two lines are one of the bottlenecks
-    datamod_rng_5x5 = np.matmul(datamod_rng_swap_trans, datamod_rng_swap)
-    datamod_rng_p5smp = np.matmul(np.linalg.inv(datamod_rng_5x5), datamod_rng_swap_trans)
-
-    datamod_rng_p = np.matmul(datamod_rng_p5smp, datamod_rng6[:, :, np.newaxis]).reshape((-1, 5))
-
-    # I don't think it looks beautiful.
-    ellipse_y_arr = np.asarray(
-        [datamod_rng_p[:, 2], datamod_rng_p[:, 3], np.ones(len(datamod_rng_p)), datamod_rng_p[:, 1], datamod_rng_p[:, 0]], dtype=ret_dtype)
-
-    ellipse_data_arr = ellipse_model(datamod_slim, ellipse_y_arr, np.asarray(datamod_rng_p[:, 4])).transpose((1, 0))
-    ellipse_data_abs = np.abs(ellipse_data_arr)
-    ellipse_data_index = np.argmax(np.sum(ellipse_data_abs < offset, axis=1), axis=0)
-    effective_data_arr = ellipse_data_arr[ellipse_data_index]
-    effective_sample_p_arr = datamod_rng_p[ellipse_data_index]
-
-    return fit_rotated_ellipse(effective_data_arr, effective_sample_p_arr)
-
-
-# @profile
-def fit_rotated_ellipse(data, P):
-    a = 1.0
-    b = P[0]
-    c = P[1]
-    d = P[2]
-    e = P[3]
-    f = P[4]
-    # The cost of trigonometric functions is high.
-    theta = 0.5 * np.arctan(b / (a - c), dtype=np.float64)
-    theta_sin = np.sin(theta, dtype=np.float64)
-    theta_cos = np.cos(theta, dtype=np.float64)
-    tc2 = theta_cos ** 2
-    ts2 = theta_sin ** 2
-    b_tcs = b * theta_cos * theta_sin
-
-    # Do the calculation only once
-    cxy = b ** 2 - 4 * a * c
-    cx = (2 * c * d - b * e) / cxy
-    cy = (2 * a * e - b * d) / cxy
-
-    # I just want to clear things up around here.
-    cu = a * cx ** 2 + b * cx * cy + c * cy ** 2 - f
-    cu_r = np.array([(a * tc2 + b_tcs + c * ts2), (a * ts2 - b_tcs + c * tc2)])
-    wh = np.sqrt(cu / cu_r)
-
-    w, h = wh[0], wh[1]
-
-    error_sum = np.sum(data)
-    # print("fitting error = %.3f" % (error_sum))
-
-    return (cx, cy, w, h, theta)
-
-
-
-
-
-
-
-# HSF
-
 # from line_profiler_pycharm import profile
 
 video_path = "ezgif.com-gif-maker.avi"
@@ -867,7 +599,7 @@ class CenterCorrection(object):
             center_x, center_y = orig_x, orig_y
 
         self.frame_mask.fill(0)
-        cv2.circle(self.frame_mask, center=(center_x, center_y), radius=int(self.radius * 2), color=255, thickness=-1)
+        # cv2.circle(self.frame_mask, center=(center_x, center_y), radius=int(self.radius * 2), color=255, thickness=-1)  # bottleneck
 
         cv2.calcHist([gray_frame], [0], None, [256], [0, 256], hist=self.hist)
@@ -933,6 +665,271 @@ class CenterCorrection(object):
 
         return out_x, out_y
 
+# RANSAC
+
+
+video_path = "demo2.mp4"
+imshow_enable = False
+save_video = False
+
+thresh_add = 10
+
+
+class TimeitResult(object):
+    """
+    from https://github.com/ipython/ipython/blob/339c0d510a1f3cb2158dd8c6e7f4ac89aa4c89d8/IPython/core/magics/execution.py#L55
+
+    Object returned by the timeit magic with info about the run.
+    Contains the following attributes:
+    loops: (int) number of loops done per measurement
+    repeat: (int) number of times the measurement has been repeated
+    best: (float) best execution time / number
+    all_runs: (list of float) execution time of each run (in s)
+    """
+
+    def __init__(self, loops, repeat, best, worst, all_runs, precision):
+        self.loops = loops
+        self.repeat = repeat
+        self.best = best
+        self.worst = worst
+        self.all_runs = all_runs
+        self._precision = precision
+        self.timings = [dt / self.loops for dt in all_runs]
+
+    @property
+    def average(self):
+        return math.fsum(self.timings) / len(self.timings)
+
+    @property
+    def stdev(self):
+        mean = self.average
+        return (math.fsum([(x - mean) ** 2 for x in self.timings]) / len(self.timings)) ** 0.5
+
+    def __str__(self):
+        pm = '+-'
+        if hasattr(sys.stdout, 'encoding') and sys.stdout.encoding:
+            try:
+                u'\xb1'.encode(sys.stdout.encoding)
+                pm = u'\xb1'
+            except:
+                pass
+        return "min:{best} max:{worst} mean:{mean} {pm} {std} per loop (mean {pm} std. dev. of {runs} run{run_plural}, {loops:,} loop{loop_plural} each)".format(
+            pm=pm,
+            runs=self.repeat,
+            loops=self.loops,
+            loop_plural="" if self.loops == 1 else "s",
+            run_plural="" if self.repeat == 1 else "s",
+            mean=format_time(self.average, self._precision),
+            std=format_time(self.stdev, self._precision),
+            best=format_time(self.best, self._precision),
+            worst=format_time(self.worst, self._precision),
+        )
+
+    def _repr_pretty_(self, p, cycle):
+        unic = self.__str__()
+        p.text(u'')
+
+
+class FPSResult(object):
+    """
+    base https://github.com/ipython/ipython/blob/339c0d510a1f3cb2158dd8c6e7f4ac89aa4c89d8/IPython/core/magics/execution.py#L55
+    """
+
+    def __init__(self, loops, repeat, best, worst, all_runs, precision):
+        self.loops = loops
+        self.repeat = repeat
+        self.best = 1 / best
+        self.worst = 1 / worst
+        self.all_runs = all_runs
+        self._precision = precision
+        self.fps = [1 / dt for dt in all_runs]
+        self.unit = "fps"
+
+    @property
+    def average(self):
+        return math.fsum(self.fps) / len(self.fps)
+
+    @property
+    def stdev(self):
+        mean = self.average
+        return (math.fsum([(x - mean) ** 2 for x in self.fps]) / len(self.fps)) ** 0.5
+
+    def __str__(self):
+        pm = '+-'
+        if hasattr(sys.stdout, 'encoding') and sys.stdout.encoding:
+            try:
+                u'\xb1'.encode(sys.stdout.encoding)
+                pm = u'\xb1'
+            except:
+                pass
+        return "min:{best} max:{worst} mean:{mean} {pm} {std} per loop (mean {pm} std. dev. of {runs} run{run_plural}, {loops:,} loop{loop_plural} each)".format(
+            pm=pm,
+            runs=self.repeat,
+            loops=self.loops,
+            loop_plural="" if self.loops == 1 else "s",
+            run_plural="" if self.repeat == 1 else "s",
+            mean="%.*g%s" % (self._precision, self.average, self.unit),
+            std="%.*g%s" % (self._precision, self.stdev, self.unit),
+            best="%.*g%s" % (self._precision, self.best, self.unit),
+            worst="%.*g%s" % (self._precision, self.worst, self.unit),
+        )
+
+    def _repr_pretty_(self, p, cycle):
+        unic = self.__str__()
+        p.text(u'')
+
+
+def format_time(timespan, precision=3):
+    """
+    https://github.com/ipython/ipython/blob/339c0d510a1f3cb2158dd8c6e7f4ac89aa4c89d8/IPython/core/magics/execution.py#L1473
+    Formats the timespan in a human readable form
+    """
+
+    if timespan >= 60.0:
+        # we have more than a minute, format that in a human readable form
+        # Idea from http://snipplr.com/view/5713/
+        parts = [("d", 60 * 60 * 24), ("h", 60 * 60), ("min", 60), ("s", 1)]
+        time = []
+        leftover = timespan
+        for suffix, length in parts:
+            value = int(leftover / length)
+            if value > 0:
+                leftover = leftover % length
+                time.append(u'%s%s' % (str(value), suffix))
+            if leftover < 1:
+                break
+        return " ".join(time)
+
+    # Unfortunately the unicode 'micro' symbol can cause problems in
+    # certain terminals.
+    # See bug: https://bugs.launchpad.net/ipython/+bug/348466
+    # Try to prevent crashes by being more secure than it needs to
+    # E.g. eclipse is able to print a µ, but has no sys.stdout.encoding set.
+    units = [u"s", u"ms", u'us', "ns"]  # the safe value
+    if hasattr(sys.stdout, 'encoding') and sys.stdout.encoding:
+        try:
+            u'\xb5'.encode(sys.stdout.encoding)
+            units = [u"s", u"ms", u'\xb5s', "ns"]
+        except:
+            pass
+    scaling = [1, 1e3, 1e6, 1e9]
+
+    if timespan > 0.0:
+        order = min(-int(math.floor(math.log10(timespan)) // 3), 3)
+    else:
+        order = 3
+    return u"%.*g %s" % (precision, timespan * scaling[order], units[order])
+
+
+def ellipse_model(data, y, f):
+    """
+    There is no need to make this process a function, since making the process a function will slow it down a little by calling it.
+    The results may be slightly different from the lambda version due to calculation errors derived from float types, but the calculation results are virtually the same.
+    a = 1.0, b = P[0], c = P[1], d = P[2], e = P[3], f = P[4]
+    :param data:
+    :param y: np.c_[d, e, a, c, b]
+    :param f: f == P[4, 0]
+    :return: this_return == np.array([ellipse_model(x, y) for (x, y) in data ])
+    """
+    return data.dot(y) + f
+
+
+# @profile
+def fit_rotated_ellipse_ransac(data: np.ndarray, rng: np.random.Generator, iter=100, sample_num=10, offset=80  # 80.0, 10, 80
+                               ):  # before changing these values, please read up on the RANSAC algorithm
+    # However, if you want to change any value, just know that higher iterations will make processing frames slower
+    effective_sample = None
+
+    # The array contents do not change during the loop, so only one call is needed.
+    # They say len is faster than shape.
+    # Reference url: https://stackoverflow.com/questions/35547853/what-is-faster-python3s-len-or-numpys-shape
+    len_data = len(data)
+
+    if len_data < sample_num:
+        return None
+
+    # Type of calculation result
+    ret_dtype = np.float64
+
+    # Sorts a random number array of size (iter, len_data). After sorting, returns the index of sample_num random numbers before sorting.
+    # If the array size is less than about 100, this is faster than rng.choice.
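
# Illustration (a sketch, not part of the patch): argsorting each row of uniform
# randoms yields a random permutation of that row's indices, so keeping the first
# sample_num columns gives sample_num distinct indices per RANSAC iteration, i.e.
# vectorised sampling without replacement. Assumes only NumPy:
import numpy as np
_rng = np.random.default_rng(0)
_sample = _rng.random((4, 8)).argsort()[:, :3]  # 4 iterations, 3 of 8 points each
assert all(len(set(row)) == 3 for row in _sample.tolist())
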
+    rng_sample = rng.random((iter, len_data)).argsort()[:, :sample_num]
+    # or
+    # I don't see any advantage to doing this.
+    # rng_sample = np.asarray(rng.random((iter, len_data)).argsort()[:, :sample_num], dtype=np.int32)
+
+    # I don't think it looks beautiful.
+    # x,y,x**2,y**2,x*y,1,-1*x**2
+    datamod = np.concatenate(
+        [data, data ** 2, (data[:, 0] * data[:, 1])[:, np.newaxis], np.ones((len_data, 1), dtype=ret_dtype),
+         (-1 * data[:, 0] ** 2)[:, np.newaxis]], axis=1,
+        dtype=ret_dtype)
+
+    datamod_slim = np.array(datamod[:, :5], dtype=ret_dtype)
+
+    datamod_rng = datamod[rng_sample]
+    datamod_rng6 = datamod_rng[:, :, 6]
+    datamod_rng_swap = datamod_rng[:, :, [4, 3, 0, 1, 5]]
+    datamod_rng_swap_trans = datamod_rng_swap.transpose((0, 2, 1))
+
+    # These two lines are one of the bottlenecks
+    datamod_rng_5x5 = np.matmul(datamod_rng_swap_trans, datamod_rng_swap)
+    datamod_rng_p5smp = np.matmul(np.linalg.inv(datamod_rng_5x5), datamod_rng_swap_trans)
+
+    datamod_rng_p = np.matmul(datamod_rng_p5smp, datamod_rng6[:, :, np.newaxis]).reshape((-1, 5))
+
+    # I don't think it looks beautiful.
+    ellipse_y_arr = np.asarray(
+        [datamod_rng_p[:, 2], datamod_rng_p[:, 3], np.ones(len(datamod_rng_p)), datamod_rng_p[:, 1], datamod_rng_p[:, 0]], dtype=ret_dtype)
+
+    ellipse_data_arr = ellipse_model(datamod_slim, ellipse_y_arr, np.asarray(datamod_rng_p[:, 4])).transpose((1, 0))
+    ellipse_data_abs = np.abs(ellipse_data_arr)
+    ellipse_data_index = np.argmax(np.sum(ellipse_data_abs < offset, axis=1), axis=0)
+    effective_data_arr = ellipse_data_arr[ellipse_data_index]
+    effective_sample_p_arr = datamod_rng_p[ellipse_data_index]
+
+    return fit_rotated_ellipse(effective_data_arr, effective_sample_p_arr)
+
+
+# @profile
+def fit_rotated_ellipse(data, P):
+    a = 1.0
+    b = P[0]
+    c = P[1]
+    d = P[2]
+    e = P[3]
+    f = P[4]
+    # The cost of trigonometric functions is high.
+    theta = 0.5 * np.arctan(b / (a - c), dtype=np.float64)
+    theta_sin = np.sin(theta, dtype=np.float64)
+    theta_cos = np.cos(theta, dtype=np.float64)
+    tc2 = theta_cos ** 2
+    ts2 = theta_sin ** 2
+    b_tcs = b * theta_cos * theta_sin
+
+    # Do the calculation only once
+    cxy = b ** 2 - 4 * a * c
+    cx = (2 * c * d - b * e) / cxy
+    cy = (2 * a * e - b * d) / cxy
+
+    # I just want to clear things up around here.
+    cu = a * cx ** 2 + b * cx * cy + c * cy ** 2 - f
+    cu_r = np.array([(a * tc2 + b_tcs + c * ts2), (a * ts2 - b_tcs + c * tc2)])
+    wh = np.sqrt(cu / cu_r)
+
+    w, h = wh[0], wh[1]
+
+    error_sum = np.sum(data)
+    # print("fitting error = %.3f" % (error_sum))
+
+    return (cx, cy, w, h, theta)
+
+
+
+
+
+
+
 class HSRAC_cls(object):
     def __init__(self):
         # I'd like to take into account things like print, end_time - start_time processing time, etc., but it's too much trouble.
@@ -955,6 +952,10 @@ class HSRAC_cls(object):
         self.cap = None
 
         self.timedict = {"to_gray": [], "int_img": [], "conv_int": [], "crop": [], "total_cv": []}
+
+        # ransac
+        self.rng = np.random.default_rng()
+        self.kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
 
     def open_video(self, video_path):
         # Temporary implementation to run
@@ -971,24 +972,18 @@ class HSRAC_cls(object):
         ret, frame = self.cap.read()
         if ret:
            # I have set it to grayscale (1ch) just in case, but if the frame is 1ch, this line can be commented out.
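
# Illustration (a sketch, not part of the patch): the diff hoists rng and the
# morphology kernel into __init__ above, so both are built once per tracker
# instead of once per frame. The pattern, with illustrative names only:
import cv2
import numpy as np

class _FrameWorker:
    def __init__(self):
        self.rng = np.random.default_rng()                                  # reused across frames
        self.kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))  # reused across frames

    def denoise(self, gray):
        opening = cv2.morphologyEx(gray, cv2.MORPH_OPEN, self.kernel)
        return cv2.morphologyEx(opening, cv2.MORPH_CLOSE, self.kernel)
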
-            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
+            self.current_image_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
             return True
         return False
-
     def single_run(self):
         # Temporary implementation to run
         ## default_radius = 14
-        rng = np.random.default_rng()
-        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
-
-        frame = self.current_image_gray
-        frame = cv2.copyMakeBorder(frame, 21, 21, 21, 21, cv2.BORDER_CONSTANT, value=[255, 255, 255])  # add a border to prevent overcropping the image.
-
+
         if self.now_modeo == self.cv_modeo[1]:
             # adjustment of radius
@@ -1008,7 +1003,6 @@ class HSRAC_cls(object):
         cv_start_time = timeit.default_timer()
 
         gray_frame = frame
-
         self.timedict["to_gray"].append(timeit.default_timer() - cv_start_time)
 
         # Calculate the integral image of the frame
@@ -1027,15 +1021,13 @@ class HSRAC_cls(object):
         crop_start_time = timeit.default_timer()
         # Define the center point and radius
         center_x, center_y = center_xy
-        upper_x = center_x + 20
-        lower_x = center_x - 20
-        upper_y = center_y + 20
-        lower_y = center_y - 20
+        upper_x = center_x + radius
+        lower_x = center_x - radius
+        upper_y = center_y + radius
+        lower_y = center_y - radius
+
         # Crop the image using the calculated bounds
-        cropped_image = frame[lower_y:upper_y, lower_x:upper_x]
-        intensity = np.sum(cropped_image)
-        print(intensity)
-        # frame = cropped_image
+        cropped_image = gray_frame[lower_y:upper_y, lower_x:upper_x]
 
         if self.now_modeo == self.cv_modeo[0] or self.now_modeo == self.cv_modeo[1]:
             # If mode is first_frame or radius_adjust, record current radius and response
@@ -1068,26 +1060,22 @@ class HSRAC_cls(object):
             if self.blink_detector.detect(cv2.mean(cropped_image)[0]):
                 # blink
                 pass
-                # return center_x, center_y, frame, frame, True
             else:
-                #pass
+                # pass
                 if not self.center_correct.setup_comp:
                     self.center_correct.init_array(gray_frame.shape, self.center_q1.quartile_1, radius)
 
                 center_x, center_y = self.center_correct.correction(gray_frame, center_x, center_y)
                 # Define the center point and radius
                 center_xy = (center_x, center_y)
-                upper_x = center_x + 20
-                lower_x = center_x - 20
-                upper_y = center_y + 20
-                lower_y = center_y - 20
+                upper_x = center_x + radius
+                lower_x = center_x - radius
+                upper_y = center_y + radius
+                lower_y = center_y - radius
                 # Crop the image using the calculated bounds
-                cropped_image = frame[lower_y:upper_y, lower_x:upper_x]
-                intensity = np.sum(cropped_image)
-                print(intensity)
-                # frame = cropped_image
+                cropped_image = gray_frame[lower_y:upper_y, lower_x:upper_x]
             # if imshow_enable or save_video:
-            #     cv2.circle(frame, (orig_x, orig_y), 6, (0, 255, 255), -1)
+            #     cv2.circle(frame, (orig_x, orig_y), 6, (0, 0, 255), -1)
             #     cv2.circle(frame, (center_x, center_y), 3, (255, 0, 0), -1)
             # If you want to update response_max, it may be more cost-effective to rewrite response_list in the following way
             # https://stackoverflow.com/questions/42771110/fastest-way-to-left-cycle-a-numpy-array-like-pop-push-for-a-queue
@@ -1096,10 +1084,10 @@ class HSRAC_cls(object):
         self.timedict["crop"].append(cv_end_time - crop_start_time)
         self.timedict["total_cv"].append(cv_end_time - cv_start_time)
 
-        # if calc_print_enable:
+        # if calc_print_enable:  # the lower the response the better the likelihood of there being a pupil. you can adjust the radius and steps accordingly
-        #     print('Kernel response:', response)
-        #     print('Pixel position:', center_xy)
+        #     print('Kernel response:', response)
+        #     print('Pixel position:', center_xy)
 
         if imshow_enable:
             if self.now_modeo != self.cv_modeo[0] and self.now_modeo != self.cv_modeo[1]:
@@ -1107,9 +1095,7 @@ class HSRAC_cls(object):
                 # If shape contains 0, it is not detected well.
                 pass
             else:
-                cv2.imshow("crop", cropped_image)
-                # cv2.imshow("thresh", thresh)
                 cv2.imshow("frame", frame)
         if cv2.waitKey(1) & 0xFF == ord("q"):
             pass
@@ -1124,57 +1110,45 @@ class HSRAC_cls(object):
             self.now_modeo = self.cv_modeo[1]
-        #run ransac on the HSF crop
-        frame = cropped_image
-
-        # try:
-        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
-        thresh_add = 10
-        rng = np.random.default_rng()
-
-        f = False
-
-        # Convert the image to grayscale, and set up thresholding. Thresholds here are basically a
-        # low-pass filter that will set any pixel < the threshold value to 0. Thresholding is user
-        # configurable in this utility as we're dealing with variable lighting amounts/placement, as
-        # well as camera positioning and lensing. Therefore everyone's cutoff may be different.
-        #
-        # The goal of thresholding settings is to make sure we can ONLY see the pupil. This is why we
-        # crop the image earlier; it gives us less possible dark area to get confused about in the
-        # next step.
-
+        newFrame2 = frame.copy()
+        #frame = cropped_image
         # For measuring processing time of image processing
+        cv_start_time = timeit.default_timer()
         # Crop first to reduce the amount of data to process.
-
-        #frame = frame[0:len(frame) - 5, :]
-
+        # frame = cropped_image[0:len(cropped_image) - 10, :]
         # To reduce the processing data, first convert to 1-channel and then blur.
         # The processing results were the same when I swapped the order of blurring and 1-channelization.
-        try:
-            frame = cv2.GaussianBlur(frame, (5, 5), 0)
-        except:
-            pass
+        frame_gray = cv2.GaussianBlur(frame, (5, 5), 0)
+        upper_x = center_x + 20
+        lower_x = center_x - 20
+        upper_y = center_y + 20
+        lower_y = center_y - 20
+
+        # Crop the image using the calculated bounds
+
+        frame_gray = frame_gray[lower_y:upper_y, lower_x:upper_x]
+        frame = frame_gray
 
         # this will need to be adjusted every time hardware is changed (brightness of IR, camera position, etc.)
-        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(frame)
+        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(frame_gray)
 
         maxloc0_hf, maxloc1_hf = int(0.5 * max_loc[0]), int(0.5 * max_loc[1])
 
         # crop 15% square around min_loc
-        # frame = frame[max_loc[1] - maxloc1_hf:max_loc[1] + maxloc1_hf,
-        #         max_loc[0] - maxloc0_hf:max_loc[0] + maxloc0_hf]
+        # frame_gray = frame_gray[max_loc[1] - maxloc1_hf:max_loc[1] + maxloc1_hf,
+        #              max_loc[0] - maxloc0_hf:max_loc[0] + maxloc0_hf]
 
         threshold_value = min_val + thresh_add
-        _, thresh = cv2.threshold(frame, threshold_value, 255, cv2.THRESH_BINARY)
+        _, thresh = cv2.threshold(frame_gray, threshold_value, 255, cv2.THRESH_BINARY)
+        # print(thresh.shape, frame_gray.shape)
         try:
-            opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)
-            closing = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, kernel)
+            opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, self.kernel)
+            closing = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, self.kernel)
             th_frame = 255 - closing
         except:
            # I want to eliminate try here because try tends to be slow in execution.
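
# Illustration (a sketch, not part of the patch) of the segmentation step above:
# threshold_value sits thresh_add units above the darkest pixel, so only the
# darkest blob (the pupil) survives, and the open/close pair scrubs speckle
# before inversion. Self-contained, on synthetic data:
import cv2
import numpy as np

_gray = np.full((40, 40), 200, np.uint8)
_gray[15:25, 15:25] = 30                                   # dark square standing in for a pupil
_kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
_min_val, _, _, _ = cv2.minMaxLoc(_gray)
_, _th = cv2.threshold(_gray, _min_val + 10, 255, cv2.THRESH_BINARY)
_closing = cv2.morphologyEx(cv2.morphologyEx(_th, cv2.MORPH_OPEN, _kernel), cv2.MORPH_CLOSE, _kernel)
_pupil_mask = 255 - _closing                               # pupil region ends up white (255)
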
-            th_frame = 255 - frame
+            th_frame = 255 - frame_gray
 
-        contours, _ = cv2.findContours(th_frame, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
         hull = []
         # This way is faster than contours[i]
         # But maybe this one is faster. hull = [cv2.convexHull(cnt, False) for cnt in contours]
         for cnt in contours:
             hull.append(cv2.convexHull(cnt, False))
         if not hull:
             # If empty, go to next loop
             pass
         try:
             cnt = sorted(hull, key=cv2.contourArea)
             maxcnt = cnt[-1]
+            # ellipse = cv2.fitEllipse(maxcnt)
-            ransac_data = fit_rotated_ellipse_ransac(maxcnt.reshape(-1, 2), rng)
+            ransac_data = fit_rotated_ellipse_ransac(maxcnt.reshape(-1, 2), self.rng)
             if ransac_data is None:
                 # ransac_data is None == maxcnt.shape[0] < sample_num
                 # go to next loop
                 pass
 
             cx, cy, w, h, theta = ransac_data
             if w >= 2.1 * h:  # new blink detection algo lmao this works pretty good actually
                 print("RAN BLINK")
-                return center_x, center_y, frame, frame, True
+                #return center_x, center_y, frame, frame, True
 
             csx = frame.shape[0]
             csy = frame.shape[1]
             cx = center_x - (csx - cx)  # we find the difference between the crop size and ransac point, and subtract from the center point from HSF
             cy = center_y - (csy - cy)
-            out_x, out_y = cx, cy
-            prev_hsfx = center_x
-            prev_hsfy = center_y
-            prev_ranx = cx
-            prev_rany = cy
             cx, cy, w, h = int(cx), int(cy), int(w), int(h)
-            cv2.drawContours(frame, contours, -1, (255, 0, 0), 1)
-            cv2.circle(frame, (cx, cy), 2, (0, 0, 255), -1)
-            # cx1, cy1, w1, h1, theta1 = fit_rotated_ellipse(maxcnt.reshape(-1, 2))
-            cv2.ellipse(frame, (cx, cy), (w, h), theta * 180.0 / np.pi, 0.0, 360.0, (50, 250, 200), 1, )
-            cv2.circle(frame, min_loc, 2, (0, 0, 255), -1)  # the point of the darkest area in the image
-            self.current_image_gray = frame
-            #img = newImage2[y1:y2, x1:x2]
-        #except:
-
-        #    print('R F')
-        #    pass
-
-        # print(thresh.shape, cropped_image.shape)
-        try:
-            # print(radius)
-            return out_x, out_y, thresh, cropped_image, False
-
-        except:
-            # xoff = prev_hsfx - prev_ranx
-            #yoff = prev_hsfy - prev_rany
-            return center_x, center_y, thresh, cropped_image, False
+            cv_end_time = timeit.default_timer()
+            if imshow_enable or save_video:
+                cv2.drawContours(frame_gray, contours, -1, (255, 0, 0), 1)
+                cv2.circle(frame_gray, (cx, cy), 2, (0, 0, 255), -1)
+                # cx1, cy1, w1, h1, theta1 = fit_rotated_ellipse(maxcnt.reshape(-1, 2))
+                cv2.ellipse(frame_gray, (cx, cy), (w, h), theta * 180.0 / np.pi, 0.0, 360.0, (50, 250, 200), 1, )
         except:
-
-            return center_x, center_y, thresh, cropped_image, False
-
+            pass
+
+
+        # print(frame_gray.shape, thresh.shape)
+        try:
+            return cx, cy, thresh, frame
+        except:
+            return center_x, center_y, thresh, frame
 
-class External_Run:
+class External_Run_HSRACS:
     hsrac = HSRAC_cls()
 
     def HSRACS(self):
-        External_Run.hsrac.current_image_gray = self.current_image_gray
-        center_x, center_y, thresh, frame, blinkv = External_Run.hsrac.single_run()
-        return center_x, center_y, thresh, frame, blinkv
+        External_Run_HSRACS.hsrac.current_image_gray = self.current_image_gray
+        center_x, center_y, thresh, frame = External_Run_HSRACS.hsrac.single_run()
+        return center_x, center_y, thresh, frame
 
 if __name__ == '__main__':
     hsrac = HSRAC_cls()
     hsrac.open_video(video_path)
     while hsrac.read_frame():
         _ = hsrac.single_run()
-
-'''
-timedict = {"to_gray": [], "int_img": [], "conv_int": [], "crop": [], "total_cv": []}
-# I'd like to take into account things like print, end_time - start_time processing time, etc., but it's too much trouble.
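
# Illustration (a sketch, not part of the patch): single_run above maps the
# RANSAC centre from crop-local back to full-frame coordinates using the crop
# size and the HSF centre. The general form, assuming the crop origin
# (lower_x, lower_y) is known:
def _crop_to_frame(local_x, local_y, lower_x, lower_y):
    # a point inside the crop sits at the crop origin plus the local offset
    return lower_x + local_x, lower_y + local_y

assert _crop_to_frame(5, 7, lower_x=100, lower_y=200) == (105, 207)
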
-# For measuring total processing time
-main_start_time = timeit.default_timer()
-rng = np.random.default_rng()
-cvparam = CvParameters(default_radius, default_step)
-cv_mode = ["first_frame", "radius_adjust", "init", "normal"]
-now_mode = cv_mode[0]
-radius_cand_list = []
-# response_min=0
-response_max = None
-response_list = []
-prev_hsfx = 0
-prev_hsfy = 0
-prev_ranx = 0
-prev_rany = 0
-def HSRAC(self):
-    global now_mode
-    global response_list
-    global radius_cand_list
-    global response_max
-    global skip_autoradius
-    global default_radius
-    global prev_rany
-    global prev_ranx
-    global prev_hsfy
-    global prev_hsfx
-    skip_autoradius = self.settings.gui_skip_autoradius
-    default_radius = self.settings.gui_HSF_radius
-    thresh_add = self.settings.gui_thresh_add
-    frame = frame
-    if now_mode == cv_mode[1]:
-        prev_res_len = len(response_list)
-        # adjustment of radius
-        if prev_res_len == 1:
-            # len==1==response_list==[default_radius]
-            cvparam.radius = auto_radius_range[0]
-        elif prev_res_len == 2:
-            # len==2==response_list==[default_radius, auto_radius_range[0]]
-            cvparam.radius = auto_radius_range[1]
-        elif prev_res_len == 3:
-            # len==3==response_list==[default_radius,auto_radius_range[0],auto_radius_range[1]]
-            sort_res = sorted(response_list, key=lambda x: x[1])[0]
-            # Extract the radius with the lowest response value
-            if sort_res[0] == default_radius:
-                # If the default value is best, change now_mode to init after setting radius to the default value.
-                cvparam.radius = default_radius
-                now_mode = cv_mode[2] if not skip_blink_detect else cv_mode[3]
-                response_list = []
-            elif sort_res[0] == auto_radius_range[0]:
-                radius_cand_list = [i for i in range(auto_radius_range[0], default_radius, default_step[0])][1:]
-                # default_step is defined separately for xy, but radius is shared by xy, so it may be buggy
-                # It should be no problem to set it to anything other than default_step
-                cvparam.radius = radius_cand_list.pop()
-            else:
-                radius_cand_list = [i for i in range(default_radius, auto_radius_range[1], default_step[0])][1:]
-                # default_step is defined separately for xy, but radius is shared by xy, so it may be buggy
-                # It should be no problem to set it to anything other than default_step
-                cvparam.radius = radius_cand_list.pop()
-        else:
-            # Try the contents of the radius_cand_list in order until the radius_cand_list runs out
-            # Better make it a binary search.
-            if len(radius_cand_list) == 0:
-                sort_res = sorted(response_list, key=lambda x: x[1])[0]
-                cvparam.radius = sort_res[0]
-                now_mode = cv_mode[2] if not skip_blink_detect else cv_mode[3]
-                response_list = []
-            else:
-                cvparam.radius = radius_cand_list.pop()
-
-    radius, pad, step, hsf = cvparam.get_rpsh()
-
-    # For measuring processing time of image processing
-    cv_start_time = timeit.default_timer()
-
-    gray_frame = frame
-    timedict["to_gray"].append(timeit.default_timer() - cv_start_time)
-
-    # Calculate the integral image of the frame
-    int_start_time = timeit.default_timer()
-    # BORDER_CONSTANT is faster than BORDER_REPLICATE. There seems to be almost no negative impact when BORDER_CONSTANT is used.
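
# Illustration (a sketch, not part of the patch): the padded integral image
# built just below lets the HSF stage sum any box with four lookups, whatever
# the radius. Minimal, self-contained check:
import cv2
import numpy as np

_img = np.arange(16, dtype=np.uint8).reshape(4, 4)
_ii = cv2.integral(_img)                     # (5, 5); _ii[y, x] == _img[:y, :x].sum()
_y0, _y1, _x0, _x1 = 1, 3, 1, 3
_box = _ii[_y1, _x1] - _ii[_y0, _x1] - _ii[_y1, _x0] + _ii[_y0, _x0]
assert _box == _img[_y0:_y1, _x0:_x1].sum()  # 30 == 30
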
-    frame_pad = cv2.copyMakeBorder(gray_frame, pad, pad, pad, pad, cv2.BORDER_CONSTANT)
-    frame_int = cv2.integral(frame_pad)
-    timedict["int_img"].append(timeit.default_timer() - int_start_time)
-
-    # Convolve the feature with the integral image
-    conv_int_start_time = timeit.default_timer()
-    xy_step = frameint_get_xy_step(frame_int.shape, step, pad, start_offset=None, end_offset=None)
-    frame_conv, response, center_xy = conv_int(frame_int, hsf, step, pad, xy_step)
-    timedict["conv_int"].append(timeit.default_timer() - conv_int_start_time)
-    # Define the center point and radius
-    center_x, center_y = center_xy
-    upper_x = center_x + 20
-    lower_x = center_x - 20
-    upper_y = center_y + 20
-    lower_y = center_y - 20
-
-    # Crop the image using the calculated bounds
-    cropped_image = gray_frame[lower_y:upper_y, lower_x:upper_x]
-    frame = cropped_image
-    if now_mode == cv_mode[0] or now_mode == cv_mode[1]:
-        # If mode is first_frame or radius_adjust, record current radius and response
-        response_list.append((radius, response))
-    elif now_mode == cv_mode[2]:
-        # Statistics for blink detection
-        if len(response_list) < blink_init_frames:
-            # Record the average value of cropped_image
-            response_list.append(cv2.mean(cropped_image)[0])
-        else:
-            # Calculate response_max by computing interquartile range, IQR
-            # Change cv_mode to normal
-            response_list = np.array(response_list)
-            # 25%,75%
-            # This value may need to be adjusted depending on the environment.
-            quartile_1, quartile_3 = np.percentile(response_list, [25, 75])
-            iqr = quartile_3 - quartile_1
-            # response_min = quartile_1 - (iqr * 1.5)
-            response_max = quartile_3 + (iqr * 1.5)
-            now_mode = cv_mode[3]
-    else:
-        if 0 in cropped_image.shape:
-            # If shape contains 0, it is not detected well.
-            print("Something's wrong.")
-        else:
-            # If the average value of cropped_image is greater than response_max
-            # (i.e., if the crop image is whitish)
-            if response_max is not None and cv2.mean(cropped_image)[0] > response_max:
-                # blink
-                self.blinkvalue = True
-                print("HSF BLINK")
-    # If you want to update response_max, it may be more cost-effective to rewrite response_list in the following way
-    # https://stackoverflow.com/questions/42771110/fastest-way-to-left-cycle-a-numpy-array-like-pop-push-for-a-queue
-
-
-    # the lower the response the better the likelihood of there being a pupil. you can adjust the radius and steps accordingly
-    print('Kernel response:', response)
-    print('Pixel position:', center_xy)
-    if now_mode == cv_mode[0]:
-        # Moving from first_frame to the next mode
-        if skip_autoradius and skip_blink_detect:
-            now_mode = cv_mode[3]
-            response_list = []
-        elif skip_autoradius:
-            now_mode = cv_mode[2]
-            response_list = []
-        else:
-            now_mode = cv_mode[1]
-    # run ransac on the HSF crop
-    frame = cropped_image
-
-    # try:
-    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
-
-    # Convert the image to grayscale, and set up thresholding. Thresholds here are basically a
-    # low-pass filter that will set any pixel < the threshold value to 0. Thresholding is user
-    # configurable in this utility as we're dealing with variable lighting amounts/placement, as
-    # well as camera positioning and lensing. Therefore everyone's cutoff may be different.
-    #
-    # The goal of thresholding settings is to make sure we can ONLY see the pupil. This is why we
-    # crop the image earlier; it gives us less possible dark area to get confused about in the
-    # next step.
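
# Illustration (a sketch, not part of the patch): the blink fence computed in
# the IQR block above. response_max is the Tukey upper fence over the
# calibration-phase crop means; a brighter-than-fence crop mean is read as a
# blink (eyelid skin is brighter than a pupil crop). Toy numbers:
import numpy as np

_responses = np.array([42.0, 45.0, 44.0, 43.0, 46.0, 41.0, 44.5, 45.5])
_q1, _q3 = np.percentile(_responses, [25, 75])
_response_max = _q3 + 1.5 * (_q3 - _q1)      # upper fence: Q3 + 1.5 * IQR
assert 60.0 > _response_max                  # a bright crop mean would register as a blink
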
-
-    # For measuring processing time of image processing
-    # Crop first to reduce the amount of data to process.
-    #frame = frame[0:len(frame) - 5, :]
-
-    # To reduce the processing data, first convert to 1-channel and then blur.
-    # The processing results were the same when I swapped the order of blurring and 1-channelization.
-    try:
-        frame = cv2.GaussianBlur(frame, (5, 5), 0)
-    except:
-        pass
-    # this will need to be adjusted every time hardware is changed (brightness of IR, camera position, etc.)
-    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(frame)
-
-    maxloc0_hf, maxloc1_hf = int(0.5 * max_loc[0]), int(0.5 * max_loc[1])
-
-    # crop 15% square around min_loc
-    # frame = frame[max_loc[1] - maxloc1_hf:max_loc[1] + maxloc1_hf,
-    #         max_loc[0] - maxloc0_hf:max_loc[0] + maxloc0_hf]
-
-    threshold_value = min_val + thresh_add
-    _, thresh = cv2.threshold(frame, threshold_value, 255, cv2.THRESH_BINARY)
-    try:
-        opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)
-        closing = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, kernel)
-        th_frame = 255 - closing
-    except:
-        # I want to eliminate try here because try tends to be slow in execution.
-        th_frame = 255 - frame
-
-    contours, _ = cv2.findContours(th_frame, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
-    hull = []
-    # This way is faster than contours[i]
-    # But maybe this one is faster. hull = [cv2.convexHull(cnt, False) for cnt in contours]
-    for cnt in contours:
-        hull.append(cv2.convexHull(cnt, False))
-    if not hull:
-        # If empty, go to next loop
-        pass
-    try:
-
-        cnt = sorted(hull, key=cv2.contourArea)
-        maxcnt = cnt[-1]
-        # ellipse = cv2.fitEllipse(maxcnt)
-        ransac_data = fit_rotated_ellipse_ransac(maxcnt.reshape(-1, 2), rng)
-        if ransac_data is None:
-            # ransac_data is None==maxcnt.shape[0]<sample_num
-            # go to next loop
-            pass
-        cx, cy, w, h, theta = ransac_data
-
-        if response_max is not None and cv2.mean(cropped_image)[0] > response_max:
-            # blink
-
-            cv2.circle(frame, (center_x, center_y), 10, (0, 0, 255), -1)
-        # If you want to update response_max, it may be more cost-effective to rewrite response_list in the following way
-        # https://stackoverflow.com/questions/42771110/fastest-way-to-left-cycle-a-numpy-array-like-pop-push-for-a-queue
-
-    except:
-        return 0, 0, frame
-# run ransac on the HSF crop
-    try:
-        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
-        thresh_add = 10
-        rng = np.random.default_rng()
-
-        f = False
-
-        # Convert the image to grayscale, and set up thresholding. Thresholds here are basically a
-        # low-pass filter that will set any pixel < the threshold value to 0. Thresholding is user
-        # configurable in this utility as we're dealing with variable lighting amounts/placement, as
-        # well as camera positioning and lensing. Therefore everyone's cutoff may be different.
-        #
-        # The goal of thresholding settings is to make sure we can ONLY see the pupil. This is why we
-        # crop the image earlier; it gives us less possible dark area to get confused about in the
-        # next step.
-        frame = cropped_image
-        # For measuring processing time of image processing
-        # Crop first to reduce the amount of data to process.
-        #frame = frame[0:len(frame) - 5, :]
-
-        # To reduce the processing data, first convert to 1-channel and then blur.
-        # The processing results were the same when I swapped the order of blurring and 1-channelization.
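
# Illustration (a sketch, not part of the patch): the contour-selection pattern
# this file repeats before every ellipse fit. Hull every contour, keep the
# largest hull by area, flatten to (N, 2) points for the RANSAC fit. On a
# synthetic blob:
import cv2
import numpy as np

_mask = np.zeros((64, 64), np.uint8)
cv2.circle(_mask, (32, 32), 10, 255, -1)     # white blob standing in for the pupil
_contours, _ = cv2.findContours(_mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
_hull = [cv2.convexHull(c, False) for c in _contours]
_maxcnt = sorted(_hull, key=cv2.contourArea)[-1]
_pts = _maxcnt.reshape(-1, 2)                # candidate pupil boundary points
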
-        frame = cv2.GaussianBlur(frame, (5, 5), 0)
-
-
-        # this will need to be adjusted every time hardware is changed (brightness of IR, camera position, etc.)
-        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(frame)
-
-        maxloc0_hf, maxloc1_hf = int(0.5 * max_loc[0]), int(0.5 * max_loc[1])
-
-        # crop 15% square around min_loc
-        # frame = frame[max_loc[1] - maxloc1_hf:max_loc[1] + maxloc1_hf,
-        #         max_loc[0] - maxloc0_hf:max_loc[0] + maxloc0_hf]
-
-        threshold_value = min_val + thresh_add
-        _, thresh = cv2.threshold(frame, threshold_value, 255, cv2.THRESH_BINARY)
-        try:
-            opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)
-            closing = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, kernel)
-            th_frame = 255 - closing
-        except:
-            # I want to eliminate try here because try tends to be slow in execution.
-            th_frame = 255 - frame
-
-        detect_start_time = timeit.default_timer()
-        contours, _ = cv2.findContours(th_frame, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
-        hull = []
-        # This way is faster than contours[i]
-        # But maybe this one is faster. hull = [cv2.convexHull(cnt, False) for cnt in contours]
-        for cnt in contours:
-            hull.append(cv2.convexHull(cnt, False))
-        if not hull:
-            # If empty, go to next loop
-            pass
-        try:
-
-            cnt = sorted(hull, key=cv2.contourArea)
-            maxcnt = cnt[-1]
-            # ellipse = cv2.fitEllipse(maxcnt)
-            ransac_data = fit_rotated_ellipse_ransac(maxcnt.reshape(-1, 2), rng)
-            if ransac_data is None:
-                # ransac_data is None==maxcnt.shape[0]
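
# Illustration (a sketch, not part of the patch): the closed-form conic-to-ellipse
# conversion fit_rotated_ellipse performs. With a fixed at 1, P = (b, c, d, e, f)
# encodes x^2 + b*x*y + c*y^2 + d*x + e*y + f = 0; centre, axes and angle fall out
# analytically. Checked here on x^2/4 + y^2/9 = 1 (centre (0, 0), semi-axes 2, 3):
import numpy as np

_a, (_b, _c, _d, _e, _f) = 1.0, (0.0, 4.0 / 9.0, 0.0, 0.0, -4.0)
_theta = 0.5 * np.arctan(_b / (_a - _c))
_den = _b ** 2 - 4 * _a * _c
_cx = (2 * _c * _d - _b * _e) / _den
_cy = (2 * _a * _e - _b * _d) / _den
_cu = _a * _cx ** 2 + _b * _cx * _cy + _c * _cy ** 2 - _f
_ts, _tc = np.sin(_theta), np.cos(_theta)
_w, _h = np.sqrt(_cu / np.array([_a * _tc ** 2 + _b * _tc * _ts + _c * _ts ** 2,
                                 _a * _ts ** 2 - _b * _tc * _ts + _c * _tc ** 2]))
assert np.isclose(_w, 2.0) and np.isclose(_h, 3.0) and np.isclose(_cx, 0.0)
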