From 61b1a4ad9ce5457396a01936b20693420f40c6be Mon Sep 17 00:00:00 2001 From: Prohurtz <48768484+RedHawk989@users.noreply.github.com> Date: Mon, 19 Dec 2022 17:58:52 -0800 Subject: [PATCH] initial commit, beginning to part things out --- EyeTrackApp/eye_processor.py | 988 +++++++++++++++++++++++++++++++---- 1 file changed, 894 insertions(+), 94 deletions(-) diff --git a/EyeTrackApp/eye_processor.py b/EyeTrackApp/eye_processor.py index 745dd71..f194ca7 100644 --- a/EyeTrackApp/eye_processor.py +++ b/EyeTrackApp/eye_processor.py @@ -17,11 +17,20 @@ from one_euro_filter import OneEuroFilter if sys.platform.startswith("win"): from winsound import PlaySound, SND_FILENAME, SND_ASYNC +import _thread +import functools +import math +import os +import timeit +from collections import namedtuple +from functools import lru_cache +import xxhash class InformationOrigin(Enum): RANSAC = 1 BLOB = 2 FAILURE = 3 + HSF = 4 @dataclass @@ -52,6 +61,671 @@ async def delayed_setting_change(setting, value): if sys.platform.startswith("win"): PlaySound('Audio/compleated.wav', SND_FILENAME | SND_ASYNC) +def cal_osc(self, cx, cy): + if self.eye_id == "EyeId.RIGHT": + flipx = self.settings.gui_flip_x_axis_right + else: + flipx = self.settings.gui_flip_x_axis_left + if self.calibration_frame_counter == 0: + self.calibration_frame_counter = None + self.xoff = cx + self.yoff = cy + if sys.platform.startswith("win"): + PlaySound('Audio/compleated.wav', SND_FILENAME | SND_ASYNC) + elif self.calibration_frame_counter != None: + self.settings.gui_recenter_eyes = False + if cx > self.xmax: + self.xmax = cx + if cx < self.xmin: + self.xmin = cx + if cy > self.ymax: + self.ymax = cy + if cy < self.ymin: + self.ymin = cy + self.calibration_frame_counter -= 1 + if self.settings.gui_recenter_eyes == True: + self.xoff = cx + self.yoff = cy + if self.ts == 0: + self.settings.gui_recenter_eyes = False + if sys.platform.startswith("win"): + PlaySound('Audio/compleated.wav', SND_FILENAME | SND_ASYNC) + else: + self.ts = self.ts - 1 + else: + self.ts = 10 + + xl = float( + (cx - self.xoff) / (self.xmax - self.xoff) + ) + xr = float( + (cx - self.xoff) / (self.xmin - self.xoff) + ) + yu = float( + (cy - self.yoff) / (self.ymin - self.yoff) + ) + yd = float( + (cy - self.yoff) / (self.ymax - self.yoff) + ) + + out_x = 0 + out_y = 0 + if self.settings.gui_flip_y_axis: # check config on flipped values settings and apply accordingly + if yd > 0: + out_y = max(0.0, min(1.0, yd)) + if yu > 0: + out_y = -abs(max(0.0, min(1.0, yu))) + else: + if yd > 0: + out_y = -abs(max(0.0, min(1.0, yd))) + if yu > 0: + out_y = max(0.0, min(1.0, yu)) + + if flipx: #TODO Check for working function + if xr > 0: + out_x = -abs(max(0.0, min(1.0, xr))) + if xl > 0: + out_x = max(0.0, min(1.0, xl)) + else: + if xr > 0: + out_x = max(0.0, min(1.0, xr)) + if xl > 0: + out_x = -abs(max(0.0, min(1.0, xl))) + + try: + noisy_point = np.array([out_x, out_y]) # fliter our values with a One Euro Filter + point_hat = self.one_euro_filter(noisy_point) + out_x = point_hat[0] + out_y = point_hat[1] + except: + pass + return out_x, out_y + + + + + + +#HSF \/ +# cache param +lru_maxsize_vvs = 16 +lru_maxsize_vs = 64 +lru_maxsize_s = 512 +lru_maxsize_m = 1024 +lru_maxsize_l = 2048 # For functions with a large number of calls and a small amount of output data +lru_maxsize_vl = 4096 # 8192 #For functions with a very large number of calls and a small amount of output data + + + +@lru_cache(maxsize=lru_maxsize_vs) +def _step2byte(iterable, itemsize): + """ + 
https://github.com/chainer/chainer/blob/a8e15cbe55a90854a3918b8b5a976abbbff9ec94/chainer/functions/array/as_strided.py#L125 + :param iterable: + :param itemsize: + :return: + """ + return tuple([i * itemsize for i in iterable]) + + +@lru_cache(maxsize=lru_maxsize_vs) +def _min_index(shape, strides, storage_offset): + """ + https://github.com/chainer/chainer/blob/a8e15cbe55a90854a3918b8b5a976abbbff9ec94/chainer/functions/array/as_strided.py#L125 + Returns the leftest index in the array (in the unit-steps) + Args: + shape (tuple of int): The shape of output. + strides (tuple of int): + The strides of output, given in the unit of steps. + storage_offset (int): + The offset between the head of allocated memory and the pointer of + first element, given in the unit of steps. + Returns: + int: The leftest pointer in the array + """ + sh_st_neg = [sh_st for sh_st in zip(shape, strides) if sh_st[1] < 0] + if not sh_st_neg: + return storage_offset + else: + return storage_offset + functools.reduce( + lambda base, sh_st: base + (sh_st[0] - 1) * sh_st[1], sh_st_neg, 0) + + +@lru_cache(maxsize=lru_maxsize_vs) +def _max_index(shape, strides, storage_offset): + """ + https://github.com/chainer/chainer/blob/a8e15cbe55a90854a3918b8b5a976abbbff9ec94/chainer/functions/array/as_strided.py#L125 + Returns the rightest index in the array + Args: + shape (tuple of int): The shape of output. + strides (tuple of int): The strides of output, given in unit-steps. + storage_offset (int): + The offset between the head of allocated memory and the pointer of + first element, given in the unit of steps. + Returns: + int: The rightest pointer in the array + """ + sh_st_pos = [sh_st for sh_st in zip(shape, strides) if sh_st[1] > 0] + if not sh_st_pos: + return storage_offset + else: + return storage_offset + functools.reduce( + lambda base, sh_st: base + (sh_st[0] - 1) * sh_st[1], sh_st_pos, 0) + +def _get_base_array(array): + """ + https://github.com/chainer/chainer/blob/a8e15cbe55a90854a3918b8b5a976abbbff9ec94/chainer/functions/array/as_strided.py#L125 + Get the founder of :class:`numpy.ndarray`. + Args: + array (:class:`numpy.ndarray`): + The view of the base array. + Returns: + :class:`numpy.ndarray`: + The base array. + """ + base_array_candidate = array + while base_array_candidate.base is not None: + base_array_candidate = base_array_candidate.base + return base_array_candidate + + +def _stride_array(array, shape, strides, storage_offset): + """ + https://github.com/chainer/chainer/blob/a8e15cbe55a90854a3918b8b5a976abbbff9ec94/chainer/functions/array/as_strided.py#L125 + Wrapper of :func:`numpy.lib.stride_tricks.as_strided`. + .. note: + ``strides`` and ``storage_offset`` is given in the unit of steps + instead the unit of bytes. This specification differs from that of + :func:`numpy.lib.stride_tricks.as_strided`. + Args: + array (:class:`numpy.ndarray` of :class:`cupy.ndarray`): + The base array for the returned view. + shape (tuple of int): + The shape of the returned view. + strides (tuple of int): + The strides of the returned view, given in the unit of steps. + storage_offset (int): + The offset from the leftest pointer of allocated memory to + the first element of returned view, given in the unit of steps. + Returns: + :class:`numpy.ndarray` or :class:`cupy.ndarray`: + The new view for the base array. 
+ """ + + min_index = _min_index(shape, strides, storage_offset) + max_index = _max_index(shape, strides, storage_offset) + + strides = _step2byte(strides, array.itemsize) + storage_offset, = _step2byte((storage_offset,), array.itemsize) + + if min_index < 0: + raise ValueError('Out of buffer: too small index was specified') + + base_array = _get_base_array(array) + if (max_index + 1) * base_array.itemsize > base_array.nbytes: + raise ValueError('Out of buffer: too large index was specified') + + return np.ndarray(shape, base_array.dtype, base_array.data, + storage_offset, strides) + + +# From functools +_CacheInfo2 = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"]) + +class _HashedSeq2(list): + """ This class guarantees that hash() will be called no more than once + per element. This is important because the lru_cache() will hash + the key multiple times on a cache miss. + + """ + + __slots__ = 'hashvalue' + + def __init__(self, tup, hash=hash): + self[:] = tup + self.hashvalue = hash(tup) + + def __hash__(self): + return self.hashvalue + + +def _make_key2(args, kwds, typed, + kwd_mark=(object(),), + fasttypes={int, str}, + tuple=tuple, type=type, len=len): + """Make a cache key from optionally typed positional and keyword arguments + + The key is constructed in a way that is flat as possible rather than + as a nested structure that would take more memory. + + If there is only a single argument and its data type is known to cache + its hash value, then that argument is returned without a wrapper. This + saves space and improves lookup speed. + + """ + # All of code below relies on kwds preserving the order input by the user. + # Formerly, we sorted() the kwds before looping. The new way is *much* + # faster; however, it means that f(x=1, y=2) will now be treated as a + # distinct call from f(y=2, x=1) which will be cached separately. + key = args + if kwds: + key += kwd_mark + for item in kwds.items(): + key += item + key = tuple(xxhash.xxh3_128_intdigest(k) if isinstance(k, np.ndarray) else k for k in key) + if typed: + key += tuple(type(v) for v in args) + if kwds: + key += tuple(type(v) for v in kwds.values()) + elif len(key) == 1 and type(key[0]) in fasttypes: + return key[0] + return _HashedSeq2(key) + + + +def np_lru_cache(maxsize=128, typed=False): + """Least-recently-used cache decorator. + + If *maxsize* is set to None, the LRU features are disabled and the cache + can grow without bound. + + If *typed* is True, arguments of different types will be cached separately. + For example, f(3.0) and f(3) will be treated as distinct calls with + distinct results. + + Arguments to the cached function must be hashable. + + View the cache statistics named tuple (hits, misses, maxsize, currsize) + with f.cache_info(). Clear the cache and statistics with f.cache_clear(). + Access the underlying function with f.__wrapped__. + + See: https://en.wikipedia.org/wiki/Cache_replacement_policies#Least_recently_used_(LRU) + + """ + + # Users should only access the lru_cache through its public API: + # cache_info, cache_clear, and f.__wrapped__ + # The internals of the lru_cache are encapsulated for thread safety and + # to allow the implementation to change (including a possible C version). 
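+    # Compared with functools.lru_cache, the main functional change is the key
+    # builder: _make_key2 above digests numpy.ndarray arguments with
+    # xxhash.xxh3_128_intdigest instead of hashing them directly, so whole
+    # frames can be passed to cached helpers without "unhashable type" errors.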
+ + if isinstance(maxsize, int): + # Negative maxsize is treated as 0 + if maxsize < 0: + maxsize = 0 + elif callable(maxsize) and isinstance(typed, bool): + # The user_function was passed in directly via the maxsize argument + user_function, maxsize = maxsize, 128 + wrapper = _np_lru_cache_wrapper(user_function, maxsize, typed, _CacheInfo2) + wrapper.cache_parameters = lambda: {'maxsize': maxsize, 'typed': typed} + return functools.update_wrapper(wrapper, user_function) + elif maxsize is not None: + raise TypeError( + 'Expected first argument to be an integer, a callable, or None') + + def decorating_function(user_function): + wrapper = _np_lru_cache_wrapper(user_function, maxsize, typed, _CacheInfo2) + wrapper.cache_parameters = lambda: {'maxsize': maxsize, 'typed': typed} + return functools.update_wrapper(wrapper, user_function) + + return decorating_function + + + +def _np_lru_cache_wrapper(user_function, maxsize, typed, _CacheInfo): + # Constants shared by all lru cache instances: + sentinel = object() # unique object used to signal cache misses + make_key = _make_key2 # build a key from the function arguments + PREV, NEXT, KEY, RESULT = 0, 1, 2, 3 # names for the link fields + + cache = {} + hits = misses = 0 + full = False + cache_get = cache.get # bound method to lookup a key or return None + cache_len = cache.__len__ # get cache size without calling len() + lock = _thread.RLock() # because linkedlist updates aren't threadsafe + root = [] # root of the circular doubly linked list + root[:] = [root, root, None, None] # initialize by pointing to self + + if maxsize == 0: + + def wrapper(*args, **kwds): + # No caching -- just a statistics update + nonlocal misses + misses += 1 + result = user_function(*args, **kwds) + return result + + elif maxsize is None: + + def wrapper(*args, **kwds): + # Simple caching without ordering or size limit + nonlocal hits, misses + key = make_key(args, kwds, typed) + result = cache_get(key, sentinel) + if result is not sentinel: + hits += 1 + return result + misses += 1 + result = user_function(*args, **kwds) + cache[key] = result + return result + + else: + + def wrapper(*args, **kwds): + # Size limited caching that tracks accesses by recency + nonlocal root, hits, misses, full + key = make_key(args, kwds, typed) + with lock: + link = cache_get(key) + if link is not None: + # Move the link to the front of the circular queue + link_prev, link_next, _key, result = link + link_prev[NEXT] = link_next + link_next[PREV] = link_prev + last = root[PREV] + last[NEXT] = root[PREV] = link + link[PREV] = last + link[NEXT] = root + hits += 1 + return result + misses += 1 + result = user_function(*args, **kwds) + with lock: + if key in cache: + # Getting here means that this same key was added to the + # cache while the lock was released. Since the link + # update is already done, we need only return the + # computed result and update the count of misses. + pass + elif full: + # Use the old root to store the new key and result. + oldroot = root + oldroot[KEY] = key + oldroot[RESULT] = result + # Empty the oldest link and make it the new root. + # Keep a reference to the old key and old result to + # prevent their ref counts from going to zero during the + # update. That will prevent potentially arbitrary object + # clean-up code (i.e. __del__) from running while we're + # still adjusting the links. + root = oldroot[NEXT] + oldkey = root[KEY] + oldresult = root[RESULT] + root[KEY] = root[RESULT] = None + # Now update the cache dictionary. 
+ del cache[oldkey] + # Save the potentially reentrant cache[key] assignment + # for last, after the root and links have been put in + # a consistent state. + cache[key] = oldroot + else: + # Put result in a new link at the front of the queue. + last = root[PREV] + link = [last, root, key, result] + last[NEXT] = root[PREV] = cache[key] = link + # Use the cache_len bound method instead of the len() function + # which could potentially be wrapped in an lru_cache itself. + full = (cache_len() >= maxsize) + return result + + def cache_info(): + """Report cache statistics""" + with lock: + return _CacheInfo(hits, misses, maxsize, cache_len()) + + def cache_clear(): + """Clear the cache and cache statistics""" + nonlocal hits, misses, full + with lock: + cache.clear() + root[:] = [root, root, None, None] + hits = misses = 0 + full = False + + wrapper.cache_info = cache_info + wrapper.cache_clear = cache_clear + return wrapper + + +class CvParameters: + # It may be a little slower because a dict named "self" is read for each function call. + def __init__(self, radius, step): + # self.prev_radius=radius + self._radius = radius + self.pad = 2 * radius + # self.prev_step=step + self._step = step + self._hsf = HaarSurroundFeature(radius) + # self._imagesize = None + + # @lru_cache(maxsize=lru_maxsize_vs) + def get_rpsh(self): + return self.radius, self.pad, self.step, self.hsf + + @property + def radius(self): + return self._radius + + @radius.setter + def radius(self, now_radius): + # self.prev_radius=self._radius + self._radius = now_radius + self.pad = 2 * now_radius + self.hsf = now_radius + + @property + def step(self): + return self._step + + @step.setter + def step(self, now_step): + # self.prev_step=self.step + self._step = now_step + + @property + def hsf(self): + return self._hsf + + @hsf.setter + def hsf(self, now_radius): + self._hsf = HaarSurroundFeature(now_radius) + + +class HaarSurroundFeature: + + def __init__(self, r_inner, r_outer=None, val=None): + if r_outer is None: + r_outer = r_inner * 3 + + r_inner2 = r_inner * r_inner + count_inner = r_inner2 + count_outer = r_outer * r_outer - r_inner2 + + if val is None: + val_inner = 1.0 / r_inner2 + val_outer = -val_inner * count_inner / count_outer + + else: + val_inner = val[0] + val_outer = val[1] + + self.val_in = np.array(val_inner, dtype=np.float64) + self.val_out = np.array(val_outer, dtype=np.float64) + self.r_in = r_inner + self.r_out = r_outer + + def get_kernel(self): + # Defined here, but not yet used? + # Create a kernel filled with the value of self.val_out + kernel = np.ones(shape=(2 * self.r_out - 1, 2 * self.r_out - 1), dtype=np.float64) * self.val_out + + # Set the values of the inner area of the kernel using array slicing + start = (self.r_out - self.r_in) + end = (self.r_out + self.r_in - 1) + kernel[start:end, start:end] = self.val_in + + return kernel + +def to_gray(frame): + frame_len = len(frame.shape) + if frame_len == 2: + return frame + if frame_len == 3: + frame_s2 = frame.shape[2] + if frame_s2 == 3: + return cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) + elif frame_s2 == 4: + return cv2.cvtColor(frame, cv2.COLOR_BGRA2GRAY) + raise ValueError('Unsupported number of channels') + + +@lru_cache(maxsize=lru_maxsize_vs) +def frameint_get_xy_step(imageshape, xysteps, pad, start_offset=None, end_offset=None): + """ + + :param imageshape: (height(row),width(col),channel) or (height(row),width(col)). 
row==y,cal==x + :param xysteps: (x,y) + :param pad: int + :param start_offset: (x,y) or None + :param end_offset: (x,y) or None + :return: xy_np:tuple(x,y), xy_min:tuple(x,y), xy_rin_pm:tuple(x+rin,y+rin,x-rin,y-rin), xy_rout_pm:tuple(x+rout,y+rout,x-rout,y-rout) + """ + if len(imageshape) == 2: + row, col = imageshape + else: + row, col = imageshape[0], imageshape[1] + row -= 1 + col -= 1 + x_step, y_step = xysteps + + # This is not beautiful. + start_pad_x = start_pad_y = end_pad_x = end_pad_y = pad + + if start_offset is not None: + start_pad_x += start_offset[0] + start_pad_y += start_offset[1] + if end_offset is not None: + end_pad_x += end_offset[0] + end_pad_y += end_offset[1] + y_np = np.arange(start_pad_y, row - end_pad_y, y_step) + x_np = np.arange(start_pad_x, col - end_pad_x, x_step) + + xy_np = (x_np, y_np) + + return xy_np + +@lru_cache(maxsize=lru_maxsize_vvs) +def get_emp_p_array(len_sxy, frameint_x, frame_int_dtype, fcshape): + len_sx, len_sy = len_sxy + inner_sum = np.empty((len_sy, len_sx), dtype=frame_int_dtype) + outer_sum = np.empty((len_sy, len_sx), dtype=frame_int_dtype) + p_temp = np.empty((len_sy, frameint_x), dtype=frame_int_dtype) + p00 = np.empty((len_sy, len_sx), dtype=frame_int_dtype) + p11 = np.empty((len_sy, len_sx), dtype=frame_int_dtype) + p01 = np.empty((len_sy, len_sx), dtype=frame_int_dtype) + p10 = np.empty((len_sy, len_sx), dtype=frame_int_dtype) + response_list = np.empty((len_sy, len_sx), dtype=np.float64) + frame_conv = np.zeros(shape=fcshape[0], dtype=np.uint8) + frame_conv_stride = _stride_array(frame_conv, shape=(len_sy, len_sx), strides=(fcshape[1], fcshape[2]), + storage_offset=0) + return (inner_sum, outer_sum), p_temp, ( + p00, p11, p01, p10), response_list, (frame_conv, frame_conv_stride) + + +# @profile +def conv_int(frame_int, kernel, step, padding, xy_step): + """ + + :param frame_int: + :param kernel: hsf + :param step: (x,y) + :param padding: int + :return: + """ + # Init + row_b, col_b = frame_int.shape + row, col = row_b, col_b + row -= 1 + col -= 1 + x_step, y_step = step + padding2 = 2 * padding + f_shape = row - padding2, col - padding2 + r_in = kernel.r_in + r_in3 = r_in * 3 + + len_sx, len_sy = len(xy_step[0]), len(xy_step[1]) + col_rin = col_b * kernel.r_in + col_padrin = col_b * (padding + r_in) + col_ystep = col_b * y_step + + inout_sum, p_temp, p_list, response_list, frameconvlist = get_emp_p_array((len_sx, len_sy), col_b, + frame_int.dtype, (f_shape, f_shape[1] * y_step, x_step)) + inner_sum, outer_sum = inout_sum + p00, p11, p01, p10 = p_list + frame_conv, frame_conv_stride = frameconvlist + + inarr_mm = _stride_array(frame_int, shape=(len_sy, len_sx), strides=(col_ystep, x_step), storage_offset=col_rin + r_in) + inarr_mp = _stride_array(frame_int, shape=(len_sy, len_sx), strides=(col_ystep, x_step), storage_offset=col_rin + r_in3) + inarr_pm = _stride_array(frame_int, shape=(len_sy, len_sx), strides=(col_ystep, x_step), storage_offset=(col_padrin + r_in)) + inarr_pp = _stride_array(frame_int, shape=(len_sy, len_sx), strides=(col_ystep, x_step), storage_offset=(col_padrin + r_in3)) + + # inner_sum[:, :] = inarr_mm + inarr_pp - inarr_mp - inarr_pm + inner_sum[:, :] = inarr_mm + inner_sum += inarr_pp + inner_sum -= inarr_mp + inner_sum -= inarr_pm + + y_ro_m = xy_step[1] - kernel.r_out + x_ro_m = xy_step[0] - kernel.r_out + y_ro_p = xy_step[1] + kernel.r_out + x_ro_p = xy_step[0] + kernel.r_out + + # y,x + # p00=max(y_ro_m,0),max(x_ro_m,0) + # p11=min(y_ro_p,ylim),min(x_ro_p,xlim) + # p01=max(y_ro_m,0),min(x_ro_p,xlim) + # 
p10=min(y_ro_p,ylim),max(x_ro_m,0) + + # Bottleneck here, I want to make it smarter. Someone do it. + # p00 calc + np.take(frame_int, y_ro_m, axis=0, mode="clip", out=p_temp) + np.take(p_temp, x_ro_m, axis=1, mode="clip", out=p00) + + # p01 calc + np.take(p_temp, x_ro_p, axis=1, mode="clip", out=p01) + + # p11 calc + np.take(frame_int, y_ro_p, axis=0, mode="clip", out=p_temp) + np.take(p_temp, x_ro_p, axis=1, mode="clip", out=p11) + + # p10 calk + np.take(p_temp, x_ro_m, axis=1, mode="clip", out=p10) + + # p00=np.take(np.take(frame_int, y_ro_m, axis=0, mode="clip"), x_ro_m, axis=1, mode="clip") + # p11=np.take(np.take(frame_int, y_ro_p, axis=0, mode="clip"), x_ro_p, axis=1, mode="clip") + # p01=np.take(np.take(frame_int, y_ro_m, axis=0, mode="clip"), x_ro_p, axis=1, mode="clip") + # p10=np.take(np.take(frame_int, y_ro_p, axis=0, mode="clip"), x_ro_m, axis=1, mode="clip") + + outer_sum[:, :] = p00 + p11 - p01 - p10 - inner_sum + + np.multiply(kernel.val_in, inner_sum, dtype=np.float64, out=response_list) + response_list += kernel.val_out * outer_sum + + # min_response, max_val, min_loc, max_loc = cv2.minMaxLoc(response_list) + min_response, _, min_loc, _ = cv2.minMaxLoc(response_list) + + center = ((xy_step[0][min_loc[0]] - padding), (xy_step[1][min_loc[1]] - padding)) + + frame_conv_stride[:, :] = response_list + # or + # frame_conv_stride[:, :] = response_list.astype(np.uint8) + + return frame_conv, min_response, center + + + + def fit_rotated_ellipse_ransac( data, iter=5, sample_num=10, offset=80 # 80.0, 10, 80 @@ -195,6 +869,12 @@ class EyeProcessor: self.previous_rotation = self.config.rotation_angle self.calibration_frame_counter + #HSF + # CV param + self.default_radius = 20 + self.default_step = (5, 5) # bigger the steps,lower the processing time! ofc acc also takes an impact + # default_step==(x,y) + try: min_cutoff = float(self.settings.gui_min_cutoff) # 0.0004 beta = float(self.settings.gui_speed_coefficient) # 0.9 @@ -423,18 +1103,151 @@ class EyeProcessor: ) print("[INFO] BLINK Detected.") - def run(self): + + + + + + + + def HSF(self): + rng = np.random.default_rng() + cvparam = CvParameters(self.default_radius, self.default_step) + + cv_mode = ["first_frame", "radius_adjust", "init", "normal"] + now_mode = cv_mode[0] + + radius_cand_list = [] + radius_range = (self.default_radius - 10, self.default_radius + 10) # (10,30) + prev_max_size = 60 * 3 # 60fps*3sec + # response_min=0 + response_max = 0 + response_list = [] + + if now_mode == cv_mode[1]: + prev_res_len = len(response_list) + # adjustment of radius + if prev_res_len == 1: + cvparam.radius = radius_range[0] + elif prev_res_len == 2: + cvparam.radius = radius_range[1] + elif prev_res_len == 3: + # response_list==[default_radius,radius_range[0],radius_range[1]] + sort_res = sorted(response_list, key=lambda x: x[1])[0] + if sort_res[0] == self.default_radius: + cvparam.radius = self.default_radius + now_mode = cv_mode[2] + response_list = [] + elif sort_res[0] == radius_range[0]: + radius_cand_list = [i for i in range(radius_range[0], self.default_radius, self.default_step[0])][1:] + cvparam.radius = radius_cand_list.pop() + else: + radius_cand_list = [i for i in range(self.default_radius, radius_range[1], self.default_step[0])][1:] + cvparam.radius = radius_cand_list.pop() + else: + # Better make it a binary search. 
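+                # Linear sweep over the remaining candidate radii: one candidate is
+                # scored per pass, and once the list is empty the radius with the
+                # lowest response so far is kept and the mode advances to "init".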
+ if len(radius_cand_list) == 0: + sort_res = sorted(response_list, key=lambda x: x[1])[0] + cvparam.radius = sort_res[0] + now_mode = cv_mode[2] + response_list = [] + else: + cvparam.radius = radius_cand_list.pop() + + radius, pad, step, hsf = cvparam.get_rpsh() + + gray_frame = to_gray(self.current_image_gray) #pretty sure we do no need this step, should already be receiving gray frame + frame = self.current_image_gray + # Calculate the integral image of the frame + int_start_time = timeit.default_timer() + frame_pad = cv2.copyMakeBorder(gray_frame, pad, pad, pad, pad, cv2.BORDER_CONSTANT) #cv2.BORDER_REPLICATE + frame_int = cv2.integral(frame_pad) + + # Convolve the feature with the integral image + conv_int_start_time = timeit.default_timer() + xy_step = frameint_get_xy_step(frame_int.shape, step, pad, start_offset=None, end_offset=None) + frame_conv, response, center_xy = conv_int(frame_int, hsf, step, pad, xy_step) + + crop_start_time = timeit.default_timer() + # Define the center point and radius + # center_y, center_x = center + center_x, center_y = center_xy + upper_x = center_x + radius + lower_x = center_x - radius + upper_y = center_y + radius + lower_y = center_y - radius + + # Crop the image using the calculated bounds + # cropped_image = gray_frame[lower_x:upper_x, lower_y:upper_y] + cropped_image = gray_frame[lower_y:upper_y, lower_x:upper_x] + + if now_mode == cv_mode[0] or now_mode == cv_mode[1]: + response_list.append((radius, response)) # , center_x, center_y)) + elif now_mode == cv_mode[2]: + if len(response_list) < prev_max_size: + response_list.append(cropped_image.mean()) + else: + response_list = np.array(response_list) + # 25%,75% + # This value may need to be adjusted depending on the environment. + quartile_1, quartile_3 = np.percentile(response_list, [25, 75]) + iqr = quartile_3 - quartile_1 + # response_min = quartile_1 - (iqr * 1.5) + response_max = quartile_3 + (iqr * 1.5) + now_mode = cv_mode[3] + else: + if cropped_image.size < 400: + print("Something's wrong.") + else: + if cropped_image.mean() > response_max: # or cropped_image.mean() < response_min: + # blink + print("BLINK") + cv2.circle(frame, (center_x, center_y), 20, (0, 0, 255), -1) + self.output_images_and_update(frame,EyeInformation(InformationOrigin.HSF, 0, 0, 0, True)) + # If you want to update response_max. 
it may be more cost-effective to rewrite response_list in the following way + # https://stackoverflow.com/questions/42771110/fastest-way-to-left-cycle-a-numpy-array-like-pop-push-for-a-queue + + print(center_x, center_y) + out_x, out_y = cal_osc(self, center_x, center_y) + cv2.circle(frame, (center_x, center_y), 10, (0, 0, 255), -1) + self.output_images_and_update(frame,EyeInformation(InformationOrigin.HSF, out_x, out_y, 0, False)) + + if now_mode != cv_mode[0] and now_mode != cv_mode[1]: + if cropped_image.size < 400: + pass + + if now_mode == cv_mode[0]: + now_mode = cv_mode[1] + + #self.output_images_and_update(thresh, EyeInformation(InformationOrigin.FAILURE, 0, 0, 0, False)) + # return + + #self.output_images_and_update(larger_threshold,EyeInformation(InformationOrigin.HSF, out_x, out_y, 0, False),) + # return + #self.output_images_and_update(larger_threshold, EyeInformation(InformationOrigin.HSF, 0, 0, 0, True)) + + def RANSAC3D(self): camera_model = None detector_3d = None - out_pupil_dialation = 1 + f = 0 + # If our ROI configuration has changed, reset our model and detector + if (camera_model is None + or detector_3d is None + or camera_model.resolution != ( + self.config.roi_window_w, + self.config.roi_window_h, + ) + ): + camera_model = CameraModel( + focal_length=self.config.focal_length, + resolution=(self.config.roi_window_w, self.config.roi_window_h), + ) + detector_3d = Detector3D( + camera=camera_model, long_term_mode=DetectorMode.blocking + ) - if self.eye_id == "EyeId.RIGHT": - flipx = self.settings.gui_flip_x_axis_right - else: - flipx = self.settings.gui_flip_x_axis_left - - while True: - # Check to make sure we haven't been requested to close + # Check to make sure we haven't been requested to close + try: if self.cancellation_event.is_set(): print("Exiting RANSAC thread") return @@ -444,7 +1257,7 @@ class EyeProcessor: # Sleep a bit while we wait. if self.cancellation_event.wait(0.1): return - continue + pass # If our ROI configuration has changed, reset our model and detector if (camera_model is None @@ -473,10 +1286,10 @@ class EyeProcessor: ) = self.capture_queue_incoming.get(block=True, timeout=0.2) except queue.Empty: # print("No image available") - continue + pass if not self.capture_crop_rotate_image(): - continue + pass # Convert the image to grayscale, and set up thresholding. Thresholds here are basically a # low-pass filter that will set any pixel < the threshold value to 0. Thresholding is user @@ -548,7 +1361,7 @@ class EyeProcessor: else: print("[INFO] Blob fallback disabled. Assuming blink.") self.output_images_and_update(thresh, EyeInformation(InformationOrigin.RANSAC, 0, 0, 0, True)) - continue + pass # Find our largest hull, which we expect will probably be the ellipse that represents the 2d # area for the pupil, which we can use as the search area for the eye in general. @@ -562,12 +1375,7 @@ class EyeProcessor: largest_hull.reshape(-1, 2) ) except: - if self.settings.gui_blob_fallback: - self.blob_tracking_fallback() - else: - print("[INFO] Blob fallback disabled. Assuming blink.") - self.output_images_and_update(thresh, EyeInformation(InformationOrigin.RANSAC, 0, 0, 0, True)) - continue + f = 1 # Get axis and angle of the ellipse, using pupil labs 2d algos. 
The next bit of code ranges # from somewhat to completely magic, as most of it happens in native libraries (hence passing @@ -600,81 +1408,8 @@ class EyeProcessor: d = result_3d["diameter_3d"] - if self.calibration_frame_counter == 0: - self.calibration_frame_counter = None - self.xoff = cx - self.yoff = cy - if sys.platform.startswith("win"): - PlaySound('Audio/compleated.wav', SND_FILENAME | SND_ASYNC) - elif self.calibration_frame_counter != None: # TODO reset calibration values on button press - if exm > self.xmax: - self.xmax = exm - if exm < self.xmin: - self.xmin = exm - if eym > self.ymax: - self.ymax = eym - if eym < self.ymin: - self.ymin = eym - self.calibration_frame_counter -= 1 - if self.settings.gui_recenter_eyes: - self.xoff = cx - self.yoff = cy - if self.ts == 0: - self.settings.gui_recenter_eyes = False - if sys.platform.startswith("win"): - PlaySound('Audio/compleated.wav', SND_FILENAME | SND_ASYNC) - else: - self.ts = self.ts - 1 - else: - self.ts = 20 - - xl = float( - (cx - self.xoff) / (self.xmax - self.xoff) - ) - xr = float( - (cx - self.xoff) / (self.xmin - self.xoff) - ) - yu = float( - (cy - self.yoff) / (self.ymin - self.yoff) - ) - yd = float( - (cy - self.yoff) / (self.ymax - self.yoff) - ) - - out_x = 0 - out_y = 0 - - if self.settings.gui_flip_y_axis: - if yd > 0: - out_y = max(0.0, min(1.0, yd)) - if yu > 0: - out_y = -abs(max(0.0, min(1.0, yu))) - else: - if yd > 0: - out_y = -abs(max(0.0, min(1.0, yd))) - if yu > 0: - out_y = max(0.0, min(1.0, yu)) - - if flipx: - if xr > 0: - out_x = -abs(max(0.0, min(1.0, xr))) - if xl > 0: - out_x = max(0.0, min(1.0, xl)) - else: - if xr > 0: - out_x = max(0.0, min(1.0, xr)) - if xl > 0: - out_x = -abs(max(0.0, min(1.0, xl))) - - try: - noisy_point = np.array([out_x, out_y]) # fliter our values with a One Euro Filter - point_hat = self.one_euro_filter(noisy_point) - out_x = point_hat[0] - out_y = point_hat[1] - except: - pass - - output_info = EyeInformation(InformationOrigin.RANSAC, out_x, out_y, out_pupil_dialation, False) + out_x, out_y = cal_osc(self, cx, cy) #filter and calibrate values + output_info = EyeInformation(InformationOrigin.RANSAC, out_x, out_y, 1, False) # Draw our image and stack it for visual output try: @@ -722,3 +1457,68 @@ class EyeProcessor: # Shove a concatenated image out to the main GUI thread for rendering self.output_images_and_update(thresh, output_info) + except: + f = 1 + return f + + + def run(self): + + while True: + # Check to make sure we haven't been requested to close + if self.cancellation_event.is_set(): + print("Exiting Tracking thread") + return + + if self.config.roi_window_w <= 0 or self.config.roi_window_h <= 0: + # At this point, we're waiting for the user to set up the ROI window in the GUI. + # Sleep a bit while we wait. + if self.cancellation_event.wait(0.1): + return + continue + try: + if self.capture_queue_incoming.empty(): + self.capture_event.set() + # Wait a bit for images here. If we don't get one, just try again. + ( + self.current_image, + self.current_frame_number, + self.current_fps, + ) = self.capture_queue_incoming.get(block=True, timeout=0.2) + except queue.Empty: + # print("No image available") + continue + self.current_image_gray = cv2.cvtColor( + self.current_image, cv2.COLOR_BGR2GRAY + ) + if not self.capture_crop_rotate_image(): + continue + + + try: + f = self.RANSAC3D() + if f == 1 and self.settings.gui_blob_fallback: #if a fail has been reported and other algo is enabled, use it. 
+                    self.blob_tracking_fallback()
+            except:
+                print("[WARN] ALL ALGORITHMS HAVE FAILED OR ARE DISABLED.")
+
+
+        #FLOW MOCK
+
+        #IF PYE3D
+            #RUN PYE3D
+            #receive values; if a fail is reported, go to the next method
+
+        #IF HSF
+            #RUN HSF
+            #receive values; if a fail is reported, go to the next method
+
+        #IF BLOB
+            #RUN BLOB (ew tbh)
+            #receive values; if a fail is reported, end here as a complete fail
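
The cal_osc helper factored out at the top of the patch maps a raw pupil centre onto the [-1, 1] range sent over OSC, using the calibrated extremes and the recenter offset. Below is a minimal single-axis sketch of that mapping, assuming cmin < off < cmax; the function and argument names are illustrative, and the per-eye flip settings and One Euro Filter smoothing applied in the patch are left out.

def normalize_axis(c, off, cmin, cmax):
    # Fraction of the calibrated range travelled from the recenter offset
    # toward each extreme; at most one of the two is positive for a given c.
    toward_max = (c - off) / (cmax - off)
    toward_min = (c - off) / (cmin - off)
    if toward_max > 0:
        return -min(1.0, toward_max)  # same sign convention as the un-flipped x path
    if toward_min > 0:
        return min(1.0, toward_min)
    return 0.0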
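
The core of the new HSF path is a Haar-like surround feature scored on an integral image: a dark inner box inside a brighter surrounding box gives the lowest response, and conv_int evaluates that response for every strided candidate centre at once. The sketch below computes the same response for a single candidate, non-vectorized, using the HaarSurroundFeature weights; box_sum, hsf_response and the grid names are illustrative, and the candidate is assumed to sit at least r_out pixels from the image border (the patch guarantees this by padding with cv2.copyMakeBorder before cv2.integral).

import cv2

def box_sum(ii, x0, y0, x1, y1):
    # Sum of gray[y0:y1, x0:x1] from the (H+1, W+1) summed-area table
    # returned by cv2.integral: four lookups per box.
    return int(ii[y1, x1] - ii[y0, x1] - ii[y1, x0] + ii[y0, x0])

def hsf_response(ii, cx, cy, r_in, r_out):
    inner = box_sum(ii, cx - r_in, cy - r_in, cx + r_in, cy + r_in)
    outer = box_sum(ii, cx - r_out, cy - r_out, cx + r_out, cy + r_out) - inner
    # Same weights as HaarSurroundFeature: a dark centre and bright surround
    # push the response down, so the pupil is the candidate with the minimum.
    val_in = 1.0 / (r_in * r_in)
    val_out = -1.0 / (r_out * r_out - r_in * r_in)
    return val_in * inner + val_out * outer

# Brute-force equivalent of conv_int + cv2.minMaxLoc: score a coarse grid and
# keep the minimum (r_in=20 matches default_radius, r_out defaults to 3*r_in).
# ii = cv2.integral(padded_gray_frame)
# best = min((hsf_response(ii, x, y, 20, 60), (x, y)) for y in ys for x in xs)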
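
The blink check in the later HSF modes is a Tukey fence on the mean brightness of the cropped pupil region: roughly three seconds of crop means are collected in "init" mode, and any later frame whose crop mean exceeds Q3 + 1.5*IQR is reported as a blink, since the closed lid is brighter than the pupil. A short sketch with illustrative names:

import numpy as np

def blink_threshold(crop_means):
    # crop_means: mean gray value of the pupil crop for ~180 recent frames.
    q1, q3 = np.percentile(crop_means, [25, 75])
    return q3 + 1.5 * (q3 - q1)  # Tukey upper fence (response_max in the patch)

def is_blink(crop_mean, threshold):
    return crop_mean > threshold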