From 7e41a65aac435242922dcb8c66c1136f5c05d47a Mon Sep 17 00:00:00 2001
From: Lorow
Date: Thu, 4 Jul 2024 20:51:19 +0200
Subject: [PATCH] Feature/etvr module support (#106)

* Initial changes
* Mostly clean up, refactor registering listeners to make sense, backport tests
* Add initial implementation of VRCFTModuleSender
* Add basic GUI for the module settings
* Fix tooltip descriptions
* Fix type validation bugs, fix typos
* Add checkbox to switch to ETVR Module
* Apply Black formatting
* Remove coverage by default
* Fix timeout in tests
* HEAVY WIP: Refactor native output. NOTE: the entire old OSC implementation is kept as a live reference and will be removed once the refactor is done; this also lays the groundwork for the other modes, as they're quite similar
* HEAVY WIP: Refactor v1 params output
* HEAVY WIP: Refactor v2 params output
* Finish refactoring v2 and v1, fix up tests, refactor native
* Add tests for v1 params
* Add tests for native params
* Fix OSC not coming back up after config reset; remove the reset command (the config sends everything that changed anyway) and sunset the idea of using a single client, simplifying the code a bit
* Rename gui_PortNumber to gui_VRCFTModulePort for readability
* Clean up EyeID usage
* Clean up osc after rebase
* Make VRChatOSCSender a bit more readable
* Remove unused VRChatOSCReceiver; this is taken care of by the generic OSCReceiver
* Commit crimes with try_convert_to_float to make osc, PySimpleGUI and pydantic happy
* Clean up after merge
* Disable emulation by default
* Fix OSCReceiver crashing on unknown addresses
* Adjust VRCFT Module settings to look better in game
* Fix recalibrate and recenter over OSC only working for the right eye
* Fix the save-and-restart button not restarting the tracking
* Fix broken tracking on v1 params for eye_x, clean up the implementation
* Fix a regular value being passed to OSC listeners instead of an OSCMessage
* Add a TODO, probably to be ignored
* Add support for a custom ETVR VRCFT Module listening address

TODO:
# - there's ghosts in the machine - vrc osc is not working properly
# - min/maxing will require field combinators in the modules lmao
---
 EyeTrackApp/Benchmark/bench_hsrac.py          | 621 ++++++++++++------
 EyeTrackApp/blob.py                           |   9 -
 EyeTrackApp/camera.py                         |  30 +-
 EyeTrackApp/camera_widget.py                  |  85 ++-
 EyeTrackApp/config.py                         | 144 +++-
 EyeTrackApp/ellipse_based_pupil_dilation.py   |  14 +-
 EyeTrackApp/enums.py                          |  86 +--
 EyeTrackApp/eye.py                            |   1 +
 EyeTrackApp/eye_processor.py                  | 102 +--
 EyeTrackApp/eyetrackapp.py                    | 115 +++-
 EyeTrackApp/haar_surround_feature.py          | 124 +---
 EyeTrackApp/intensity_based_openness.py       |  14 +-
 EyeTrackApp/osc.py                            | 428 ------------
 EyeTrackApp/osc/OSCMessage.py                 |  13 +
 EyeTrackApp/osc/VRCFTModuleMessenger.py       |  17 +
 EyeTrackApp/osc/VRChatOSCSender.py            | 307 +++++++++
 EyeTrackApp/osc/__init__.py                   |   0
 EyeTrackApp/osc/osc.py                        | 214 ++++++
 EyeTrackApp/ransac.py                         |   9 +-
 EyeTrackApp/settings/BaseSettings.py          |  18 +-
 EyeTrackApp/settings/VRCFTModuleSettings.py   |  34 +
 .../{ => settings}/algo_settings_widget.py    |   6 +-
 .../{ => settings}/general_settings_widget.py |   3 +-
 .../settings/modules/CommonFieldValidators.py |  31 +-
 .../settings/modules/OSCSettingsModule.py     |  11 +
 .../settings/modules/VRCFTSettingsModule.py   | 225 +++++++
 EyeTrackApp/utils/eye_falloff.py              |   8 +-
 EyeTrackApp/utils/misc_utils.py               |  14 +-
 conftest.py                                   |   2 +-
 pyproject.toml                                |   2 +-
 tests/__init__.py                             |  20 +
 tests/test_osc.py                             | 314 ---------
 tests/test_osc_native_params.py               | 269 ++++++++
 tests/test_osc_v1_params.py                   | 274 ++++++++
 tests/test_osc_v2_params.py                   | 275 ++++++++
 35 files changed, 2521 insertions(+), 1318 deletions(-)
 delete mode 100644 EyeTrackApp/osc.py
 create mode 100644 EyeTrackApp/osc/OSCMessage.py
 create mode 100644 EyeTrackApp/osc/VRCFTModuleMessenger.py
 create mode 100644 EyeTrackApp/osc/VRChatOSCSender.py
 create mode 100644 EyeTrackApp/osc/__init__.py
 create mode 100644 EyeTrackApp/osc/osc.py
 create mode 100644 EyeTrackApp/settings/VRCFTModuleSettings.py
 rename EyeTrackApp/{ => settings}/algo_settings_widget.py (96%)
 rename EyeTrackApp/{ => settings}/general_settings_widget.py (98%)
 create mode 100644 EyeTrackApp/settings/modules/VRCFTSettingsModule.py
 delete mode 100644 tests/test_osc.py
 create mode 100644 tests/test_osc_native_params.py
 create mode 100644
tests/test_osc_v1_params.py create mode 100644 tests/test_osc_v2_params.py diff --git a/EyeTrackApp/Benchmark/bench_hsrac.py b/EyeTrackApp/Benchmark/bench_hsrac.py index 9e1ccc7..8e818f5 100644 --- a/EyeTrackApp/Benchmark/bench_hsrac.py +++ b/EyeTrackApp/Benchmark/bench_hsrac.py @@ -11,13 +11,14 @@ from numpy.linalg import _umath_linalg if os.environ.get("PYCHARM_HOSTED", None) is None: sys.path.append("../") - from utils.img_utils import safe_crop # noqa - from utils.misc_utils import clamp # noqa - from utils.time_utils import FPSResult, TimeitResult, format_time # noqa + from utils.img_utils import safe_crop # noqa + from utils.misc_utils import clamp # noqa + from utils.time_utils import FPSResult, TimeitResult, format_time # noqa else: - from EyeTrackApp.utils.img_utils import safe_crop - from EyeTrackApp.utils.misc_utils import clamp - from EyeTrackApp.utils.time_utils import FPSResult, TimeitResult, format_time + from utils.img_utils import safe_crop + from utils.misc_utils import clamp + from utils.time_utils import FPSResult, TimeitResult, format_time + # from line_profiler_pycharm import profile @@ -34,9 +35,9 @@ save_img = False save_video = False loop_num = 1 if imshow_enable or save_img or save_video else 100 input_video_path = "Pro_demo2.mp4" -output_img_path = f'./{this_file_name}_{alg_ver}_new.png' if not old_mode else f'./{this_file_name}_{alg_ver}_old.png' -output_video_path = f'./{this_file_name}_{alg_ver}_new.mp4' if not old_mode else f'./{this_file_name}_{alg_ver}_old.mp4' -logfilename = f'./{this_file_name}_{alg_ver}_new.log' if not old_mode else f'./{this_file_name}_{alg_ver}_old.log' +output_img_path = f"./{this_file_name}_{alg_ver}_new.png" if not old_mode else f"./{this_file_name}_{alg_ver}_old.png" +output_video_path = f"./{this_file_name}_{alg_ver}_new.mp4" if not old_mode else f"./{this_file_name}_{alg_ver}_old.mp4" +logfilename = f"./{this_file_name}_{alg_ver}_new.log" if not old_mode else f"./{this_file_name}_{alg_ver}_old.log" print_enable = False # I don't recommend changing to True. # RANSAC @@ -65,7 +66,7 @@ default_step = (5, 5) # bigger the steps,lower the processing time! ofc acc als logger = getLogger(__name__) logger.setLevel(INFO) -formatter = Formatter('%(message)s') +formatter = Formatter("%(message)s") handler = StreamHandler() handler.setLevel(INFO) handler.setFormatter(formatter) @@ -94,73 +95,72 @@ class CvParameters: # self.prev_step=step self._step = step self._hsf = HaarSurroundFeature(radius) - + def get_rpsh(self): return self._radius, self.pad, self._step, self._hsf # Essentially, the following would be preferable, but it would take twice as long to call. 
# return self.radius, self.pad, self.step, self.hsf - + @property def radius(self): return self._radius - + @radius.setter def radius(self, now_radius): # self.prev_radius=self._radius self._radius = now_radius self.pad = 2 * now_radius self.hsf = now_radius - + @property def step(self): return self._step - + @step.setter def step(self, now_step): # self.prev_step=self.step self._step = now_step - + @property def hsf(self): return self._hsf - + @hsf.setter def hsf(self, now_radius): self._hsf = HaarSurroundFeature(now_radius) class HaarSurroundFeature: - def __init__(self, r_inner, r_outer=None, val=None): if r_outer is None: r_outer = r_inner * 3 r_inner2 = r_inner * r_inner count_inner = r_inner2 count_outer = r_outer * r_outer - r_inner2 - + if val is None: val_inner = 1.0 / r_inner2 val_outer = -val_inner * count_inner / count_outer - + else: val_inner = val[0] val_outer = val[1] - + self.val_in = float(val_inner) # np.array(val_inner, dtype=np.float64) self.val_out = float(val_outer) # np.array(val_outer, dtype=np.float64) self.r_in = r_inner self.r_out = r_outer - + def get_kernel(self): # Defined here, but not yet used? # Create a kernel filled with the value of self.val_out kernel = np.ones(shape=(2 * self.r_out - 1, 2 * self.r_out - 1), dtype=np.float64) * self.val_out - + # Set the values of the inner area of the kernel using array slicing - start = (self.r_out - self.r_in) - end = (self.r_out + self.r_in - 1) + start = self.r_out - self.r_in + end = self.r_out + self.r_in - 1 kernel[start:end, start:end] = self.val_in - + return kernel @@ -173,38 +173,38 @@ def to_gray(frame): @lru_cache(maxsize=lru_maxsize_vvs) def get_frameint_empty_array(frame_shape, pad, x_step, y_step, r_in, r_out): frame_int_dtype = np.intc - + frame_pad = np.empty((frame_shape[0] + (pad * 2), frame_shape[1] + (pad * 2)), dtype=np.uint8) - + row, col = frame_pad.shape - + frame_int = np.empty((row + 1, col + 1), dtype=frame_int_dtype) - + y_steps_arr = np.arange(pad, row - pad, y_step, dtype=np.int16) x_steps_arr = np.arange(pad, col - pad, x_step, dtype=np.int16) len_sx, len_sy = len(x_steps_arr), len(y_steps_arr) len_syx = (len_sy, len_sx) y_end = pad + (y_step * (len_sy - 1)) x_end = pad + (x_step * (len_sx - 1)) - + y_rin_m = slice(pad - r_in, y_end - r_in + 1, y_step) y_rin_p = slice(pad + r_in, y_end + r_in + 1, y_step) x_rin_m = slice(pad - r_in, x_end - r_in + 1, x_step) x_rin_p = slice(pad + r_in, x_end + r_in + 1, x_step) - + in_p00 = frame_int[y_rin_m, x_rin_m] in_p11 = frame_int[y_rin_p, x_rin_p] in_p01 = frame_int[y_rin_m, x_rin_p] in_p10 = frame_int[y_rin_p, x_rin_m] - + y_ro_m = np.maximum(y_steps_arr - r_out, 0) # [:,np.newaxis] x_ro_m = np.maximum(x_steps_arr - r_out, 0) # [np.newaxis,:] y_ro_p = np.minimum(row, y_steps_arr + r_out) # [:,np.newaxis] x_ro_p = np.minimum(col, x_steps_arr + r_out) # [np.newaxis,:] - + inner_sum = np.empty(len_syx, dtype=frame_int_dtype) outer_sum = np.empty(len_syx, dtype=frame_int_dtype) - + out_p_temp = np.empty((len_sy, col + 1), dtype=frame_int_dtype) out_p00 = np.empty(len_syx, dtype=frame_int_dtype) out_p11 = np.empty(len_syx, dtype=frame_int_dtype) @@ -213,17 +213,57 @@ def get_frameint_empty_array(frame_shape, pad, x_step, y_step, r_in, r_out): response_list = np.empty(len_syx, dtype=np.float64) # or np.int32 frame_conv = np.zeros(shape=(row - 2 * pad, col - 2 * pad), dtype=np.uint8) # or np.float64 frame_conv_stride = frame_conv[::y_step, ::x_step] - - return frame_pad, frame_int, inner_sum, in_p00, in_p11, in_p01, in_p10, y_ro_m, x_ro_m, y_ro_p, 
x_ro_p, outer_sum, out_p_temp, out_p00, out_p11, out_p01, out_p10, response_list, frame_conv, frame_conv_stride + + return ( + frame_pad, + frame_int, + inner_sum, + in_p00, + in_p11, + in_p01, + in_p10, + y_ro_m, + x_ro_m, + y_ro_p, + x_ro_p, + outer_sum, + out_p_temp, + out_p00, + out_p11, + out_p01, + out_p10, + response_list, + frame_conv, + frame_conv_stride, + ) -def conv_int(frame_int, kernel, inner_sum, in_p00, in_p11, in_p01, in_p10, y_ro_m, x_ro_m, y_ro_p, x_ro_p, outer_sum, out_p_temp, - out_p00, out_p11, out_p01, out_p10, response_list, frame_conv_stride): +def conv_int( + frame_int, + kernel, + inner_sum, + in_p00, + in_p11, + in_p01, + in_p10, + y_ro_m, + x_ro_m, + y_ro_p, + x_ro_p, + outer_sum, + out_p_temp, + out_p00, + out_p11, + out_p01, + out_p10, + response_list, + frame_conv_stride, +): # inner_sum[:, :] = in_p00 + in_p11 - in_p01 - in_p10 cv2.add(in_p00, in_p11, dst=inner_sum) cv2.subtract(inner_sum, in_p01, dst=inner_sum) cv2.subtract(inner_sum, in_p10, dst=inner_sum) - + # p00 calc frame_int.take(y_ro_m, axis=0, mode="clip", out=out_p_temp) out_p_temp.take(x_ro_m, axis=1, mode="clip", out=out_p00) @@ -234,7 +274,7 @@ def conv_int(frame_int, kernel, inner_sum, in_p00, in_p11, in_p01, in_p10, y_ro_ out_p_temp.take(x_ro_p, axis=1, mode="clip", out=out_p11) # p10 calc out_p_temp.take(x_ro_m, axis=1, mode="clip", out=out_p10) - + # outer_sum[:, :] = out_p00 + out_p11 - out_p01 - out_p10 - inner_sum cv2.add(out_p00, out_p11, dst=outer_sum) cv2.subtract(outer_sum, out_p01, dst=outer_sum) @@ -242,23 +282,25 @@ def conv_int(frame_int, kernel, inner_sum, in_p00, in_p11, in_p01, in_p10, y_ro_ cv2.subtract(outer_sum, inner_sum, dst=outer_sum) # cv2.transform(np.asarray([p00, p11, -p01, -p10, -inner_sum]).transpose((1, 2, 0)), np.ones((1, 5)), # dst=outer_sum) # https://answers.opencv.org/question/3120/how-to-sum-a-3-channel-matrix-to-a-one-channel-matrix/ - + # np.multiply(kernel.val_in, inner_sum, dtype=np.float64, out=response_list) # response_list += kernel.val_out * outer_sum - cv2.addWeighted(inner_sum, - kernel.val_in, - outer_sum, # or p00 + p11 - p01 - p10 - inner_sum - kernel.val_out, - 0.0, - dtype=cv2.CV_64F, # or cv2.CV_32S - dst=response_list) - + cv2.addWeighted( + inner_sum, + kernel.val_in, + outer_sum, # or p00 + p11 - p01 - p10 - inner_sum + kernel.val_out, + 0.0, + dtype=cv2.CV_64F, # or cv2.CV_32S + dst=response_list, + ) + min_response, _, min_loc, _ = cv2.minMaxLoc(response_list) - + frame_conv_stride[:, :] = response_list # or # frame_conv_stride[:, :] = response_list.astype(np.uint8) - + return min_response, min_loc @@ -272,14 +314,14 @@ class AutoRadiusCalc(object): self.response_list = [] self.radius_cand_list = [] self.adj_comp_flag = False - + self.radius_middle_index = None - + self.left_item = None self.right_item = None self.left_index = None self.right_index = None - + def get_radius(self): prev_res_len = len(self.response_list) # adjustment of radius @@ -299,7 +341,9 @@ class AutoRadiusCalc(object): else: self.left_item = self.response_list[0] self.right_item = self.response_list[2] - self.radius_cand_list = [i for i in range(self.left_item[0], self.right_item[0] + auto_radius_step, auto_radius_step)] + self.radius_cand_list = [ + i for i in range(self.left_item[0], self.right_item[0] + auto_radius_step, auto_radius_step) + ] self.left_index = 0 self.right_index = len(self.radius_cand_list) - 1 self.radius_middle_index = (self.left_index + self.right_index) // 2 @@ -321,13 +365,13 @@ class AutoRadiusCalc(object): return 
self.radius_cand_list[self.radius_middle_index] self.adj_comp_flag = True return self.radius_cand_list[self.radius_middle_index] - + def get_radius_base(self): """ Use it when the new version doesn't work well. :return: """ - + prev_res_len = len(self.response_list) # adjustment of radius if prev_res_len == 1: @@ -364,7 +408,7 @@ class AutoRadiusCalc(object): else: self.adj_comp_flag = False return self.radius_cand_list.pop() - + def add_response(self, radius, response): self.response_list.append((radius, response)) return None @@ -376,7 +420,7 @@ class BlinkDetector(object): self.response_max = None self.enable_detect_flg = False self.quartile_1 = None - + def calc_thresh(self): # Calculate response_max by computing interquartile range, IQR # self.response_listo = np.array(self.response_listo) @@ -385,28 +429,28 @@ class BlinkDetector(object): # quartile_1, quartile_3 = np.percentile(self.response_listo, [25, 75]) # iqr = quartile_3 - quartile_1 # self.response_maxo = quartile_3 + (iqr * 1.5) - + # quartile_1, quartile_3 = np.percentile(self.response_list, [25, 75]) # or quartile_1, quartile_3 = np.percentile(np.array(self.response_list), [25, 75]) self.quartile_1 = quartile_1 iqr = quartile_3 - quartile_1 # response_min = quartile_1 - (iqr * 1.5) - + self.response_max = float(quartile_3 + (iqr * 1.5)) # or # self.response_max = quartile_3 + (iqr * 1.5) - + self.enable_detect_flg = True return None - + def detect(self, now_response): return now_response > self.response_max - + def add_response(self, response): self.response_list.append(response) return None - + def response_len(self): return len(self.response_list) @@ -446,64 +490,117 @@ def get_ransac_empty_array_old(iter_num, sample_num, len_data): dm_mul = datamod[:, 4] # = data[:, 0] * data[:, 1] dm_neg = datamod[:, 6] # = -datamod[:, 2] inv_ext = np.linalg.linalg.get_linalg_error_extobj(np.linalg.linalg._raise_linalgerror_singular) - return dm_rng, dm_rng_swap, dm_rng_swap_trans, dm_rng_5x5, dm_rng_p5smp, dm_rng_p, dm_rng_p_npaxis, ellipse_y_arr, swap_index, dm_brod, dm_rng_six, dm_rng_p_24, dm_rng_p_10, el_y_arr_2, el_y_arr_3, datamod, datamod_b, dm_data, dm_p2, dm_mul, dm_neg, rdm_index_init_arr, rdm_index, rdm_index_smpnum, ellipse_data_arr, th_abs, inv_ext + return ( + dm_rng, + dm_rng_swap, + dm_rng_swap_trans, + dm_rng_5x5, + dm_rng_p5smp, + dm_rng_p, + dm_rng_p_npaxis, + ellipse_y_arr, + swap_index, + dm_brod, + dm_rng_six, + dm_rng_p_24, + dm_rng_p_10, + el_y_arr_2, + el_y_arr_3, + datamod, + datamod_b, + dm_data, + dm_p2, + dm_mul, + dm_neg, + rdm_index_init_arr, + rdm_index, + rdm_index_smpnum, + ellipse_data_arr, + th_abs, + inv_ext, + ) # @profile def fit_rotated_ellipse_ransac_old(data: np.ndarray, sfc: np.random.Generator, iter_num=100, sample_num=10, offset=80): # before changing these values, please read up on the ransac algorithm # However if you want to change any value just know that higher iterations will make processing frames slower - + # The array contents do not change during the loop, so only one call is needed. # They say len is faster than shape. 
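Backing up to the BlinkDetector refactored above: calc_thresh() sets the blink threshold to the Tukey upper fence, quartile_3 + 1.5 * IQR, over the mean crop brightness collected during the warm-up frames. A minimal sketch with made-up brightness samples (the values are illustrative only, not real tracker output):

```
import numpy as np

# Hypothetical mean-brightness responses gathered during blink_init_frames.
responses = [38.0, 40.5, 41.2, 39.8, 42.0, 40.9, 39.5]
quartile_1, quartile_3 = np.percentile(np.array(responses), [25, 75])
iqr = quartile_3 - quartile_1
response_max = float(quartile_3 + (iqr * 1.5))  # same fence calc_thresh() computes

# detect(): a frame whose crop is much brighter than usual reads as a blink.
print(75.3 > response_max)  # True -> treated as a blink
```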
# Reference url: https://stackoverflow.com/questions/35547853/what-is-faster-python3s-len-or-numpys-shape len_data = len(data) - + if len_data < sample_num: return None - - dm_rng, dm_rng_swap, dm_rng_swap_trans, dm_rng_5x5, dm_rng_p5smp, dm_rng_p, dm_rng_p_npaxis, ellipse_y_arr, swap_index, dm_brod, dm_rng_six, dm_rng_p_24, dm_rng_p_10, el_y_arr_2, el_y_arr_3, datamod, datamod_b, dm_data, dm_p2, dm_mul, dm_neg, rdm_index_init_arr, rdm_index, rdm_index_smpnum, ellipse_data_arr, th_abs, inv_ext = get_ransac_empty_array_old( - iter_num, sample_num, len_data) - + + ( + dm_rng, + dm_rng_swap, + dm_rng_swap_trans, + dm_rng_5x5, + dm_rng_p5smp, + dm_rng_p, + dm_rng_p_npaxis, + ellipse_y_arr, + swap_index, + dm_brod, + dm_rng_six, + dm_rng_p_24, + dm_rng_p_10, + el_y_arr_2, + el_y_arr_3, + datamod, + datamod_b, + dm_data, + dm_p2, + dm_mul, + dm_neg, + rdm_index_init_arr, + rdm_index, + rdm_index_smpnum, + ellipse_data_arr, + th_abs, + inv_ext, + ) = get_ransac_empty_array_old(iter_num, sample_num, len_data) + dm_data[:, :] = data # [:] dm_p2[:, :] = data * data dm_mul[:] = data[:, 0] * data[:, 1] dm_neg[:] = -dm_p2[:, 0] # -1 * data[:, 0] ** 2# - + sfc.permuted(rdm_index_init_arr, axis=1, out=rdm_index) - + # np.take replaces a[ind,:] and is 3-4 times faster, https://gist.github.com/rossant/4645217 # a.take() is faster than np.take(a) datamod.take(rdm_index_smpnum, axis=0, mode="clip", out=dm_rng) - + dm_rng_swap[:, :, :] = dm_rng[:, :, swap_index] # or # dm_rng.take(swap_index, axis=2, mode="clip", out=dm_rng_swap) # or # dm_rng_swap = np.take(dm_rng,[4, 3, 0, 1, 5],axis=2) - + np.matmul(dm_rng_swap_trans, dm_rng_swap, out=dm_rng_5x5) # np.linalg.solve(np.matmul(dm_rng_swap_trans, dm_rng_swap), dm_rng_swap_trans) # solve is slow https://github.com/bogovicj/JaneliaMLCourse/issues/1 - _umath_linalg.inv(dm_rng_5x5, signature='d->d', - extobj=inv_ext, out=dm_rng_5x5) + _umath_linalg.inv(dm_rng_5x5, signature="d->d", extobj=inv_ext, out=dm_rng_5x5) np.matmul(dm_rng_5x5, dm_rng_swap_trans, out=dm_rng_p5smp) - + np.matmul(dm_rng_p5smp, dm_rng_six, out=dm_rng_p_npaxis) - + el_y_arr_2[:, :] = dm_rng_p_24 el_y_arr_3[:, :] = dm_rng_p_10 - + cv2.gemm(ellipse_y_arr, datamod_b, 1.0, dm_brod, 1.0, dst=ellipse_data_arr, flags=cv2.GEMM_2_T) - + np.abs(ellipse_data_arr, out=th_abs) cv2.threshold(th_abs, offset, 1.0, cv2.THRESH_BINARY_INV, dst=th_abs) - ellipse_data_index = \ - cv2.minMaxLoc(cv2.reduce(th_abs, 1, cv2.REDUCE_SUM))[3][1] - + ellipse_data_index = cv2.minMaxLoc(cv2.reduce(th_abs, 1, cv2.REDUCE_SUM))[3][1] + # error_num = ellipse_data_arr[ellipse_data_index].sum() error_num = cv2.sumElems(ellipse_data_arr[ellipse_data_index])[0] effective_sample_p_arr = dm_rng_p[ellipse_data_index].tolist() - + return fit_rotated_ellipse_old(error_num, effective_sample_p_arr) @@ -532,7 +629,7 @@ def fit_rotated_ellipse_old(data, P): return None error_sum = data # sum(data) # print("fitting error = %.3f" % (error_sum)) - + return cx, cy, w, h, theta @@ -571,64 +668,117 @@ def get_ransac_empty_array_new(iter_num, sample_num, len_data): dm_mul = datamod[:, 4] # = data[:, 0] * data[:, 1] dm_neg = datamod[:, 6] # = -datamod[:, 2] inv_ext = np.linalg.linalg.get_linalg_error_extobj(np.linalg.linalg._raise_linalgerror_singular) - return dm_rng, dm_rng_swap, dm_rng_swap_trans, dm_rng_5x5, dm_rng_p5smp, dm_rng_p, dm_rng_p_npaxis, ellipse_y_arr, swap_index, dm_brod, dm_rng_six, dm_rng_p_24, dm_rng_p_10, el_y_arr_2, el_y_arr_3, datamod, datamod_b, dm_data, dm_p2, dm_mul, dm_neg, rdm_index_init_arr, rdm_index, rdm_index_smpnum, 
ellipse_data_arr, th_abs, inv_ext + return ( + dm_rng, + dm_rng_swap, + dm_rng_swap_trans, + dm_rng_5x5, + dm_rng_p5smp, + dm_rng_p, + dm_rng_p_npaxis, + ellipse_y_arr, + swap_index, + dm_brod, + dm_rng_six, + dm_rng_p_24, + dm_rng_p_10, + el_y_arr_2, + el_y_arr_3, + datamod, + datamod_b, + dm_data, + dm_p2, + dm_mul, + dm_neg, + rdm_index_init_arr, + rdm_index, + rdm_index_smpnum, + ellipse_data_arr, + th_abs, + inv_ext, + ) # @profile def fit_rotated_ellipse_ransac_new(data: np.ndarray, sfc: np.random.Generator, iter_num=100, sample_num=10, offset=80): # before changing these values, please read up on the ransac algorithm # However if you want to change any value just know that higher iterations will make processing frames slower - + # The array contents do not change during the loop, so only one call is needed. # They say len is faster than shape. # Reference url: https://stackoverflow.com/questions/35547853/what-is-faster-python3s-len-or-numpys-shape len_data = len(data) - + if len_data < sample_num: return None - - dm_rng, dm_rng_swap, dm_rng_swap_trans, dm_rng_5x5, dm_rng_p5smp, dm_rng_p, dm_rng_p_npaxis, ellipse_y_arr, swap_index, dm_brod, dm_rng_six, dm_rng_p_24, dm_rng_p_10, el_y_arr_2, el_y_arr_3, datamod, datamod_b, dm_data, dm_p2, dm_mul, dm_neg, rdm_index_init_arr, rdm_index, rdm_index_smpnum, ellipse_data_arr, th_abs, inv_ext = get_ransac_empty_array_new( - iter_num, sample_num, len_data) - + + ( + dm_rng, + dm_rng_swap, + dm_rng_swap_trans, + dm_rng_5x5, + dm_rng_p5smp, + dm_rng_p, + dm_rng_p_npaxis, + ellipse_y_arr, + swap_index, + dm_brod, + dm_rng_six, + dm_rng_p_24, + dm_rng_p_10, + el_y_arr_2, + el_y_arr_3, + datamod, + datamod_b, + dm_data, + dm_p2, + dm_mul, + dm_neg, + rdm_index_init_arr, + rdm_index, + rdm_index_smpnum, + ellipse_data_arr, + th_abs, + inv_ext, + ) = get_ransac_empty_array_new(iter_num, sample_num, len_data) + dm_data[:, :] = data # [:] dm_p2[:, :] = data * data dm_mul[:] = data[:, 0] * data[:, 1] dm_neg[:] = -dm_p2[:, 0] # -1 * data[:, 0] ** 2# - + sfc.permuted(rdm_index_init_arr, axis=1, out=rdm_index) - + # np.take replaces a[ind,:] and is 3-4 times faster, https://gist.github.com/rossant/4645217 # a.take() is faster than np.take(a) datamod.take(rdm_index_smpnum, axis=0, mode="clip", out=dm_rng) - + dm_rng_swap[:, :, :] = dm_rng[:, :, swap_index] # or # dm_rng.take(swap_index, axis=2, mode="clip", out=dm_rng_swap) # or # dm_rng_swap = np.take(dm_rng,[4, 3, 0, 1, 5],axis=2) - + np.matmul(dm_rng_swap_trans, dm_rng_swap, out=dm_rng_5x5) # np.linalg.solve(np.matmul(dm_rng_swap_trans, dm_rng_swap), dm_rng_swap_trans) # solve is slow https://github.com/bogovicj/JaneliaMLCourse/issues/1 - _umath_linalg.inv(dm_rng_5x5, signature='d->d', - extobj=inv_ext, out=dm_rng_5x5) + _umath_linalg.inv(dm_rng_5x5, signature="d->d", extobj=inv_ext, out=dm_rng_5x5) np.matmul(dm_rng_5x5, dm_rng_swap_trans, out=dm_rng_p5smp) - + np.matmul(dm_rng_p5smp, dm_rng_six, out=dm_rng_p_npaxis) - + el_y_arr_2[:, :] = dm_rng_p_24 el_y_arr_3[:, :] = dm_rng_p_10 - + cv2.gemm(ellipse_y_arr, datamod_b, 1.0, dm_brod, 1.0, dst=ellipse_data_arr, flags=cv2.GEMM_2_T) - + np.abs(ellipse_data_arr, out=th_abs) cv2.threshold(th_abs, offset, 1.0, cv2.THRESH_BINARY_INV, dst=th_abs) - ellipse_data_index = \ - cv2.minMaxLoc(cv2.reduce(th_abs, 1, cv2.REDUCE_SUM))[3][1] - + ellipse_data_index = cv2.minMaxLoc(cv2.reduce(th_abs, 1, cv2.REDUCE_SUM))[3][1] + # error_num = ellipse_data_arr[ellipse_data_index].sum() error_num = cv2.sumElems(ellipse_data_arr[ellipse_data_index])[0] effective_sample_p_arr = 
dm_rng_p[ellipse_data_index].tolist() - + return fit_rotated_ellipse_new(error_num, effective_sample_p_arr) @@ -645,7 +795,7 @@ def fit_rotated_ellipse_new(data, P): cxy = b * b - 4 * a * c cx = (2 * c * d - b * e) / cxy cy = (2 * a * e - b * d) / cxy - + cu = a * cx * cx + b * cx * cy + c * cy * cy - P[4] # cu = c * cy * cy + cx * (a * cx + b * cy) - P[4] # here: https://stackoverflow.com/questions/327002/which-is-faster-in-python-x-5-or-math-sqrtx @@ -658,7 +808,7 @@ def fit_rotated_ellipse_new(data, P): return None error_sum = data # sum(data) # print("fitting error = %.3f" % (error_sum)) - + return cx, cy, w, h, theta @@ -674,23 +824,35 @@ def get_center_noclamp(center_xy, radius): lower_x = center_x - radius upper_y = center_y + radius lower_y = center_y - radius - + ransac_upper_x = center_x + max(20, radius) ransac_lower_x = center_x - max(20, radius) ransac_upper_y = center_y + max(20, radius) ransac_lower_y = center_y - max(20, radius) ransac_xy_offset = (ransac_lower_x, ransac_lower_y) - return center_x, center_y, upper_x, lower_x, upper_y, lower_y, ransac_lower_x, ransac_lower_y, ransac_upper_x, ransac_upper_y, ransac_xy_offset + return ( + center_x, + center_y, + upper_x, + lower_x, + upper_y, + lower_y, + ransac_lower_x, + ransac_lower_y, + ransac_upper_x, + ransac_upper_y, + ransac_xy_offset, + ) class HSRAC_cls(object): def __init__(self): # I'd like to take into account things like print, end_time - start_time processing time, etc., but it's too much trouble. - + # For measuring total processing time - + self.main_start_time = timeit.default_timer() - + # self.rng = np.random.default_rng() # if old_mode: # self.cvparam = CvParameters_old(default_radius, default_step) @@ -698,22 +860,22 @@ class HSRAC_cls(object): # # os.environ["OPENBLAS_NUM_THREADS"]="1" # https://github.com/numpy/numpy/issues/22928 # self.cvparam = CvParameters_new(default_radius, default_step) self.cvparam = CvParameters(default_radius, default_step) - + self.cv_modeo = ["first_frame", "radius_adjust", "blink_adjust", "normal"] self.now_modeo = self.cv_modeo[0] - + self.auto_radius_calc = AutoRadiusCalc() self.blink_detector = BlinkDetector() self.center_q1 = BlinkDetector() - + self.cap = None - + self.timedict = {"to_gray": [], "int_img": [], "hsf": [], "crop": [], "ransac": [], "total_cv": []} - + # ransac # self.rng = np.random.default_rng() self.sfc = np.random.default_rng(np.random.SFC64()) - + # self.kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)) # or # https://stackoverflow.com/questions/31025368/erode-is-too-slow-opencv @@ -725,7 +887,7 @@ class HSRAC_cls(object): # cv2.getGaussianKernel(kernel size, sigma) # Increasing the kernel size improves accuracy but slows down performance. # Increasing sigma improves accuracy a little, but has less effect than kernel size. 
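The kernel comments above refer to the separable-Gaussian trick that single_run() applies later via cv2.sepFilter2D with self.gauss_k. A self-contained check of the equivalence (kernel size 5 and sigma 0 are placeholder values here, not the benchmark's tuned ones):

```
import cv2
import numpy as np

frame = np.random.randint(0, 255, (120, 160), dtype=np.uint8)
gauss_k = cv2.getGaussianKernel(5, 0)  # sigma <= 0 lets cv2 derive it from the size

# Filtering rows and columns with the 1-D kernel matches the full 2-D blur,
# while doing roughly 2*k multiplies per pixel instead of k*k.
separable = cv2.sepFilter2D(frame, -1, gauss_k, gauss_k)
full = cv2.GaussianBlur(frame, (5, 5), 0)
print(int(np.abs(separable.astype(int) - full.astype(int)).max()))  # 0, or 1 from rounding
```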
- + def open_video(self, video_path): # Temporary implementation to run cap = cv2.VideoCapture(video_path) @@ -733,7 +895,7 @@ class HSRAC_cls(object): raise IOError("Error opening video stream or file") self.cap = cap return True - + def read_frame(self): # Temporary implementation to run if not self.cap.isOpened(): @@ -746,44 +908,64 @@ class HSRAC_cls(object): self.current_image_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) return True return False - + # @profile def single_run(self): # Temporary implementation to run if imsave_flg: ori_frame = self.current_image_gray.copy() # debug code - + blink_bd = False if self.now_modeo == self.cv_modeo[1]: # adjustment of radius - + # debug print # if calc_print_enable: # temp_radius = self.auto_radius_calc.get_radius() # print('Now radius:', temp_radius) # self.cvparam.radius = temp_radius - + self.cvparam.radius = self.auto_radius_calc.get_radius() if self.auto_radius_calc.adj_comp_flag: self.now_modeo = self.cv_modeo[2] if not skip_blink_detect else self.cv_modeo[3] - + radius, pad, step, hsf = self.cvparam.get_rpsh() - + # For measuring processing time of image processing cv_start_time = timeit.default_timer() frame = self.current_image_gray gray_frame = frame self.timedict["to_gray"].append(timeit.default_timer() - cv_start_time) - + # Calculate the integral image of the frame int_start_time = timeit.default_timer() - frame_pad, frame_int, inner_sum, in_p00, in_p11, in_p01, in_p10, y_ro_m, x_ro_m, y_ro_p, x_ro_p, outer_sum, out_p_temp, out_p00, out_p11, out_p01, out_p10, response_list, frame_conv, frame_conv_stride = get_frameint_empty_array( - gray_frame.shape, pad, step[0], step[1], hsf.r_in, hsf.r_out) + ( + frame_pad, + frame_int, + inner_sum, + in_p00, + in_p11, + in_p01, + in_p10, + y_ro_m, + x_ro_m, + y_ro_p, + x_ro_p, + outer_sum, + out_p_temp, + out_p00, + out_p11, + out_p01, + out_p10, + response_list, + frame_conv, + frame_conv_stride, + ) = get_frameint_empty_array(gray_frame.shape, pad, step[0], step[1], hsf.r_in, hsf.r_out) cv2.copyMakeBorder(gray_frame, pad, pad, pad, pad, cv2.BORDER_CONSTANT, dst=frame_pad) cv2.integral(frame_pad, sum=frame_int, sdepth=cv2.CV_32S) - + self.timedict["int_img"].append(timeit.default_timer() - int_start_time) - + # Convolve the feature with the integral image conv_int_start_time = timeit.default_timer() # if old_mode: @@ -794,63 +976,101 @@ class HSRAC_cls(object): # response, hsf_min_loc = conv_int_new(frame_int, hsf, inner_sum, in_p00, in_p11, in_p01, in_p10, y_ro_m, x_ro_m, y_ro_p, x_ro_p, # outer_sum, out_p_temp, out_p00, out_p11, out_p01, out_p10, response_list, # frame_conv_stride) - response, hsf_min_loc = conv_int(frame_int, hsf, inner_sum, in_p00, in_p11, in_p01, in_p10, y_ro_m, x_ro_m, y_ro_p, x_ro_p, - outer_sum, out_p_temp, out_p00, out_p11, out_p01, out_p10, response_list, - frame_conv_stride) + response, hsf_min_loc = conv_int( + frame_int, + hsf, + inner_sum, + in_p00, + in_p11, + in_p01, + in_p10, + y_ro_m, + x_ro_m, + y_ro_p, + x_ro_p, + outer_sum, + out_p_temp, + out_p00, + out_p11, + out_p01, + out_p10, + response_list, + frame_conv_stride, + ) center_xy = get_hsf_center(pad, step[0], step[1], hsf_min_loc) # visualization of HSF # cv2.normalize(cv2.filter2D(cv2.filter2D(frame_pad, cv2.CV_64F, hsf.get_kernel()[hsf.get_kernel().shape[0]//2,:].reshape(1,-1), borderType=cv2.BORDER_CONSTANT), cv2.CV_64F, hsf.get_kernel()[:,hsf.get_kernel().shape[1]//2].reshape(-1,1), borderType=cv2.BORDER_CONSTANT),None,0,255,cv2.NORM_MINMAX,dtype=cv2.CV_8U)) - + 
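For readers new to the HSF code: every in_p00/in_p11/out_p00/... lookup that conv_int() performs above is the classic four-corner read on an integral image, which makes the surround-feature response O(1) per position regardless of radius. A toy demonstration of that identity:

```
import cv2
import numpy as np

img = np.arange(25, dtype=np.uint8).reshape(5, 5)
integral = cv2.integral(img, sdepth=cv2.CV_32S)  # shape (6, 6): one extra row/col of zeros

y0, x0, y1, x1 = 1, 1, 4, 4  # half-open box img[y0:y1, x0:x1]
box_sum = integral[y1, x1] - integral[y0, x1] - integral[y1, x0] + integral[y0, x0]
print(box_sum == img[y0:y1, x0:x1].sum())  # True
```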
self.timedict["hsf"].append(timeit.default_timer() - conv_int_start_time) - + crop_start_time = timeit.default_timer() # Define the center point and radius - - center_x, center_y, upper_x, lower_x, upper_y, lower_y, ransac_lower_x, ransac_lower_y, ransac_upper_x, ransac_upper_y, ransac_xy_offset = get_center_noclamp( - center_xy, radius) - + + ( + center_x, + center_y, + upper_x, + lower_x, + upper_y, + lower_y, + ransac_lower_x, + ransac_lower_y, + ransac_upper_x, + ransac_upper_y, + ransac_xy_offset, + ) = get_center_noclamp(center_xy, radius) + if self.now_modeo == self.cv_modeo[0] or self.now_modeo == self.cv_modeo[1]: # If mode is first_frame or radius_adjust, record current radius and response self.auto_radius_calc.add_response(radius, response) elif self.now_modeo == self.cv_modeo[2]: # Statistics for blink detection if self.blink_detector.response_len() < blink_init_frames: - self.blink_detector.add_response(cv2.mean(safe_crop(gray_frame, lower_x, lower_y, upper_x, upper_y, 1))[0]) - self.center_q1.add_response( - cv2.mean(safe_crop(gray_frame, center_x - max(20, radius), center_y - max(20, radius), center_x + max(20, radius), - center_y + max(20, radius), keepsize=False))[ - 0 - ] + self.blink_detector.add_response( + cv2.mean(safe_crop(gray_frame, lower_x, lower_y, upper_x, upper_y, 1))[0] ) - + self.center_q1.add_response( + cv2.mean( + safe_crop( + gray_frame, + center_x - max(20, radius), + center_y - max(20, radius), + center_x + max(20, radius), + center_y + max(20, radius), + keepsize=False, + ) + )[0] + ) + else: - + self.blink_detector.calc_thresh() self.center_q1.calc_thresh() self.now_modeo = self.cv_modeo[3] else: if self.blink_detector.enable_detect_flg and self.blink_detector.detect( - cv2.mean(safe_crop(gray_frame, lower_x, lower_y, upper_x, upper_y, 1))[0]): + cv2.mean(safe_crop(gray_frame, lower_x, lower_y, upper_x, upper_y, 1))[0] + ): # If the average value of cropped_image is greater than response_max # (i.e., if the cropimage is whitish blink blink_bd = True - + # if imshow_enable or save_video: # cv2.circle(frame, (orig_x, orig_y), 6, (0, 0, 255), -1) # cv2.circle(ori_frame, (center_x, center_y), 7, (255, 0, 0), -1) # If you want to update response_max. it may be more cost-effective to rewrite response_list in the following way # https://stackoverflow.com/questions/42771110/fastest-way-to-left-cycle-a-numpy-array-like-pop-push-for-a-queue - + # cv_end_time = timeit.default_timer() self.timedict["crop"].append(timeit.default_timer() - crop_start_time) # self.timedict["total_cv"].append(cv_end_time - cv_start_time) - + # if calc_print_enable: # the lower the response the better the likelyhood of there being a pupil. 
you can adujst the radius and steps accordingly # print('Kernel response:', response) # print('Pixel position:', center_xy) - + # # if imshow_enable: # if self.now_modeo != self.cv_modeo[0] and self.now_modeo != self.cv_modeo[1]: @@ -862,7 +1082,7 @@ class HSRAC_cls(object): # cv2.imshow("frame", frame) # if cv2.waitKey(1) & 0xFF == ord("q"): # pass - + if self.now_modeo == self.cv_modeo[0]: # Moving from first_frame to the next mode if skip_autoradius and skip_blink_detect: @@ -871,10 +1091,10 @@ class HSRAC_cls(object): self.now_modeo = self.cv_modeo[2] else: self.now_modeo = self.cv_modeo[1] - + # For measuring processing time of image processing ransac_start_time = timeit.default_timer() - + # frame_gray = cv2.GaussianBlur(frame, (5, 5), 0) # cv2.GaussianBlur is slow (uses 10% of the time of all this script) # use cv2.blur() @@ -888,24 +1108,21 @@ class HSRAC_cls(object): else: frame_gray = cv2.sepFilter2D(frame, -1, self.gauss_k, self.gauss_k) - # Crop the image using the calculated bounds # todo:safecrop tune frame_gray_crop = safe_crop(frame_gray, ransac_lower_x, ransac_lower_y, ransac_upper_x, ransac_upper_y, 1) th_frame, fic_frame = get_ransac_frame(frame_gray_crop.shape) frame = frame_gray_crop # todo: It can cause bugs. - + # this will need to be adjusted everytime hardware is changed (brightness of IR, Camera postion, etc)m # min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(frame_gray_crop) min_val = cv2.minMaxLoc(frame_gray_crop)[0] # threshold_value = min_val + thresh_add - - if old_mode: cv2.threshold(frame_gray_crop, min_val + thresh_add, 255, cv2.THRESH_BINARY_INV, dst=th_frame) # print(thresh.shape, frame_gray.shape) - + # cv2.morphologyEx(th_frame, cv2.MORPH_OPEN, self.kernel, dst=fic_frame) # cv2.morphologyEx(fic_frame, cv2.MORPH_CLOSE, self.kernel, dst=fic_frame) # cv2.bitwise_not(fic_frame, fic_frame) @@ -914,12 +1131,18 @@ class HSRAC_cls(object): cv2.morphologyEx(th_frame, cv2.MORPH_OPEN, self.kernel, dst=fic_frame) # or cv2.MORPH_CLOSE else: if not blink_bd and self.blink_detector.enable_detect_flg: - cv2.threshold(frame_gray_crop, (min_val + thresh_add + self.center_q1.quartile_1) / 2, 255, cv2.THRESH_BINARY_INV, dst=th_frame) - cv2.morphologyEx(th_frame, cv2.MORPH_OPEN, self.kernel, dst=fic_frame) - # cv2.morphologyEx(fic_frame, cv2.MORPH_CLOSE, self.kernel, dst=fic_frame) - # cv2.erode(fic_frame,self.kernel,dst=fic_frame) - # cv2.bitwise_not(fic_frame, fic_frame) - # cv2.morphologyEx(th_frame, cv2.MORPH_OPEN, self.kernel, dst=fic_frame) # or cv2.MORPH_CLOSE + cv2.threshold( + frame_gray_crop, + (min_val + thresh_add + self.center_q1.quartile_1) / 2, + 255, + cv2.THRESH_BINARY_INV, + dst=th_frame, + ) + cv2.morphologyEx(th_frame, cv2.MORPH_OPEN, self.kernel, dst=fic_frame) + # cv2.morphologyEx(fic_frame, cv2.MORPH_CLOSE, self.kernel, dst=fic_frame) + # cv2.erode(fic_frame,self.kernel,dst=fic_frame) + # cv2.bitwise_not(fic_frame, fic_frame) + # cv2.morphologyEx(th_frame, cv2.MORPH_OPEN, self.kernel, dst=fic_frame) # or cv2.MORPH_CLOSE else: cv2.threshold(frame_gray_crop, min_val + thresh_add, 255, cv2.THRESH_BINARY, dst=th_frame) # print(thresh.shape, frame_gray.shape) @@ -947,7 +1170,7 @@ class HSRAC_cls(object): # contours = (*contours, *cv2.findContours(fic_frame, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)[0]) # # or # # contours = (*contours, *cv2.findContours(fic_frame, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)[0]) - + if not contours: # If empty, go to next loop return int(center_x), int(center_y), th_frame, frame, gray_frame @@ -968,30 +1191,30 @@ class 
HSRAC_cls(object): # go to next loop # pass return int(center_x), int(center_y), th_frame, frame, gray_frame - + # crop_start_time = timeit.default_timer() cx, cy, w, h, theta = ransac_data # print(cx, cy) - # if w >= 2.1 * h: # new blink detection algo lmao this works pretty good actually - # pass - # return center_x, center_y, frame, frame, True - + # if w >= 2.1 * h: # new blink detection algo lmao this works pretty good actually + # pass + # return center_x, center_y, frame, frame, True + # cx = center_x - (csx - cx) # we find the difference between the crop size and ransac point, and subtract from the center point from HSF # cy = center_y - (csy - cy) - + # csy = frame.shape[0] # csx = frame.shape[1] csy = gray_frame.shape[0] csx = gray_frame.shape[1] - + # cx = clamp((cx - 20) + center_x, 0, csx) # cy = clamp((cy - 20) + center_y, 0, csy) cx = int(clamp(cx + ransac_xy_offset[0], 0, csx)) cy = int(clamp(cy + ransac_xy_offset[1], 0, csy)) - + # cv_end_time = timeit.default_timer() if imsave_flg: - + cv2.circle(ori_frame, (int(center_x), int(center_y)), 3, (128, 0, 0), -1) cv2.drawContours(ori_frame, contours, -1, (255, 0, 0), 1) cv2.circle(ori_frame, (int(cx), int(cy)), 2, (255, 0, 0), -1) @@ -1013,11 +1236,11 @@ class HSRAC_cls(object): cv2.imshow("fic", fic_frame) if cv2.waitKey(1) & 0xFF == ord("q"): pass - + cv_end_time = timeit.default_timer() self.timedict["ransac"].append(cv_end_time - ransac_start_time) self.timedict["total_cv"].append(cv_end_time - cv_start_time) - + try: return int(cx), int(cy), th_frame, frame, gray_frame except: @@ -1036,34 +1259,40 @@ if __name__ == "__main__": raise FileNotFoundError(input_video_path) logger.info("video name: {}".format(os.path.basename(input_video_path))) cap = cv2.VideoCapture(input_video_path) - logger.info("video info: size:{}x{} fps:{} frames:{} total:{:.3f} sec".format(int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), - int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)), - cap.get(cv2.CAP_PROP_FPS), - int(cap.get(cv2.CAP_PROP_FRAME_COUNT)), - cap.get(cv2.CAP_PROP_FRAME_COUNT) / cap.get( - cv2.CAP_PROP_FPS))) + logger.info( + "video info: size:{}x{} fps:{} frames:{} total:{:.3f} sec".format( + int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), + int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)), + cap.get(cv2.CAP_PROP_FPS), + int(cap.get(cv2.CAP_PROP_FRAME_COUNT)), + cap.get(cv2.CAP_PROP_FRAME_COUNT) / cap.get(cv2.CAP_PROP_FPS), + ) + ) if save_img: - all_point_img = np.zeros((int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)), int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), 3), dtype=np.uint8) + all_point_img = np.zeros( + (int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)), int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), 3), dtype=np.uint8 + ) cap.release() - + if not print_enable: + def print(*args, **kwargs): pass - + hsrac = HSRAC_cls() # For measuring total processing time main_start_time = timeit.default_timer() - + for i in range(loop_num): hsrac.open_video(input_video_path) - + while hsrac.read_frame(): if imsave_flg: base_gray = hsrac.current_image_gray.copy() base_img = hsrac.current_image.copy() hsf_x, hsf_y, hsf_cropbox, *_ = hsrac.single_run() - + # # hsrac_x, hsrac_y, hsrac_cropbox, *_ = er_hsracs.run(base_gray) # if 0:#random.random()<0.1: # hsrac_x, hsrac_y, hsrac_cropbox, *_ = er_hsracs.run(cv2.resize(base_gray,None,fx=0.75,fy=0.75).copy()) @@ -1090,7 +1319,7 @@ if __name__ == "__main__": # video_wr.write(cv2.resize(base_img, (200, 150))) else: _ = hsrac.single_run() - + if save_video: video_wr.release() logger.info("video output: {}".format(output_video_path)) diff --git a/EyeTrackApp/blob.py 
b/EyeTrackApp/blob.py index 292a66c..613e049 100644 --- a/EyeTrackApp/blob.py +++ b/EyeTrackApp/blob.py @@ -28,15 +28,6 @@ LICENSE: GNU GPLv3 """ import cv2 -import numpy as np -from enum import IntEnum - - -class EyeId(IntEnum): - RIGHT = 0 - LEFT = 1 - BOTH = 2 - SETTINGS = 3 def BLOB(self): diff --git a/EyeTrackApp/camera.py b/EyeTrackApp/camera.py index f22eca3..6bfa5a7 100644 --- a/EyeTrackApp/camera.py +++ b/EyeTrackApp/camera.py @@ -31,9 +31,8 @@ import serial import serial.tools.list_ports import threading import time -import platform from colorama import Fore -from config import EyeTrackConfig +from config import EyeTrackCameraConfig from enum import Enum import psutil, os import sys @@ -71,16 +70,15 @@ def is_serial_capture_source(addr: str) -> bool: """ Returns True if the capture source address is a serial port. """ - return (addr.startswith("COM") # Windows - or addr.startswith("/dev/cu") # macOS - or addr.startswith("/dev/tty") # Linux + return ( + addr.startswith("COM") or addr.startswith("/dev/cu") or addr.startswith("/dev/tty") # Windows # macOS # Linux ) class Camera: def __init__( self, - config: EyeTrackConfig, + config: EyeTrackCameraConfig, camera_index: int, cancellation_event: "threading.Event", capture_event: "threading.Event", @@ -129,7 +127,9 @@ class Camera: while True: if self.cancellation_event.is_set(): print(f"{Fore.CYAN}[INFO] Exiting Capture thread{Fore.RESET}") - + # openCV won't switch to a new source if provided with one + # so, we have to manually release the camera on exit + self.cv2_camera.release() return should_push = True # If things aren't open, retry until they are. Don't let read requests come in any earlier @@ -257,20 +257,14 @@ class Camera: jpeg = self.get_next_jpeg_frame() if jpeg: # Create jpeg frame from byte string - image = cv2.imdecode( - np.fromstring(jpeg, dtype=np.uint8), cv2.IMREAD_UNCHANGED - ) + image = cv2.imdecode(np.fromstring(jpeg, dtype=np.uint8), cv2.IMREAD_UNCHANGED) if image is None: - print( - f"{Fore.YELLOW}[WARN] Frame drop. Corrupted JPEG.{Fore.RESET}" - ) + print(f"{Fore.YELLOW}[WARN] Frame drop. Corrupted JPEG.{Fore.RESET}") return # Discard the serial buffer. This is due to the fact that it # may build up some outdated frames. A bit of a workaround here tbh. if conn.in_waiting >= 32768: - print( - f"{Fore.CYAN}[INFO] Discarding the serial buffer ({conn.in_waiting} bytes){Fore.RESET}" - ) + print(f"{Fore.CYAN}[INFO] Discarding the serial buffer ({conn.in_waiting} bytes){Fore.RESET}") conn.reset_input_buffer() self.buffer = b"" # Calculate the fps. @@ -314,9 +308,7 @@ class Camera: return try: rate = 115200 if sys.platform == "darwin" else 3000000 # Higher baud rate not working on macOS - conn = serial.Serial( - baudrate=rate, port=port, xonxoff=False, dsrdtr=False, rtscts=False - ) + conn = serial.Serial(baudrate=rate, port=port, xonxoff=False, dsrdtr=False, rtscts=False) # Set explicit buffer size for serial. 
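A few illustrative cases for the is_serial_capture_source() helper reformatted above (the addresses are made up, and the import assumes this runs inside EyeTrackApp):

```
from camera import is_serial_capture_source

for addr in ("COM3", "/dev/cu.usbmodem1101", "/dev/ttyACM0", "http://192.168.0.12/"):
    print(addr, is_serial_capture_source(addr))
# COM3 True, /dev/cu.usbmodem1101 True, /dev/ttyACM0 True, http://192.168.0.12/ False
```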
if sys.platform == "win32": buffer_size = 32768 diff --git a/EyeTrackApp/camera_widget.py b/EyeTrackApp/camera_widget.py index 92bc0d5..35a2ad7 100644 --- a/EyeTrackApp/camera_widget.py +++ b/EyeTrackApp/camera_widget.py @@ -26,18 +26,18 @@ LICENSE: GNU GPLv3 import PySimpleGUI as sg from config import EyeTrackConfig -from config import EyeTrackSettingsConfig from collections import deque from threading import Event, Thread + +from eye import EyeId from eye_processor import EyeProcessor, EyeInfoOrigin -from enum import Enum from queue import Queue, Empty from camera import Camera, CameraState -from osc import EyeId import cv2 + +from osc.OSCMessage import OSCMessageType, OSCMessage from utils.misc_utils import PlaySound, SND_FILENAME, SND_ASYNC, resource_path import numpy as np -import time class CameraWidget: @@ -69,8 +69,6 @@ class CameraWidget: self.main_config = main_config self.eye_id = widget_id self.settings_config = main_config.settings - self.configl = main_config.left_eye - self.configr = main_config.right_eye self.settings = main_config.settings if self.eye_id == EyeId.RIGHT: self.config = main_config.right_eye @@ -156,12 +154,12 @@ class CameraWidget: button_color="#6f4ca1", tooltip="Start eye calibration. Look all arround to all extreams without blinking until sound is heard.", ), - sg.Button( - "3D Calibration", + sg.Button( + "3D Calibration", key=self.gui_restart_3d_calibration, - button_color="#6f4ca1", - tooltip="Start 3d eye calibration, must have steamvr open and eyes in hmd", - ), + button_color="#6f4ca1", + tooltip="Start 3d eye calibration, must have steamvr open and eyes in hmd", + ), sg.Button( "Stop Calibration", key=self.gui_stop_calibration, @@ -276,9 +274,9 @@ class CameraWidget: def start(self): # If we're already running, bail - if not self.cancellation_event.is_set(): return + self.cancellation_event.clear() self.ransac_thread = Thread(target=self.ransac.run) self.ransac_thread.start() @@ -286,7 +284,6 @@ class CameraWidget: self.camera_thread.start() def stop(self): - # If we're not running yet, bail if self.cancellation_event.is_set(): return @@ -296,6 +293,15 @@ class CameraWidget: self.ransac_thread.join() self.camera_thread.join() + def on_config_update(self, data): + keys = set(data.keys()) + model_keys = set(self.config.model_fields.keys()) + # we only want to restart our stuff, if our stuff got updated + # at the model level + if model_keys.intersection(keys): + self.stop() + self.start() + def render(self, window, event, values): if self.image_queue.qsize() > 2: with self.image_queue.mutex: @@ -305,25 +311,14 @@ class CameraWidget: changed = False # If anything has changed in our configuration settings, change/update those. - if event == self.gui_save_tracking_button and values[self.gui_camera_addr] != self.config.capture_source: - print("\033[94m[INFO] New value: {}\033[0m".format(values[self.gui_camera_addr])) - try: - # Try storing ints as ints, for those using wired cameras. - self.config.capture_source = int(values[self.gui_camera_addr]) - except ValueError: - if values[self.gui_camera_addr] == "": - self.config.capture_source = None - else: - if ( - len(values[self.gui_camera_addr]) > 5 - and "http" not in values[self.gui_camera_addr] - and ".mp4" not in values[self.gui_camera_addr] - and not values[self.gui_camera_addr].startswith('/dev') # For MacOS and Linux users - ): # If http is not in camera address, add it. 
- self.config.capture_source = f"http://{values[self.gui_camera_addr]}/" - else: - self.config.capture_source = values[self.gui_camera_addr] - changed = True + # it's a save *and* restart button, we should just forward the event and let the manager handle it + if event == self.gui_save_tracking_button: + new_camera_address = values[self.gui_camera_addr] + print("\033[94m[INFO] New value: {}\033[0m".format(new_camera_address)) + # we don't want to save yet, we can notify the listeners though + changed = self.main_config.update_eye_model_config( + self.eye_id, {"capture_source": new_camera_address}, should_save=False + ) if self.config.rotation_angle != values[self.gui_rotation_slider]: self.config.rotation_angle = int(values[self.gui_rotation_slider]) @@ -372,6 +367,7 @@ class CameraWidget: self.x1, self.y1 = values[self.gui_roi_selection] + # todo, this is now kinda duplicated with the OSC implementation if event == self.gui_restart_3d_calibration: self.ransac.calibration_3d_frame_counter = -621 self.settings.gui_3d_calibration = True @@ -444,7 +440,6 @@ class CameraWidget: graph.erase() graph.draw_image(data=imgbytes, location=(0, 0)) if None not in (self.x0, self.y0, self.x1, self.y1): - self.figure = graph.draw_rectangle((self.x0, self.y0), (self.x1, self.y1), line_color="#6f4ca1") except Empty: @@ -469,7 +464,6 @@ class CameraWidget: if eye_info.info_type != EyeInfoOrigin.FAILURE: # and not eye_info.blink: graph.update(background_color="white") if not np.isnan(eye_info.x) and not np.isnan(eye_info.y): - graph.draw_circle( (eye_info.x * -100, eye_info.y * -100), eye_info.pupil_dilation * 25, @@ -485,7 +479,6 @@ class CameraWidget: ) if not np.isnan(eye_info.blink): - graph.draw_line( (-100, abs(eye_info.blink) * 2 * 200), (-100, 100), @@ -502,6 +495,26 @@ class CameraWidget: graph.update(background_color="red") # Relay information to OSC if eye_info.info_type != EyeInfoOrigin.FAILURE: - self.osc_queue.put((self.eye_id, eye_info)) + osc_message = OSCMessage( + type=OSCMessageType.EYE_INFO, + data=(self.eye_id, eye_info), + ) + self.osc_queue.put(osc_message) except Empty: pass + + def recenter_eyes(self, osc_message: OSCMessage): + if osc_message.data is not bool: + return # just incase we get anything other than bool + + if osc_message.data: + self.settings.gui_recenter_eyes = True + + def recalibrate_eyes(self, osc_message: OSCMessage): + if osc_message.data is not bool: + return # just incase we get anything other than bool + + if osc_message.data: + self.ransac.ibo.clear_filter() + self.ransac.calibration_frame_counter = self.config.calibration_samples + PlaySound("Audio/start.wav", SND_FILENAME | SND_ASYNC) diff --git a/EyeTrackApp/config.py b/EyeTrackApp/config.py index 0660c7b..f0152f3 100644 --- a/EyeTrackApp/config.py +++ b/EyeTrackApp/config.py @@ -27,11 +27,13 @@ LICENSE: GNU GPLv3 import json import os.path import shutil -from eye import EyeId + +from colorama import Fore from pydantic import BaseModel -from typing import Union, List +from typing import Any, Union, List import os +from eye import EyeId CONFIG_FILE_NAME: str = "eyetrack_settings.json" BACKUP_CONFIG_FILE_NAME: str = "eyetrack_settings.backup" @@ -55,6 +57,68 @@ class EyeTrackCameraConfig(BaseModel): calibration_points_3d: List[List[Union[float, None]]] = [] + def update_capture_source(self, new_camera_address: str): + if not new_camera_address: + self.capture_source = None + return + + if new_camera_address.isnumeric(): + self.capture_source = int(new_camera_address) + return + + # we were passed an IP, 
probably, lets add HTTP:// to it + if len(new_camera_address) > 5 and not ( + not new_camera_address.startswith(("http", "/dev")) or not new_camera_address.endswith(".mp4") + ): + self.capture_source = f"http://{new_camera_address}" + return + + self.capture_source = new_camera_address + + def update(self, data: dict[str, Any]) -> bool: + """ + Updates the model one field at a time based on the provided data dict. + The dict has to be defined like + ``` + data = { + "model_field": value + } + ``` + + If stale data is provided, + ex. User clicked on save and restart but didn't provide a new field + + we skip it, assuming that it was just a call to restart the tracking, or a miss-click. + + Some fields may require more validation, we take care of that with special methods. + defining a method like + + ``` + def update_custom_field(value: type): + pass + ``` + + will cause it to be picked up by this method and called with the current value. + Return values are ignored. + + """ + for key, value in data.items(): + old_value = getattr(self, key, None) + # no reason to update if it's the same value + if old_value == value: + return False + + if hasattr(self, f"update_{key}"): + update_attr = getattr(self, f"update_{key}") + if callable(update_attr): + update_attr(value) + else: + setattr(self, "key", value) + return True + else: + print(f"\033[93m[WARN] Field {key} does not exist on {self}.\033[0m") + return False + class EyeTrackSettingsConfig(BaseModel): gui_flip_x_axis_left: bool = False @@ -127,6 +191,24 @@ class EyeTrackSettingsConfig(BaseModel): gui_vrc_native: bool = True gui_pupil_dilation: bool = True + gui_VRCFTModulePort: int = 8889 + gui_VRCFTModuleIPAddress: str = "127.0.0.1" + gui_ShouldEmulateEyeWiden: bool = False + gui_ShouldEmulateEyeSquint: bool = False + gui_ShouldEmulateEyebrows: bool = False + gui_WidenThresholdV1_min: float = 0.60 + gui_WidenThresholdV1_max: float = 1 + gui_WidenThresholdV2_min: float = 0.60 + gui_WidenThresholdV2_max: float = 1.05 + gui_SqueezeThresholdV1_min: float = 0.07 + gui_SqueezeThresholdV1_max: float = 0.5 + gui_SqueezeThresholdV2_min: float = 0.07 + gui_SqueezeThresholdV2_max: float = -1 + gui_EyebrowThresholdRising: float = 0.8 + gui_EyebrowThresholdLowering: float = 0.15 + gui_OutputMultiplier: float = 1 + gui_use_module: bool = False + class EyeTrackConfig(BaseModel): version: int = 1 @@ -159,10 +241,62 @@ class EyeTrackConfig(BaseModel): load_config = EyeTrackConfig() return load_config + def validate_camera_address_conflict(self, eye_id, capture_source): + match eye_id: + case EyeId.RIGHT: + if self.left_eye.capture_source == capture_source: + print( + f"{Fore.YELLOW}[WARN] Capture source {capture_source} already in use by the left camera.{Fore.RESET}" + ) + return False + case EyeId.LEFT: + if self.right_eye.capture_source == capture_source: + print( + f"{Fore.YELLOW}[WARN] Capture source {capture_source} already in use by the right camera.{Fore.RESET}" + ) + return False + case _: + return False + return True + + def update_eye_model_config(self, eye_id: EyeId, data: dict, should_save=True, should_notify=True) -> bool: + """ + A more granular method for updating a particular model so that everything that relies on it + will get notified about any changes. Note, it acts a bit like pub-sub, + we don't care what changes got passed, we will notify the listeners with them. + + It's the listeners job to check if they want that update. 
+ """ + + # The app really doesn't like address clashes, so we have to validate it as soon as possible + # otherwise we crash + if "capture_source" in data and not self.validate_camera_address_conflict(eye_id, data["capture_source"]): + return False + + match eye_id: + case EyeId.RIGHT: + changed = self.right_eye.update(data) + case EyeId.LEFT: + changed = self.left_eye.update(data) + case _: + return False + + if should_save: + self.save() + + if should_notify: + self.__notify_listeners(data) + + return changed + def update(self, data, save=False): + """ + More of an internal method for modules to be able to update the config + and have other parts of the system react to changes + """ for field, value in data.items(): setattr(self.settings, field, value) - self.__notify_listeners() + self.__notify_listeners(data) if save: self.save() @@ -186,6 +320,6 @@ class EyeTrackConfig(BaseModel): print(f"[DEBUG] Registering listener {callback}") self.__listeners.append(callback) - def __notify_listeners(self): + def __notify_listeners(self, data: dict): for listener in self.__listeners: - listener() + listener(data) diff --git a/EyeTrackApp/ellipse_based_pupil_dilation.py b/EyeTrackApp/ellipse_based_pupil_dilation.py index d9fd53a..84650a1 100644 --- a/EyeTrackApp/ellipse_based_pupil_dilation.py +++ b/EyeTrackApp/ellipse_based_pupil_dilation.py @@ -31,21 +31,11 @@ import numpy as np import time import os import cv2 -from enums import EyeLR + +from eye import EyeId from one_euro_filter import OneEuroFilter -from utils.img_utils import safe_crop -from enum import IntEnum -import os - os.environ["OMP_NUM_THREADS"] = "1" -class EyeId(IntEnum): - RIGHT = 0 - LEFT = 1 - BOTH = 2 - SETTINGS = 3 - - # Note. # OpenCV on Windows will generate an error if the file path contains non-ASCII characters when using cv2.imread(), cv2.imwrite(), etc. # https://stackoverflow.com/questions/43185605/how-do-i-read-an-image-from-a-path-with-unicode-characters diff --git a/EyeTrackApp/enums.py b/EyeTrackApp/enums.py index 25177da..d0af33e 100644 --- a/EyeTrackApp/enums.py +++ b/EyeTrackApp/enums.py @@ -28,12 +28,22 @@ from __future__ import annotations import types from collections import namedtuple -from typing import Any, ClassVar, Dict, List, Optional, TYPE_CHECKING, Tuple, Type, TypeVar, Iterator, Mapping +from typing import ( + Any, + ClassVar, + Dict, + List, + TYPE_CHECKING, + Tuple, + Type, + TypeVar, + Iterator, + Mapping, +) __all__ = ( - 'Enum', - # 'EyeId', - 'EyeLR', + "Enum", + "EyeLR", ) if TYPE_CHECKING: @@ -43,9 +53,9 @@ if TYPE_CHECKING: def _create_value_cls(name: str, comparable: bool): # All the type ignores here are due to the type checker being unable to recognise # Runtime type creation without exploding. 
-    cls = namedtuple('_EnumValue_' + name, 'name value')
-    cls.__repr__ = lambda self: f'<{name}.{self.name}: {self.value!r}>'  # type: ignore
-    cls.__str__ = lambda self: f'{name}.{self.name}'  # type: ignore
+    cls = namedtuple("_EnumValue_" + name, "name value")
+    cls.__repr__ = lambda self: f"<{name}.{self.name}: {self.value!r}>"  # type: ignore
+    cls.__str__ = lambda self: f"{name}.{self.name}"  # type: ignore
     if comparable:
         cls.__le__ = lambda self, other: isinstance(other, self.__class__) and self.value <= other.value  # type: ignore
         cls.__ge__ = lambda self, other: isinstance(other, self.__class__) and self.value >= other.value  # type: ignore
@@ -55,7 +65,9 @@ def _create_value_cls(name: str, comparable: bool):
 
 
 def _is_descriptor(obj):
-    return hasattr(obj, '__get__') or hasattr(obj, '__set__') or hasattr(obj, '__delete__')
+    return (
+        hasattr(obj, "__get__") or hasattr(obj, "__set__") or hasattr(obj, "__delete__")
+    )
 
 
 class EnumMeta(type):
@@ -65,7 +77,14 @@ class EnumMeta(type):
     _enum_member_map_: ClassVar[Dict[str, Any]]
     _enum_value_map_: ClassVar[Dict[Any, Any]]
 
-    def __new__(cls, name: str, bases: Tuple[type, ...], attrs: Dict[str, Any], *, comparable: bool = False) -> Self:
+    def __new__(
+        cls,
+        name: str,
+        bases: Tuple[type, ...],
+        attrs: Dict[str, Any],
+        *,
+        comparable: bool = False,
+    ):
         value_mapping = {}
         member_mapping = {}
         member_names = []
@@ -73,7 +92,7 @@ class EnumMeta(type):
         value_cls = _create_value_cls(name, comparable)
         for key, value in list(attrs.items()):
             is_descriptor = _is_descriptor(value)
-            if key[0] == '_' and not is_descriptor:
+            if key[0] == "_" and not is_descriptor:
                 continue
 
             # Special case classmethod to just pass through
@@ -95,10 +114,10 @@ class EnumMeta(type):
             member_mapping[key] = new_value
             attrs[key] = new_value
 
-        attrs['_enum_value_map_'] = value_mapping
-        attrs['_enum_member_map_'] = member_mapping
-        attrs['_enum_member_names_'] = member_names
-        attrs['_enum_value_cls_'] = value_cls
+        attrs["_enum_value_map_"] = value_mapping
+        attrs["_enum_member_map_"] = member_mapping
+        attrs["_enum_member_names_"] = member_names
+        attrs["_enum_value_cls_"] = value_cls
         actual_cls = super().__new__(cls, name, bases, attrs)
         value_cls._actual_enum_cls_ = actual_cls  # type: ignore # Runtime attribute isn't understood
         return actual_cls
@@ -107,13 +126,15 @@ class EnumMeta(type):
         return (cls._enum_member_map_[name] for name in cls._enum_member_names_)
 
     def __reversed__(cls) -> Iterator[Any]:
-        return (cls._enum_member_map_[name] for name in reversed(cls._enum_member_names_))
+        return (
+            cls._enum_member_map_[name] for name in reversed(cls._enum_member_names_)
+        )
 
     def __len__(cls) -> int:
         return len(cls._enum_member_names_)
 
     def __repr__(cls) -> str:
-        return f'<enum {cls.__name__}>'
+        return f"<enum {cls.__name__}>"
 
     @property
     def __members__(cls) -> Mapping[str, Any]:
@@ -129,10 +150,10 @@ class EnumMeta(type):
             return cls._enum_member_map_[key]
 
     def __setattr__(cls, name: str, value: Any) -> None:
-        raise TypeError('Enums are immutable.')
+        raise TypeError("Enums are immutable.")
 
     def __delattr__(cls, attr: str) -> None:
-        raise TypeError('Enums are immutable')
+        raise TypeError("Enums are immutable")
 
     def __instancecheck__(self, instance: Any) -> bool:
         # isinstance(x, Y)
@@ -144,8 +165,9 @@
 
 
 if TYPE_CHECKING:
-    from enum import Enum
+    from enum import Enum, IntEnum
 else:
+
     class Enum(metaclass=EnumMeta):
         @classmethod
         def try_value(cls, value):
@@ -154,11 +176,13 @@ else:
             except (KeyError, TypeError):
                 return value
 
-E = TypeVar('E', bound='Enum')
+
+E = TypeVar("E", bound="Enum")
+
+
 def 
create_unknown_value(cls: Type[E], val: Any) -> E: value_cls = cls._enum_value_cls_ # type: ignore # This is narrowed below - name = f'unknown_{val}' + name = f"unknown_{val}" return value_cls(name=name, value=val) @@ -171,26 +195,12 @@ def try_enum(cls: Type[E], val: Any) -> E: return cls._enum_value_map_[val] # type: ignore # All errors are caught below except (KeyError, TypeError, AttributeError): return create_unknown_value(cls, val) - - + + # The line above is based on the code in the following url # https://github.com/Rapptz/discord.py/blob/f7e97954950ffb0e34238d70813454caa6f1a3ae/discord/enums.py -# class EyeId(Enum): -# # https://docs.python.org/3.9/library/enum.html#functional-api -# # > The reason for defaulting to 1 as the starting number and not 0 is that 0 is False in a boolean sense, but enum members all evaluate to True. -# RIGHT = 1 -# LEFT = 2 -# BOTH = 3 -# SETTINGS = 4 -# -# def __str__(self) -> str: -# return self.name -# -# def __int__(self) -> int: -# return self.value - class EyeLR(Enum): LEFT = 1 RIGHT = 2 @@ -199,4 +209,4 @@ class EyeLR(Enum): return self.name def __int__(self) -> int: - return self.value \ No newline at end of file + return self.value diff --git a/EyeTrackApp/eye.py b/EyeTrackApp/eye.py index 46eda61..c1a516b 100644 --- a/EyeTrackApp/eye.py +++ b/EyeTrackApp/eye.py @@ -34,6 +34,7 @@ class EyeId(IntEnum): BOTH = 2 SETTINGS = 3 ALGOSETTINGS = 4 + VRCFTMODULESETTINGS = 5 class EyeInfoOrigin(Enum): diff --git a/EyeTrackApp/eye_processor.py b/EyeTrackApp/eye_processor.py index 60d5d5a..3e034a6 100644 --- a/EyeTrackApp/eye_processor.py +++ b/EyeTrackApp/eye_processor.py @@ -28,8 +28,6 @@ LICENSE: GNU GPLv3 ------------------------------------------------------------------------------------------------------ """ -from operator import truth -from dataclasses import dataclass import sys import asyncio import os @@ -41,14 +39,7 @@ from config import EyeTrackSettingsConfig from pye3d.camera import CameraModel from pye3d.detector_3d import Detector3D, DetectorMode import queue -import threading -import numpy as np -import cv2 -from enum import Enum -from one_euro_filter import OneEuroFilter -from utils.misc_utils import PlaySound, SND_FILENAME, SND_ASYNC, resource_path -import importlib -from osc import EyeId +from eye import EyeId from osc_calibrate_filter import * from daddy import External_Run_DADDY from leap import External_Run_LEAP @@ -63,7 +54,6 @@ from ellipse_based_pupil_dilation import * from AHSF import * - def run_once(f): def wrapper(*args, **kwargs): if not wrapper.has_run: @@ -184,9 +174,7 @@ class EyeProcessor: min_cutoff = 0.0004 beta = 0.9 noisy_point = np.array([1, 1]) - self.one_euro_filter = OneEuroFilter( - noisy_point, min_cutoff=min_cutoff, beta=beta - ) + self.one_euro_filter = OneEuroFilter(noisy_point, min_cutoff=min_cutoff, beta=beta) def output_images_and_update(self, threshold_image, output_information: EyeInfo): try: @@ -201,11 +189,7 @@ class EyeProcessor: self.previous_image = self.current_image self.previous_rotation = self.config.rotation_angle except: # If this fails it likely means that the images are not the same size for some reason. 
- print( - "\033[91m[ERROR] Size of frames to display are of unequal sizes.\033[0m" - ) - - # pass + print("\033[91m[ERROR] Size of frames to display are of unequal sizes.\033[0m") def capture_crop_rotate_image(self): # Get our current frame @@ -213,12 +197,8 @@ class EyeProcessor: try: # Get frame from capture source, crop to ROI self.current_image = self.current_image[ - int(self.config.roi_window_y) : int( - self.config.roi_window_y + self.config.roi_window_h - ), - int(self.config.roi_window_x) : int( - self.config.roi_window_x + self.config.roi_window_w - ), + int(self.config.roi_window_y) : int(self.config.roi_window_y + self.config.roi_window_h), + int(self.config.roi_window_x) : int(self.config.roi_window_x + self.config.roi_window_w), ] self.ibo.change_roi(self.config.dict(include=self.roi_include_set)) @@ -287,9 +267,8 @@ class EyeProcessor: self.settings.ibo_filter_samples, self.settings.ibo_average_output_samples, ) - if self.eyeopen < float( - self.settings.ibo_fully_close_eye_threshold - ): # threshold so the eye fully closes + # threshold so the eye fully closes + if self.eyeopen < float(self.settings.ibo_fully_close_eye_threshold): self.eyeopen = 0.0 if self.bd_blink == True: @@ -314,9 +293,7 @@ class EyeProcessor: ) = self.er_leap.run(self.current_image_gray, self.current_image_gray_clean) # print(self.eyeopen) - if ( - len(self.prev_y_list) >= 100 - ): # "lock" eye when close/blink IN TESTING, kinda broke + if len(self.prev_y_list) >= 100: # "lock" eye when close/blink IN TESTING, kinda broke self.prev_y_list.pop(0) self.prev_y_list.append(self.out_y) else: @@ -369,18 +346,12 @@ class EyeProcessor: def LEAPM(self): self.thresh = self.current_image_gray.copy() - ( - self.current_image_gray, - self.rawx, - self.rawy, - self.eyeopen, - ) = self.er_leap.run( + (self.current_image_gray, self.rawx, self.rawy, self.eyeopen,) = self.er_leap.run( self.current_image_gray, self.current_image_gray_clean ) # TODO: make own self var and LEAP toggle self.thresh = self.current_image_gray.copy() - self.out_x, self.out_y, self.avg_velocity = cal.cal_osc( - self, self.rawx, self.rawy, self.angle - ) + # todo: lorow, fix this as well + self.out_x, self.out_y, self.avg_velocity = cal.cal_osc(self, self.rawx, self.rawy, self.angle) self.current_algorithm = EyeInfoOrigin.LEAP # print(self.eyeopen) @@ -391,9 +362,7 @@ class EyeProcessor: self.thresh = self.current_image_gray.copy() self.rawx, self.rawy, self.radius = self.er_daddy.run(self.current_image_gray) # Daddy also uses a one euro filter, so I'll have to use it twice, but I'm not going to think too much about it. 
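         # cal_osc applies the user's calibration and smoothing to the raw pupil
         # position and returns the final x/y plus an average velocity estimate.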
- self.out_x, self.out_y, self.avg_velocity = cal.cal_osc( - self, self.rawx, self.rawy, self.angle - ) + self.out_x, self.out_y, self.avg_velocity = cal.cal_osc(self, self.rawx, self.rawy, self.angle) self.current_algorithm = EyeInfoOrigin.DADDY def AHSFRACM(self): @@ -438,9 +407,7 @@ class EyeProcessor: # if self.prev_x is None: # self.prev_x = self.rawx # self.prev_y = self.rawy - self.out_x, self.out_y, self.avg_velocity = cal.cal_osc( - self, self.rawx, self.rawy, self.angle - ) + self.out_x, self.out_y, self.avg_velocity = cal.cal_osc(self, self.rawx, self.rawy, self.angle) self.current_algorithm = EyeInfoOrigin.HSRAC def HSRACM(self): @@ -458,9 +425,7 @@ class EyeProcessor: pass self.hasrac_en = True - self.rawx, self.rawy, self.thresh, self.radius = self.er_hsf.run( - self.current_image_gray - ) + self.rawx, self.rawy, self.thresh, self.radius = self.er_hsf.run(self.current_image_gray) ( self.rawx, self.rawy, @@ -478,9 +443,7 @@ class EyeProcessor: # if self.prev_x is None: # self.prev_x = self.rawx # self.prev_y = self.rawy - self.out_x, self.out_y, self.avg_velocity = cal.cal_osc( - self, self.rawx, self.rawy, self.angle - ) + self.out_x, self.out_y, self.avg_velocity = cal.cal_osc(self, self.rawx, self.rawy, self.angle) self.current_algorithm = EyeInfoOrigin.HSRAC def HSFM(self): @@ -497,12 +460,8 @@ class EyeProcessor: else: pass # todo: add process to initialise er_hsf when resolution changes - self.rawx, self.rawy, self.thresh, self.radius = self.er_hsf.run( - self.current_image_gray - ) - self.out_x, self.out_y, self.avg_velocity = cal.cal_osc( - self, self.rawx, self.rawy, self.angle - ) + self.rawx, self.rawy, self.thresh, self.radius = self.er_hsf.run(self.current_image_gray) + self.out_x, self.out_y, self.avg_velocity = cal.cal_osc(self, self.rawx, self.rawy, self.angle) self.current_algorithm = EyeInfoOrigin.HSF def RANSAC3DM(self): @@ -519,9 +478,7 @@ class EyeProcessor: else: pass self.hasrac_en = False - current_image_gray_copy = ( - self.current_image_gray.copy() - ) # Duplicate before overwriting in RANSAC3D. + current_image_gray_copy = self.current_image_gray.copy() # Duplicate before overwriting in RANSAC3D. 
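         # RANSAC3D hands back the raw pupil position, threshold image and blink
         # estimate in a single tuple, unpacked below.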
( self.rawx, self.rawy, @@ -533,9 +490,7 @@ class EyeProcessor: ) = RANSAC3D(self, True) if self.settings.gui_RANSACBLINK: self.eyeopen = ranblink - self.out_x, self.out_y, self.avg_velocity = cal.cal_osc( - self, self.rawx, self.rawy, self.angle - ) + self.out_x, self.out_y, self.avg_velocity = cal.cal_osc(self, self.rawx, self.rawy, self.angle) self.current_algorithm = EyeInfoOrigin.RANSAC def AHSFM(self): @@ -559,9 +514,7 @@ class EyeProcessor: self.radius, ) = External_Run_AHSF(self.current_image_gray) self.thresh = self.current_image_gray - self.out_x, self.out_y, self.avg_velocity = cal.cal_osc( - self, self.rawx, self.rawy, self.angle - ) + self.out_x, self.out_y, self.avg_velocity = cal.cal_osc(self, self.rawx, self.rawy, self.angle) self.current_algorithm = EyeInfoOrigin.HSF def BLOBM(self): @@ -668,9 +621,7 @@ class EyeProcessor: if self.settings.gui_HSRAC: if self.er_hsf is None: - if self.eye_id in [EyeId.LEFT]: - self.er_hsf = External_Run_HSF( self.settings.gui_skip_autoradius, self.settings.gui_HSF_radius_left, @@ -727,7 +678,7 @@ class EyeProcessor: f = True while True: # f = True - #print(self.capture_queue_incoming.qsize()) + # print(self.capture_queue_incoming.qsize()) # Check to make sure we haven't been requested to close if self.cancellation_event.is_set(): print("\033[94m[INFO] Exiting Tracking thread\033[0m") @@ -753,9 +704,7 @@ class EyeProcessor: focal_length=self.config.focal_length, resolution=(self.config.roi_window_w, self.config.roi_window_h), ) - self.detector_3d = Detector3D( - camera=self.camera_model, long_term_mode=DetectorMode.blocking - ) + self.detector_3d = Detector3D(camera=self.camera_model, long_term_mode=DetectorMode.blocking) try: if self.capture_queue_incoming.empty(): @@ -767,20 +716,17 @@ class EyeProcessor: self.current_fps, ) = self.capture_queue_incoming.get(block=True, timeout=0.1) except queue.Empty: - #print("No image available") + # print("No image available") continue if not self.capture_crop_rotate_image(): continue - self.current_image_gray = cv2.cvtColor( - self.current_image, cv2.COLOR_BGR2GRAY - ) + self.current_image_gray = cv2.cvtColor(self.current_image, cv2.COLOR_BGR2GRAY) self.current_image_gray_clean = ( self.current_image_gray.copy() ) # copy this frame to have a clean image for blink algo - if self.cancellation_event.is_set(): print("\033[94m[INFO] Exiting Tracking thread\033[0m") return diff --git a/EyeTrackApp/eyetrackapp.py b/EyeTrackApp/eyetrackapp.py index 386fcaa..0bf6d1b 100644 --- a/EyeTrackApp/eyetrackapp.py +++ b/EyeTrackApp/eyetrackapp.py @@ -32,9 +32,11 @@ import threading from camera_widget import CameraWidget from config import EyeTrackConfig from eye import EyeId -from osc import VRChatOSCReceiver, VRChatOSC -from general_settings_widget import SettingsWidget -from algo_settings_widget import AlgoSettingsWidget +from settings.VRCFTModuleSettings import VRCFTSettingsWidget +from settings.general_settings_widget import SettingsWidget +from settings.algo_settings_widget import AlgoSettingsWidget +from osc.osc import OSCManager +from osc.OSCMessage import OSCMessage from utils.misc_utils import is_nt, resource_path if is_nt: @@ -49,11 +51,13 @@ RIGHT_EYE_NAME = "-RIGHTEYEWIDGET-" LEFT_EYE_NAME = "-LEFTEYEWIDGET-" SETTINGS_NAME = "-SETTINGSWIDGET-" ALGO_SETTINGS_NAME = "-ALGOSETTINGSWIDGET-" +VRCFT_MODULE_SETTINGS_NAME = "-VRCFTSETTINGSWIDGET-" LEFT_EYE_RADIO_NAME = "-LEFTEYERADIO-" RIGHT_EYE_RADIO_NAME = "-RIGHTEYERADIO-" BOTH_EYE_RADIO_NAME = "-BOTHEYERADIO-" SETTINGS_RADIO_NAME = "-SETTINGSRADIO-" 
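 # Each settings page pairs a layout key with a radio-button key; the new VRCFT
 # module page added below follows the same convention.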
ALGO_SETTINGS_RADIO_NAME = "-ALGOSETTINGSRADIO-" +VRCFT_MODULE_SETTINGS_RADIO_NAME = "-VRCFTSETTINGSRADIO-" page_url = "https://github.com/RedHawk989/EyeTrackVR/releases/latest" appversion = "EyeTrackApp 0.2.0 BETA 11" @@ -65,15 +69,12 @@ def main(): config.save() cancellation_event = threading.Event() - ROSC = False # Check to see if we can connect to our video source first. If not, bring up camera finding # dialog. try: if config.settings.gui_update_check: - response = requests.get( - "https://api.github.com/repos/EyeTrackVR/EyeTrackVR/releases/latest" - ) + response = requests.get("https://api.github.com/repos/EyeTrackVR/EyeTrackVR/releases/latest") latestversion = response.json()["name"] if ( appversion == latestversion @@ -104,14 +105,7 @@ def main(): except: print("\033[91m[INFO] Could not check for updates. Please try again later.\033[0m") - # Check to see if we have an ROI. If not, bring up ROI finder GUI. - - # Spawn worker threads - osc_queue: queue.Queue[tuple[bool, int, int]] = queue.Queue() - osc = VRChatOSC(cancellation_event, osc_queue, config) - osc_thread = threading.Thread(target=osc.run) - # start worker threads - osc_thread.start() + osc_queue: queue.Queue[OSCMessage] = queue.Queue() eyes = [ CameraWidget(EyeId.RIGHT, config, osc_queue), @@ -121,8 +115,34 @@ def main(): settings = [ SettingsWidget(EyeId.SETTINGS, config), AlgoSettingsWidget(EyeId.ALGOSETTINGS, config), + VRCFTSettingsWidget(EyeId.VRCFTMODULESETTINGS, config, osc_queue), ] + osc_manager = OSCManager( + osc_message_in_queue=osc_queue, + config=config, + ) + config.register_listener_callback(osc_manager.update) + config.register_listener_callback(eyes[0].on_config_update) + config.register_listener_callback(eyes[1].on_config_update) + + osc_manager.register_listeners( + config.settings.gui_osc_recenter_address, + [ + eyes[0].recenter_eyes, + eyes[1].recenter_eyes, + ], + ) + osc_manager.register_listeners( + config.settings.gui_osc_recalibrate_address, + [ + eyes[0].recalibrate_eyes, + eyes[1].recalibrate_eyes, + ], + ) + + osc_manager.start() + layout = [ [ sg.Radio( @@ -160,6 +180,13 @@ def main(): default=(config.eye_display_id == EyeId.ALGOSETTINGS), key=ALGO_SETTINGS_RADIO_NAME, ), + sg.Radio( + "VRCFT Module Settings", + "EYESELECTRADIO", + background_color="#292929", + default=(config.eye_display_id == EyeId.VRCFTMODULESETTINGS), + key=VRCFT_MODULE_SETTINGS_RADIO_NAME, + ), ], [ sg.Column( @@ -190,6 +217,13 @@ def main(): visible=(config.eye_display_id in [EyeId.ALGOSETTINGS]), background_color="#424042", ), + sg.Column( + settings[2].get_layout(), + vertical_alignment="top", + key=VRCFT_MODULE_SETTINGS_NAME, + visible=(config.eye_display_id in [EyeId.VRCFTMODULESETTINGS]), + background_color="#424042", + ), ], ] @@ -201,15 +235,10 @@ def main(): settings[0].start() if config.eye_display_id in [EyeId.ALGOSETTINGS]: settings[1].start() - # self.main_config.eye_display_id + if config.eye_display_id in [EyeId.VRCFTMODULESETTINGS]: + settings[2].start() # the eye's needs to be running before it is passed to the OSC - if config.settings.gui_ROSC: - osc_receiver = VRChatOSCReceiver(cancellation_event, config, eyes) - osc_receiver_thread = threading.Thread(target=osc_receiver.run) - osc_receiver_thread.start() - ROSC = True - # Create the window window = sg.Window( f"{appversion}", @@ -228,15 +257,7 @@ def main(): for eye in eyes: eye.stop() cancellation_event.set() - # shut down worker threads - osc_thread.join() - # TODO: find a way to have this function run on join maybe?? 
- # threading.Event() wont work because pythonosc spawns its own thread. - # only way i can see to get around this is an ugly while loop that only checks if a threading event is triggered - # and then call the pythonosc shutdown function - if ROSC: - osc_receiver.shutdown() - osc_receiver_thread.join() + osc_manager.shutdown() print("\033[94m[INFO] Exiting EyeTrackApp\033[0m") return @@ -245,9 +266,11 @@ def main(): eyes[1].stop() settings[0].stop() settings[1].stop() + settings[2].stop() window[RIGHT_EYE_NAME].update(visible=True) window[LEFT_EYE_NAME].update(visible=False) window[SETTINGS_NAME].update(visible=False) + window[VRCFT_MODULE_SETTINGS_NAME].update(visible=False) window[ALGO_SETTINGS_NAME].update(visible=False) config.eye_display_id = EyeId.RIGHT config.settings.tracker_single_eye = 2 @@ -256,11 +279,13 @@ def main(): elif values[LEFT_EYE_RADIO_NAME] and config.eye_display_id != EyeId.LEFT: settings[0].stop() settings[1].stop() + settings[2].stop() eyes[0].stop() eyes[1].start() window[RIGHT_EYE_NAME].update(visible=False) window[LEFT_EYE_NAME].update(visible=True) window[SETTINGS_NAME].update(visible=False) + window[VRCFT_MODULE_SETTINGS_NAME].update(visible=False) window[ALGO_SETTINGS_NAME].update(visible=False) config.eye_display_id = EyeId.LEFT config.settings.tracker_single_eye = 1 @@ -269,11 +294,13 @@ def main(): elif values[BOTH_EYE_RADIO_NAME] and config.eye_display_id != EyeId.BOTH: settings[0].stop() settings[1].stop() + settings[2].stop() eyes[1].start() eyes[0].start() window[LEFT_EYE_NAME].update(visible=True) window[RIGHT_EYE_NAME].update(visible=True) window[SETTINGS_NAME].update(visible=False) + window[VRCFT_MODULE_SETTINGS_NAME].update(visible=False) window[ALGO_SETTINGS_NAME].update(visible=False) config.eye_display_id = EyeId.BOTH config.settings.tracker_single_eye = 0 @@ -284,28 +311,43 @@ def main(): eyes[1].stop() settings[1].stop() settings[0].start() + settings[2].stop() window[RIGHT_EYE_NAME].update(visible=False) window[LEFT_EYE_NAME].update(visible=False) window[SETTINGS_NAME].update(visible=True) + window[VRCFT_MODULE_SETTINGS_NAME].update(visible=False) window[ALGO_SETTINGS_NAME].update(visible=False) config.eye_display_id = EyeId.SETTINGS config.save() - elif ( - values[ALGO_SETTINGS_RADIO_NAME] - and config.eye_display_id != EyeId.ALGOSETTINGS - ): + elif values[ALGO_SETTINGS_RADIO_NAME] and config.eye_display_id != EyeId.ALGOSETTINGS: eyes[0].stop() eyes[1].stop() settings[0].stop() settings[1].start() + settings[2].stop() window[RIGHT_EYE_NAME].update(visible=False) window[LEFT_EYE_NAME].update(visible=False) window[SETTINGS_NAME].update(visible=False) + window[VRCFT_MODULE_SETTINGS_NAME].update(visible=False) window[ALGO_SETTINGS_NAME].update(visible=True) config.eye_display_id = EyeId.ALGOSETTINGS config.save() + elif values[VRCFT_MODULE_SETTINGS_RADIO_NAME] and config.eye_display_id != EyeId.VRCFTMODULESETTINGS: + eyes[0].stop() + eyes[1].stop() + settings[0].stop() + settings[1].stop() + settings[2].start() + window[RIGHT_EYE_NAME].update(visible=False) + window[LEFT_EYE_NAME].update(visible=False) + window[SETTINGS_NAME].update(visible=False) + window[VRCFT_MODULE_SETTINGS_NAME].update(visible=True) + window[ALGO_SETTINGS_NAME].update(visible=False) + config.eye_display_id = EyeId.VRCFTMODULESETTINGS + config.save() + else: # Otherwise, render all for eye in eyes: @@ -315,5 +357,6 @@ def main(): if setting.started(): setting.render(window, event, values) + if __name__ == "__main__": - main() \ No newline at end of file + main() diff --git 
a/EyeTrackApp/haar_surround_feature.py b/EyeTrackApp/haar_surround_feature.py index ed579b7..cab2821 100644 --- a/EyeTrackApp/haar_surround_feature.py +++ b/EyeTrackApp/haar_surround_feature.py @@ -32,9 +32,7 @@ from functools import lru_cache import cv2 import numpy as np -from utils.misc_utils import clamp from utils.img_utils import safe_crop -from enum import IntEnum import psutil import sys import os @@ -49,16 +47,9 @@ else: process.nice(psutil.BELOW_NORMAL_PRIORITY_CLASS) # Windows process.nice() - -class EyeId(IntEnum): - RIGHT = 0 - LEFT = 1 - BOTH = 2 - SETTINGS = 3 - - # from line_profiler_pycharm import profile + video_path = "ezgif.com-gif-maker.avi" imshow_enable = False calc_print_enable = False @@ -151,10 +142,7 @@ class HaarSurroundFeature: def get_kernel(self): # Defined here, but not yet used? # Create a kernel filled with the value of self.val_out - kernel = ( - np.ones(shape=(2 * self.r_out - 1, 2 * self.r_out - 1), dtype=np.float64) - * self.val_out - ) + kernel = np.ones(shape=(2 * self.r_out - 1, 2 * self.r_out - 1), dtype=np.float64) * self.val_out # Set the values of the inner area of the kernel using array slicing start = self.r_out - self.r_in @@ -173,9 +161,7 @@ def to_gray(frame): @lru_cache(maxsize=lru_maxsize_vvs) def get_frameint_empty_array(frame_shape, pad, x_step, y_step, r_in, r_out): frame_int_dtype = np.intc - frame_pad = np.empty( - (frame_shape[0] + (pad * 2), frame_shape[1] + (pad * 2)), dtype=np.uint8 - ) + frame_pad = np.empty((frame_shape[0] + (pad * 2), frame_shape[1] + (pad * 2)), dtype=np.uint8) row, col = frame_pad.shape @@ -212,9 +198,7 @@ def get_frameint_empty_array(frame_shape, pad, x_step, y_step, r_in, r_out): out_p01 = np.empty(len_syx, dtype=frame_int_dtype) out_p10 = np.empty(len_syx, dtype=frame_int_dtype) response_list = np.empty(len_syx, dtype=np.float64) # or np.int32 - frame_conv = np.zeros( - shape=(row - 2 * pad, col - 2 * pad), dtype=np.uint8 - ) # or np.float64 + frame_conv = np.zeros(shape=(row - 2 * pad, col - 2 * pad), dtype=np.uint8) # or np.float64 frame_conv_stride = frame_conv[::y_step, ::x_step] return ( @@ -361,21 +345,14 @@ class AutoRadiusCalc(object): self.adj_comp_flag = False return self.radius_cand_list[self.radius_middle_index] else: - if ( - self.left_index <= self.right_index - and self.left_index != self.radius_middle_index - ): - if (self.left_item[1] + self.response_list[-1][1]) < ( - self.right_item[1] + self.response_list[-1][1] - ): + if self.left_index <= self.right_index and self.left_index != self.radius_middle_index: + if (self.left_item[1] + self.response_list[-1][1]) < (self.right_item[1] + self.response_list[-1][1]): self.right_item = self.response_list[-1] self.right_index = self.radius_middle_index - 1 self.radius_middle_index = (self.left_index + self.right_index) // 2 self.adj_comp_flag = False return self.radius_cand_list[self.radius_middle_index] - if (self.left_item[1] + self.response_list[-1][1]) > ( - self.right_item[1] + self.response_list[-1][1] - ): + if (self.left_item[1] + self.response_list[-1][1]) > (self.right_item[1] + self.response_list[-1][1]): self.left_item = self.response_list[-1] self.left_index = self.radius_middle_index + 1 self.radius_middle_index = (self.left_index + self.right_index) // 2 @@ -409,21 +386,11 @@ class AutoRadiusCalc(object): self.adj_comp_flag = True return default_radius elif sort_res[0] == auto_radius_range[0]: - self.radius_cand_list = [ - i - for i in range( - auto_radius_range[0], default_radius, auto_radius_step - ) - ][1:] + 
self.radius_cand_list = [i for i in range(auto_radius_range[0], default_radius, auto_radius_step)][1:] self.adj_comp_flag = False return self.radius_cand_list.pop() else: - self.radius_cand_list = [ - i - for i in range( - default_radius, auto_radius_range[1], auto_radius_step - ) - ][1:] + self.radius_cand_list = [i for i in range(default_radius, auto_radius_range[1], auto_radius_step)][1:] self.adj_comp_flag = False return self.radius_cand_list.pop() else: @@ -497,9 +464,7 @@ class CenterCorrection(object): self.frame_mask = None self.frame_bin = None self.frame_final = None - self.morph_kernel = cv2.getStructuringElement( - cv2.MORPH_RECT, (kernel_size, kernel_size) - ) + self.morph_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (kernel_size, kernel_size)) self.morph_kernel2 = np.ones((3, 3)) self.hist_index = np.arange(256) self.hist = np.empty((256, 1)) @@ -536,20 +501,14 @@ class CenterCorrection(object): ) # bottleneck - self.frame_bin = cv2.threshold(gray_frame, frame_thr, 1, cv2.THRESH_BINARY_INV)[ - 1 - ] + self.frame_bin = cv2.threshold(gray_frame, frame_thr, 1, cv2.THRESH_BINARY_INV)[1] cropped_x, cropped_y, cropped_w, cropped_h = cv2.boundingRect(self.frame_bin) self.frame_final = cv2.bitwise_and(self.frame_bin, self.frame_mask) # bottleneck - self.frame_final = cv2.morphologyEx( - self.frame_final, cv2.MORPH_CLOSE, self.morph_kernel - ) - self.frame_final = cv2.morphologyEx( - self.frame_final, cv2.MORPH_OPEN, self.morph_kernel - ) + self.frame_final = cv2.morphologyEx(self.frame_final, cv2.MORPH_CLOSE, self.morph_kernel) + self.frame_final = cv2.morphologyEx(self.frame_final, cv2.MORPH_OPEN, self.morph_kernel) if (cropped_h, cropped_w) == self.frame_shape: # Not detected. @@ -568,9 +527,7 @@ class CenterCorrection(object): else: base_x, base_y = center_x, center_y - contours, _ = cv2.findContours( - self.frame_final, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE - ) + contours, _ = cv2.findContours(self.frame_final, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE) contours_box = [cv2.boundingRect(cnt) for cnt in contours] contours_dist = np.array( [ @@ -580,9 +537,7 @@ class CenterCorrection(object): ) if len(contours_box): - cropped_x2, cropped_y2, cropped_w2, cropped_h2 = contours_box[ - contours_dist.argmin() - ] + cropped_x2, cropped_y2, cropped_w2, cropped_h2 = contours_box[contours_dist.argmin()] x = cropped_x2 + cropped_w2 // 2 y = cropped_y2 + cropped_h2 // 2 else: @@ -688,9 +643,7 @@ class HSF_cls(object): self.cvparam.radius = self.auto_radius_calc.get_radius() if self.auto_radius_calc.adj_comp_flag: - self.now_modeo = ( - self.cv_modeo[2] if not skip_blink_detect else self.cv_modeo[3] - ) + self.now_modeo = self.cv_modeo[2] if not skip_blink_detect else self.cv_modeo[3] radius, pad, step, hsf = self.cvparam.get_rpsh() @@ -723,13 +676,9 @@ class HSF_cls(object): response_list, frame_conv, frame_conv_stride, - ) = get_frameint_empty_array( - gray_frame.shape, pad, step[0], step[1], hsf.r_in, hsf.r_out - ) + ) = get_frameint_empty_array(gray_frame.shape, pad, step[0], step[1], hsf.r_in, hsf.r_out) # BORDER_CONSTANT is faster than BORDER_REPLICATE There seems to be almost no negative impact when BORDER_CONSTANT is used. 
- cv2.copyMakeBorder( - gray_frame, pad, pad, pad, pad, cv2.BORDER_CONSTANT, dst=frame_pad - ) + cv2.copyMakeBorder(gray_frame, pad, pad, pad, pad, cv2.BORDER_CONSTANT, dst=frame_pad) cv2.integral(frame_pad, sum=frame_int, sdepth=cv2.CV_32S) self.timedict["int_img"].append(timeit.default_timer() - int_start_time) @@ -785,18 +734,10 @@ class HSF_cls(object): if self.blink_detector.response_len() < blink_init_frames: self.blink_detector.add_response(cv2.mean(cropped_image)[0]) - upper_x = center_x + max( - 20, radius - ) # self.center_correct.center_q1_radius - lower_x = center_x - max( - 20, radius - ) # self.center_correct.center_q1_radius - upper_y = center_y + max( - 20, radius - ) # self.center_correct.center_q1_radius - lower_y = center_y - max( - 20, radius - ) # self.center_correct.center_q1_radius + upper_x = center_x + max(20, radius) # self.center_correct.center_q1_radius + lower_x = center_x - max(20, radius) # self.center_correct.center_q1_radius + upper_y = center_y + max(20, radius) # self.center_correct.center_q1_radius + lower_y = center_y - max(20, radius) # self.center_correct.center_q1_radius self.center_q1.add_response( cv2.mean( @@ -831,19 +772,13 @@ class HSF_cls(object): else: # pass if not self.center_correct.setup_comp: - self.center_correct.init_array( - gray_frame.shape, self.center_q1.quartile_1, radius - ) + self.center_correct.init_array(gray_frame.shape, self.center_q1.quartile_1, radius) elif self.center_correct.frame_shape != gray_frame.shape: """The resolution should have changed and the statistics should have changed, so essentially the statistics need to be reworked, but implementation will be postponed as viability is the highest priority.""" - self.center_correct.init_array( - gray_frame.shape, self.center_q1.quartile_1, radius - ) + self.center_correct.init_array(gray_frame.shape, self.center_q1.quartile_1, radius) - center_x, center_y = self.center_correct.correction( - gray_frame, center_x, center_y - ) + center_x, center_y = self.center_correct.correction(gray_frame, center_x, center_y) # Define the center point and radius center_xy = (center_x, center_y) upper_x = center_x + radius @@ -851,9 +786,7 @@ class HSF_cls(object): upper_y = center_y + radius lower_y = center_y - radius # Crop the image using the calculated bounds - cropped_image = safe_crop( - gray_frame, lower_x, lower_y, upper_x, upper_y - ) + cropped_image = safe_crop(gray_frame, lower_x, lower_y, upper_x, upper_y) # cropbox = [clamp(val, 0, gray_frame.shape[i]) for i, val in # zip([1, 0, 1, 0], [lower_x, lower_y, upper_x, upper_y])] # debug code @@ -874,10 +807,7 @@ class HSF_cls(object): # print('Pixel position:', center_xy) if imshow_enable: - if ( - self.now_modeo != self.cv_modeo[0] - and self.now_modeo != self.cv_modeo[1] - ): + if self.now_modeo != self.cv_modeo[0] and self.now_modeo != self.cv_modeo[1]: if 0 in cropped_image.shape: # If shape contains 0, it is not detected well. 
pass diff --git a/EyeTrackApp/intensity_based_openness.py b/EyeTrackApp/intensity_based_openness.py index a12b5a4..5519e7a 100644 --- a/EyeTrackApp/intensity_based_openness.py +++ b/EyeTrackApp/intensity_based_openness.py @@ -30,10 +30,8 @@ import numpy as np import time import os import cv2 -from enums import EyeLR +from eye import EyeId from one_euro_filter import OneEuroFilter -from utils.img_utils import safe_crop -from enum import IntEnum import psutil import sys @@ -48,13 +46,6 @@ else: process.nice() -class EyeId(IntEnum): - RIGHT = 0 - LEFT = 1 - BOTH = 2 - SETTINGS = 3 - - # higher intensity means more closed/ more white/less pupil # Hm I need an acronym for this, any ideas? @@ -392,9 +383,6 @@ class IntensityBasedOpeness: self.prev_val = eyeopen try: - noisy_point = np.array( - [float(eyeopen), float(eyeopen)] - ) # fliter our values with a One Euro Filter point_hat = self.one_euro_filter(noisy_point) eyeopenx = point_hat[0] eyeopeny = point_hat[1] diff --git a/EyeTrackApp/osc.py b/EyeTrackApp/osc.py deleted file mode 100644 index e8f53cf..0000000 --- a/EyeTrackApp/osc.py +++ /dev/null @@ -1,428 +0,0 @@ -""" ------------------------------------------------------------------------------------------------------- - - ,@@@@@@ - @@@@@@@@@@@ @@@ - @@@@@@@@@@@@ @@@@@@@@@@@ - @@@@@@@@@@@@@ @@@@@@@@@@@@@@ - @@@@@@@/ ,@@@@@@@@@@@@@ - /@@@@@@@@@@@@@@@ @@@@@@@@ - @@@@@@@@@@@@@@@@@@@@@@@@ @@@@@ - @@@@@@@@ @@@@@ - ,@@@ @@@@& - @@@@@@. @@@@ - @@@ @@@@@@@@@/ @@@@@ - ,@@@. @@@@@@((@ @@@@( - //@@@ ,, @@@@ @@@@@ - @@@( @@@@@@@ - @@@ @ @@@@@@@@# - @@@@@@@@@@@@@@@@@ - @@@@@@@@@@@@@( - -Copyright (c) 2023 EyeTrackVR <3 -LICENSE: GNU GPLv3 ------------------------------------------------------------------------------------------------------- -""" - -from pythonosc import udp_client -from pythonosc import osc_server -from pythonosc import dispatcher -from config import EyeTrackConfig -from utils.misc_utils import PlaySound, SND_FILENAME, SND_ASYNC -from enum import IntEnum -import queue -import threading -import time - - -class EyeId(IntEnum): - RIGHT = 0 - LEFT = 1 - BOTH = 2 - SETTINGS = 3 - ALGOSETTINGS = 4 - - -def eyelid_transformer(self, eye_blink): - if self.config.osc_invert_eye_close: - return float(1 - eye_blink) - else: - return float(eye_blink) - - -se = False -falloff = False - - -def output_osc(eye_x, eye_y, eye_blink, last_blink, pupil_dilation, avg_velocity, self): - global se, falloff - - - if self.config.gui_osc_vrcft_v1: - - - if self.main_config.eye_display_id in [ - EyeId.RIGHT, - EyeId.LEFT, - ]: # we are in single eye mode - se = True - - self.client.send_message(self.config.osc_left_eye_x_address, eye_x) - self.client.send_message(self.config.osc_right_eye_x_address, eye_x) - self.client.send_message(self.config.osc_eyes_y_address, eye_y) - self.client.send_message( - self.config.osc_right_eye_close_address, - eyelid_transformer(self, eye_blink), - ) - self.client.send_message( - self.config.osc_left_eye_close_address, - eyelid_transformer(self, eye_blink), - ) - else: - se = False - - if self.eye_id in [EyeId.LEFT] and not se: # left eye, send data to left - self.l_eye_x = eye_x - self.l_eye_blink = eye_blink - self.l_eye_velocity = avg_velocity - - if self.l_eye_blink == 0.0: - if last_blink > 0.15: # when binary blink is on, blinks may be too fast for OSC so we repeat them. 
- for i in range(4): - self.client.send_message( - self.config.osc_left_eye_close_address, - eyelid_transformer(self, self.l_eye_blink), - ) - last_blink = time.time() - last_blink - - self.l_eye_x = self.r_eye_x - - self.client.send_message(self.config.osc_left_eye_x_address, self.l_eye_x) - self.left_y = eye_y - - self.client.send_message( - self.config.osc_left_eye_close_address, - eyelid_transformer(self, self.l_eye_blink), - ) - - elif self.eye_id in [EyeId.RIGHT] and not se: # Right eye, send data to right - self.r_eye_x = eye_x - self.r_eye_blink = eye_blink - self.l_eye_velocity = avg_velocity - - if self.r_eye_blink == 0.0: - if last_blink > 0.15: # when binary blink is on, blinks may be too fast for OSC so we repeat them. - # print("REPEATING R BLINK") - for i in range(4): - self.client.send_message( - self.config.osc_right_eye_close_address, - eyelid_transformer(self, self.r_eye_blink), - ) - last_blink = time.time() - last_blink - if self.config.gui_outer_side_falloff: - if self.l_eye_blink == 0.0: # if both eyes closed and DEF is enables, blink - self.client.send_message( - self.config.osc_left_eye_close_address, - eyelid_transformer(self, self.r_eye_blink), - ) - self.client.send_message( - self.config.osc_right_eye_close_address, - eyelid_transformer(self, self.r_eye_blink), - ) - - self.r_eye_x = self.l_eye_x - - self.client.send_message(self.config.osc_right_eye_x_address, eye_x) - self.right_y = eye_y - - self.client.send_message( - self.config.osc_right_eye_close_address, - eyelid_transformer(self, self.r_eye_blink), - ) - - if self.main_config.eye_display_id in [EyeId.BOTH] and self.right_y != 621 and self.left_y != 621: - y = (self.right_y + self.left_y) / 2 - self.client.send_message(self.config.osc_eyes_y_address, y) - - if self.config.gui_osc_vrcft_v2: - - if self.main_config.eye_display_id in [ - EyeId.RIGHT, - EyeId.LEFT, - ]: # we are in single eye mode - se = True - - self.client.send_message("/avatar/parameters/v2/EyeX", eye_x) - self.client.send_message("/avatar/parameters/v2/EyeY", eye_y) - self.client.send_message( - "/avatar/parameters/v2/EyeLid", - eyelid_transformer(self, eye_blink), - ) - else: - se = False - - if self.eye_id in [EyeId.LEFT] and not se: # left eye, send data to left - self.l_eye_x = eye_x - self.l_eye_blink = eye_blink - self.r_eye_velocity = avg_velocity - - if self.l_eye_blink == 0.0: - if last_blink > 0.15: # when binary blink is on, blinks may be too fast for OSC so we repeat them. 
- for i in range(4): - self.client.send_message( - "/avatar/parameters/v2/EyeLidLeft", - eyelid_transformer(self, self.l_eye_blink), - ) - last_blink = time.time() - last_blink - if self.config.gui_outer_side_falloff: - if self.r_eye_blink == 0.0: # if both eyes closed and DEF is enables, blink - self.client.send_message( - "/avatar/parameters/v2/EyeLidLeft", - eyelid_transformer(self, self.l_eye_blink), - ) - self.client.send_message( - "/avatar/parameters/v2/EyeLidRight", - eyelid_transformer(self, self.l_eye_blink), - ) - self.l_eye_x = self.r_eye_x - - self.client.send_message("/avatar/parameters/v2/EyeLeftX", self.l_eye_x) - self.left_y = eye_y - - if self.left_y != 621: - self.client.send_message("/avatar/parameters/v2/EyeLeftY", self.left_y) - - self.client.send_message( - "/avatar/parameters/v2/EyeLidLeft", - eyelid_transformer(self, self.l_eye_blink), - ) - - elif self.eye_id in [EyeId.RIGHT] and not se: # Right eye, send data to right - self.r_eye_x = eye_x - self.r_eye_blink = eye_blink - self.r_eye_velocity = avg_velocity - - if self.r_eye_blink == 0.0: - if last_blink > 0.15: # when binary blink is on, blinks may be too fast for OSC so we repeat them. - for i in range(4): - self.client.send_message( - "/avatar/parameters/v2/EyeLidRight", - eyelid_transformer(self, self.r_eye_blink), - ) - last_blink = time.time() - last_blink - if self.config.gui_outer_side_falloff: - if self.l_eye_blink == 0.0: # if both eyes closed and DEF is enables, blink - self.client.send_message( - "/avatar/parameters/v2/EyeLidLeft", - eyelid_transformer(self, self.r_eye_blink), - ) - self.client.send_message( - "/avatar/parameters/v2/EyeLidRight", - eyelid_transformer(self, self.r_eye_blink), - ) - - self.r_eye_x = self.l_eye_x - - self.client.send_message("/avatar/parameters/v2/EyeRightX", self.r_eye_x) - self.right_y = eye_y - - if self.right_y != 621: - self.client.send_message("/avatar/parameters/v2/EyeRightY", self.right_y) - - self.client.send_message( - "/avatar/parameters/v2/EyeLidRight", - eyelid_transformer(self, self.r_eye_blink), - ) - - if self.config.gui_vrc_native: # VRC NATIVE - - - if self.main_config.eye_display_id in [ - EyeId.RIGHT, - EyeId.LEFT, - ]: # we are in single eye mode - - se = True - if eye_blink == 0.0: - if last_blink > 0.2: # when binary blink is on, blinks may be too fast for OSC so we repeat them. - for i in range(5): - self.client.send_message("/tracking/eye/EyesClosedAmount", float(1 - eye_blink)) - eye_blink += 0.02 # TODO finish tuning value - last_blink = time.time() - last_blink - - else: - self.client.send_message("/tracking/eye/EyesClosedAmount", float(1 - eye_blink)) - self.client.send_message( - "/tracking/eye/LeftRightVec", - [float(eye_x), float(eye_y), 1.0, float(eye_x), float(eye_y), 1.0], - ) # vrc native ET - - else: - se = False - - if self.eye_id in [EyeId.LEFT] and not se: # left eye, send data to left - - self.l_eye_x = eye_x - self.l_eye_blink = eye_blink - self.left_y = eye_y - self.l_eye_velocity = avg_velocity - self.client.send_message( - self.config.osc_left_eye_close_address, - eyelid_transformer(self, eye_blink), - ) - - if self.l_eye_blink == 0.0: - if last_blink > 0.2: # when binary blink is on, blinks may be too fast for OSC so we repeat them. 
- for i in range(5): - self.client.send_message("/tracking/eye/EyesClosedAmount", float(1 - eye_blink)) - last_blink = time.time() - last_blink - if self.config.gui_outer_side_falloff: - if self.r_eye_blink == 0.0: # if both eyes closed and DEF is enables, blink - self.client.send_message("/tracking/eye/EyesClosedAmount", float(1 - eye_blink)) - self.l_eye_x = self.r_eye_x - - elif self.eye_id in [EyeId.RIGHT] and not se: # Right eye, send data to right - self.r_eye_x = eye_x - self.r_eye_blink = eye_blink - self.right_y = eye_y - self.r_eye_velocity = avg_velocity - self.client.send_message( - self.config.osc_right_eye_close_address, - eyelid_transformer(self, eye_blink), - ) - - if self.r_eye_blink == 0.0: - if last_blink > 0.2: # when binary blink is on, blinks may be too fast for OSC so we repeat them. - for i in range(5): - self.client.send_message("/tracking/eye/EyesClosedAmount", float(1 - eye_blink)) - last_blink = time.time() - last_blink - if self.config.gui_outer_side_falloff: - if self.l_eye_blink == 0.0: # if both eyes closed and DEF is enables, blink - self.client.send_message("/tracking/eye/EyesClosedAmount", float(0)) - - self.r_eye_x = self.l_eye_x - - if self.main_config.eye_display_id in [EyeId.BOTH] and self.r_eye_blink != 621 and self.r_eye_blink != 621: - if self.r_eye_blink == 0.0 or self.l_eye_blink == 0.0: - if last_blink > 0.2: # when binary blink is on, blinks may be too fast for OSC so we repeat them. - for i in range(5): - self.client.send_message("/tracking/eye/EyesClosedAmount", float(1)) - last_blink = time.time() - last_blink - eye_blink = (self.r_eye_blink + self.l_eye_blink) / 2 - self.client.send_message("/tracking/eye/EyesClosedAmount", float(1 - eye_blink)) - - if self.main_config.eye_display_id in [EyeId.BOTH] and self.right_y != 621 and self.left_y != 621: - eye_y = (self.right_y + self.left_y) / 2 - - if not se: - - # vrc native ET (z values may need tweaking, they act like a scalar) - self.client.send_message( - "/tracking/eye/LeftRightVec", - [ - float(self.l_eye_x), - float(self.left_y), - 1.0, - float(self.r_eye_x), - float(self.right_y), - 1.0, - ], - ) - - -class VRChatOSC: - # Use a tuple of blink (true, blinking, false, not), x, y for now. 
- def __init__( - self, - cancellation_event: threading.Event, - msg_queue: queue.Queue[tuple[bool, int, int]], - main_config: EyeTrackConfig, - ): - self.main_config = main_config - self.config = main_config.settings - self.client = udp_client.SimpleUDPClient( - self.config.gui_osc_address, int(self.config.gui_osc_port) - ) # use OSC port and address that was set in the config - self.cancellation_event = cancellation_event - self.msg_queue = msg_queue - self.eye_id = EyeId.RIGHT - self.left_y = 621 - self.right_y = 621 - self.r_eye_x = 0 - self.l_eye_x = 0 - self.r_eye_blink = 0.7 - self.l_eye_blink = 0.7 - self.l_eye_velocity = 0 - self.r_eye_velocity = 1 - - def run(self): - start = time.time() - last_blink = time.time() - while True: - if self.cancellation_event.is_set(): - print("\033[94m[INFO] Exiting OSC Queue\033[0m") - return - try: - (self.eye_id, eye_info) = self.msg_queue.get(block=True, timeout=0.1) - except: - continue - - output_osc( - eye_info.x, - eye_info.y, - eye_info.blink, - last_blink, - eye_info.pupil_dilation, - eye_info.avg_velocity, - self, - ) - - -class VRChatOSCReceiver: - def __init__(self, cancellation_event: threading.Event, main_config: EyeTrackConfig, eyes: []): - self.config = main_config.settings - self.cancellation_event = cancellation_event - self.dispatcher = dispatcher.Dispatcher() - self.eyes = eyes # we cant import CameraWidget so any type it is - try: - self.server = osc_server.OSCUDPServer( - (self.config.gui_osc_address, int(self.config.gui_osc_receiver_port)), - self.dispatcher, - ) - except: - print(f"\033[91m[ERROR] OSC Receive port: {self.config.gui_osc_receiver_port} occupied.\033[0m") - - def shutdown(self): - print("\033[94m[INFO] Exiting OSC Receiver\033[0m") - try: - self.server.shutdown() - except: - pass - - def recenter_eyes(self, address, osc_value): - if type(osc_value) != bool: - return # just incase we get anything other than bool - if osc_value: - for eye in self.eyes: - eye.settings.gui_recenter_eyes = True - - def recalibrate_eyes(self, address, osc_value): - if type(osc_value) != bool: - return # just incase we get anything other than bool - if osc_value: - for eye in self.eyes: - eye.ransac.ibo.clear_filter() - eye.ransac.calibration_frame_counter = self.config.calibration_samples - PlaySound("Audio/start.wav", SND_FILENAME | SND_ASYNC) - - def run(self): - # bind what function to run when specified OSC message is received - try: - self.dispatcher.map(self.config.gui_osc_recalibrate_address, self.recalibrate_eyes) - self.dispatcher.map(self.config.gui_osc_recenter_address, self.recenter_eyes) - # start the server - print("\033[92m[INFO] VRChatOSCReceiver serving on {}\033[0m".format(self.server.server_address)) - self.server.serve_forever() - - except: - print(f"\033[91m[ERROR] OSC Receive port: {self.config.gui_osc_receiver_port} occupied.\033[0m") diff --git a/EyeTrackApp/osc/OSCMessage.py b/EyeTrackApp/osc/OSCMessage.py new file mode 100644 index 0000000..be4e0e8 --- /dev/null +++ b/EyeTrackApp/osc/OSCMessage.py @@ -0,0 +1,13 @@ +import dataclasses +from enum import IntEnum + + +class OSCMessageType(IntEnum): + EYE_INFO = 1 + VRCFT_MODULE_INFO = 2 + + +@dataclasses.dataclass +class OSCMessage: + type: OSCMessageType + data: any diff --git a/EyeTrackApp/osc/VRCFTModuleMessenger.py b/EyeTrackApp/osc/VRCFTModuleMessenger.py new file mode 100644 index 0000000..46c6340 --- /dev/null +++ b/EyeTrackApp/osc/VRCFTModuleMessenger.py @@ -0,0 +1,17 @@ +from pythonosc.udp_client import SimpleUDPClient +from osc.OSCMessage import 
OSCMessage + + +class VRCFTModuleSender: + set_command_pattern = "/command/{}/{}/" + + def send(self, osc_message: OSCMessage, client: SimpleUDPClient): + command = osc_message.data.get("command", None) + field_to_send = osc_message.data.get("field", None) + value_to_send = osc_message.data.get("value", None) + + if not command or not all([field_to_send, value_to_send is not None]): + print("[ERROR] Misconfiguration in received OSC message for the VRCFT Module") + return + + client.send_message(self.set_command_pattern.format(command, field_to_send), value_to_send) diff --git a/EyeTrackApp/osc/VRChatOSCSender.py b/EyeTrackApp/osc/VRChatOSCSender.py new file mode 100644 index 0000000..612350c --- /dev/null +++ b/EyeTrackApp/osc/VRChatOSCSender.py @@ -0,0 +1,307 @@ +from pythonosc.udp_client import SimpleUDPClient + +from eye import EyeId +from osc.OSCMessage import OSCMessage + +from config import EyeTrackConfig, EyeTrackSettingsConfig +from enum import IntEnum +import time + + +def _eyelid_transformer(config, eye_blink): + if config.osc_invert_eye_close: + return float(1 - eye_blink) + else: + return float(eye_blink) + + +class OutputType(IntEnum): + V1_PARAMS = 1 + V2_PARAMS = 2 + NATIVE_PARAMS = 3 + + +class VRChatOSCSender: + def __init__(self): + self.is_single_eye = False + self.falloff_enabled = False + self.left_y = 621 + self.right_y = 621 + self.r_eye_x = 0 + self.l_eye_x = 0 + self.r_eye_blink = 0.7 + self.l_eye_blink = 0.7 + self.l_eye_velocity = 0 + self.r_eye_velocity = 1 + self.left_last_blink = time.time() + self.right_last_blink = time.time() + + def output_osc_info( + self, + osc_message: OSCMessage, + client: SimpleUDPClient, + main_config: EyeTrackConfig, + config: EyeTrackSettingsConfig, + ): + eye_id, eye_info = osc_message.data + self.is_single_eye = self.get_is_single_eye(main_config.eye_display_id) + + output_method = None + + if config.gui_vrc_native: + output_method = self.output_native + if config.gui_osc_vrcft_v1: + output_method = self.output_v1_params + if config.gui_osc_vrcft_v2: + output_method = self.output_v2_params + + if output_method: + output_method( + main_config=main_config, + config=config, + client=client, + eye_x=eye_info.x, + eye_y=eye_info.y, + eye_blink=eye_info.blink, + avg_velocity=eye_info.avg_velocity, + eye_id=eye_id, + ) + + @staticmethod + def get_is_single_eye(eye_display_id): + return eye_display_id in [EyeId.RIGHT, EyeId.LEFT] + + def update_eye_state(self, eye_id, eye_x, eye_y, eye_blink, avg_velocity): + if eye_id == EyeId.LEFT: + self.l_eye_x = eye_x + self.l_eye_blink = eye_blink + self.left_y = eye_y + self.l_eye_velocity = avg_velocity + if eye_id == EyeId.RIGHT: + self.r_eye_x = eye_x + self.r_eye_blink = eye_blink + self.right_y = eye_y + self.r_eye_velocity = avg_velocity + + def output_native(self, main_config, config, client, eye_x, eye_y, eye_blink, avg_velocity, eye_id): + default_eye_blink_params = { + "eye_id": eye_id, + "client": client, + "config": config, + } + + self.update_eye_state( + eye_id=eye_id, + eye_x=eye_x, + eye_y=eye_y, + eye_blink=eye_blink, + avg_velocity=avg_velocity, + ) + + if self.is_single_eye: + self.output_osc_native_blink( + **default_eye_blink_params, + ) + client.send_message( + "/tracking/eye/LeftRightVec", + [float(eye_x), float(eye_y), 1.0, float(eye_x), float(eye_y), 1.0], + ) + + if eye_id in [EyeId.LEFT, EyeId.RIGHT] and not self.is_single_eye: + self.output_osc_native_blink(**default_eye_blink_params, single_eye_mode=False) + + if main_config.eye_display_id == EyeId.BOTH and 
self.r_eye_blink != 621 and self.l_eye_blink != 621:
+            self.output_osc_native_blink(**default_eye_blink_params)
+
+        if not self.is_single_eye:
+            # vrc native ET (z values may need tweaking, they act like a scalar)
+            client.send_message(
+                "/tracking/eye/LeftRightVec",
+                [
+                    float(self.l_eye_x),
+                    float(self.left_y),
+                    1.0,
+                    float(self.r_eye_x),
+                    float(self.right_y),
+                    1.0,
+                ],
+            )
+
+    def output_v1_params(self, main_config, config, client, eye_x, eye_y, eye_blink, avg_velocity, eye_id):
+        default_eye_blink_params = {
+            "eye_id": eye_id,
+            "client": client,
+            "config": config,
+            "left_eye_blink_address": config.osc_left_eye_close_address,
+            "right_eye_blink_address": config.osc_right_eye_close_address,
+        }
+
+        self.update_eye_state(
+            eye_id=eye_id,
+            eye_x=eye_x,
+            eye_y=eye_y,
+            eye_blink=eye_blink,
+            avg_velocity=avg_velocity,
+        )
+
+        if self.is_single_eye:
+            client.send_message(config.osc_left_eye_x_address, eye_x)
+            client.send_message(config.osc_right_eye_x_address, eye_x)
+            client.send_message(config.osc_eyes_y_address, eye_y)
+            self.output_vrcft_blink_data(**default_eye_blink_params)
+
+        if eye_id in [EyeId.LEFT, EyeId.RIGHT] and not self.is_single_eye:
+            self.output_vrcft_blink_data(**default_eye_blink_params, single_eye_mode=False)
+
+            if eye_id == EyeId.LEFT:
+                client.send_message(config.osc_left_eye_x_address, self.l_eye_x)
+                self.left_y = eye_y
+                client.send_message(
+                    config.osc_left_eye_close_address,
+                    _eyelid_transformer(config, self.l_eye_blink),
+                )
+
+            if eye_id == EyeId.RIGHT:
+                client.send_message(config.osc_right_eye_x_address, self.r_eye_x)
+                self.right_y = eye_y
+
+                client.send_message(
+                    config.osc_right_eye_close_address,
+                    _eyelid_transformer(config, self.r_eye_blink),
+                )
+
+        if main_config.eye_display_id == EyeId.BOTH and self.right_y != 621 and self.left_y != 621:
+            y = (self.right_y + self.left_y) / 2
+            client.send_message(config.osc_eyes_y_address, y)
+
+    def output_v2_params(self, main_config, config, client, eye_x, eye_y, eye_blink, avg_velocity, eye_id):
+        default_eye_blink_params = {
+            "eye_id": eye_id,
+            "client": client,
+            "config": config,
+        }
+
+        self.update_eye_state(
+            eye_id=eye_id,
+            eye_x=eye_x,
+            eye_y=eye_y,
+            eye_blink=eye_blink,
+            avg_velocity=avg_velocity,
+        )
+
+        if self.is_single_eye:
+            client.send_message("/avatar/parameters/v2/EyeX", eye_x)
+            client.send_message("/avatar/parameters/v2/EyeY", eye_y)
+
+            self.output_vrcft_blink_data(
+                **default_eye_blink_params,
+                left_eye_blink_address="/avatar/parameters/v2/EyeLid",
+                right_eye_blink_address="/avatar/parameters/v2/EyeLid",
+            )
+
+        if eye_id in [EyeId.LEFT, EyeId.RIGHT] and not self.is_single_eye:
+            self.output_vrcft_blink_data(
+                **default_eye_blink_params,
+                left_eye_blink_address="/avatar/parameters/v2/EyeLidLeft",
+                right_eye_blink_address="/avatar/parameters/v2/EyeLidRight",
+                single_eye_mode=False,
+            )
+
+            if eye_id == EyeId.LEFT:
+                client.send_message("/avatar/parameters/v2/EyeLeftX", self.l_eye_x)
+                if self.left_y != 621:
+                    client.send_message("/avatar/parameters/v2/EyeLeftY", eye_y)
+
+                client.send_message(
+                    "/avatar/parameters/v2/EyeLidLeft",
+                    _eyelid_transformer(config, self.l_eye_blink),
+                )
+
+            if eye_id == EyeId.RIGHT:
+                client.send_message("/avatar/parameters/v2/EyeRightX", self.r_eye_x)
+                if eye_y != 621:
+                    client.send_message("/avatar/parameters/v2/EyeRightY", eye_y)
+
+                client.send_message(
+                    "/avatar/parameters/v2/EyeLidRight",
+                    _eyelid_transformer(config, self.r_eye_blink),
+                )
+
+    def output_vrcft_blink_data(
+        self,
+        eye_id: EyeId,
+        client: SimpleUDPClient, 
+        config,
+        left_eye_blink_address,
+        right_eye_blink_address,
+        single_eye_mode=True,
+    ):
+        active_eye_blink = self.r_eye_blink if eye_id == EyeId.RIGHT else self.l_eye_blink
+        falloff_blink = self.r_eye_blink if eye_id == EyeId.LEFT else self.l_eye_blink
+        blink_address = right_eye_blink_address if eye_id == EyeId.RIGHT else left_eye_blink_address
+
+        side_name = "left" if eye_id == EyeId.RIGHT else "right"
+        last_side_blink = getattr(self, f"{side_name}_last_blink")
+
+        if single_eye_mode:
+            # with v1 params we have to send the same data to each eye separately,
+            # so with v2 params this generates one extra, unnecessary call
+            client.send_message(left_eye_blink_address, _eyelid_transformer(config, active_eye_blink))
+            client.send_message(right_eye_blink_address, _eyelid_transformer(config, active_eye_blink))
+
+        elif eye_id in [EyeId.RIGHT, EyeId.LEFT] and not single_eye_mode:
+            if active_eye_blink == 0.0:
+                if last_side_blink > 0.20:
+                    for _ in range(5):
+                        client.send_message(blink_address, _eyelid_transformer(config, active_eye_blink))
+                    setattr(self, f"{side_name}_last_blink", time.time() - last_side_blink)
+                if config.gui_outer_side_falloff:
+                    if falloff_blink == 0.0:
+                        client.send_message(left_eye_blink_address, _eyelid_transformer(config, self.l_eye_blink))
+                        client.send_message(right_eye_blink_address, _eyelid_transformer(config, self.r_eye_blink))
+            client.send_message(blink_address, _eyelid_transformer(config, active_eye_blink))
+
+    def output_osc_native_blink(
+        self,
+        eye_id: EyeId,
+        client,
+        config,
+        single_eye_mode=True,
+    ):
+        blink_address = "/tracking/eye/EyesClosedAmount"
+        active_eye_blink = self.r_eye_blink if eye_id == EyeId.RIGHT else self.l_eye_blink
+        falloff_blink = self.r_eye_blink if eye_id == EyeId.LEFT else self.l_eye_blink
+
+        side_name = "left" if eye_id == EyeId.RIGHT else "right"
+        last_side_blink = getattr(self, f"{side_name}_last_blink")
+
+        def send_native_binary_blink(address: str, blink_value):
+            if last_side_blink > 0.2:
+                for _ in range(5):
+                    client.send_message(address, float(1 - blink_value))
+                setattr(self, f"{side_name}_last_blink", time.time() - last_side_blink)
+
+        if single_eye_mode:
+            if active_eye_blink == 0.0:
+                send_native_binary_blink(blink_address, active_eye_blink)
+            else:
+                client.send_message(blink_address, float(1 - active_eye_blink))
+
+        if eye_id in [EyeId.RIGHT, EyeId.LEFT] and not single_eye_mode:
+            client.send_message(
+                blink_address,
+                _eyelid_transformer(config, 1 - active_eye_blink),
+            )
+
+            if active_eye_blink == 0.0:
+                send_native_binary_blink(blink_address, active_eye_blink)
+                if config.gui_outer_side_falloff:
+                    if falloff_blink == 0.0:
+                        client.send_message(blink_address, float(1 - active_eye_blink))
+
+        if eye_id == EyeId.BOTH and self.r_eye_blink != 621 and self.l_eye_blink != 621:
+            if self.r_eye_blink == 0.0 or self.l_eye_blink == 0.0:
+                send_native_binary_blink(blink_address, active_eye_blink)
+            # this has a nasty habit of permanent-squint FIXME
+            averaged_eye_blink = (self.r_eye_blink + self.l_eye_blink) / 2
+            client.send_message(blink_address, float(1 - averaged_eye_blink))
diff --git a/EyeTrackApp/osc/__init__.py b/EyeTrackApp/osc/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/EyeTrackApp/osc/osc.py b/EyeTrackApp/osc/osc.py
new file mode 100644
index 0000000..177a663
--- /dev/null
+++ b/EyeTrackApp/osc/osc.py
@@ -0,0 +1,214 @@
+"""
+------------------------------------------------------------------------------------------------------
+
+    ,@@@@@@
+    @@@@@@@@@@@  @@@
+ 
@@@@@@@@@@@@ @@@@@@@@@@@ + @@@@@@@@@@@@@ @@@@@@@@@@@@@@ + @@@@@@@/ ,@@@@@@@@@@@@@ + /@@@@@@@@@@@@@@@ @@@@@@@@ + @@@@@@@@@@@@@@@@@@@@@@@@ @@@@@ + @@@@@@@@ @@@@@ + ,@@@ @@@@& + @@@@@@. @@@@ + @@@ @@@@@@@@@/ @@@@@ + ,@@@. @@@@@@((@ @@@@( + //@@@ ,, @@@@ @@@@@ + @@@( @@@@@@@ + @@@ @ @@@@@@@@# + @@@@@@@@@@@@@@@@@ + @@@@@@@@@@@@@( + +Copyright (c) 2023 EyeTrackVR <3 +LICENSE: GNU GPLv3 +------------------------------------------------------------------------------------------------------ +""" + + +from time import sleep +from typing import Dict, Optional, Iterable, Callable + +from pythonosc import udp_client +from pythonosc import osc_server +from pythonosc import dispatcher + +from config import EyeTrackConfig +from osc.OSCMessage import OSCMessage, OSCMessageType +from osc.VRCFTModuleMessenger import VRCFTModuleSender +from osc.VRChatOSCSender import VRChatOSCSender +import queue +import threading + + +class OSCManager: + def __init__( + self, + osc_message_in_queue: queue.Queue[OSCMessage], + config: EyeTrackConfig, + ): + self.sender_cancellation_event = threading.Event() + self.receiver_cancellation_event = threading.Event() + self.listeners = {} + self.osc_message_in_queue = osc_message_in_queue + self.config = config + self.settings = config.settings + self.osc_sender: Optional[OSCSender] = None + self.osc_receiver = None + self.osc_sender_thread: Optional[threading.Thread] = None + self.osc_receiver_thread: Optional[threading.Thread] = None + + def start(self): + self.setup_sender() + self.setup_receiver() + + def setup_sender(self): + print(f"\033[92m[INFO] Setting up OSC sender\033[0m") + self.sender_cancellation_event.clear() + self.osc_sender = OSCSender(self.sender_cancellation_event, self.osc_message_in_queue, self.config) + self.osc_sender_thread = threading.Thread(target=self.osc_sender.run) + self.osc_sender_thread.start() + + def setup_receiver(self): + if self.settings.gui_ROSC: + self.receiver_cancellation_event.clear() + print(f"\033[92m[INFO] Setting up OSC receiver\033[0m") + self.osc_receiver = OSCReceiver(self.receiver_cancellation_event, self.config, self.listeners) + self.osc_receiver_thread = threading.Thread(target=self.osc_receiver.run) + self.osc_receiver_thread.start() + + def register_listeners(self, osc_address: str, callbacks: Iterable[Callable]): + if not self.listeners.get(osc_address): + self.listeners[osc_address] = [] + + self.listeners[osc_address].extend(callbacks) + + def update(self, data: dict): + keys = set(data.keys()) + sender_trigger_keys = { + "gui_osc_port", + "gui_VRCFTModulePort", + "gui_VRCFTModuleIPAddress", + "gui_use_module", + } + if sender_trigger_keys.intersection(keys): + self.stop_sender() + self.setup_sender() + + receiver_trigger_keys = { + "gui_ROSC", + "gui_osc_receiver_port", + } + if receiver_trigger_keys.intersection(keys): + self.stop_receiver() + self.setup_receiver() + + def shutdown(self): + self.stop_sender() + self.stop_receiver() + + def stop_sender(self): + self.sender_cancellation_event.set() + self.osc_sender_thread.join() + + def stop_receiver(self): + if self.osc_receiver_thread: + self.receiver_cancellation_event.set() + self.osc_receiver_thread.join() + + +class OSCSender: + def __init__( + self, + cancellation_event: threading.Event, + msg_queue: queue.Queue[OSCMessage], + main_config: EyeTrackConfig, + ): + self.cancellation_event = cancellation_event + self.msg_queue = msg_queue + self.main_config = main_config + self.config = main_config.settings + self.vrc_sender = VRChatOSCSender() + self.module_sender = 
VRCFTModuleSender()
+
+        self.vrc_client = None
+        self.vrcft_client = None
+        # the client eye tracking output is currently routed to, set in run()
+        self.client = None
+
+    def run(self):
+        self.vrc_client = udp_client.SimpleUDPClient(self.config.gui_osc_address, int(self.config.gui_osc_port))
+        self.vrcft_client = udp_client.SimpleUDPClient(
+            self.config.gui_VRCFTModuleIPAddress,
+            int(self.config.gui_VRCFTModulePort),
+        )
+
+        vrc_osc_output_client = self.vrc_client
+        if self.config.gui_use_module:
+            vrc_osc_output_client = self.vrcft_client
+        self.client = vrc_osc_output_client
+
+        while not self.cancellation_event.is_set():
+            try:
+                osc_message: OSCMessage = self.msg_queue.get(block=True, timeout=0.1)
+                match osc_message.type:
+                    case OSCMessageType.EYE_INFO:
+                        self.vrc_sender.output_osc_info(
+                            osc_message=osc_message,
+                            client=vrc_osc_output_client,
+                            main_config=self.main_config,
+                            config=self.config,
+                        )
+                    case OSCMessageType.VRCFT_MODULE_INFO:
+                        self.module_sender.send(osc_message=osc_message, client=self.vrcft_client)
+                    case _:
+                        raise Exception(f"Encountered message without a handler: {osc_message.type}")
+            except queue.Empty:
+                continue
+
+
+class OSCReceiver:
+    def __init__(
+        self,
+        cancellation_event: threading.Event,
+        main_config: EyeTrackConfig,
+        listeners: Dict[str, Iterable[Callable[[OSCMessage], None]]],
+    ):
+        self.config = main_config.settings
+        self.cancellation_event = cancellation_event
+        self.dispatcher = dispatcher.Dispatcher()
+        self.listeners = listeners
+        self.server_thread = None
+        try:
+            # pythonosc's blocking server is awkward to manage: the only way to shut
+            # it down cleanly is to run serve_forever() in a separate thread, and even
+            # then the shutdown is not complete - the socket can stay bound to the
+            # port afterwards
+            self.server = osc_server.OSCUDPServer(
+                (self.config.gui_osc_address, int(self.config.gui_osc_receiver_port)),
+                self.dispatcher,
+            )
+        except Exception:  # noqa
+            print(f"\033[91m[ERROR] OSC Receive port: {self.config.gui_osc_receiver_port} occupied.\033[0m")
+
+    def shutdown(self):
+        print("\033[94m[INFO] Exiting OSC Receiver\033[0m")
+        try:
+            self.server.shutdown()
+            self.server_thread.join()
+        except Exception:  # noqa
+            pass
+
+    def handle_osc_message(self, address, value):
+        for listener in self.listeners.get(address, []):
+            listener(OSCMessage(type=OSCMessageType.EYE_INFO, data=value))
+
+    def run(self):
+        try:
+            self.dispatcher.set_default_handler(self.handle_osc_message)
+            print("\033[92m[INFO] OSC Listening on {}\033[0m".format(self.server.server_address))
+            self.server_thread = threading.Thread(target=self.server.serve_forever)
+            self.server_thread.start()
+
+            while not self.cancellation_event.is_set():
+                sleep(10)
+
+            self.shutdown()
+        except Exception:  # noqa
+            print(f"\033[91m[ERROR] OSC Receiver failed to start, port {self.config.gui_osc_receiver_port} is likely occupied.\033[0m")
diff --git a/EyeTrackApp/ransac.py b/EyeTrackApp/ransac.py
index 3eb518a..1c4152c 100644
--- a/EyeTrackApp/ransac.py
+++ b/EyeTrackApp/ransac.py
@@ -28,7 +28,7 @@ LICENSE: Summer Software Distribution License 1.0
 """
 import cv2
 import numpy as np
-from enum import IntEnum
+from eye import EyeId
 from utils.img_utils import safe_crop
 from utils.misc_utils import clamp
 import os
@@ -46,13 +46,6 @@ else:
     process.nice()
 
 
-class EyeId(IntEnum):
-    RIGHT = 0
-    LEFT = 1
-    BOTH = 2
-    SETTINGS = 3
-
-
 def ellipse_model(data, y, f):
     """
     There is no need to make this process a function, since making the process a function will slow it down a little by calling it.
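
Since OSCManager is now the single entry point for everything OSC, a minimal usage sketch may help reviewers; it is not part of the patch. It assumes EyeTrackConfig() is constructible with its defaults, borrows EyeInfoMock from the test helpers as stand-in eye data, and the listener address is a made-up placeholder (real addresses come from the app's config and widgets):

import queue

from config import EyeTrackConfig
from osc.osc import OSCManager
from osc.OSCMessage import OSCMessage, OSCMessageType
from tests import EyeInfoMock

config = EyeTrackConfig()  # assumption: default constructor yields a usable config
osc_queue: queue.Queue[OSCMessage] = queue.Queue()
manager = OSCManager(osc_message_in_queue=osc_queue, config=config)

# listeners are grouped per OSC address; every callback receives an OSCMessage
manager.register_listeners(
    "/avatar/parameters/etvr_recalibrate",  # placeholder address, not from this patch
    [lambda msg: print("recalibrate requested:", msg.data)],
)

manager.start()
# producers push OSCMessages onto the queue; the sender thread drains it and
# routes by type (EYE_INFO to VRChatOSCSender, VRCFT_MODULE_INFO to the module sender)
osc_queue.put(
    OSCMessage(
        type=OSCMessageType.EYE_INFO,
        data=(0, EyeInfoMock(x=0, y=0, blink=1, pupil_dilation=0, avg_velocity=0)),
    )
)
manager.shutdown()
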
diff --git a/EyeTrackApp/settings/BaseSettings.py b/EyeTrackApp/settings/BaseSettings.py index d2cb1ae..94b039a 100644 --- a/EyeTrackApp/settings/BaseSettings.py +++ b/EyeTrackApp/settings/BaseSettings.py @@ -6,7 +6,7 @@ from colorama import Fore from config import EyeTrackConfig, EyeTrackSettingsConfig from threading import Event -from osc import EyeId # TODO this is bad, fix this +from eye import EyeId class BaseSettingsWidget: @@ -25,9 +25,7 @@ class BaseSettingsWidget: self.main_config = main_config self.config = main_config.settings - self.initialized_modules = self._initialize_modules( - settings_modules=settings_modules, widget_id=widget_id - ) + self.initialized_modules = self._initialize_modules(settings_modules=settings_modules, widget_id=widget_id) self.general_settings_layout = [] for module in self.initialized_modules: @@ -41,8 +39,16 @@ class BaseSettingsWidget: background_color="#424042", ), ], - [sg.Text("", background_color="#424042"), ], - [sg.Button("Reset settings to default", key=self.reset_button_key, button_color="#c40e23")], + [ + sg.Text("", background_color="#424042"), + ], + [ + sg.Button( + "Reset settings to default", + key=self.reset_button_key, + button_color="#c40e23", + ) + ], ] self.cancellation_event = ( diff --git a/EyeTrackApp/settings/VRCFTModuleSettings.py b/EyeTrackApp/settings/VRCFTModuleSettings.py new file mode 100644 index 0000000..62e281b --- /dev/null +++ b/EyeTrackApp/settings/VRCFTModuleSettings.py @@ -0,0 +1,34 @@ +from queue import Queue + +from config import EyeTrackConfig +from eye import EyeId +from osc.OSCMessage import OSCMessage, OSCMessageType +from settings.BaseSettings import BaseSettingsWidget +from settings.modules.VRCFTSettingsModule import VRCFTSettingsModule + + +class VRCFTSettingsWidget(BaseSettingsWidget): + def __init__(self, widget_id: EyeId, main_config: EyeTrackConfig, osc_queue_in: Queue[OSCMessage]): + self.osc_queue = osc_queue_in + settings_modules = [ + VRCFTSettingsModule, + ] + + super().__init__(widget_id, main_config, settings_modules) + + def _update_and_save_config(self, validated_data: dict): + self.main_config.update(validated_data, save=True) + + for field, value in validated_data.items(): + self.osc_queue.put( + OSCMessage( + type=OSCMessageType.VRCFT_MODULE_INFO, + data={ + "command": "set", + "field": field, + "value": value, + }, + ) + ) + + self.is_saving = False diff --git a/EyeTrackApp/algo_settings_widget.py b/EyeTrackApp/settings/algo_settings_widget.py similarity index 96% rename from EyeTrackApp/algo_settings_widget.py rename to EyeTrackApp/settings/algo_settings_widget.py index 40a356f..ccc268c 100644 --- a/EyeTrackApp/algo_settings_widget.py +++ b/EyeTrackApp/settings/algo_settings_widget.py @@ -20,14 +20,12 @@ @@@@@@@@@@@@@( Copyright (c) 2023 EyeTrackVR <3 -LICENSE: GNU GPLv3 +LICENSE: GNU GPLv3 ------------------------------------------------------------------------------------------------------ """ -import PySimpleGUI as sg - from config import EyeTrackConfig -from osc import EyeId +from eye import EyeId from settings.BaseSettings import BaseSettingsWidget from settings.modules.AdvancedTrackingAlgoSettingsModule import ( diff --git a/EyeTrackApp/general_settings_widget.py b/EyeTrackApp/settings/general_settings_widget.py similarity index 98% rename from EyeTrackApp/general_settings_widget.py rename to EyeTrackApp/settings/general_settings_widget.py index 74ff2bc..1031cd6 100644 --- a/EyeTrackApp/general_settings_widget.py +++ b/EyeTrackApp/settings/general_settings_widget.py @@ -25,7 +25,7 
@@ LICENSE: GNU GPLv3
 """
 from config import EyeTrackConfig
-from osc import EyeId
+from eye import EyeId
 from settings.BaseSettings import BaseSettingsWidget
 from settings.modules.GeneralSettingsModule import GeneralSettingsModule
@@ -41,4 +41,3 @@ class SettingsWidget(BaseSettingsWidget):
             OSCSettingsModule,
         ]
         super().__init__(widget_id, main_config, settings_modules)
-
diff --git a/EyeTrackApp/settings/modules/CommonFieldValidators.py b/EyeTrackApp/settings/modules/CommonFieldValidators.py
index 075411a..b42dab5 100644
--- a/EyeTrackApp/settings/modules/CommonFieldValidators.py
+++ b/EyeTrackApp/settings/modules/CommonFieldValidators.py
@@ -1,11 +1,34 @@
-def check_is_float_convertible(v: str):
-    """Check if value provided can be converted to a float or double.
+import ipaddress
 
-    PySimpleGUI does not support floats or doubles in UI, so we have to make sure
-    that what the user typed in is correct
+
+def check_is_float_convertible(v: str):
+    """
+    Check if the provided value can be converted to a float or double.
+
+    PySimpleGUI does not support floats or doubles in the UI, so we have to make sure
+    that what the user typed in is correct.
     """
     try:
         float(v)
         return v
     except ValueError:
         raise ValueError("Please provide a proper number")
+
+
+def try_convert_to_float(v: str):
+    """
+    Check that the provided value can be converted to a float, and return it as a float.
+    Same validation as `check_is_float_convertible`, but with the conversion applied.
+    """
+    # check_is_float_convertible already raises a descriptive ValueError on bad input
+    return float(check_is_float_convertible(v))
+
+
+def check_is_ip_address(v: str):
+    """Check that the provided value is a valid IPv4 address."""
+    try:
+        ipaddress.IPv4Address(v)
+        return v
+    except ValueError:
+        raise ValueError("Please provide a valid IP Address")
diff --git a/EyeTrackApp/settings/modules/OSCSettingsModule.py b/EyeTrackApp/settings/modules/OSCSettingsModule.py
index 8cc3722..728bd0b 100644
--- a/EyeTrackApp/settings/modules/OSCSettingsModule.py
+++ b/EyeTrackApp/settings/modules/OSCSettingsModule.py
@@ -15,6 +15,7 @@ class OSCValidationModel(BaseValidationModel):
     gui_vrc_native: bool
     gui_osc_vrcft_v1: bool
     gui_osc_vrcft_v2: bool
+    gui_use_module: bool
 
     @model_validator(mode="after")
     def check_osc_vrcft_versions(self):
@@ -42,12 +43,22 @@ class OSCSettingsModule(BaseSettingsModule):
         self.gui_vrc_native = f"-VRCNATIVE{widget_id}-"
         self.gui_osc_vrcft_v1 = f"-OSCVRCFTV1{widget_id}-"
         self.gui_osc_vrcft_v2 = f"-OSCVRCFTV2{widget_id}-"
+        self.gui_use_module = f"-OSCUSEMODULE{widget_id}-"
 
     def get_layout(self):
         return [
             [
                 sg.Text("OSC Settings:", background_color="#242224"),
             ],
+            [
+                sg.Checkbox(
+                    "Use ETVR VRCFT Module",
+                    default=self.config.gui_use_module,
+                    key=self.gui_use_module,
+                    background_color="#424042",
+                    tooltip="Toggle between sending output to the ETVR VRCFT Module and the regular OSC port",
+                ),
+            ],
             [
                 sg.Checkbox(
                     "VRC Native Eyetracking",
diff --git a/EyeTrackApp/settings/modules/VRCFTSettingsModule.py b/EyeTrackApp/settings/modules/VRCFTSettingsModule.py
new file mode 100644
index 0000000..e74b136
--- /dev/null
+++ b/EyeTrackApp/settings/modules/VRCFTSettingsModule.py
@@ -0,0 +1,225 @@
+from typing import Iterable
+
+import PySimpleGUI as sg
+
+from pydantic import AfterValidator
+from typing_extensions import Annotated
+
+from settings.modules.BaseModule import BaseSettingsModule, BaseValidationModel
+from settings.modules.CommonFieldValidators import check_is_ip_address, try_convert_to_float
+
+
+class VRCFTSettingsModuleValidationModel(BaseValidationModel):
+    gui_VRCFTModulePort: int
+    gui_VRCFTModuleIPAddress: Annotated[str, AfterValidator(check_is_ip_address)]
+    gui_ShouldEmulateEyeWiden: bool
+    gui_ShouldEmulateEyeSquint: bool
+    gui_ShouldEmulateEyebrows: bool
+    gui_WidenThresholdV1_min: float
+    gui_WidenThresholdV1_max: float
+    gui_WidenThresholdV2_min: float
+    gui_WidenThresholdV2_max: float
+    gui_SqueezeThresholdV1_min: float
+    gui_SqueezeThresholdV1_max: float
+    gui_SqueezeThresholdV2_min: float
+    gui_SqueezeThresholdV2_max: float
+    gui_EyebrowThresholdRising: float
+    gui_EyebrowThresholdLowering: float
+    # this is a hack: PySimpleGUI hands the value over as a string, so we convert it
+    # here to keep both Pydantic and PySimpleGUI happy
+    gui_OutputMultiplier: Annotated[float, AfterValidator(try_convert_to_float)]
+
+
+class VRCFTSettingsModule(BaseSettingsModule):
+    def __init__(self, config, widget_id, **kwargs):
+        super().__init__(config=config, widget_id=widget_id, **kwargs)
+        self.validation_model = VRCFTSettingsModuleValidationModel
+        self.gui_VRCFTModulePort = f"-VRCFTSETTINGSPORTNUMBER{widget_id}"
+        self.gui_VRCFTModuleIPAddress = f"-VRCFTSETTINGSIPNUMBER{widget_id}"
+        self.gui_ShouldEmulateEyeWiden = f"-VRCFTSETTINGSEMULATEWIDEN{widget_id}"
+        self.gui_ShouldEmulateEyeSquint = f"-VRCFTSETTINGSEMULATEEYESQUINT{widget_id}"
+        self.gui_ShouldEmulateEyebrows = f"-VRCFTSETTINGSEMULATEEYEBROWS{widget_id}"
+        self.gui_WidenThresholdV1_min = f"-VRCFTSETTINGSWIDENTHRESHOLDV1MIN{widget_id}"
+        self.gui_WidenThresholdV1_max = f"-VRCFTSETTINGSWIDENTHRESHOLDV1MAX{widget_id}"
+        self.gui_WidenThresholdV2_min = f"-VRCFTSETTINGSWIDENTHRESHOLDV2MIN{widget_id}"
+        self.gui_WidenThresholdV2_max = f"-VRCFTSETTINGSWIDENTHRESHOLDV2MAX{widget_id}"
+        self.gui_SqueezeThresholdV1_min = f"-VRCFTSETTINGSSQUEEZETHRESHOLDV1MIN{widget_id}"
+        self.gui_SqueezeThresholdV1_max = f"-VRCFTSETTINGSSQUEEZETHRESHOLDV1MAX{widget_id}"
+        self.gui_SqueezeThresholdV2_min = f"-VRCFTSETTINGSSQUEEZETHRESHOLDV2MIN{widget_id}"
+        self.gui_SqueezeThresholdV2_max = f"-VRCFTSETTINGSSQUEEZETHRESHOLDV2MAX{widget_id}"
+        self.gui_EyebrowThresholdRising = f"-VRCFTSETTINGSEYEBROWTHRESHOLDRISING{widget_id}"
+        self.gui_EyebrowThresholdLowering = f"-VRCFTSETTINGSEYEBROWTHRESHOLDLOWERING{widget_id}"
+        self.gui_OutputMultiplier = f"-VRCFTSETTINGSOUTPUTMULTIPLIER{widget_id}"
+
+    def get_layout(self) -> Iterable:
+        return [
+            [
+                sg.Text("Emulation selection:", background_color="#242224"),
+            ],
+            [
+                sg.Checkbox(
+                    "Emulate Eye Widen",
+                    default=self.config.gui_ShouldEmulateEyeWiden,
+                    key=self.gui_ShouldEmulateEyeWiden,
+                    background_color="#424042",
+                ),
+                sg.Checkbox(
+                    "Emulate Eye Squint",
+                    default=self.config.gui_ShouldEmulateEyeSquint,
+                    key=self.gui_ShouldEmulateEyeSquint,
+                    background_color="#424042",
+                ),
+                sg.Checkbox(
+                    "Emulate Eyebrows",
+                    default=self.config.gui_ShouldEmulateEyebrows,
+                    key=self.gui_ShouldEmulateEyebrows,
+                    background_color="#424042",
+                ),
+            ],
+            [
+                sg.Text("General Module Settings:", background_color="#242224"),
+            ],
+            [
+                sg.Text("VRCFT Module listening IP", background_color="#242224"),
+                sg.InputText(
+                    self.config.gui_VRCFTModuleIPAddress,
+                    key=self.gui_VRCFTModuleIPAddress,
+                    size=(0, 10),
+                    tooltip="IP address on which the module should listen.",
+                ),
+                sg.Text("Port", background_color="#242224"),
+                sg.InputText(
+                    self.config.gui_VRCFTModulePort,
+                    key=self.gui_VRCFTModulePort,
+                    size=(0, 10),
+                    tooltip="UDP port on which the module should listen.",
+                ),
+            ],
+            [
+                sg.Text("VRCFT Module output multiplier", background_color="#242224"),
+                sg.InputText(
+                    self.config.gui_OutputMultiplier,
+                    key=self.gui_OutputMultiplier,
+                    size=(0, 10),
+                    tooltip="Output multiplier adjusts the output by the given 
amount", + ), + ], + [ + sg.Text("Eye Widen thresholds:", background_color="#424042"), + ], + [ + sg.Text("V1 Min:", background_color="#424042"), + sg.Slider( + range=(0, 1), + resolution=0.01, + default_value=self.config.gui_WidenThresholdV1_min, + orientation="h", + key=self.gui_WidenThresholdV1_min, + background_color="#424042", + tooltip="Controls the point at which the emulation should start for v1 params, reacts to openness", + ), + sg.Text("V1 Max:", background_color="#424042"), + sg.Slider( + range=(0, 2), + resolution=0.01, + default_value=self.config.gui_WidenThresholdV1_max, + orientation="h", + key=self.gui_WidenThresholdV1_max, + background_color="#424042", + tooltip="Controls the maximum range of widen emulation", + ), + ], + [ + sg.Text("V2 Min:", background_color="#424042"), + sg.Slider( + range=(0, 2), + resolution=0.01, + default_value=self.config.gui_WidenThresholdV2_min, + orientation="h", + key=self.gui_WidenThresholdV2_min, + background_color="#424042", + tooltip="Controls the point at which the emulation should start for v2 params, reacts to openness", + ), + sg.Text("V2 Max:", background_color="#424042"), + sg.Slider( + range=(0, 2), + resolution=0.01, + default_value=self.config.gui_WidenThresholdV2_max, + orientation="h", + key=self.gui_WidenThresholdV2_max, + background_color="#424042", + tooltip="Controls the maximum range of widen emulation", + ), + ], + [ + sg.Text("Eye Squeeze thresholds:", background_color="#424042"), + ], + [ + sg.Text("V1 Min:", background_color="#424042"), + sg.Slider( + range=(0, 1), + resolution=0.01, + default_value=self.config.gui_SqueezeThresholdV1_min, + orientation="h", + key=self.gui_SqueezeThresholdV1_min, + background_color="#424042", + tooltip="Controls the point at which the emulation should start for v1 params, reacts to openness", + ), + sg.Text("V1 Max:", background_color="#424042"), + sg.Slider( + range=(0, 2), + resolution=0.01, + default_value=self.config.gui_SqueezeThresholdV1_max, + orientation="h", + key=self.gui_SqueezeThresholdV1_max, + background_color="#424042", + tooltip="Controls the maximum range of squeeze emulation", + ), + ], + [ + sg.Text("V2 Min:", background_color="#424042"), + sg.Slider( + range=(0, 1), + resolution=0.01, + default_value=self.config.gui_SqueezeThresholdV2_min, + orientation="h", + key=self.gui_SqueezeThresholdV2_min, + background_color="#424042", + tooltip="Controls the point at which the emulation should start for v2 params, reacts to openness", + ), + sg.Text("V2 Max:", background_color="#424042"), + sg.Slider( + range=(-2, 0), + resolution=0.01, + default_value=self.config.gui_SqueezeThresholdV2_max, + orientation="h", + key=self.gui_SqueezeThresholdV2_max, + background_color="#424042", + tooltip="Controls the maximum range of squeeze emulation", + ), + ], + [ + sg.Text("Eyebrow emulation Thresholds:", background_color="#424042"), + ], + [ + sg.Text("Rising:", background_color="#424042"), + sg.Slider( + range=(0, 1), + resolution=0.01, + default_value=self.config.gui_EyebrowThresholdRising, + orientation="h", + key=self.gui_EyebrowThresholdRising, + background_color="#424042", + tooltip="Controls the point at which the emulation should start, reacts to openness", + ), + sg.Text("Lowering:", background_color="#424042"), + sg.Slider( + range=(0, 2), + resolution=0.01, + default_value=self.config.gui_EyebrowThresholdLowering, + orientation="h", + key=self.gui_EyebrowThresholdLowering, + background_color="#424042", + tooltip="Controls the maximum range of eyebrows emulation", + ), + ], + 
] diff --git a/EyeTrackApp/utils/eye_falloff.py b/EyeTrackApp/utils/eye_falloff.py index 0496aa9..041864e 100644 --- a/EyeTrackApp/utils/eye_falloff.py +++ b/EyeTrackApp/utils/eye_falloff.py @@ -1,12 +1,6 @@ import numpy as np -from enum import IntEnum - -class EyeId(IntEnum): - RIGHT = 0 - LEFT = 1 - BOTH = 2 - SETTINGS = 3 +from eye import EyeId def velocity_falloff(self, var, out_x, out_y): diff --git a/EyeTrackApp/utils/misc_utils.py b/EyeTrackApp/utils/misc_utils.py index 960c45a..670030e 100644 --- a/EyeTrackApp/utils/misc_utils.py +++ b/EyeTrackApp/utils/misc_utils.py @@ -7,15 +7,21 @@ from typing import Union is_nt = True if os.name == "nt" else False -def PlaySound(*args, **kwargs): pass + +def PlaySound(*args, **kwargs): + pass + + SND_FILENAME = SND_ASYNC = 1 if is_nt: import winsound + PlaySound = winsound.PlaySound SND_FILENAME = winsound.SND_FILENAME SND_ASYNC = winsound.SND_ASYNC + def clamp(x, low, high): return max(low, min(x, high)) @@ -42,7 +48,7 @@ class FastMedian: self.more, self.__median = None, None if inits is not None: [self + x for x in inits] - + # When full, push the median of current values to next list, then reset. def __add__(self, x): self.__median = None @@ -52,11 +58,11 @@ class FastMedian: self.more + self.__medianPrim(self.all) # It's going to be slower because of the re-allocation. self.all = [] # reset - + # If there is a next list, ask its median. Else, work it out locally. def median(self): return self.more.median() if self.more else self.__medianPrim(self.all) - + # Only recompute median if we do not know it already. def __medianPrim(self, all): if self.__median is None: diff --git a/conftest.py b/conftest.py index 2b78419..4a10c85 100644 --- a/conftest.py +++ b/conftest.py @@ -1,5 +1,5 @@ import pytest -from EyeTrackApp.config import ( +from config import ( EyeTrackConfig, EyeTrackCameraConfig, EyeTrackSettingsConfig, diff --git a/pyproject.toml b/pyproject.toml index 013958e..6c563b9 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -42,7 +42,7 @@ requires = ["poetry-core>=1.0.0"] build-backend = "poetry.core.masonry.api" [tool.pytest.ini_options] -addopts = "-ra -q --cov=. --cov-report html --cov-report term" +addopts = "-ra -q" pythonpath = "." 
python_files = [ "test_*.py" diff --git a/tests/__init__.py b/tests/__init__.py index e69de29..37b0d36 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -0,0 +1,20 @@ +import dataclasses + + +@dataclasses.dataclass +class EyeInfoMock: + x: int + y: int + blink: float + pupil_dilation: float + avg_velocity: float + + +class SimpleUDPClientMock: + def __init__(self, osc_address, port): + self.osc_address = osc_address + self.port = port + self.messages = [] + + def send_message(self, address, value): + self.messages.append((address, value)) diff --git a/tests/test_osc.py b/tests/test_osc.py deleted file mode 100644 index dc72d25..0000000 --- a/tests/test_osc.py +++ /dev/null @@ -1,314 +0,0 @@ -import dataclasses -import threading -from queue import Queue -from time import sleep -from unittest import mock - -import pytest - -from EyeTrackApp.osc import VRChatOSC - - -@dataclasses.dataclass -class EyeInfoMock: - x: int - y: int - blink: float - pupil_dilation: float - avg_velocity: float - - -class SimpleUDPClientMock: - def __init__(self, osc_address, port): - self.osc_address = osc_address - self.port = port - self.messages = [] - - def send_message(self, address, value): - self.messages.append((address, value)) - - -@pytest.mark.parametrize( - "eye_id,messages,expected_outcome", - [ - ( - 0, - [ - EyeInfoMock( - x=0, - y=0, - blink=1, - pupil_dilation=0, - avg_velocity=0, - ), - ], - [ - ("/avatar/parameters/v2/EyeX", 0), - ("/avatar/parameters/v2/EyeY", 0), - ("/avatar/parameters/v2/EyeLid", 1.0), - ], - ), - ( - 1, - [ - EyeInfoMock( - x=0, - y=0, - blink=1, - pupil_dilation=0, - avg_velocity=0, - ), - ], - [ - ("/avatar/parameters/v2/EyeX", 0), - ("/avatar/parameters/v2/EyeY", 0), - ("/avatar/parameters/v2/EyeLid", 1.0), - ], - ), - ], -) -def test_send_command_v2_params_single_eye(main_config_v2_params, eye_id, messages, expected_outcome): - with mock.patch("EyeTrackApp.osc.udp_client.SimpleUDPClient", SimpleUDPClientMock): - cancellation_event = threading.Event() - msg_queue = Queue() - client = VRChatOSC( - main_config=main_config_v2_params, - msg_queue=msg_queue, - cancellation_event=cancellation_event, - ) - - osc_thread = threading.Thread(target=client.run) - osc_thread.start() - - for message in messages: - sleep(0.01) - msg_queue.put((eye_id, message)) - - cancellation_event.set() - osc_thread.join() - - assert msg_queue.empty() - assert client.client.messages == expected_outcome - - -@pytest.mark.parametrize( - "eye_data,expected_outcome", - [ - ( - [ - ( - 0, - EyeInfoMock( - x=0, - y=0, - blink=1, - pupil_dilation=1, - avg_velocity=0, - ), - ), - ( - 1, - EyeInfoMock( - x=10, - y=5, - blink=0.5, - pupil_dilation=1, - avg_velocity=0, - ), - ), - ], - [ - ("/avatar/parameters/v2/EyeRightX", 0), - ("/avatar/parameters/v2/EyeRightY", 0), - ("/avatar/parameters/v2/EyeLidRight", 1.0), - ("/avatar/parameters/v2/EyeLeftX", 10), - ("/avatar/parameters/v2/EyeLeftY", 5), - ("/avatar/parameters/v2/EyeLidLeft", 0.5), - ], - ), - # binary blink - ( - [ - ( - 0, - EyeInfoMock( - x=0, - y=0, - blink=0, - pupil_dilation=1, - avg_velocity=0, - ), - ), - ( - 1, - EyeInfoMock( - x=10, - y=5, - blink=0, - pupil_dilation=1, - avg_velocity=0, - ), - ), - ], - [ - ("/avatar/parameters/v2/EyeLidRight", 0.0), - ("/avatar/parameters/v2/EyeLidRight", 0.0), - ("/avatar/parameters/v2/EyeLidRight", 0.0), - ("/avatar/parameters/v2/EyeLidRight", 0.0), - ("/avatar/parameters/v2/EyeRightX", 0), - ("/avatar/parameters/v2/EyeRightY", 0), - ("/avatar/parameters/v2/EyeLidRight", 0.0), - 
("/avatar/parameters/v2/EyeLidLeft", 0.0), - ("/avatar/parameters/v2/EyeLidLeft", 0.0), - ("/avatar/parameters/v2/EyeLidLeft", 0.0), - ("/avatar/parameters/v2/EyeLidLeft", 0.0), - ("/avatar/parameters/v2/EyeLeftX", 0), - ("/avatar/parameters/v2/EyeLeftY", 5), - ("/avatar/parameters/v2/EyeLidLeft", 0.0), - ], - ), - ], -) -def test_send_command_v2_params_dual_eye(main_config_v2_params, eye_data, expected_outcome): - main_config_v2_params.eye_display_id = 2 - - with mock.patch("EyeTrackApp.osc.udp_client.SimpleUDPClient", SimpleUDPClientMock): - cancellation_event = threading.Event() - msg_queue = Queue() - client = VRChatOSC( - main_config=main_config_v2_params, - msg_queue=msg_queue, - cancellation_event=cancellation_event, - ) - - osc_thread = threading.Thread(target=client.run) - osc_thread.start() - - for eye_id, message in eye_data: - sleep(0.01) - msg_queue.put((eye_id, message)) - - cancellation_event.set() - osc_thread.join() - - assert msg_queue.empty() - assert client.client.messages == expected_outcome - - -@pytest.mark.parametrize( - "eye_data,expected_outcome", - [ - ( - [ - ( - 0, - EyeInfoMock( - x=0, - y=0, - blink=0, - pupil_dilation=1, - avg_velocity=0, - ), - ), - ( - 1, - EyeInfoMock( - x=10, - y=5, - blink=0, - pupil_dilation=1, - avg_velocity=0, - ), - ), - ], - [ - ("/avatar/parameters/v2/EyeLidRight", 0.0), - ("/avatar/parameters/v2/EyeLidRight", 0.0), - ("/avatar/parameters/v2/EyeLidRight", 0.0), - ("/avatar/parameters/v2/EyeLidRight", 0.0), - ("/avatar/parameters/v2/EyeRightX", 0), - ("/avatar/parameters/v2/EyeRightY", 0), - ("/avatar/parameters/v2/EyeLidRight", 0.0), - ("/avatar/parameters/v2/EyeLidLeft", 0.0), - ("/avatar/parameters/v2/EyeLidLeft", 0.0), - ("/avatar/parameters/v2/EyeLidLeft", 0.0), - ("/avatar/parameters/v2/EyeLidLeft", 0.0), - ("/avatar/parameters/v2/EyeLidLeft", 0.0), - ("/avatar/parameters/v2/EyeLidRight", 0.0), - ("/avatar/parameters/v2/EyeLeftX", 0), - ("/avatar/parameters/v2/EyeLeftY", 5), - ("/avatar/parameters/v2/EyeLidLeft", 0.0), - ], - ), - ( - [ - ( - 0, - EyeInfoMock( - x=0, - y=0, - blink=0, - pupil_dilation=1, - avg_velocity=0, - ), - ), - ( - 1, - EyeInfoMock( - x=10, - y=5, - blink=0, - pupil_dilation=1, - avg_velocity=0, - ), - ), - ], - [ - # binary blink - ("/avatar/parameters/v2/EyeLidRight", 0.0), - ("/avatar/parameters/v2/EyeLidRight", 0.0), - ("/avatar/parameters/v2/EyeLidRight", 0.0), - ("/avatar/parameters/v2/EyeLidRight", 0.0), - ("/avatar/parameters/v2/EyeRightX", 0), - ("/avatar/parameters/v2/EyeRightY", 0), - # side falloff - ("/avatar/parameters/v2/EyeLidRight", 0.0), - ("/avatar/parameters/v2/EyeLidLeft", 0.0), - # binary blink again - ("/avatar/parameters/v2/EyeLidLeft", 0.0), - ("/avatar/parameters/v2/EyeLidLeft", 0.0), - ("/avatar/parameters/v2/EyeLidLeft", 0.0), - ("/avatar/parameters/v2/EyeLidLeft", 0.0), - ("/avatar/parameters/v2/EyeLidRight", 0.0), - ("/avatar/parameters/v2/EyeLeftX", 0), - ("/avatar/parameters/v2/EyeLeftY", 5), - ("/avatar/parameters/v2/EyeLidLeft", 0.0), - ], - ), - ], -) -def test_send_command_v2_params_eye_outer_side_falloff(main_config_v2_params, eye_data, expected_outcome): - main_config_v2_params.eye_display_id = 2 - main_config_v2_params.settings.gui_outer_side_falloff = True - - with mock.patch("EyeTrackApp.osc.udp_client.SimpleUDPClient", SimpleUDPClientMock): - cancellation_event = threading.Event() - msg_queue = Queue() - client = VRChatOSC( - main_config=main_config_v2_params, - msg_queue=msg_queue, - cancellation_event=cancellation_event, - ) - - osc_thread = 
threading.Thread(target=client.run)
-        osc_thread.start()
-
-        for eye_id, message in eye_data:
-            sleep(0.01)
-            msg_queue.put((eye_id, message))
-
-        cancellation_event.set()
-        osc_thread.join()
-
-        assert msg_queue.empty()
-        assert client.client.messages == expected_outcome
diff --git a/tests/test_osc_native_params.py b/tests/test_osc_native_params.py
new file mode 100644
index 0000000..a3fd26b
--- /dev/null
+++ b/tests/test_osc_native_params.py
@@ -0,0 +1,269 @@
+from queue import Queue
+from time import sleep
+from unittest import mock
+
+import pytest
+
+from osc.osc import OSCManager, OSCMessage
+from osc.OSCMessage import OSCMessageType
+from tests import EyeInfoMock, SimpleUDPClientMock
+
+
+@pytest.mark.parametrize(
+    "messages,expected_outcome",
+    [
+        (
+            [
+                OSCMessage(
+                    type=OSCMessageType.EYE_INFO,
+                    data=(
+                        0,
+                        EyeInfoMock(
+                            x=0,
+                            y=0,
+                            blink=1,
+                            pupil_dilation=0,
+                            avg_velocity=0,
+                        ),
+                    ),
+                ),
+            ],
+            [
+                ("/tracking/eye/EyesClosedAmount", 0.0),
+                ("/tracking/eye/LeftRightVec", [0.0, 0.0, 1.0, 0.0, 0.0, 1.0]),
+            ],
+        ),
+        (
+            [
+                OSCMessage(
+                    type=OSCMessageType.EYE_INFO,
+                    data=(
+                        1,
+                        EyeInfoMock(
+                            x=0,
+                            y=0,
+                            blink=1,
+                            pupil_dilation=0,
+                            avg_velocity=0,
+                        ),
+                    ),
+                ),
+            ],
+            [
+                ("/tracking/eye/EyesClosedAmount", 0.0),
+                ("/tracking/eye/LeftRightVec", [0.0, 0.0, 1.0, 0.0, 0.0, 1.0]),
+            ],
+        ),
+    ],
+)
+def test_send_command_native_params_single_eye(main_config_native_params, messages, expected_outcome):
+    with mock.patch("osc.osc.udp_client.SimpleUDPClient", SimpleUDPClientMock):
+        msg_queue = Queue()
+        client = OSCManager(
+            config=main_config_native_params,
+            osc_message_in_queue=msg_queue,
+        )
+
+        client.start()
+
+        for message in messages:
+            sleep(0.01)
+            msg_queue.put(message)
+        client.shutdown()
+
+        assert msg_queue.empty()
+        assert client.osc_sender.client.messages == expected_outcome
+
+
+@pytest.mark.parametrize(
+    "eye_data,expected_outcome",
+    [
+        (
+            [
+                OSCMessage(
+                    type=OSCMessageType.EYE_INFO,
+                    data=(
+                        0,
+                        EyeInfoMock(
+                            x=0,
+                            y=0,
+                            blink=1,
+                            pupil_dilation=1,
+                            avg_velocity=0,
+                        ),
+                    ),
+                ),
+                OSCMessage(
+                    type=OSCMessageType.EYE_INFO,
+                    data=(
+                        1,
+                        EyeInfoMock(
+                            x=10,
+                            y=5,
+                            blink=0.5,
+                            pupil_dilation=1,
+                            avg_velocity=0,
+                        ),
+                    ),
+                ),
+            ],
+            [
+                ("/tracking/eye/EyesClosedAmount", 0.0),
+                ("/tracking/eye/EyesClosedAmount", 0.0),
+                # we're expecting 621 as left_y here because that's the sentinel default
+                # used before the first state update with real data arrives, but that's ok:
+                # we'll be a good ten messages deep before anyone starts playing,
+                # and if they already are, they won't be able to notice it
+                ("/tracking/eye/LeftRightVec", [0.0, 621.0, 1.0, 0.0, 0.0, 1.0]),
+                ("/tracking/eye/EyesClosedAmount", 0.5),
+                ("/tracking/eye/EyesClosedAmount", 0.5),
+                ("/tracking/eye/LeftRightVec", [0.0, 5.0, 1.0, 0.0, 0.0, 1.0]),
+            ],
+        ),
+        # binary blink
+        (
+            [
+                OSCMessage(
+                    type=OSCMessageType.EYE_INFO,
+                    data=(
+                        0,
+                        EyeInfoMock(
+                            x=0,
+                            y=0,
+                            blink=0,
+                            pupil_dilation=1,
+                            avg_velocity=0,
+                        ),
+                    ),
+                ),
+                OSCMessage(
+                    type=OSCMessageType.EYE_INFO,
+                    data=(
+                        1,
+                        EyeInfoMock(
+                            x=10,
+                            y=5,
+                            blink=0,
+                            pupil_dilation=1,
+                            avg_velocity=0,
+                        ),
+                    ),
+                ),
+            ],
+            [
+                ("/tracking/eye/EyesClosedAmount", 1.0),
+                ("/tracking/eye/EyesClosedAmount", 1.0),
+                ("/tracking/eye/EyesClosedAmount", 1.0),
+                ("/tracking/eye/EyesClosedAmount", 1.0),
+                ("/tracking/eye/EyesClosedAmount", 1.0),
+                ("/tracking/eye/EyesClosedAmount", 1.0),
+                ("/tracking/eye/LeftRightVec", [0.0, 621.0, 1.0, 0.0, 0.0, 1.0]),
+                ("/tracking/eye/EyesClosedAmount", 1.0),
("/tracking/eye/EyesClosedAmount", 1.0), + ("/tracking/eye/EyesClosedAmount", 1.0), + ("/tracking/eye/EyesClosedAmount", 1.0), + ("/tracking/eye/EyesClosedAmount", 1.0), + ("/tracking/eye/EyesClosedAmount", 1.0), + ("/tracking/eye/LeftRightVec", [0.0, 5.0, 1.0, 0.0, 0.0, 1.0]), + ], + ), + ], +) +def test_send_command_native_params_dual_eye(main_config_native_params, eye_data, expected_outcome): + main_config_native_params.eye_display_id = 2 + + with mock.patch("EyeTrackApp.osc.osc.udp_client.SimpleUDPClient", SimpleUDPClientMock): + msg_queue = Queue() + client = OSCManager( + config=main_config_native_params, + osc_message_in_queue=msg_queue, + ) + + client.start() + + for message in eye_data: + sleep(0.01) + msg_queue.put(message) + client.shutdown() + + assert msg_queue.empty() + assert client.osc_sender.client.messages == expected_outcome + + +@pytest.mark.parametrize( + "eye_data,expected_outcome", + [ + ( + [ + OSCMessage( + type=OSCMessageType.EYE_INFO, + data=( + 0, + EyeInfoMock( + x=0, + y=0, + blink=0, + pupil_dilation=1, + avg_velocity=0, + ), + ), + ), + OSCMessage( + type=OSCMessageType.EYE_INFO, + data=( + 1, + EyeInfoMock( + x=10, + y=5, + blink=0, + pupil_dilation=1, + avg_velocity=0, + ), + ), + ), + ], + [ + ("/tracking/eye/EyesClosedAmount", 1.0), + ("/tracking/eye/EyesClosedAmount", 1.0), + ("/tracking/eye/EyesClosedAmount", 1.0), + ("/tracking/eye/EyesClosedAmount", 1.0), + ("/tracking/eye/EyesClosedAmount", 1.0), + ("/tracking/eye/EyesClosedAmount", 1.0), + ("/tracking/eye/LeftRightVec", [0.0, 621.0, 1.0, 0.0, 0.0, 1.0]), + ("/tracking/eye/EyesClosedAmount", 1.0), + ("/tracking/eye/EyesClosedAmount", 1.0), + ("/tracking/eye/EyesClosedAmount", 1.0), + ("/tracking/eye/EyesClosedAmount", 1.0), + ("/tracking/eye/EyesClosedAmount", 1.0), + ("/tracking/eye/EyesClosedAmount", 1.0), + ("/tracking/eye/EyesClosedAmount", 1.0), + ("/tracking/eye/EyesClosedAmount", 1.0), + ("/tracking/eye/EyesClosedAmount", 1.0), + ("/tracking/eye/EyesClosedAmount", 1.0), + ("/tracking/eye/EyesClosedAmount", 1.0), + ("/tracking/eye/EyesClosedAmount", 1.0), + ("/tracking/eye/LeftRightVec", [0.0, 5.0, 1.0, 0.0, 0.0, 1.0]), + ], + ), + ], +) +def test_send_command_native_params_eye_outer_side_falloff(main_config_native_params, eye_data, expected_outcome): + main_config_native_params.eye_display_id = 2 + main_config_native_params.settings.gui_outer_side_falloff = True + + with mock.patch("EyeTrackApp.osc.osc.udp_client.SimpleUDPClient", SimpleUDPClientMock): + msg_queue = Queue() + client = OSCManager( + config=main_config_native_params, + osc_message_in_queue=msg_queue, + ) + + client.start() + + for message in eye_data: + msg_queue.put(message) + sleep(1) + client.shutdown() + + assert msg_queue.empty() + assert client.osc_sender.client.messages == expected_outcome diff --git a/tests/test_osc_v1_params.py b/tests/test_osc_v1_params.py new file mode 100644 index 0000000..592d09c --- /dev/null +++ b/tests/test_osc_v1_params.py @@ -0,0 +1,274 @@ +from queue import Queue +from time import sleep +from unittest import mock + +import pytest + +from osc.osc import OSCManager, OSCMessage +from osc.OSCMessage import OSCMessageType +from tests import EyeInfoMock, SimpleUDPClientMock + + +@pytest.mark.parametrize( + "messages,expected_outcome", + [ + ( + [ + OSCMessage( + type=OSCMessageType.EYE_INFO, + data=( + 0, + EyeInfoMock( + x=0, + y=0, + blink=1, + pupil_dilation=0, + avg_velocity=0, + ), + ), + ), + ], + [ + ("/avatar/parameters/LeftEyeX", 0), + ("/avatar/parameters/RightEyeX", 0), + 
("/avatar/parameters/EyesY", 0), + ("/avatar/parameters/LeftEyeLidExpandedSqueeze", 1.0), + ("/avatar/parameters/RightEyeLidExpandedSqueeze", 1.0), + ], + ), + ( + [ + OSCMessage( + type=OSCMessageType.EYE_INFO, + data=( + 1, + EyeInfoMock( + x=0, + y=0, + blink=1, + pupil_dilation=0, + avg_velocity=0, + ), + ), + ), + ], + [ + ("/avatar/parameters/LeftEyeX", 0), + ("/avatar/parameters/RightEyeX", 0), + ("/avatar/parameters/EyesY", 0), + ("/avatar/parameters/LeftEyeLidExpandedSqueeze", 1.0), + ("/avatar/parameters/RightEyeLidExpandedSqueeze", 1.0), + ], + ), + ], +) +def test_send_command_v1_params_single_eye(main_config_v1_params, messages, expected_outcome): + with mock.patch("EyeTrackApp.osc.osc.udp_client.SimpleUDPClient", SimpleUDPClientMock): + msg_queue = Queue() + client = OSCManager( + config=main_config_v1_params, + osc_message_in_queue=msg_queue, + ) + + client.start() + + for message in messages: + sleep(0.01) + msg_queue.put(message) + client.shutdown() + + assert msg_queue.empty() + assert client.osc_sender.client.messages == expected_outcome + + +@pytest.mark.parametrize( + "eye_data,expected_outcome", + [ + ( + [ + OSCMessage( + type=OSCMessageType.EYE_INFO, + data=( + 0, + EyeInfoMock( + x=0, + y=0, + blink=1, + pupil_dilation=1, + avg_velocity=0, + ), + ), + ), + OSCMessage( + type=OSCMessageType.EYE_INFO, + data=( + 1, + EyeInfoMock( + x=10, + y=5, + blink=0.5, + pupil_dilation=1, + avg_velocity=0, + ), + ), + ), + ], + [ + ("/avatar/parameters/RightEyeLidExpandedSqueeze", 1.0), + ("/avatar/parameters/RightEyeX", 0), + ("/avatar/parameters/RightEyeLidExpandedSqueeze", 1.0), + ("/avatar/parameters/LeftEyeLidExpandedSqueeze", 0.5), + ("/avatar/parameters/LeftEyeX", 0), + ("/avatar/parameters/LeftEyeLidExpandedSqueeze", 0.5), + ("/avatar/parameters/EyesY", 2.5), + ], + ), + # binary blink + ( + [ + OSCMessage( + type=OSCMessageType.EYE_INFO, + data=( + 0, + EyeInfoMock( + x=0, + y=0, + blink=0, + pupil_dilation=1, + avg_velocity=0, + ), + ), + ), + OSCMessage( + type=OSCMessageType.EYE_INFO, + data=( + 1, + EyeInfoMock( + x=10, + y=5, + blink=0, + pupil_dilation=1, + avg_velocity=0, + ), + ), + ), + ], + [ + ("/avatar/parameters/RightEyeLidExpandedSqueeze", 0.0), + ("/avatar/parameters/RightEyeLidExpandedSqueeze", 0.0), + ("/avatar/parameters/RightEyeLidExpandedSqueeze", 0.0), + ("/avatar/parameters/RightEyeLidExpandedSqueeze", 0.0), + ("/avatar/parameters/RightEyeLidExpandedSqueeze", 0.0), + ("/avatar/parameters/RightEyeLidExpandedSqueeze", 0.0), + ("/avatar/parameters/RightEyeX", 0), + ("/avatar/parameters/RightEyeLidExpandedSqueeze", 0.0), + ("/avatar/parameters/LeftEyeLidExpandedSqueeze", 0.0), + ("/avatar/parameters/LeftEyeLidExpandedSqueeze", 0.0), + ("/avatar/parameters/LeftEyeLidExpandedSqueeze", 0.0), + ("/avatar/parameters/LeftEyeLidExpandedSqueeze", 0.0), + ("/avatar/parameters/LeftEyeLidExpandedSqueeze", 0.0), + ("/avatar/parameters/LeftEyeLidExpandedSqueeze", 0.0), + ("/avatar/parameters/LeftEyeX", 0), + ("/avatar/parameters/LeftEyeLidExpandedSqueeze", 0.0), + ("/avatar/parameters/EyesY", 2.5), + ], + ), + ], +) +def test_send_command_v1_params_dual_eye(main_config_v1_params, eye_data, expected_outcome): + main_config_v1_params.eye_display_id = 2 + + with mock.patch("EyeTrackApp.osc.osc.udp_client.SimpleUDPClient", SimpleUDPClientMock): + msg_queue = Queue() + client = OSCManager( + config=main_config_v1_params, + osc_message_in_queue=msg_queue, + ) + + client.start() + + for message in eye_data: + sleep(0.01) + msg_queue.put(message) + client.shutdown() + + 
+        assert msg_queue.empty()
+        assert client.osc_sender.client.messages == expected_outcome
+
+
+@pytest.mark.parametrize(
+    "eye_data,expected_outcome",
+    [
+        (
+            [
+                OSCMessage(
+                    type=OSCMessageType.EYE_INFO,
+                    data=(
+                        0,
+                        EyeInfoMock(
+                            x=0,
+                            y=0,
+                            blink=0,
+                            pupil_dilation=1,
+                            avg_velocity=0,
+                        ),
+                    ),
+                ),
+                OSCMessage(
+                    type=OSCMessageType.EYE_INFO,
+                    data=(
+                        1,
+                        EyeInfoMock(
+                            x=10,
+                            y=5,
+                            blink=0,
+                            pupil_dilation=1,
+                            avg_velocity=0,
+                        ),
+                    ),
+                ),
+            ],
+            [
+                ("/avatar/parameters/RightEyeLidExpandedSqueeze", 0.0),
+                ("/avatar/parameters/RightEyeLidExpandedSqueeze", 0.0),
+                ("/avatar/parameters/RightEyeLidExpandedSqueeze", 0.0),
+                ("/avatar/parameters/RightEyeLidExpandedSqueeze", 0.0),
+                ("/avatar/parameters/RightEyeLidExpandedSqueeze", 0.0),
+                ("/avatar/parameters/RightEyeLidExpandedSqueeze", 0.0),
+                ("/avatar/parameters/RightEyeX", 0),
+                ("/avatar/parameters/RightEyeLidExpandedSqueeze", 0.0),
+                ("/avatar/parameters/LeftEyeLidExpandedSqueeze", 0.0),
+                ("/avatar/parameters/LeftEyeLidExpandedSqueeze", 0.0),
+                ("/avatar/parameters/LeftEyeLidExpandedSqueeze", 0.0),
+                ("/avatar/parameters/LeftEyeLidExpandedSqueeze", 0.0),
+                ("/avatar/parameters/LeftEyeLidExpandedSqueeze", 0.0),
+                ("/avatar/parameters/LeftEyeLidExpandedSqueeze", 0.0),
+                ("/avatar/parameters/RightEyeLidExpandedSqueeze", 0.0),
+                ("/avatar/parameters/LeftEyeLidExpandedSqueeze", 0.0),
+                ("/avatar/parameters/LeftEyeX", 0),
+                ("/avatar/parameters/LeftEyeLidExpandedSqueeze", 0.0),
+                ("/avatar/parameters/EyesY", 2.5),
+            ],
+        ),
+    ],
+)
+def test_send_command_v1_params_eye_outer_side_falloff(main_config_v1_params, eye_data, expected_outcome):
+    main_config_v1_params.eye_display_id = 2
+    main_config_v1_params.settings.gui_outer_side_falloff = True
+
+    with mock.patch("osc.osc.udp_client.SimpleUDPClient", SimpleUDPClientMock):
+        msg_queue = Queue()
+        client = OSCManager(
+            config=main_config_v1_params,
+            osc_message_in_queue=msg_queue,
+        )
+
+        client.start()
+
+        for message in eye_data:
+            msg_queue.put(message)
+        sleep(1)
+        client.shutdown()
+
+        assert msg_queue.empty()
+        assert client.osc_sender.client.messages == expected_outcome
diff --git a/tests/test_osc_v2_params.py b/tests/test_osc_v2_params.py
new file mode 100644
index 0000000..a3e3dfe
--- /dev/null
+++ b/tests/test_osc_v2_params.py
@@ -0,0 +1,275 @@
+from queue import Queue
+from time import sleep
+from unittest import mock
+
+import pytest
+
+from osc.osc import OSCManager, OSCMessage
+from osc.OSCMessage import OSCMessageType
+from tests import EyeInfoMock, SimpleUDPClientMock
+
+
+@pytest.mark.parametrize(
+    "messages,expected_outcome",
+    [
+        (
+            [
+                OSCMessage(
+                    type=OSCMessageType.EYE_INFO,
+                    data=(
+                        0,
+                        EyeInfoMock(
+                            x=0,
+                            y=0,
+                            blink=1,
+                            pupil_dilation=0,
+                            avg_velocity=0,
+                        ),
+                    ),
+                ),
+            ],
+            [
+                ("/avatar/parameters/v2/EyeX", 0),
+                ("/avatar/parameters/v2/EyeY", 0),
+                ("/avatar/parameters/v2/EyeLid", 1.0),
+                ("/avatar/parameters/v2/EyeLid", 1.0),
+            ],
+        ),
+        (
+            [
+                OSCMessage(
+                    type=OSCMessageType.EYE_INFO,
+                    data=(
+                        1,
+                        EyeInfoMock(
+                            x=0,
+                            y=0,
+                            blink=1,
+                            pupil_dilation=0,
+                            avg_velocity=0,
+                        ),
+                    ),
+                ),
+            ],
+            [
+                ("/avatar/parameters/v2/EyeX", 0),
+                ("/avatar/parameters/v2/EyeY", 0),
+                ("/avatar/parameters/v2/EyeLid", 1.0),
+                ("/avatar/parameters/v2/EyeLid", 1.0),
+            ],
+        ),
+    ],
+)
+def test_send_command_v2_params_single_eye(main_config_v2_params, messages, expected_outcome):
+    with mock.patch("osc.osc.udp_client.SimpleUDPClient", SimpleUDPClientMock):
+        msg_queue = Queue()
+        client = OSCManager(
+            config=main_config_v2_params,
+            osc_message_in_queue=msg_queue,
+        )
+
+        client.start()
+
+        for message in messages:
+            sleep(0.01)
+            msg_queue.put(message)
+        client.shutdown()
+
+        assert msg_queue.empty()
+        assert client.osc_sender.client.messages == expected_outcome
+
+
+@pytest.mark.parametrize(
+    "eye_data,expected_outcome",
+    [
+        (
+            [
+                OSCMessage(
+                    type=OSCMessageType.EYE_INFO,
+                    data=(
+                        0,
+                        EyeInfoMock(
+                            x=0,
+                            y=0,
+                            blink=1,
+                            pupil_dilation=1,
+                            avg_velocity=0,
+                        ),
+                    ),
+                ),
+                OSCMessage(
+                    type=OSCMessageType.EYE_INFO,
+                    data=(
+                        1,
+                        EyeInfoMock(
+                            x=10,
+                            y=5,
+                            blink=0.5,
+                            pupil_dilation=1,
+                            avg_velocity=0,
+                        ),
+                    ),
+                ),
+            ],
+            [
+                ("/avatar/parameters/v2/EyeLidRight", 1.0),
+                ("/avatar/parameters/v2/EyeRightX", 0),
+                ("/avatar/parameters/v2/EyeRightY", 0),
+                ("/avatar/parameters/v2/EyeLidRight", 1.0),
+                ("/avatar/parameters/v2/EyeLidLeft", 0.5),
+                ("/avatar/parameters/v2/EyeLeftX", 10),
+                ("/avatar/parameters/v2/EyeLeftY", 5),
+                ("/avatar/parameters/v2/EyeLidLeft", 0.5),
+            ],
+        ),
+        # binary blink
+        (
+            [
+                OSCMessage(
+                    type=OSCMessageType.EYE_INFO,
+                    data=(
+                        0,
+                        EyeInfoMock(
+                            x=0,
+                            y=0,
+                            blink=0,
+                            pupil_dilation=1,
+                            avg_velocity=0,
+                        ),
+                    ),
+                ),
+                OSCMessage(
+                    type=OSCMessageType.EYE_INFO,
+                    data=(
+                        1,
+                        EyeInfoMock(
+                            x=10,
+                            y=5,
+                            blink=0,
+                            pupil_dilation=1,
+                            avg_velocity=0,
+                        ),
+                    ),
+                ),
+            ],
+            [
+                ("/avatar/parameters/v2/EyeLidRight", 0.0),
+                ("/avatar/parameters/v2/EyeLidRight", 0.0),
+                ("/avatar/parameters/v2/EyeLidRight", 0.0),
+                ("/avatar/parameters/v2/EyeLidRight", 0.0),
+                ("/avatar/parameters/v2/EyeLidRight", 0.0),
+                ("/avatar/parameters/v2/EyeLidRight", 0.0),
+                ("/avatar/parameters/v2/EyeRightX", 0),
+                ("/avatar/parameters/v2/EyeRightY", 0),
+                ("/avatar/parameters/v2/EyeLidRight", 0.0),
+                ("/avatar/parameters/v2/EyeLidLeft", 0.0),
+                ("/avatar/parameters/v2/EyeLidLeft", 0.0),
+                ("/avatar/parameters/v2/EyeLidLeft", 0.0),
+                ("/avatar/parameters/v2/EyeLidLeft", 0.0),
+                ("/avatar/parameters/v2/EyeLidLeft", 0.0),
+                ("/avatar/parameters/v2/EyeLidLeft", 0.0),
+                ("/avatar/parameters/v2/EyeLeftX", 10),
+                ("/avatar/parameters/v2/EyeLeftY", 5),
+                ("/avatar/parameters/v2/EyeLidLeft", 0.0),
+            ],
+        ),
+    ],
+)
+def test_send_command_v2_params_dual_eye(main_config_v2_params, eye_data, expected_outcome):
+    main_config_v2_params.eye_display_id = 2
+
+    with mock.patch("osc.osc.udp_client.SimpleUDPClient", SimpleUDPClientMock):
+        msg_queue = Queue()
+        client = OSCManager(
+            config=main_config_v2_params,
+            osc_message_in_queue=msg_queue,
+        )
+
+        client.start()
+
+        for message in eye_data:
+            sleep(0.01)
+            msg_queue.put(message)
+        client.shutdown()
+
+        assert msg_queue.empty()
+        assert client.osc_sender.client.messages == expected_outcome
+
+
+@pytest.mark.parametrize(
+    "eye_data,expected_outcome",
+    [
+        (
+            [
+                OSCMessage(
+                    type=OSCMessageType.EYE_INFO,
+                    data=(
+                        0,
+                        EyeInfoMock(
+                            x=0,
+                            y=0,
+                            blink=0,
+                            pupil_dilation=1,
+                            avg_velocity=0,
+                        ),
+                    ),
+                ),
+                OSCMessage(
+                    type=OSCMessageType.EYE_INFO,
+                    data=(
+                        1,
+                        EyeInfoMock(
+                            x=10,
+                            y=5,
+                            blink=0,
+                            pupil_dilation=1,
+                            avg_velocity=0,
+                        ),
+                    ),
+                ),
+            ],
+            [
+                ("/avatar/parameters/v2/EyeLidRight", 0.0),
+                ("/avatar/parameters/v2/EyeLidRight", 0.0),
+                ("/avatar/parameters/v2/EyeLidRight", 0.0),
+                ("/avatar/parameters/v2/EyeLidRight", 0.0),
+                ("/avatar/parameters/v2/EyeLidRight", 0.0),
+                ("/avatar/parameters/v2/EyeLidRight", 0.0),
+                ("/avatar/parameters/v2/EyeRightX", 0),
+                ("/avatar/parameters/v2/EyeRightY", 0),
+                ("/avatar/parameters/v2/EyeLidRight", 0.0),
("/avatar/parameters/v2/EyeLidLeft", 0.0), + ("/avatar/parameters/v2/EyeLidLeft", 0.0), + ("/avatar/parameters/v2/EyeLidLeft", 0.0), + ("/avatar/parameters/v2/EyeLidLeft", 0.0), + ("/avatar/parameters/v2/EyeLidLeft", 0.0), + ("/avatar/parameters/v2/EyeLidLeft", 0.0), + ("/avatar/parameters/v2/EyeLidRight", 0.0), + ("/avatar/parameters/v2/EyeLidLeft", 0.0), + ("/avatar/parameters/v2/EyeLeftX", 10), + ("/avatar/parameters/v2/EyeLeftY", 5), + ("/avatar/parameters/v2/EyeLidLeft", 0.0), + ], + ), + ], +) +def test_send_command_v2_params_eye_outer_side_falloff(main_config_v2_params, eye_data, expected_outcome): + main_config_v2_params.eye_display_id = 2 + main_config_v2_params.settings.gui_outer_side_falloff = True + + with mock.patch("EyeTrackApp.osc.osc.udp_client.SimpleUDPClient", SimpleUDPClientMock): + msg_queue = Queue() + client = OSCManager( + config=main_config_v2_params, + osc_message_in_queue=msg_queue, + ) + + client.start() + + for message in eye_data: + msg_queue.put(message) + sleep(1) + client.shutdown() + + assert msg_queue.empty() + assert client.osc_sender.client.messages == expected_outcome