feat: more work on AHSF and LEAP revert

Prohurtz 2024-07-10 16:27:58 -05:00
parent 1006d8644b
commit be7e04535d
2 changed files with 23 additions and 75 deletions

View File

@@ -911,13 +911,14 @@ def External_Run_AHSF(frame_gray):
frame_clear_resize = frame_gray.copy()
org_frame_gray = frame_gray.copy()
-frame_gray = cv2.resize(frame_gray, (130, 130)) # TODO TEST FIXED RESIZE
+# frame_gray = cv2.resize(frame_gray, (130, 130)) # TODO TEST FIXED RESIZE
# Get the dimensions of the rotated image
height, width = frame_gray.shape
# Determine the size of the square background (choose the larger dimension)
max_dimension = max(height, width)
min_dimension = min(height, width)
# Create a square background with the average color
square_background = np.full((max_dimension, max_dimension), average_color, dtype=np.uint8)
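For context, a minimal sketch of the square-padding step these lines set up, assuming the frame is centered on a canvas filled with its average intensity; the helper name, the mean-based average_color, and the centering offsets are illustrative, not taken from this file.

import numpy as np

def pad_to_square(frame_gray: np.ndarray) -> np.ndarray:
    # Pad a grayscale frame onto a square canvas filled with its average intensity.
    height, width = frame_gray.shape
    max_dimension = max(height, width)
    average_color = int(frame_gray.mean())  # assumed definition of the average color
    square_background = np.full((max_dimension, max_dimension), average_color, dtype=np.uint8)
    y_offset = (max_dimension - height) // 2
    x_offset = (max_dimension - width) // 2
    square_background[y_offset:y_offset + height, x_offset:x_offset + width] = frame_gray
    return square_background

# Example: a 240x320 frame becomes a 320x320 square canvas.
square = pad_to_square(np.zeros((240, 320), dtype=np.uint8))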
@@ -934,15 +935,16 @@ def External_Run_AHSF(frame_gray):
wh_step = max((int(max_dimension / 80)),1) # TODO: FINETUNE VALUES
xy_step = max(int(max_dimension / 24), 1) # TODO: FINETUNE VALUES
# print(xy_step, max_dimension)
-wmax = max_dimension * 0.4 # likes to crash, might need more tuning still
-wmin = max_dimension * 0.02
+wmax = max_dimension * 0.5 # likes to crash, might need more tuning still
+wmin = max_dimension * 0.1
params = {
"ratio_downsample": 0.5,
"use_init_rect": False,
"mu_outer": 200, # aprroximatly how much pupil should be in the outer rect
"mu_inner": 70, # aprroximatly how much pupil should be in the inner rect
"mu_outer": 100, # aprroximatly how much pupil should be in the outer rect
"mu_inner": 50, # aprroximatly how much pupil should be in the inner rect
"ratio_outer": 1.0, # rectangular ratio. 1 means square (LIKE REGULAR HSF)
"kf": 1.5, # noise filter. May lose tracking if too high (or even never start)
"width_min": wmin, # Minimum width of the pupil
@@ -975,29 +977,28 @@ def External_Run_AHSF(frame_gray):
y_center = outer_rect_coarse[1] + outer_rect_coarse[3] / 2
x, y, width, height = outer_rect_coarse
scale_x = orig_width / 130
scale_y = orig_height / 130
-x_center = int(x_center * scale_x)
-y_center = int(y_center * scale_y)
+x_center = int(x_center)
+y_center = int(y_center)
# print(x_center, y_center, scale_x, orig_height, orig_width)
cv2.circle(org_frame_gray, (int(x_center), int(y_center)), 2, (255, 255, 255), -1)
-pupil_rect_coarse_0 = int(pupil_rect_coarse[0] * scale_x)
-pupil_rect_coarse_2 = int(pupil_rect_coarse[2] * scale_x)
+pupil_rect_coarse_0 = int(pupil_rect_coarse[0]) # added for test, now redundant
+pupil_rect_coarse_2 = int(pupil_rect_coarse[2])
-pupil_rect_coarse_1 = int(pupil_rect_coarse[1] * scale_x)
-pupil_rect_coarse_3 = int(pupil_rect_coarse[3] * scale_x)
+pupil_rect_coarse_1 = int(pupil_rect_coarse[1])
+pupil_rect_coarse_3 = int(pupil_rect_coarse[3])
-outer_rect_coarse_0 = int(outer_rect_coarse[0] * scale_x)
-outer_rect_coarse_2 = int(outer_rect_coarse[2] * scale_x)
+outer_rect_coarse_0 = int(outer_rect_coarse[0])
+outer_rect_coarse_2 = int(outer_rect_coarse[2])
-outer_rect_coarse_1 = int(outer_rect_coarse[1] * scale_x)
-outer_rect_coarse_3 = int(outer_rect_coarse[3] * scale_x)
+outer_rect_coarse_1 = int(outer_rect_coarse[1])
+outer_rect_coarse_3 = int(outer_rect_coarse[3])
cv2.rectangle(
@@ -1027,7 +1028,7 @@ def External_Run_AHSF(frame_gray):
minor_diameter = min(width, height)
average_diameter = (major_diameter + minor_diameter) / 2
# print(orig_width, orig_height)
# print(average_diameter)
-return org_frame_gray, frame_clear_resize, x_center, y_center, abs(width - height)
+return org_frame_gray, frame_clear_resize, x_center, y_center, int(average_diameter -15)
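For reference, a sketch of the resize-and-rescale pattern the removed lines above implemented: detect on a fixed 130x130 working frame, then map the result back to the original resolution. Note the removed code multiplied the rectangle's y values by scale_x, while this sketch uses scale_y for them; function and variable names here are illustrative, not the project's.

def map_back_to_original(x_center, y_center, rect, orig_width, orig_height, work_size=130):
    # Map a pupil center and an (x, y, w, h) rectangle found on the work_size x work_size
    # working frame back into the coordinates of the original frame.
    scale_x = orig_width / work_size
    scale_y = orig_height / work_size
    x_full = int(x_center * scale_x)
    y_full = int(y_center * scale_y)
    rx, ry, rw, rh = rect
    rect_full = (int(rx * scale_x), int(ry * scale_y), int(rw * scale_x), int(rh * scale_y))
    return x_full, y_full, rect_full

# Example: a detection at (65, 65) on the 130x130 working frame of a 320x240 source.
x_full, y_full, rect_full = map_back_to_original(65, 65, (50, 40, 30, 28), 320, 240)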

View File

@@ -22,58 +22,6 @@
LEAP by: Prohurtz
Algorithm App Implementation By: Prohurtz
Copyright (c) 2023 EyeTrackVR <3
-LICENSE: GNU GPLv3
-------------------------------------------------------------------------------------------------------
-"""
"""
DATASET CONTRIBUTIONS:
@article{ICML2021DS,
title={TEyeD: Over 20 million real-world eye images with Pupil, Eyelid, and Iris 2D and 3D Segmentations, 2D and 3D Landmarks, 3D Eyeball, Gaze Vector, and Eye Movement Types},
author={Fuhl, Wolfgang and Kasneci, Gjergji and Kasneci, Enkelejda},
journal={arXiv preprint arXiv:2102.02115},
year={2021}
}
@inproceedings{tonsen2016labelled,
title={Labelled pupils in the wild: a dataset for studying pupil detection in unconstrained environments},
author={Tonsen, Marc and Zhang, Xucong and Sugano, Yusuke and Bulling, Andreas},
booktitle={Proceedings of the ninth biennial ACM symposium on eye tracking research \& applications},
pages={139--142},
year={2016}
}
+ Custom user annotated and submitted data.
"""
"""
------------------------------------------------------------------------------------------------------
-[EyeTrackVR ASCII-art logo]
-LEAP by: Prohurtz
-Algorithm App Implementation By: Prohurtz
-Copyright (c) 2023 EyeTrackVR <3
LICENSE: GNU GPLv3
------------------------------------------------------------------------------------------------------
@@ -128,9 +76,9 @@ class LEAP_C(object):
def __init__(self):
onnxruntime.disable_telemetry_events()
# Config variables
-self.num_threads = 4 # Number of python threads to use (using ~1 more than needed to achieve wanted fps yields lower cpu usage)
+self.num_threads = 1 # Number of python threads to use (using ~1 more than needed to achieve wanted fps yields lower cpu usage)
self.queue_max_size = 1 # Optimize for best CPU usage, Memory, and Latency. A maxsize is needed to not create a potential memory leak.
-self.model_path = resource_path(models / 'LEAP062120246epoch.onnx')
+self.model_path = resource_path(models / 'LEAP071024_E16.onnx')
self.low_priority = (
False # set process priority to low (may cause issues when unfocusing? reported by one, not reproducable)
@@ -153,7 +101,7 @@ class LEAP_C(object):
opts = onnxruntime.SessionOptions()
opts.inter_op_num_threads = 4
-opts.intra_op_num_threads = 1
+opts.intra_op_num_threads = 1 # big perf hit
opts.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL
opts.optimized_model_filepath = ""
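As a minimal sketch of how session options like the ones above are typically consumed, assuming CPU inference; the model path and provider choice here are illustrative, not taken from this commit (the app resolves its model path via resource_path()).

import onnxruntime

opts = onnxruntime.SessionOptions()
opts.inter_op_num_threads = 4  # threads available to run independent graph nodes
opts.intra_op_num_threads = 1  # threads used inside a single operator
opts.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL
session = onnxruntime.InferenceSession(
    "LEAP071024_E16.onnx",  # illustrative path
    sess_options=opts,
    providers=["CPUExecutionProvider"],
)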
@@ -328,7 +276,6 @@ class External_Run_LEAP(object):
self.algo = LEAP_C()
def run(self, current_image_gray, current_image_gray_clean):
self.algo.current_image_gray = current_image_gray
-self.algo.current_image_gray_clean = current_image_gray_clean
img, x, y, per = self.algo.leap_run()