From 88b3e1e7835ec75c5f0cbff9158eae835d125e86 Mon Sep 17 00:00:00 2001
From: Prohurtz <48768484+RedHawk989@users.noreply.github.com>
Date: Mon, 29 Jul 2024 19:37:10 -0500
Subject: [PATCH] feat: clean up files, remove unneeded code, tune LEAP filter

---
 EyeTrackApp/AHSF.py                  | 212 ++++----------------------
 EyeTrackApp/camera_widget.py         |  20 +--
 EyeTrackApp/eye_processor.py         |   6 +-
 EyeTrackApp/eyetrackapp.py           |   9 +-
 EyeTrackApp/haar_surround_feature.py |  11 +-
 EyeTrackApp/leap.py                  | 147 +------------------
 6 files changed, 41 insertions(+), 364 deletions(-)

diff --git a/EyeTrackApp/AHSF.py b/EyeTrackApp/AHSF.py
index 6a2514b..efb2965 100644
--- a/EyeTrackApp/AHSF.py
+++ b/EyeTrackApp/AHSF.py
@@ -266,8 +266,6 @@ def filter_light(img_gray, img_blur, tau):
 def pupil_detector_haar(img_gray, params):
     frame_num = 0
-    mu_inner0 = 50
-    mu_outer0 = 200
     img_down = cv2.resize(
         img_gray,
         (
@@ -545,8 +543,6 @@ def coarse_detection(img_gray, params):
     inner_sum = cv2.add(in_p00, in_p11)
     cv2.subtract(inner_sum, in_p01, dst=inner_sum)
     cv2.subtract(inner_sum, in_p10, dst=inner_sum)
-    # inner_sum=inner_sum.astype(np.float64)
-    # inner_sum = cv2.transpose(inner_sum)

     # memo: Multiplication, etc. can be faster by self-assignment, but care must be taken because array initialization is required.
     # https://stackoverflow.com/questions/71204415/opencv-python-fastest-way-to-multiply-pixel-value
@@ -558,26 +554,13 @@ def coarse_detection(img_gray, params):
     response_value = np.empty(outer_sum.shape, dtype=np.float64)
     inout_rect_sum = mu_outer_rect2.copy()
     inout_rect_mul = mu_outer_rect.copy()
-    # outer_sum_rect = cv2.multiply(outer_sum, mu_outer_rect,None,-1.0)
-    # inner_sum_rect = cv2.multiply(inner_sum, mu_outer_rect)
+
     cv2.multiply(inner_sum_f, inout_rect_mul, inout_rect_mul)
     cv2.multiply(outer_sum_f, inout_rect_sum, inout_rect_sum)
     cv2.add(inout_rect_mul, inout_rect_sum, dst=inout_rect_sum)
-    # inout_rect_sum = inout_rect_mul[:,:,0]+inout_rect_mul[:,:,1]
-    # inner_sum_wh = cv2.multiply(inner_sum_f,wh_in_arr,None,kf)
+
     cv2.multiply(inner_sum_f, wh_in_arr, inner_sum_f, kf)
-    # inout_sum = np.empty((*inner_sum.shape,2),dtype=np.float64)
-    # inout_sum[:,:,0]=inner_sum
-    # inout_sum[:,:,1]=outer_sum
-    # # outer_sum_rect = cv2.multiply(outer_sum, mu_outer_rect,None,-1.0)
-    # # inner_sum_rect = cv2.multiply(inner_sum, mu_outer_rect)
-    # inout_rect_mul = cv2.multiply(inout_sum[:,:,0],mu_outer_rect2[:,:,0])
-    # inout_rect_sum=cv2.multiply(inout_sum[:,:,1],mu_outer_rect2[:,:,1])
-    # inout_rect_sum=cv2.add(inout_rect_mul,inout_rect_sum)
-    # # inout_rect_sum = inout_rect_mul[:,:,0]+inout_rect_mul[:,:,1]
-    # inner_sum_wh = cv2.multiply(inout_sum[:,:,0],wh_in_arr,None,kf)
-    # response_value2= outer_sum_rect+inner_sum_rect+inner_sum_wh
-    # response_value = inout_rect_sum + inner_sum_wh
+
     cv2.add(inout_rect_sum, inner_sum_f, dst=response_value)

     # mu_outer_left+(kf*inner_sum*wh_in_arr)
@@ -601,14 +584,6 @@ def coarse_detection(img_gray, params):
             pupil_rect_coarse = rec_in
             outer_rect_coarse = rec_o

-    rectlist2 = []
-    response2 = []
-
-    # print()
-    # print("rectlist: ", rectlist)
-    # rect_suppression(rectlist, response, rectlist2, response2)
-    # rect_suppression(rectlist2, response2, rectlist, response)
-
     return pupil_rect_coarse, outer_rect_coarse, max_response_coarse, mu_inner, mu_outer

@@ -740,6 +715,7 @@ def draw_coarse(img_bgr, pupil_rect, outer_rect, max_response, color):
         put_number(img_bgr, max_response, center, color)


+
 def rect_suppression(rectlist, response, rectlist_out, response_out):
     for i in range(len(rectlist)):
         flag_intersect = False
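All of the rectangle sums kept in the hunks above come from a single integral image, so each candidate rectangle costs four array lookups regardless of its size. A minimal sketch of that box-sum trick, outside the patch; the helper name box_sum is illustrative, not from AHSF.py:

import cv2
import numpy as np

def box_sum(ii, x, y, w, h):
    # Sum of pixels in rect (x, y, w, h) from an integral image `ii`
    # (cv2.integral pads by one row/column, so these indices line up).
    return float(ii[y + h, x + w] - ii[y, x + w] - ii[y + h, x] + ii[y, x])

img = np.random.randint(0, 255, (60, 80), dtype=np.uint8)
ii = cv2.integral(img, sdepth=cv2.CV_32S)
inner = box_sum(ii, 20, 15, 10, 10)           # inner rect sum
outer = box_sum(ii, 15, 10, 20, 20) - inner   # surround = outer rect minus inner

The in_p00/in_p01/in_p10/in_p11 terms in coarse_detection are exactly these four corner lookups, vectorized over every candidate position at once.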
@@ -771,139 +747,6 @@ def put_number(img_bgr, number, position, color):
     )


-if __name__ == "__main__":
-    if not print_enable:
-
-        def print(*args, **kwargs):
-            pass
-
-    logger.info(this_file_basename)
-    if save_logfile:
-        logger.info("log path: {}".format(logfilename))
-    logger.info("alg ver: {}".format(alg_ver))
-    if benchmark_flag:
-        logger.info("loops: {}".format(loop_num))
-
-    if not input_is_webcam:
-        if not os.path.exists(VideoCapture_SRC) or not os.path.isfile(VideoCapture_SRC):
-            raise FileNotFoundError(VideoCapture_SRC)
-        logger.info("input video name: {}".format(os.path.basename(VideoCapture_SRC)))
-    else:
-        logger.info("input video: {}".format(VideoCapture_SRC))
-
-    cap = cv2.VideoCapture(VideoCapture_SRC)
-    if not cap.isOpened():
-        raise IOError("Error opening video stream or file")
-    if not input_is_webcam:
-        logger.info(
-            "video info: size:{}x{} fps:{} frames:{} total:{:.3f} sec".format(
-                int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
-                int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)),
-                cap.get(cv2.CAP_PROP_FPS),
-                int(cap.get(cv2.CAP_PROP_FRAME_COUNT)),
-                cap.get(cv2.CAP_PROP_FRAME_COUNT) / cap.get(cv2.CAP_PROP_FPS),
-            )
-        )
-    else:
-        logger.info(
-            "video info: size:{}x{} fps:{}".format(
-                int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
-                int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)),
-                cap.get(cv2.CAP_PROP_FPS),
-            )
-        )
-    # video writer
-    if save_video:
-        # mp4
-        video_wr = video_wr(
-            output_video_path,
-            cv2.VideoWriter_fourcc(*"x264"),
-            cap.get(cv2.CAP_PROP_FPS),
-            (
-                int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
-                int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)),
-            ),
-        )
-        # avi
-        # video_wr = video_wr(output_video_path, cv2.VideoWriter_fourcc(*"XVID"), cap.get(cv2.CAP_PROP_FPS),
-        #                     (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))))
-
-    cap.release()
-
-    # Load an image
-    image_path = "image (1).png"
-    if not os.path.exists(image_path) or not os.path.isfile(image_path):
-        cap = cv2.VideoCapture(VideoCapture_SRC)
-        time.sleep(0.1)
-        _, img = cap.read()
-        cap.release()
-    else:
-        img = cv2.imread(image_path)
-    # img = cv2.resize(img, (100, 100))
-    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
-
-    # If using uncropped source
-    # # make the image 100x100
-    # # img_gray = cv2.resize(img_gray, (100, 100))
-    # # remove 20 pixels from the right
-    # img_gray = img_gray[:, :-200]
-    # # remove 30 pixels from the bottom
-    # img_gray = img_gray[:-50, :]
-
-    # Define the parameters for pupil detection
-    # Default
-    # params = {
-    #     "ratio_downsample": 0.5,
-    #     "use_init_rect": False,
-    #     "mu_outer": 200,  # approximately how much pupil should be in the outer rect
-    #     "mu_inner": 50,  # approximately how much pupil should be in the inner rect
-    #     "ratio_outer": 1,  # rectangular ratio. 1 means square (LIKE REGULAR HSF)
-    #     "kf": 5,  # noise filter. May lose tracking if too high (or even never start)
-    #     "width_min": 50,  # Minimum width of the pupil
-    #     "width_max": 100,  # Maximum width of the pupil
-    #     "wh_step": 1,  # Pupil width and height step search size
-    #     "xy_step": 5,  # Kernel movement step search size
-    #     "roi": (0, 0, img_gray.shape[1], img_gray.shape[0]),
-    #     "init_rect_flag": False,
-    #     "init_rect": (0, 0, img_gray.shape[1], img_gray.shape[0]),
-    # }
-
-    logger.info("params: {}".format(params))
-
-    # Call the pupil_detector_haar function
-    (
-        pupil_rect_coarse,
-        outer_rect_coarse,
-        max_response_coarse,
-        mu_inner,
-        mu_outer,
-    ) = coarse_detection(img_gray, params)
-
-    # show the coarse detection
-
-    image_brg = cv2.cvtColor(img_gray, cv2.COLOR_GRAY2BGR)
-    # show the pupil_rect_coarse
-    cv2.rectangle(
-        image_brg,
-        (pupil_rect_coarse[0], pupil_rect_coarse[1]),
-        (
-            pupil_rect_coarse[0] + pupil_rect_coarse[2],
-            pupil_rect_coarse[1] + pupil_rect_coarse[3],
-        ),
-        (0, 255, 0),
-        2,
-    )
-    # upscale it to 200 x 200
-    # show the img
-    cv2.imshow("pppp", image_brg)
-    cv2.waitKey(10)
-    cv2.destroyAllWindows()
-
-    timedict = {"to_gray": [], "coarse": [], "fine": [], "total_cv": []}
-    # For measuring total processing time
-    main_start_time = timeit.default_timer()
-
-
 def External_Run_AHSF(frame_gray):

     average_color = np.mean(frame_gray)
@@ -932,7 +775,7 @@ def External_Run_AHSF(frame_gray):
         "use_init_rect": False,
         "mu_outer": 200,  # approximately how much pupil should be in the outer rect
         "mu_inner": 50,  # approximately how much pupil should be in the inner rect
-        "ratio_outer": 1.0,  # rectangular ratio. 1 means square (LIKE REGULAR HSF)
+        "ratio_outer": 0.9,  # rectangular ratio. 1 means square (LIKE REGULAR HSF)
         "kf": 2,  # noise filter. May lose tracking if too high (or even never start)
         "width_min": 16,  # Minimum width of the pupil
         "width_max": 50,  # Maximum width of the pupil
@@ -954,36 +797,35 @@ def External_Run_AHSF(frame_gray):
     except TypeError:
         # print("[WARN] AHSF NoneType Error")
         return frame_gray, frame_gray, 0, 0, 0
-    # print(ellipse_rect)
-    # Pupil_rect, Outer_rect, max_response, mu_inner, mu_outer = coarse_detection(frame_gray, params)
     image_brg = frame_gray  # cv2.cvtColor(frame_gray, cv2.COLOR_GRAY2BGR)  # show
-    # cv2.rectangle(
-    #     image_brg,
-    #     (pupil_rect_coarse[0], pupil_rect_coarse[1]),
-    #     (
-    #         pupil_rect_coarse[0] + pupil_rect_coarse[2],
-    #         pupil_rect_coarse[1] + pupil_rect_coarse[3],
-    #     ),
-    #     (0, 255, 0),
-    #     2,
-    # )
-    cv2.rectangle(
-        frame_gray,
-        (outer_rect_coarse[0], outer_rect_coarse[1]),
-        (
-            outer_rect_coarse[0] + outer_rect_coarse[2],
-            outer_rect_coarse[1] + outer_rect_coarse[3],
-        ),
-        (255, 0, 0),
-        1,
-    )

     x_center = outer_rect_coarse[0] + outer_rect_coarse[2] / 2
     y_center = outer_rect_coarse[1] + outer_rect_coarse[3] / 2
     x, y, width, height = outer_rect_coarse
     cv2.circle(frame_gray, (int(x_center), int(y_center)), 2, (255, 255, 255), -1)
+    thickness = 1
+    cv2.rectangle(
+        frame_gray,
+        (pupil_rect_coarse[0], pupil_rect_coarse[1]),
+        (pupil_rect_coarse[0] + pupil_rect_coarse[2], pupil_rect_coarse[1] + pupil_rect_coarse[3]),
+        (255, 255, 255),
+        thickness,
+    )
+    cv2.rectangle(
+        frame_gray,
+        (outer_rect_coarse[0], outer_rect_coarse[1]),
+        (outer_rect_coarse[0] + outer_rect_coarse[2], outer_rect_coarse[1] + outer_rect_coarse[3]),
+        (255, 255, 255),
+        thickness,
+    )
+
+    center = (pupil_rect_coarse[0] + pupil_rect_coarse[2] // 2, pupil_rect_coarse[1] + pupil_rect_coarse[3] // 2)
+    # cv2.drawMarker(frame_gray, center, (255, 255, 255), cv2.MARKER_CROSS, 20, thickness)
+
+
     # Calculate the major and minor diameters
     major_diameter = math.sqrt(width**2 + height**2)
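External_Run_AHSF ends by deriving pupil metrics from the detected rectangles; the sqrt(w**2 + h**2) kept above is the rectangle's diagonal, used as the "major diameter". A self-contained sketch of that geometry, outside the patch; the helper name rect_metrics is illustrative, not from AHSF.py:

import math

def rect_metrics(rect):
    # rect is (x, y, w, h); returns the center and a diagonal-based
    # "major diameter", matching the patched math.sqrt(w**2 + h**2).
    x, y, w, h = rect
    center = (x + w / 2, y + h / 2)
    major = math.sqrt(w**2 + h**2)  # rect diagonal, an upper bound on pupil width
    return center, major

center, major = rect_metrics((24, 18, 32, 30))  # -> ((40.0, 33.0), ~43.9)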
diff --git a/EyeTrackApp/camera_widget.py b/EyeTrackApp/camera_widget.py
index 010cc03..9a446d4 100644
--- a/EyeTrackApp/camera_widget.py
+++ b/EyeTrackApp/camera_widget.py
@@ -691,13 +691,7 @@ class CameraWidget:
             elif eye_info.info_type == EyeInfoOrigin.FAILURE:
                 graph.update(background_color="red")

-            # Relay information to OSC
-            # if eye_info.info_type != EyeInfoOrigin.FAILURE:
-            #     osc_message = OSCMessage(
-            #         type=OSCMessageType.EYE_INFO,
-            #         data=(self.eye_id, eye_info),
-            #     )
-            #     self.osc_queue.put(osc_message)
+
         except Empty:
             pass

@@ -709,17 +703,5 @@ class CameraWidget:
             window[self.gui_output_graph].update(visible=False)

             (maybe_image, eye_info) = self.image_queue.get(block=False)
-
-            if (
-                eye_info.info_type != EyeInfoOrigin.FAILURE
-            ):
-
-                # Relay information to OSC
-                if eye_info.info_type != EyeInfoOrigin.FAILURE:
-                    osc_message = OSCMessage(
-                        type=OSCMessageType.EYE_INFO,
-                        data=(self.eye_id, eye_info),
-                    )
-                    self.osc_queue.put(osc_message)
         except Empty:
             pass
\ No newline at end of file
diff --git a/EyeTrackApp/eye_processor.py b/EyeTrackApp/eye_processor.py
index 0c3e35c..e814b05 100644
--- a/EyeTrackApp/eye_processor.py
+++ b/EyeTrackApp/eye_processor.py
@@ -298,7 +298,6 @@ class EyeProcessor:

     def UPDATE(self):
-
         if self.settings.gui_BLINK:
             self.eyeopen = BLINK(self)

@@ -344,10 +343,9 @@ class EyeProcessor:
             else:
                 self.prev_y_list.append(self.out_y)

-            # print(abs(self.eyeopen - self.past_blink))
+
             blink_vec = min(abs(self.eyeopen - self.past_blink), 1)  # clamp to 1

-            # if blink_vec >= 0.2:
             if blink_vec >= 0.18:
                 # self.out_x = sum(self.prev_x_list) / len(self.prev_x_list)
                 self.out_y = sum(self.prev_y_list) / len(self.prev_y_list)
@@ -383,7 +381,7 @@ class EyeProcessor:
                 ),
             )

-        # if self.settings.gui_RANSACBLINK and self.eyeopen == 0.0:
+        # if self.settings.gui_RANSACBLINK and self.eyeopen == 0.0:  why is this here
         #     pass
         # else:
         #     self.eyeopen = 0.81
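For context on the eye_processor.py change: the whole mechanism is clamping the frame-to-frame eyelid delta and, once it crosses 0.18, holding gaze at a rolling average so blinks do not yank the output. A standalone sketch of that idea; BlinkGate and its members are illustrative names, not the EyeProcessor API:

from collections import deque

class BlinkGate:
    def __init__(self, threshold=0.18, window=10):
        self.threshold = threshold
        self.prev_y = deque(maxlen=window)  # recent vertical gaze samples
        self.past_blink = 1.0               # last eyelid-openness value

    def update(self, y, eyeopen):
        blink_vec = min(abs(eyeopen - self.past_blink), 1.0)  # clamp to 1
        self.past_blink = eyeopen
        if blink_vec >= self.threshold and self.prev_y:
            # Eyelid moved sharply: freeze gaze at the recent average.
            return sum(self.prev_y) / len(self.prev_y)
        self.prev_y.append(y)
        return y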
diff --git a/EyeTrackApp/eyetrackapp.py b/EyeTrackApp/eyetrackapp.py
index 84523ca..7965558 100644
--- a/EyeTrackApp/eyetrackapp.py
+++ b/EyeTrackApp/eyetrackapp.py
@@ -292,7 +292,7 @@ def main():
     ]

     # Create the window
-    windowg = sg.Window('No GUI', layoutg, background_color="#242224")
+    windowg = sg.Window('ETVR', layoutg, background_color="#242224", size=(200, 80))  # icon=resource_path("Images/logo.ico") adds CPU usage

     # Event loop
     while True:
@@ -306,9 +306,8 @@ def main():
                 config.settings.gui_disable_gui = False
                 config.save()
                 print('GUI Enabled')
-
                 break

-    # Close the window
+
     windowg.close()

@@ -428,8 +427,8 @@ def main():
             settings[0].stop()
             settings[1].stop()
             settings[2].stop()
-            # window[key_manager.RIGHT_EYE_NAME].update(visible=False)
-            # window[key_manager.LEFT_EYE_NAME].update(visible=False)
+            window[key_manager.RIGHT_EYE_NAME].update(visible=False)
+            window[key_manager.LEFT_EYE_NAME].update(visible=False)
             window[key_manager.SETTINGS_NAME].update(visible=False)
             window[key_manager.VRCFT_MODULE_SETTINGS_NAME].update(visible=False)
             window[key_manager.ALGO_SETTINGS_NAME].update(visible=False)
diff --git a/EyeTrackApp/haar_surround_feature.py b/EyeTrackApp/haar_surround_feature.py
index cab2821..3378df2 100644
--- a/EyeTrackApp/haar_surround_feature.py
+++ b/EyeTrackApp/haar_surround_feature.py
@@ -647,14 +647,10 @@ class HSF_cls(object):

         radius, pad, step, hsf = self.cvparam.get_rpsh()

-        # For measuring processing time of image processing
-        cv_start_time = timeit.default_timer()
         gray_frame = frame
-        self.timedict["to_gray"].append(timeit.default_timer() - cv_start_time)

         # Calculate the integral image of the frame
-        int_start_time = timeit.default_timer()
         (
             frame_pad,
             frame_int,
@@ -680,7 +676,6 @@ class HSF_cls(object):
         # BORDER_CONSTANT is faster than BORDER_REPLICATE. There seems to be almost no negative impact when BORDER_CONSTANT is used.
         cv2.copyMakeBorder(gray_frame, pad, pad, pad, pad, cv2.BORDER_CONSTANT, dst=frame_pad)
         cv2.integral(frame_pad, sum=frame_int, sdepth=cv2.CV_32S)
-        self.timedict["int_img"].append(timeit.default_timer() - int_start_time)

         # Convolve the feature with the integral image
         conv_int_start_time = timeit.default_timer()
@@ -709,7 +704,7 @@ class HSF_cls(object):
         # Pseudo-visualization of HSF
         # cv2.normalize(cv2.filter2D(cv2.filter2D(frame_pad, cv2.CV_64F, hsf.get_kernel()[hsf.get_kernel().shape[0]//2,:].reshape(1,-1), borderType=cv2.BORDER_CONSTANT), cv2.CV_64F, hsf.get_kernel()[:,hsf.get_kernel().shape[1]//2].reshape(-1,1), borderType=cv2.BORDER_CONSTANT),None,0,255,cv2.NORM_MINMAX,dtype=cv2.CV_8U))
-        self.timedict["conv_int"].append(timeit.default_timer() - conv_int_start_time)
+        # self.timedict["conv_int"].append(timeit.default_timer() - conv_int_start_time)

         crop_start_time = timeit.default_timer()
         # Define the center point and radius
@@ -798,8 +793,8 @@ class HSF_cls(object):
         # https://stackoverflow.com/questions/42771110/fastest-way-to-left-cycle-a-numpy-array-like-pop-push-for-a-queue
         cv_end_time = timeit.default_timer()
-        self.timedict["crop"].append(cv_end_time - crop_start_time)
-        self.timedict["total_cv"].append(cv_end_time - cv_start_time)
+        # self.timedict["crop"].append(cv_end_time - crop_start_time)
+        # self.timedict["total_cv"].append(cv_end_time - cv_start_time)
         # if calc_print_enable:
         # the lower the response, the better the likelihood of there being a pupil. You can adjust the radius and steps accordingly
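The haar_surround_feature.py hunks above strip per-frame timeit bookkeeping out of the hot path. If that instrumentation is ever needed again, an opt-in pattern like the following sketch (illustrative, not part of the patch) keeps the cost near zero while disabled:

import timeit
from contextlib import contextmanager

PROFILE = False  # flip on only when measuring
timedict = {"int_img": [], "conv_int": [], "crop": []}

@contextmanager
def stage(name):
    # No-op unless profiling is enabled, so the hot path stays clean.
    if not PROFILE:
        yield
        return
    start = timeit.default_timer()
    yield
    timedict[name].append(timeit.default_timer() - start)

# usage:
# with stage("int_img"):
#     cv2.integral(frame_pad, sum=frame_int, sdepth=cv2.CV_32S)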
diff --git a/EyeTrackApp/leap.py b/EyeTrackApp/leap.py
index d2e559b..4b36e47 100644
--- a/EyeTrackApp/leap.py
+++ b/EyeTrackApp/leap.py
@@ -59,13 +59,10 @@ def run_model(input_queue, output_queue, session):
         # Add the channel and batch dimensions
         gray_img = np.expand_dims(gray_img, axis=0)  # Add channel dimension
         img_np = np.expand_dims(gray_img, axis=0)  # Add batch dimension
-        # img_np = np.transpose(img_np, (2, 0, 1))
-        # img_np = np.expand_dims(img_np, axis=0)
+
         ort_inputs = {session.get_inputs()[0].name: img_np}
         pre_landmark = session.run(None, ort_inputs)

-        # pre_landmark = pre_landmark[1]
-        # pre_landmark = np.reshape(pre_landmark, (12, 2))
         pre_landmark = np.reshape(pre_landmark, (-1, 2))
         output_queue.put((frame, pre_landmark))

@@ -77,47 +74,6 @@ def run_onnx_model(queues, session, frame):
             break


-def calculate_velocity_vectors(old_matrix, current_matrix, time_difference):
-    # Check if both matrices have the same number of points
-    if len(old_matrix) != len(current_matrix):
-        raise ValueError("Both matrices must have the same number of points")
-
-    indices = [1, 2, 4, 5]
-    velocity_vectors = []
-
-    for i in indices:
-        old_y = old_matrix[i]
-        current_y = current_matrix[i]
-
-        # Calculate displacement and velocity using the y-values
-        displacement_y = current_y - old_y
-        velocity_y = displacement_y / time_difference
-
-        velocity_vectors.append(velocity_y)
-
-    # Calculate the total velocity as the mean of the absolute values of the velocity vectors
-    total_velocity = np.mean([abs(velocity) for velocity in velocity_vectors])
-
-    return total_velocity
-
-
-def calculate_polygon_area(points):
-    indices = [1, 2, 4, 5]
-    selected_points = [points[i] for i in indices]
-
-    selected_points.append(selected_points[0])
-
-    # Use the Shoelace formula to calculate the area
-    n = len(selected_points)
-    area = 0
-    for i in range(n - 1):
-        x1, y1 = selected_points[i]
-        x2, y2 = selected_points[i + 1]
-        area += x1 * y2 - x2 * y1
-
-    # Return the absolute value of half the computed area
-    return abs(area)
-
-
 def to_numpy(tensor):
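For orientation, run_model above is a plain ONNX Runtime call: a single-channel NCHW tensor in, a flat landmark vector out, reshaped to (N, 2). A minimal standalone sketch of that pattern; the 112x112 input size is an assumption for illustration, not taken from the model:

import numpy as np
import onnxruntime

session = onnxruntime.InferenceSession("LEAP071024_E16.onnx", providers=["CPUExecutionProvider"])
input_name = session.get_inputs()[0].name

h = w = 112  # assumed input size; the real model dictates this
gray = np.random.rand(h, w).astype(np.float32)  # stand-in for a preprocessed frame
img_np = gray[np.newaxis, np.newaxis, :, :]     # add channel dim, then batch dim

pre_landmark = session.run(None, {input_name: img_np})
pre_landmark = np.reshape(pre_landmark, (-1, 2))  # (N, 2) landmark coordinates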
@@ -135,12 +91,6 @@ class LEAP_C(object):
         self.queue_max_size = 1  # Optimize for best CPU usage, Memory, and Latency. A maxsize is needed to not create a potential memory leak.
         self.model_path = resource_path(models / 'LEAP071024_E16.onnx')
-        self.low_priority = (
-            False  # set process priority to low (may cause issues when unfocusing? reported by one, not reproducible)
-        )
-        self.low_priority = (
-            True  # set process priority to low (may cause issues when unfocusing? reported by one, not reproducible)
-        )
         self.print_fps = False
         # Init variables
         self.frames = 0
@@ -160,35 +110,7 @@ class LEAP_C(object):
         opts.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL
         opts.optimized_model_filepath = ""

-        if self.low_priority:
-            try:
-                process = psutil.Process(os.getpid())  # set process priority to low
-                try:
-                    sys.getwindowsversion()
-                except AttributeError:
-                    process.nice(0)  # UNIX: 0 low 10 high
-                    process.nice()
-                else:
-                    process.nice(psutil.BELOW_NORMAL_PRIORITY_CLASS)  # Windows
-                    process.nice()
-            except:
-                pass
-            # See https://learn.microsoft.com/en-us/windows/win32/api/processthreadsapi/nf-processthreadsapi-getpriorityclass#return-value for values
-        else:
-            pass
-            # process = psutil.Process(os.getpid())  # set process priority to low
-            # try:
-            #     sys.getwindowsversion()
-            # except AttributeError:
-            #     process.nice(10)  # UNIX: 0 low 10 high
-            # else:
-            #     process.nice(psutil.HIGH_PRIORITY_CLASS)  # Windows
-            # See https://learn.microsoft.com/en-us/windows/win32/api/processthreadsapi/nf-processthreadsapi-getpriorityclass#return-value for values
-
-        min_cutoff = 0.1
-        beta = 15.0
-        self.one_euro_filter = OneEuroFilter(np.random.rand(12, 2), min_cutoff=min_cutoff, beta=beta)
-        self.one_euro_filter_float = OneEuroFilter(np.random.rand(1, 2), min_cutoff=5, beta=0.007)
+        self.one_euro_filter_float = OneEuroFilter(np.random.rand(1, 2), min_cutoff=0.0004, beta=0.9)  # previously min_cutoff=5, beta=0.007
         self.dmax = 0
         self.dmin = 0
         self.openlist = []
@@ -238,12 +160,6 @@ class LEAP_C(object):

             cv2.circle(imgvis, (int(x), int(y)), 1, (0, 0, 255), -1)

-        # x1, y1 = pre_landmark[1]
-        # x2, y2 = pre_landmark[3]
-
-        # x3, y3 = pre_landmark[4]
-        # x4, y4 = pre_landmark[2]
-
         d1 = math.dist(pre_landmark[1], pre_landmark[3])
         # a fancier method could be used, taking into account the relative size of the landmarks so that
         # weirdness can be accounted for better
@@ -266,9 +182,7 @@ class LEAP_C(object):
             # with this we can use it as the "open state" (0.7, for expanded squeeze)

             # weighted values to shift slightly to max value
-            normal_open = np.percentile(self.openlist, 70)  # ((sum(self.maxlist) / len(self.maxlist)) * 0.90 + max(self.openlist) * 0.10) / (
-            #     0.95 + 0.15
-            # )
+            normal_open = np.percentile(self.openlist, 70)
         except:
             normal_open = 0.8

@@ -281,29 +195,9 @@ class LEAP_C(object):
         try:
             per = (d - normal_open) / (np.percentile(self.openlist, 1.7) - normal_open)
-
-            # oldper = (d - max(self.openlist)) / (
-            #     min(self.openlist) - max(self.openlist)
-            # )  # TODO: remove when testing is done
-
             per = 1 - per
             per = per - 0.2  # allow for eye widen? might require a more legit math way, but this makes sense.
-            per = min(per, 1.0)  # clamp to 1.0 max
-            per = max(per, 0.0)  # clamp to 0.0 min
-
-            area = calculate_polygon_area(pre_landmark)
-            # if self.old_per > area:
-            #     self.delta_per_neg = self.old_per - area
-            #     print(area, self.delta_per_neg)
-
-            # self.old_per = area
-
-            # self.old_per = area
-
-
-            # print(self.delta_per_neg)
-            # if self.delta_per_neg > 0.06:
-            #     per = 0.0
+            per = np.clip(per, 0.0, 1.0)
         except:
             per = 0.8

@@ -312,23 +206,6 @@ class LEAP_C(object):
         x = pre_landmark[6][0]
         y = pre_landmark[6][1]

-        current_time = time.time()  # Get the current time
-        # Extract current matrix
-        current_matrix = [point[1] for point in pre_landmark]
-
-        # Calculate time difference
-        if self.previous_time is not None:
-            time_difference = current_time - self.previous_time
-
-            # Calculate velocity vectors if we have old data
-            if self.old_matrix is not None:
-                self.total_velocity_new = calculate_velocity_vectors(self.old_matrix, current_matrix, time_difference)
-
-        # Update old matrix and previous time for the next iteration
-        self.old_matrix = [point[1] for point in pre_landmark]
-        self.previous_time = current_time
-
         self.last_lid = per
         calib_array = np.array([per, per]).reshape(1, 2)

@@ -339,22 +216,6 @@ class LEAP_C(object):
         if per <= 0.25:  # TODO: EXPOSE AS SETTING
             per = 0.0

-        # print(per)
-
-        self.total_velocity_avg = (self.total_velocity_new + self.total_velocity_old) / 2
-        self.total_velocity_old = self.total_velocity_new
-
-        # print(self.total_velocity_avg)
-        # if self.last_lid == 0.0:
-        #     if self.total_velocity_avg > 1:
-        #         pass
-        #     else:
-        #         per = 0.0
-
-        # if self.total_velocity_avg > 1.5:
-        #     per = 0.0
-        # this should be tuned; I could make this auto-calibrate based on the min of a list of per values.
-
         return imgvis, float(x), float(y), per

     imgvis = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
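Taken together, the retained leap.py openness path is: normalize the lid distance d against a rolling 70th-percentile "open" baseline and a 1.7th-percentile "closed" baseline, clip to [0, 1], apply a dead zone, then smooth with the retuned one-euro filter (min_cutoff=0.0004, beta=0.9, which damps slow drift while still passing fast blinks). A standalone sketch of the normalization, mirroring the math kept above; the helper name is illustrative:

import numpy as np

def eyelid_openness(d, openlist):
    # Normalize a raw lid distance `d` against the rolling history `openlist`,
    # mirroring the retained leap.py math: 70th percentile ~ "open",
    # 1.7th percentile ~ "closed".
    normal_open = np.percentile(openlist, 70)
    closed = np.percentile(openlist, 1.7)
    per = (d - normal_open) / (closed - normal_open)
    per = 1 - per
    per -= 0.2                       # headroom so a widened eye can exceed "normal open"
    per = np.clip(per, 0.0, 1.0)
    return 0.0 if per <= 0.25 else per  # dead zone: treat near-closed as closed

history = list(np.random.uniform(2.0, 6.0, 300))  # stand-in lid distances
print(eyelid_openness(5.5, history))

In leap.py the result is then pushed through one_euro_filter_float before being returned, so the dead zone and the filter together decide how quickly a blink registers.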