fix: latency with LEAP

Prohurtz 2024-06-12 18:33:46 -05:00
parent ba42576498
commit 7dae037dd5
3 changed files with 86 additions and 184 deletions


@@ -218,7 +218,7 @@ class Camera:
self.fl.pop(0)
self.fl.append(self.fps)
self.fps = sum(self.fl) / len(self.fl)
# self.bps = image.nbytes
if should_push:
self.push_image_to_queue(image, frame_number, self.fps)
except:
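
The hunk above smooths the measured FPS with a fixed-size rolling list (self.fl). A minimal standalone sketch of that smoothing, using hypothetical names (fps_window, smooth_fps):

from collections import deque

fps_window = deque(maxlen=30)  # fixed-size rolling window, like self.fl above

def smooth_fps(new_fps: float) -> float:
    fps_window.append(new_fps)  # the oldest sample drops out automatically
    return sum(fps_window) / len(fps_window)

for sample in (58.0, 61.0, 60.0):
    print(round(smooth_fps(sample), 2))
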


@@ -28,7 +28,6 @@ LICENSE: GNU GPLv3
"""
# LEAP = Lightweight Eyelid And Pupil
import os
os.environ["OMP_NUM_THREADS"] = "1"
import onnxruntime
import numpy as np
@@ -43,40 +42,14 @@ import sys
from utils.misc_utils import resource_path
from pathlib import Path
frames = 0
models = Path("Models")
def run_model(input_queue, output_queue, session):
while True:
frame = input_queue.get()
if frame is None:
break
img_np = np.array(frame)
img_np = img_np.astype(np.float32) / 255.0
gray_img = 0.299 * img_np[:, :, 0] + 0.587 * img_np[:, :, 1] + 0.114 * img_np[:, :, 2]
# Add the channel and batch dimensions
gray_img = np.expand_dims(gray_img, axis=0) # Add channel dimension
img_np = np.expand_dims(gray_img, axis=0) # Add batch dimension
# img_np = np.transpose(img_np, (2, 0, 1))
# img_np = np.expand_dims(img_np, axis=0)
ort_inputs = {session.get_inputs()[0].name: img_np}
pre_landmark = session.run(None, ort_inputs)
# pre_landmark = pre_landmark[1]
# pre_landmark = np.reshape(pre_landmark, (12, 2))
pre_landmark = np.reshape(pre_landmark, (-1, 2))
output_queue.put((frame, pre_landmark))
class LEAP_C(object):
def __init__(self):
onnxruntime.disable_telemetry_events()
# Config variables
self.num_threads = 4 # Number of python threads to use (using ~1 more than needed to achieve wanted fps yields lower cpu usage)
self.num_threads = 1 # Number of python threads to use (using ~1 more than needed to achieve wanted fps yields lower cpu usage)
self.queue_max_size = 1 # Optimize for best CPU usage, Memory, and Latency. A maxsize is needed to not create a potential memory leak.
self.model_path = resource_path(models / 'LEAP053024.onnx')
@@ -100,7 +73,7 @@ class LEAP_C(object):
self.queues.append(self.queue)
opts = onnxruntime.SessionOptions()
opts.inter_op_num_threads = 1
opts.inter_op_num_threads = 4
opts.intra_op_num_threads = 1
opts.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL
opts.optimized_model_filepath = ""
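
For context on the two thread settings swapped in this hunk: intra_op_num_threads controls parallelism inside a single operator, while inter_op_num_threads controls how many operators may run concurrently (it mainly matters when the session's execution mode runs operators in parallel). A minimal sketch of building a CPU session with such options; "model.onnx" is a placeholder path, not a file from this repo:

import onnxruntime

opts = onnxruntime.SessionOptions()
opts.intra_op_num_threads = 1  # threads used inside one operator
opts.inter_op_num_threads = 4  # operators that may run concurrently
opts.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL

session = onnxruntime.InferenceSession(
    "model.onnx", sess_options=opts, providers=["CPUExecutionProvider"]
)
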
@@ -144,25 +117,28 @@ class LEAP_C(object):
self.ort_session1 = onnxruntime.InferenceSession(self.model_path, opts, providers=["CPUExecutionProvider"])
threads = []
for i in range(self.num_threads):
thread = threading.Thread(
target=run_model,
args=(self.queues[i], self.output_queue, self.ort_session1),
name=f"Thread {i}",
)
threads.append(thread)
thread.start()
def run_model(output_queue, session, frame):
img_np = np.array(frame)
img_np = img_np.astype(np.float32) / 255.0
gray_img = 0.299 * img_np[:, :, 0] + 0.587 * img_np[:, :, 1] + 0.114 * img_np[:, :, 2]
# Add the channel and batch dimensions
gray_img = np.expand_dims(gray_img, axis=0) # Add channel dimension
img_np = np.expand_dims(gray_img, axis=0) # Add batch dimension
# img_np = np.transpose(img_np, (2, 0, 1))
# img_np = np.expand_dims(img_np, axis=0)
ort_inputs = {session.get_inputs()[0].name: img_np}
pre_landmark = session.run(None, ort_inputs)
# pre_landmark = pre_landmark[1]
# pre_landmark = np.reshape(pre_landmark, (12, 2))
pre_landmark = np.reshape(pre_landmark, (-1, 2))
# output_queue.put((frame, pre_landmark))
return frame, pre_landmark
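
The preprocessing in run_model normalizes the frame to [0, 1], collapses it to grayscale with the standard luminance weights, and adds channel and batch dimensions so the tensor is shaped (1, 1, H, W) for the ONNX model. A self-contained sketch of just that step, assuming a 112x112 RGB uint8 frame like the one produced by the cv2.resize call in leap_run:

import numpy as np

def preprocess(frame_rgb: np.ndarray) -> np.ndarray:
    # frame_rgb: HxWx3 uint8
    img = frame_rgb.astype(np.float32) / 255.0
    gray = 0.299 * img[:, :, 0] + 0.587 * img[:, :, 1] + 0.114 * img[:, :, 2]
    return gray[np.newaxis, np.newaxis, :, :]  # (1, 1, H, W): batch, channel

dummy = np.zeros((112, 112, 3), dtype=np.uint8)
print(preprocess(dummy).shape)  # (1, 1, 112, 112)

# With a session, inference then looks like:
# ort_inputs = {session.get_inputs()[0].name: preprocess(frame)}
# landmarks = np.reshape(session.run(None, ort_inputs), (-1, 2))
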
def to_numpy(self, tensor):
return tensor.detach().cpu().numpy() if tensor.requires_grad else tensor.cpu().numpy()
def run_onnx_model(self, queues, session, frame):
for i in range(len(queues)):
if not queues[i].full():
queues[i].put(frame)
break
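
run_onnx_model above hands the frame to the first worker queue that is not full; together with the bounded queues (queue_max_size = 1) this is a standard bounded-queue worker pattern, the hand-off that the leap_run hunk below replaces with a direct run_model call. A minimal generic sketch of that pattern with hypothetical names (the doubling stands in for the real inference call):

import queue
import threading

def worker(in_q: queue.Queue, out_q: queue.Queue) -> None:
    while True:
        item = in_q.get()
        if item is None:  # sentinel: stop the worker
            break
        out_q.put(item * 2)  # placeholder for the real inference call

in_q: queue.Queue = queue.Queue(maxsize=1)  # bounded to cap memory and staleness
out_q: queue.Queue = queue.Queue(maxsize=1)
t = threading.Thread(target=worker, args=(in_q, out_q), daemon=True)
t.start()

in_q.put(21)
print(out_q.get())  # 42
in_q.put(None)
t.join()
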
def leap_run(self):
img = self.current_image_gray_clean.copy()
@@ -172,103 +148,90 @@ class LEAP_C(object):
frame = cv2.resize(img, (112, 112))
imgvis = self.current_image_gray.copy()
self.run_onnx_model(self.queues, self.ort_session1, frame)
if not self.output_queue.empty():
frame, pre_landmark = self.run_model(self.ort_session1, frame)
frame, pre_landmark = self.output_queue.get()
# pre_landmark = np.reshape(pre_landmark, (-1, 2))
for point in pre_landmark:
    # x, y = (point*112).astype(int)
    # pre_landmark = self.one_euro_filter(pre_landmark)
    x, y = point  # Assuming point is a tuple (x, y)
    # Scale the coordinates to image width and height
    x = int(x * img_width)
    y = int(y * img_height)
    # x, y = int(x), int(y)  # Ensure x and y are integers
    cv2.circle(imgvis, (int(x), int(y)), 2, (255, 255, 0), -1)
x1, y1 = pre_landmark[1]
x2, y2 = pre_landmark[3]
x3, y3 = pre_landmark[4]
x4, y4 = pre_landmark[2]
d1 = math.dist(pre_landmark[1], pre_landmark[3])
# a fancier method could be used, taking into account the relative size of the landmarks so that weirdness can be accounted for better
d2 = math.dist(pre_landmark[2], pre_landmark[4])
d = (d1 + d2) / 2
# by averaging both sets we can get less error? I think part of why one eye was better than the other is because we only considered one set of offset points.
# considering both should smooth things out between eyes

try:
    if d >= np.percentile(
        self.openlist, 80  # do not go above 85, but this value can be tuned
    ):  # an additional approach could be using the place where on average it is most stable, denoting what distance is the most stable "open"
        self.maxlist.append(d)

    if len(self.maxlist) > 2000:  # I feel that this is very CPU intensive; think of a better method
        self.maxlist.pop(0)

    # this should be the average most open value, the average of top 2000 values in rolling calibration
    # with this we can use it as the "openstate" (0.7, for expanded squeeze)

    # weighted values to shift slightly to max value
    normal_open = ((sum(self.maxlist) / len(self.maxlist)) * 0.90 + max(self.openlist) * 0.10) / (
        0.95 + 0.15
    )
except:
    normal_open = 0.8

if len(self.openlist) < 5000:  # TODO expose as setting?
    self.openlist.append(d)
else:
    self.openlist.pop(0)
    self.openlist.append(d)

try:
    per = (d - normal_open) / (min(self.openlist) - normal_open)

    oldper = (d - max(self.openlist)) / (
        min(self.openlist) - max(self.openlist)
    )  # TODO: remove when testing is done

    per = 1 - per
    per = per - 0.2  # allow for eye widen? might require a more legit math way but this makes sense.
    per = min(per, 1.0)  # clamp to 1.0 max
    per = max(per, 0.0)  # clamp to 0.0 min

    # print("new: ", per, "vs old: ", oldper)
except:
    per = 0.8
    pass

x = pre_landmark[6][0]
y = pre_landmark[6][1]

self.last_lid = per
calib_array = np.array([per, per]).reshape(1, 2)

per = self.one_euro_filter_float(calib_array)

per = per[0][0]
# print(per)
if per <= 0.2:  # TODO: EXPOSE AS SETTING
    per = 0.0
    # this should be tuned; I could make this auto-calibrate based on the min from a list of per values.

return imgvis, float(x), float(y), per
imgvis = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
return imgvis, 0, 0, 0
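
To summarize the eyelid logic in this hunk: two landmark distances are averaged into d, a rolling list of recent distances (openlist) supplies a reference "open" value via a high percentile, and d is then mapped to a 0..1 openness ratio with a small offset left for eye widening. A simplified, self-contained sketch of that idea; it uses the percentile directly as the reference and skips the maxlist weighting and the One Euro filter:

import numpy as np

openlist: list[float] = []  # rolling calibration of recent eyelid distances

def eyelid_openness(d: float) -> float:
    openlist.append(d)
    if len(openlist) > 5000:
        openlist.pop(0)
    normal_open = float(np.percentile(openlist, 80))  # reference "open" distance
    closed = min(openlist)
    if np.isclose(closed, normal_open):
        return 1.0  # not enough spread in the calibration yet
    per = (d - normal_open) / (closed - normal_open)  # 0.0 at open, 1.0 at closed
    per = 1.0 - per  # flip so 1.0 means open
    per -= 0.2  # headroom for eye widen
    return float(np.clip(per, 0.0, 1.0))

for dist in (0.30, 0.28, 0.31, 0.10, 0.29):
    print(round(eyelid_openness(dist), 3))
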
class External_Run_LEAP(object):
@@ -280,3 +243,6 @@ class External_Run_LEAP(object):
self.algo.current_image_gray_clean = current_image_gray_clean
img, x, y, per = self.algo.leap_run()
return img, x, y, per


@@ -161,75 +161,11 @@ def overlay_calibrate_3d(self):
var.overlay_active = False
def calculate_real_angle(angle, ipd):
return math.degrees(math.atan(math.tan(math.radians(angle)) * (ipd / 2)))
def calibrate_tracked_data(tracked_data, calibrated_data, ipd):
for point in tracked_data:
x, y, angle = point
# Find the nearest calibration point
min_distance_point = min(calibration_points, key=lambda p: math.dist((x, y), (p[0], p[1])))
cal_x, cal_y, _ = min_distance_point
# Calculate the real angle for each eye
left_eye_angle = calculate_real_angle(angle, ipd / 2)
right_eye_angle = calculate_real_angle(angle, -ipd / 2)
# Adjust the tracked data using calibration information
calibrated_x = x + (cal_x - x)
calibrated_y = y + (cal_y - y)
calibrated_angle = angle + (cal_x - x) * math.tan(math.radians(left_eye_angle))
calibrated_data.append((calibrated_x, calibrated_y, calibrated_angle))
return calibrated_data
def rotate_around_y(point, angle):
"""
Rotate a 3D point around the y-axis by a given angle.
"""
rotation_matrix = np.array(
[[math.cos(angle), 0, -math.sin(angle)], [0, 1, 0], [math.sin(angle), 0, math.cos(angle)]]
)
rotated_point = np.dot(rotation_matrix, point)
return rotated_point
def calculate_rotation_angles(target_point, ipd, eye="left"):
"""
Calculate yaw and pitch angles to converge left or right eye at the target point.
"""
if eye == "left":
x = target_point[0] - ipd
else:
x = target_point[0] + ipd
y = target_point[1]
z = target_point[2]
if x == 0:
yaw = 90.0  # Assign a specific value when x is zero
else:
yaw = math.degrees(math.atan2(z, x))
if y == 0:
pitch = 0
else:
pitch = math.degrees(math.atan2(x, y))
# print(yaw, pitch)
return yaw, pitch
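
rotate_around_y applies a yaw rotation matrix to a 3D point, and calculate_rotation_angles offsets the target by the IPD per eye before taking atan2-based yaw/pitch. A small numeric sketch of both steps with illustrative values only:

import math
import numpy as np

# Rotate a point 30 degrees around the y-axis with the same matrix as rotate_around_y.
angle = math.radians(30.0)
point = np.array([0.0, 0.0, 1.0])
rotation_matrix = np.array(
    [[math.cos(angle), 0, -math.sin(angle)], [0, 1, 0], [math.sin(angle), 0, math.cos(angle)]]
)
print(rotation_matrix @ point)  # roughly [-0.5, 0.0, 0.866]

# Left-eye yaw toward a target point, following calculate_rotation_angles.
ipd = 0.058  # interpupillary distance in metres
target = [0.8, 0.8, 1.0]  # x, y, z
x = target[0] - ipd  # the left eye is offset by the IPD in the code above
yaw = math.degrees(math.atan2(target[2], x))
print(round(yaw, 2))  # about 53.4
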
class cal:
def cal_osc(self, cx, cy, angle):
# Example usage for the left eye
# Example usage for the center point
target_point_center = [0.8, 0.8, 1] # x y z
ipd = 0.058 # Interpupillary Distance in meters
calculate_rotation_angles(target_point_center, ipd, eye="left")
#print(self.eye_id)
if cx == None or cy == None:
return 0, 0
@@ -249,8 +185,8 @@ class cal:
if self.grab_3d_point:
self.grab_3d_point = False
self.config.calibration_points.append((cx, cy, angle))
print(self.config.calibration_points)
self.config.calibration_points.append((cx, cy))
print(self.config.calibration_points, self.eye_id)
# print("calib")