Mirror of https://github.com/EyeTrackVR/EyeTrackVR.git

fix: latency with LEAP

Commit 7dae037dd5 (parent ba42576498)
@@ -28,7 +28,6 @@ LICENSE: GNU GPLv3
"""
# LEAP = Lightweight Eyelid And Pupil
import os

os.environ["OMP_NUM_THREADS"] = "1"
import onnxruntime
import numpy as np

@@ -43,40 +42,14 @@ import sys
from utils.misc_utils import resource_path
from pathlib import Path


frames = 0
models = Path("Models")


def run_model(input_queue, output_queue, session):
    while True:
        frame = input_queue.get()
        if frame is None:
            break

        img_np = np.array(frame)
        img_np = img_np.astype(np.float32) / 255.0
        gray_img = 0.299 * img_np[:, :, 0] + 0.587 * img_np[:, :, 1] + 0.114 * img_np[:, :, 2]

        # Add the channel and batch dimensions
        gray_img = np.expand_dims(gray_img, axis=0)  # Add channel dimension
        img_np = np.expand_dims(gray_img, axis=0)  # Add batch dimension
        # img_np = np.transpose(img_np, (2, 0, 1))
        # img_np = np.expand_dims(img_np, axis=0)
        ort_inputs = {session.get_inputs()[0].name: img_np}
        pre_landmark = session.run(None, ort_inputs)

        # pre_landmark = pre_landmark[1]
        # pre_landmark = np.reshape(pre_landmark, (12, 2))
        pre_landmark = np.reshape(pre_landmark, (-1, 2))
        output_queue.put((frame, pre_landmark))


class LEAP_C(object):
    def __init__(self):
        onnxruntime.disable_telemetry_events()
        # Config variables
        self.num_threads = 4  # Number of python threads to use (using ~1 more than needed to achieve wanted fps yields lower cpu usage)
        self.num_threads = 1  # Number of python threads to use (using ~1 more than needed to achieve wanted fps yields lower cpu usage)
        self.queue_max_size = 1  # Optimize for best CPU usage, Memory, and Latency. A maxsize is needed to not create a potential memory leak.
        self.model_path = resource_path(models / 'LEAP053024.onnx')

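Side note, not part of the commit: the weighted sum in run_model above is the ITU-R BT.601 luma conversion, the same coefficients OpenCV applies for RGB to GRAY, and the two expand_dims calls build the (1, 1, 112, 112) NCHW tensor fed to the ONNX session. A minimal standalone sketch with a made-up random frame:

# Standalone sketch (not repo code); the random frame is a stand-in for a resized capture.
import cv2
import numpy as np

frame = np.random.rand(112, 112, 3).astype(np.float32)  # pretend RGB frame scaled to [0, 1]
gray = 0.299 * frame[:, :, 0] + 0.587 * frame[:, :, 1] + 0.114 * frame[:, :, 2]
gray_cv = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)

print(np.allclose(gray, gray_cv, atol=1e-5))                 # True: same BT.601 weights
print(np.expand_dims(np.expand_dims(gray, 0), 0).shape)      # (1, 1, 112, 112) NCHW input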
@@ -100,7 +73,7 @@ class LEAP_C(object):
        self.queues.append(self.queue)

        opts = onnxruntime.SessionOptions()
        opts.inter_op_num_threads = 1
        opts.inter_op_num_threads = 4
        opts.intra_op_num_threads = 1
        opts.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL
        opts.optimized_model_filepath = ""

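For context on the two thread settings changed here: intra_op_num_threads controls the pool used inside a single operator (one conv or matmul), while inter_op_num_threads controls how many independent graph nodes may run concurrently, which only takes effect when the session executes nodes in parallel. A hedged standalone sketch of the same session setup, assuming the model file sits at the path referenced above:

# Standalone sketch; only calls that already appear in the diff are used.
import onnxruntime

opts = onnxruntime.SessionOptions()
opts.intra_op_num_threads = 1  # threads inside a single operator
opts.inter_op_num_threads = 4  # threads across independent graph nodes (parallel execution mode)
opts.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL

session = onnxruntime.InferenceSession(
    "Models/LEAP053024.onnx", opts, providers=["CPUExecutionProvider"]
)
print(session.get_inputs()[0].name, session.get_inputs()[0].shape)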
@@ -144,25 +117,28 @@ class LEAP_C(object):

        self.ort_session1 = onnxruntime.InferenceSession(self.model_path, opts, providers=["CPUExecutionProvider"])

        threads = []
        for i in range(self.num_threads):
            thread = threading.Thread(
                target=run_model,
                args=(self.queues[i], self.output_queue, self.ort_session1),
                name=f"Thread {i}",
            )
            threads.append(thread)
            thread.start()
    def run_model(output_queue, session, frame):

        img_np = np.array(frame)
        img_np = img_np.astype(np.float32) / 255.0
        gray_img = 0.299 * img_np[:, :, 0] + 0.587 * img_np[:, :, 1] + 0.114 * img_np[:, :, 2]

        # Add the channel and batch dimensions
        gray_img = np.expand_dims(gray_img, axis=0)  # Add channel dimension
        img_np = np.expand_dims(gray_img, axis=0)  # Add batch dimension
        # img_np = np.transpose(img_np, (2, 0, 1))
        # img_np = np.expand_dims(img_np, axis=0)
        ort_inputs = {session.get_inputs()[0].name: img_np}
        pre_landmark = session.run(None, ort_inputs)

        # pre_landmark = pre_landmark[1]
        # pre_landmark = np.reshape(pre_landmark, (12, 2))
        pre_landmark = np.reshape(pre_landmark, (-1, 2))
        # output_queue.put((frame, pre_landmark))
        return frame, pre_landmark
    def to_numpy(self, tensor):
        return tensor.detach().cpu().numpy() if tensor.requires_grad else tensor.cpu().numpy()

    def run_onnx_model(self, queues, session, frame):
        for i in range(len(queues)):
            if not queues[i].full():
                queues[i].put(frame)
                break

    def leap_run(self):

        img = self.current_image_gray_clean.copy()

@@ -172,14 +148,8 @@ class LEAP_C(object):

        frame = cv2.resize(img, (112, 112))
        imgvis = self.current_image_gray.copy()
        self.run_onnx_model(self.queues, self.ort_session1, frame)

        if not self.output_queue.empty():

            frame, pre_landmark = self.output_queue.get()
            # pre_landmark = np.reshape(pre_landmark, (-1, 2))

            # pre_landmark = self.one_euro_filter(pre_landmark)
        frame, pre_landmark = self.run_model(self.ort_session1, frame)

        for point in pre_landmark:
            # x, y = (point*112).astype(int)

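This hunk is the heart of the latency fix: leap_run previously pushed the frame into a bounded queue for a worker thread and then read back whichever result happened to be waiting, so the landmarks could lag the current frame by at least one inference; now it calls run_model synchronously and the result always corresponds to the frame just captured. A rough standalone sketch for measuring the per-frame cost of a synchronous inference callable, where the dummy callable is a placeholder to swap for a wrapper around session.run or leap_run:

# Standalone timing sketch; 'infer' is a placeholder callable, not repo code.
import time
import numpy as np

def time_per_frame(infer, frames, warmup=10):
    # Average wall-clock seconds per call after a short warm-up pass.
    for f in frames[:warmup]:
        infer(f)
    start = time.perf_counter()
    for f in frames[warmup:]:
        infer(f)
    return (time.perf_counter() - start) / max(1, len(frames) - warmup)

infer = lambda frame: frame.mean()  # stand-in; replace with the real inference path
frames = [np.random.rand(112, 112, 3).astype(np.float32) for _ in range(110)]
print(f"{time_per_frame(infer, frames) * 1000:.3f} ms per frame")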
@@ -194,11 +164,6 @@ class LEAP_C(object):
            cv2.circle(imgvis, (int(x), int(y)), 2, (255, 255, 0), -1)


        x1, y1 = pre_landmark[1]
        x2, y2 = pre_landmark[3]

        x3, y3 = pre_landmark[4]
        x4, y4 = pre_landmark[2]

        d1 = math.dist(pre_landmark[1], pre_landmark[3])
        # a more fancy method could be used taking into account the relative size of the landmarks so that weirdness can be accounted for better

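The comment above leaves room for a scale-aware metric; one common option is to normalize the vertical lid distance by a horizontal reference distance so the openness value stays roughly invariant to eye size in the crop. A hedged sketch, assuming (as the index pairs above suggest, but not confirmed by the repo) that landmarks 1/3 are the upper/lower lid and 2/4 span the eye horizontally:

# Standalone sketch; the landmark index meanings are assumptions for illustration only.
import math
import numpy as np

def eye_openness(landmarks):
    vertical = math.dist(landmarks[1], landmarks[3])    # upper lid to lower lid (assumed)
    horizontal = math.dist(landmarks[2], landmarks[4])  # horizontal eye extent (assumed)
    return vertical / horizontal if horizontal > 1e-6 else 0.0

pts = np.array([[56, 56], [56, 40], [30, 56], [56, 72], [82, 56]], dtype=np.float32)
print(round(eye_openness(pts), 3))  # 0.615 for this synthetic 112x112 layout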
@@ -267,8 +232,6 @@ class LEAP_C(object):

            return imgvis, float(x), float(y), per

        imgvis = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        return imgvis, 0, 0, 0


class External_Run_LEAP(object):

@@ -280,3 +243,6 @@ class External_Run_LEAP(object):
        self.algo.current_image_gray_clean = current_image_gray_clean
        img, x, y, per = self.algo.leap_run()
        return img, x, y, per


@@ -161,75 +161,11 @@ def overlay_calibrate_3d(self):
    var.overlay_active = False


def calculate_real_angle(angle, ipd):
    return math.degrees(math.atan(math.tan(math.radians(angle)) * (ipd / 2)))


def calibrate_tracked_data(tracked_data, calibrated_data, ipd):

    for point in tracked_data:
        x, y, angle = point

        # Find the nearest calibration point
        min_distance_point = min(calibration_points, key=lambda p: math.dist((x, y), (p[0], p[1])))
        cal_x, cal_y, _ = min_distance_point

        # Calculate the real angle for each eye
        left_eye_angle = calculate_real_angle(angle, ipd / 2)
        right_eye_angle = calculate_real_angle(angle, -ipd / 2)

        # Adjust the tracked data using calibration information
        calibrated_x = x + (cal_x - x)
        calibrated_y = y + (cal_y - y)
        calibrated_angle = angle + (cal_x - x) * math.tan(math.radians(left_eye_angle))

        calibrated_data.append((calibrated_x, calibrated_y, calibrated_angle))

    return calibrated_data


def rotate_around_y(point, angle):
    """
    Rotate a 3D point around the y-axis by a given angle.
    """
    rotation_matrix = np.array(
        [[math.cos(angle), 0, -math.sin(angle)], [0, 1, 0], [math.sin(angle), 0, math.cos(angle)]]
    )
    rotated_point = np.dot(rotation_matrix, point)
    return rotated_point


def calculate_rotation_angles(target_point, ipd, eye="left"):
    """
    Calculate yaw and pitch angles to converge left or right eye at the target point.
    """
    if eye == "left":
        x = target_point[0] - ipd
    else:
        x = target_point[0] + ipd
    y = target_point[1]
    z = target_point[2]
    if x == 0:
        yaw = 90.0  # Assign a specific value when b is zero
    else:
        yaw = math.degrees(math.atan2(z, x))
    if y == 0:
        pitch = 0
    else:
        pitch = math.degrees(math.atan2(x, y))
    # print(yaw, pitch)

    return yaw, pitch


class cal:
    def cal_osc(self, cx, cy, angle):
        # Example usage for the left eye
        # Example usage for the center point
        target_point_center = [0.8, 0.8, 1]  # x y z
        ipd = 0.058  # Interpupillary Distance in meters

        calculate_rotation_angles(target_point_center, ipd, eye="left")
        #print(self.eye_id)

        if cx == None or cy == None:
            return 0, 0

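As a quick check of the convention used by rotate_around_y above (angle in radians, matrix copied verbatim so the sketch runs on its own): a +90 degree rotation maps the +x axis onto +z.

# Standalone check; rotate_around_y is duplicated from the diff above.
import math
import numpy as np

def rotate_around_y(point, angle):
    rotation_matrix = np.array(
        [[math.cos(angle), 0, -math.sin(angle)], [0, 1, 0], [math.sin(angle), 0, math.cos(angle)]]
    )
    return np.dot(rotation_matrix, point)

print(np.round(rotate_around_y([1.0, 0.0, 0.0], math.pi / 2), 6))  # [0. 0. 1.]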
@@ -249,8 +185,8 @@ class cal:
        if self.grab_3d_point:
            self.grab_3d_point = False

            self.config.calibration_points.append((cx, cy, angle))
            print(self.config.calibration_points)
            self.config.calibration_points.append((cx, cy))
            print(self.config.calibration_points, self.eye_id)

            # print("calib")