Mirror of https://github.com/EyeTrackVR/EyeTrackVR.git (synced 2025-11-04 14:39:42 +08:00)

fix: falloff bug

parent 5b48d5d735
commit 584eea117f
@@ -39,7 +39,6 @@ import cv2
 import numpy as np


-
 # from line_profiler_pycharm import profile

 # memo: Old Name: CPRD
@@ -56,9 +55,7 @@ save_video = False

 VideoCapture_SRC = "/Users/prohurtz/Desktop/t3c.mp4" # "demo2.mp4"
 input_is_webcam = False
-benchmark_flag = (
-    True if not input_is_webcam and not imshow_enable and not save_video else False
-)
+benchmark_flag = True if not input_is_webcam and not imshow_enable and not save_video else False
 loop_num = 1 if imshow_enable or save_video else 10
 output_video_path = f"./{this_file_name}.mp4"
 logfilename = f"./{this_file_name}.log"
@@ -137,9 +134,7 @@ class TimeitResult(object):
     @property
     def stdev(self):
         mean = self.average
-        return (
-            math.fsum([(x - mean) ** 2 for x in self.timings]) / len(self.timings)
-        ) ** 0.5
+        return (math.fsum([(x - mean) ** 2 for x in self.timings]) / len(self.timings)) ** 0.5

     def __str__(self):
         pm = "+-"
@@ -339,9 +334,7 @@ def pupil_detector_haar(img_gray, params):


 @lru_cache(maxsize=lru_maxsize_vvs)
-def get_empty_array(
-    frame_shape, width_min, width_max, wh_step, xy_step, roi, ratio_outer
-):
+def get_empty_array(frame_shape, width_min, width_max, wh_step, xy_step, roi, ratio_outer):
     frame_int_dtype = np.intc
     np_index_dtype = (
         np.intc
@@ -355,62 +348,20 @@ def get_empty_array(
     h_arr = (w_arr / ratio_outer).astype(np.int16)

     # memo: It is not smart code and needs to be changed.
-    y_out_n = np.hstack(
-        [
-            np.arange(roi[1] + h, roi[3] - h, xy_step, dtype=np_index_dtype)
-            for h in h_arr
-        ]
-    )
-    x_out_n = np.hstack(
-        [
-            np.arange(roi[0] + w, roi[2] - w, xy_step, dtype=np_index_dtype)
-            for w in w_arr
-        ]
-    )
-    y_out_h = np.hstack(
-        [
-            np.arange(roi[1] + h, roi[3] - h, xy_step, dtype=np_index_dtype) + h
-            for h in h_arr
-        ]
-    )
-    x_out_w = np.hstack(
-        [
-            np.arange(roi[0] + w, roi[2] - w, xy_step, dtype=np_index_dtype) + w
-            for w in w_arr
-        ]
-    )
+    y_out_n = np.hstack([np.arange(roi[1] + h, roi[3] - h, xy_step, dtype=np_index_dtype) for h in h_arr])
+    x_out_n = np.hstack([np.arange(roi[0] + w, roi[2] - w, xy_step, dtype=np_index_dtype) for w in w_arr])
+    y_out_h = np.hstack([np.arange(roi[1] + h, roi[3] - h, xy_step, dtype=np_index_dtype) + h for h in h_arr])
+    x_out_w = np.hstack([np.arange(roi[0] + w, roi[2] - w, xy_step, dtype=np_index_dtype) + w for w in w_arr])
     out_h = y_out_h - y_out_n
     out_w = x_out_w - x_out_n

-    y_in_n = np.hstack(
-        [
-            np.arange(roi[1] + h, roi[3] - h, xy_step, dtype=np_index_dtype)
-            + int(h / 4)
-            for h in h_arr
-        ]
-    )
-    x_in_n = np.hstack(
-        [
-            np.arange(roi[0] + w, roi[2] - w, xy_step, dtype=np_index_dtype)
-            + int(w / 4)
-            for w in w_arr
-        ]
-    )
+    y_in_n = np.hstack([np.arange(roi[1] + h, roi[3] - h, xy_step, dtype=np_index_dtype) + int(h / 4) for h in h_arr])
+    x_in_n = np.hstack([np.arange(roi[0] + w, roi[2] - w, xy_step, dtype=np_index_dtype) + int(w / 4) for w in w_arr])
     y_in_h = np.hstack(
-        [
-            np.arange(roi[1] + h, roi[3] - h, xy_step, dtype=np_index_dtype)
-            + int(h / 4)
-            + int(h / 2)
-            for h in h_arr
-        ]
+        [np.arange(roi[1] + h, roi[3] - h, xy_step, dtype=np_index_dtype) + int(h / 4) + int(h / 2) for h in h_arr]
     )
     x_in_w = np.hstack(
-        [
-            np.arange(roi[0] + w, roi[2] - w, xy_step, dtype=np_index_dtype)
-            + int(w / 4)
-            + int(w / 2)
-            for w in w_arr
-        ]
+        [np.arange(roi[0] + w, roi[2] - w, xy_step, dtype=np_index_dtype) + int(w / 4) + int(w / 2) for w in w_arr]
     )
     in_h = y_in_h - y_in_n
     in_w = x_in_w - x_in_n
@@ -475,9 +426,7 @@ def get_empty_array(
     wh_in_arr = 1 / wh_in_arr # .astype(np.float32)
     # wh_out_arr=wh_out_arr.astype(np.float64)
     mu_outer_rect = 1 / mu_outer_rect # .astype(np.float32)
-    mu_outer_rect2 = (
-        -1.0 * mu_outer_rect
-    ) # cv2.merge([mu_outer_rect,-1.0*mu_outer_rect])
+    mu_outer_rect2 = -1.0 * mu_outer_rect # cv2.merge([mu_outer_rect,-1.0*mu_outer_rect])

     # 1/wh_in_arr == wh_in_arr_mul
     return (
@@ -551,9 +500,7 @@ def coarse_detection(img_gray, params):
         wh_out_arr,
         mu_outer_rect,
         mu_outer_rect2,
-    ) = get_empty_array(
-        img_blur.shape, width_min, width_max, wh_step, xy_step, roi, ratio_outer
-    )
+    ) = get_empty_array(img_blur.shape, width_min, width_max, wh_step, xy_step, roi, ratio_outer)
     cv2.integral(
         img_blur, sum=frame_int, sdepth=cv2.CV_32S
     ) # memo: It becomes slower when using float64, probably because the increase in bits from 32 to 64 causes the arrays to be larger.
@@ -674,9 +621,7 @@ def fine_detection(img_gray, pupil_rect_coarse):
     img_pupil_blur = cv2.GaussianBlur(img_pupil, (5, 5), 0, 0)
     edges_filter = detect_edges(img_pupil_blur)
     # fit ellipse to edges
-    contours, hierarchy = cv2.findContours(
-        edges_filter, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE
-    )
+    contours, hierarchy = cv2.findContours(edges_filter, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
     # sort contours by area
     contours = sorted(contours, key=lambda x: cv2.contourArea(x), reverse=True)
     # fit ellipse to largest contour
@@ -733,9 +678,7 @@ def detect_edges(img_pupil_blur):


 def fit_pupil_ellipse_swirski(img_pupil, edges_filter):
-    contours, hierarchy = cv2.findContours(
-        edges_filter, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE
-    )
+    contours, hierarchy = cv2.findContours(edges_filter, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
     max_contour_area = 0
     max_contour = None
     print("contours: ", contours)
@@ -953,18 +896,10 @@ if __name__ == "__main__":
     cv2.imshow("pppp", image_brg)
     cv2.waitKey(10)
     cv2.destroyAllWindows()
-    # save images
-    # cv2.imwrite("coarse_detection.png", image_brg)
-    # webcam
-    # cap = cv2.VideoCapture(VideoCapture_SRC)
-    # CLACHE
-    # clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))

     timedict = {"to_gray": [], "coarse": [], "fine": [], "total_cv": []}
     # For measuring total processing time
     main_start_time = timeit.default_timer()
-    # for i in range(loop_num):
-    # cap = cv2.VideoCapture(VideoCapture_SRC)


 def External_Run_AHSF(frame_gray):
@@ -984,26 +919,13 @@ def External_Run_AHSF(frame_gray):
     y_offset = (max_dimension - height) // 2

     # Paste the rotated image onto the square background
-    square_background[y_offset:y_offset + height, x_offset:x_offset + width] = frame_gray
+    square_background[y_offset : y_offset + height, x_offset : x_offset + width] = frame_gray

     frame_gray = square_background
     frame_clear_resize = frame_gray.copy()

-    # while True:
-    # if not cap.isOpened():
-    # break
-    # ret, frame = cap.read()
-    # if not ret:
-    # break
-    # remove 30 pixels from the right
-    # frame = frame[:, :-200]
-    # frame = cv2.resize(frame, (100, 100))
-    # frame = cv2.GaussianBlur(frame, (11,11), 0)
-
-    # frame_gray = cv2.resize(frame_gray, (100, 100))
-
-    wmax = (frame_gray.shape[1] * 0.5) # likes to crash, might need more tuning still
-    wmin = (frame_gray.shape[1] * 0.08)
+    wmax = frame_gray.shape[1] * 0.5 # likes to crash, might need more tuning still
+    wmin = frame_gray.shape[1] * 0.08
     params = {
         "ratio_downsample": 0.5,
         "use_init_rect": False,
@@ -1068,39 +990,3 @@ def External_Run_AHSF(frame_gray):
     average_diameter = (major_diameter + minor_diameter) / 2

     return frame_gray, frame_clear_resize, x_center, y_center, abs(width - height)
-
-
-    # return frame_gray, 0.0, 0.0, 0.0
-    # if imshow_enable:
-    # cv2.imshow("pppp", image_brg)
-    # if cv2.waitKey(1) & 0xFF == ord("q"):
-    # pass
-    # if save_video:
-    # video_wr.write(image_brg)
-
-    # if save_video:
-    # video_wr.release()
-    # logger.info("video output: {}".format(output_video_path))
-    # cap.release()
-    # if imshow_enable:
-    # cv2.destroyAllWindows()
-
-
-    # main_end_time = timeit.default_timer()
-    # main_total_time = main_end_time - main_start_time
-    # if not print_enable:
-    # del print
-    # or
-    # print = __builtins__.print
-    # logger.info("")
-    # for k, v in timedict.items():
-    # number=1, precision=5
-    # len_v = len(v)
-    # best = min(v) # / number
-    # worst = max(v) # / number
-    # logger.info(k + ":")
-    # logger.info(TimeitResult(loop_num, len_v, best, worst, v, 5))
-    # logger.info(FPSResult(loop_num, len_v, worst, best, v, 5))
-    # print("")
-    # logger.info("")
-    # logger.info(f"{this_file_basename}: ALL Finish {format_time(main_total_time)}")
@@ -10,6 +10,7 @@ import os
 import subprocess
 import math

+
 class TimeoutError(RuntimeError):
     pass

@@ -20,9 +21,7 @@ class AsyncCall(object):
         self.Callback = callback

     def __call__(self, *args, **kwargs):
-        self.Thread = threading.Thread(
-            target=self.run, name=self.Callable.__name__, args=args, kwargs=kwargs
-        )
+        self.Thread = threading.Thread(target=self.run, name=self.Callable.__name__, args=args, kwargs=kwargs)
         self.Thread.start()
         return self

@@ -104,10 +103,12 @@ def center_overlay_calibrate(self):
     self.calibration_frame_counter = 0
     var.overlay_active = False

     # except:
     # print("[WARN] Calibration overlay error. Make sure SteamVR is Running.")
     # self.settings.gui_recenter_eyes = False
     # var.overlay_active = False
+
+

 @Async
 def overlay_calibrate_3d(self):
@@ -127,7 +128,6 @@ def overlay_calibrate_3d(self):
         self.settings.gui_recenter_eyes = False
         self.grab_3d_point = True
-

         print(message)
     except:
         print("[WARN] Calibration overlay error. Make sure SteamVR is Running.")
@@ -138,8 +138,8 @@ def overlay_calibrate_3d(self):
 def calculate_real_angle(angle, ipd):
     return math.degrees(math.atan(math.tan(math.radians(angle)) * (ipd / 2)))

-def calibrate_tracked_data(tracked_data, calibrated_data, ipd):
-
+
+def calibrate_tracked_data(tracked_data, calibrated_data, ipd):
     for point in tracked_data:
         x, y, angle = point
@@ -161,21 +161,23 @@ def calibrate_tracked_data(tracked_data, calibrated_data, ipd):

     return calibrated_data


 def rotate_around_y(point, angle):
     """
     Rotate a 3D point around the y-axis by a given angle.
     """
-    rotation_matrix = np.array([[math.cos(angle), 0, -math.sin(angle)],
-                                [0, 1, 0],
-                                [math.sin(angle), 0, math.cos(angle)]])
+    rotation_matrix = np.array(
+        [[math.cos(angle), 0, -math.sin(angle)], [0, 1, 0], [math.sin(angle), 0, math.cos(angle)]]
+    )
     rotated_point = np.dot(rotation_matrix, point)
     return rotated_point

-def calculate_rotation_angles(target_point, ipd, eye='left'):
+
+def calculate_rotation_angles(target_point, ipd, eye="left"):
     """
     Calculate yaw and pitch angles to converge left or right eye at the target point.
     """
-    if eye == 'left':
+    if eye == "left":
         x = target_point[0] - ipd
     else:
         x = target_point[0] + ipd
@@ -191,13 +193,9 @@ def calculate_rotation_angles(target_point, ipd, eye='left'):
     pitch = math.degrees(math.atan2(x, y))
     # print(yaw, pitch)

-
-
     return yaw, pitch


-
-
 class cal:
     def cal_osc(self, cx, cy, angle):
         # Example usage for the left eye
@@ -205,8 +203,7 @@ class cal:
         target_point_center = [0.8, 0.8, 1] # x y z
         ipd = 0.058 # Interpupillary Distance in meters
-
-        calculate_rotation_angles(target_point_center, ipd, eye='left')
+        calculate_rotation_angles(target_point_center, ipd, eye="left")


         if cx == None or cy == None:
             return 0, 0
@@ -219,17 +216,17 @@ class cal:
         else:
             flipx = self.settings.gui_flip_x_axis_left
         if self.calibration_3d_frame_counter == -621:
-            self.calibration_3d_frame_counter = self.calibration_3d_frame_counter -1
+            self.calibration_3d_frame_counter = self.calibration_3d_frame_counter - 1
             overlay_calibrate_3d(self)
-            print('yippe')
+            print("yippe")

         if self.grab_3d_point:
             self.grab_3d_point = False

             self.config.calibration_points.append((cx, cy, angle))
-            print(self.config.calibration_points)
+            # print(self.config.calibration_points)

-            print("calib")
+            # print("calib")

         if self.calibration_frame_counter == 0:
             self.calibration_frame_counter = None
@@ -264,9 +261,7 @@ class cal:
             if self.ts == 0:
                 center_overlay_calibrate(self) # TODO, only call on windows machines?
                 self.settings.gui_recenter_eyes = False
-                PlaySound(
-                    resource_path("Audio/completed.wav"), SND_FILENAME | SND_ASYNC
-                )
+                PlaySound(resource_path("Audio/completed.wav"), SND_FILENAME | SND_ASYNC)
             else:
                 self.ts = self.ts - 1

@@ -299,9 +294,7 @@ class cal:
         yu = float((cy - self.config.calib_YOFF) / calib_diff_y_MIN)
         yd = float((cy - self.config.calib_YOFF) / calib_diff_y_MAX)

-        if (
-            self.settings.gui_flip_y_axis
-        ): # check config on flipped values settings and apply accordingly
+        if self.settings.gui_flip_y_axis: # check config on flipped values settings and apply accordingly
             if yd >= 0:
                 out_y = max(0.0, min(1.0, yd))
             if yu > 0:
@@ -324,16 +317,12 @@ class cal:
                 out_x = -abs(max(0.0, min(1.0, xl)))

         if self.settings.gui_outer_side_falloff:
+            print("pn")
             run_time = time.time()
             out_x_mult = out_x * 100
             out_y_mult = out_y * 100
             velocity = abs(
-                np.sqrt(
-                    abs(
-                        np.square(out_x_mult - var.past_x)
-                        - np.square(out_y_mult - var.past_y)
-                    )
-                )
+                np.sqrt(abs(np.square(out_x_mult - var.past_x) - np.square(out_y_mult - var.past_y)))
                 / ((var.start_time - run_time) * 10)
             )
             if len(var.velocity_rolling_list) < 15:
@@ -341,24 +330,21 @@ class cal:
             else:
                 var.velocity_rolling_list.pop(0)
             var.velocity_rolling_list.append(float(velocity))
-            var.average_velocity = sum(var.velocity_rolling_list) / len(
-                var.velocity_rolling_list
-            )
+            var.average_velocity = sum(var.velocity_rolling_list) / len(var.velocity_rolling_list)
             var.past_x = out_x_mult
             var.past_y = out_y_mult

+            out_x, out_y = velocity_falloff(self, var, out_x, out_y)

             try:
-                noisy_point = np.array(
-                    [float(out_x), float(out_y)]
-                ) # fliter our values with a One Euro Filter
+                noisy_point = np.array([float(out_x), float(out_y)]) # fliter our values with a One Euro Filter
                 point_hat = self.one_euro_filter(noisy_point)
                 out_x = point_hat[0]
                 out_y = point_hat[1]

             except:
                 pass

-            out_x, out_y = velocity_falloff(self, var, out_x, out_y)

             return out_x, out_y, var.average_velocity
         else:
             if self.printcal:
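This last hunk appears to be the behavioral core of the "fix: falloff bug" commit: cal_osc now applies velocity_falloff to out_x/out_y before the One Euro Filter smoothing step, rather than after it as in the parent revision. A minimal sketch of the new ordering, using only names visible in the hunk above (surrounding code omitted, error handling kept as in the diff):

    # inside cal.cal_osc, once the rolling velocity average has been updated
    out_x, out_y = velocity_falloff(self, var, out_x, out_y)  # apply the outer-side falloff first
    try:
        noisy_point = np.array([float(out_x), float(out_y)])  # filter our values with a One Euro Filter
        point_hat = self.one_euro_filter(noisy_point)         # smooth the falloff-adjusted point
        out_x, out_y = point_hat[0], point_hat[1]
    except:
        pass
    return out_x, out_y, var.average_velocity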
@@ -117,13 +117,9 @@ def fit_rotated_ellipse_ransac(

     # These two lines are one of the bottlenecks
     datamod_rng_5x5 = np.matmul(datamod_rng_swap_trans, datamod_rng_swap)
-    datamod_rng_p5smp = np.matmul(
-        np.linalg.inv(datamod_rng_5x5), datamod_rng_swap_trans
-    )
+    datamod_rng_p5smp = np.matmul(np.linalg.inv(datamod_rng_5x5), datamod_rng_swap_trans)

-    datamod_rng_p = np.matmul(
-        datamod_rng_p5smp, datamod_rng6[:, :, np.newaxis]
-    ).reshape((-1, 5))
+    datamod_rng_p = np.matmul(datamod_rng_p5smp, datamod_rng6[:, :, np.newaxis]).reshape((-1, 5))

     # I don't think it looks beautiful.
     ellipse_y_arr = np.asarray(
@@ -137,9 +133,7 @@ def fit_rotated_ellipse_ransac(
         dtype=ret_dtype,
     )

-    ellipse_data_arr = ellipse_model(
-        datamod_slim, ellipse_y_arr, np.asarray(datamod_rng_p[:, 4])
-    ).transpose((1, 0))
+    ellipse_data_arr = ellipse_model(datamod_slim, ellipse_y_arr, np.asarray(datamod_rng_p[:, 4])).transpose((1, 0))
     ellipse_data_abs = np.abs(ellipse_data_arr)
     ellipse_data_index = np.argmax(np.sum(ellipse_data_abs < offset, axis=1), axis=0)
     effective_data_arr = ellipse_data_arr[ellipse_data_index]
@@ -301,6 +295,7 @@ def RANSAC3D(self, hsrac_en):

     contours, _ = cv2.findContours(th_frame, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
     hull = []
+    # print(contours)
     # This way is faster than contours[i]
     # But maybe this one is faster. hull = [cv2.convexHull(cnt, False) for cnt in contours]
     for cnt in contours:
@@ -326,7 +321,7 @@ def RANSAC3D(self, hsrac_en):
         # cv2.drawContours(self.current_image_gray, contours, -1, (255, 0, 0), 1)
         # cv2.circle(self.current_image_gray, (cx, cy), 2, (0, 0, 255), -1)
         # cx1, cy1, w1, h1, theta1 = fit_rotated_ellipse(maxcnt.reshape(-1, 2))
-        #cv2.ellipse(self.current_image_gray, (cx, cy), (w, h), theta * 180.0 / np.pi, 0.0, 360.0, (50, 250, 200), 1, )
+        # cv2.ellipse(self.current_image_gray, (cx, cy), (w, h), theta * 180.0 / np.pi, 0.0, 360.0, (50, 250, 200), 1, )

         # img = newImage2[y1:y2, x1:x2]
     except:
@@ -334,9 +329,7 @@ def RANSAC3D(self, hsrac_en):
         pass

     self.current_image_gray = frame
-    cv2.circle(
-        self.current_image_gray, min_loc, 2, (0, 0, 255), -1
-    ) # the point of the darkest area in the image
+    cv2.circle(self.current_image_gray, min_loc, 2, (0, 0, 255), -1) # the point of the darkest area in the image

     # However eyes are annoyingly three dimensional, so we need to take this ellipse and turn it
     # into a curve patch on the surface of a sphere (the eye itself). If it's not a sphere, see your
@@ -360,9 +353,7 @@ def RANSAC3D(self, hsrac_en):
     # Black magic happens here, but after this we have our reprojected pupil/eye, and all we had
     # to do was sell our soul to satan and/or C++.

-    result_3d = self.detector_3d.update_and_detect(
-        result_2d_final, self.current_image_gray
-    )
+    result_3d = self.detector_3d.update_and_detect(result_2d_final, self.current_image_gray)

     # Now we have our pupil
     ellipse_3d = result_3d["ellipse"]
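The comments in this hunk describe handing the 2D ellipse fit to the 3D eye-model detector and reading the reprojected pupil back out. A sketch of that hand-off as it appears here; result_2d_final is the dict produced by the 2D fit earlier in RANSAC3D, and the "center"/"axes"/"angle" keys shown on the returned ellipse are assumptions for illustration, not confirmed by this diff:

    # feed the 2D pupil datum plus the current grayscale frame to the 3D model
    result_3d = self.detector_3d.update_and_detect(result_2d_final, self.current_image_gray)
    # the reprojected pupil ellipse comes back in the result dict
    ellipse_3d = result_3d["ellipse"]
    # assumed keys, shown only to illustrate how the ellipse might be consumed downstream
    cx_3d, cy_3d = ellipse_3d["center"]
    ax_3d, ay_3d = ellipse_3d["axes"]
    angle_3d = ellipse_3d["angle"]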
@@ -390,9 +381,7 @@ def RANSAC3D(self, hsrac_en):
         cy = self.rawy
     else:
         # print(int(cx), int(clamp(cx + ransac_lower_x, 0, csx)), ransac_lower_x, csx, "y", int(cy), int(clamp(cy + ransac_lower_y, 0, csy)), ransac_lower_y, csy)
-        cx = int(
-            clamp(cx + ransac_lower_x, 0, csx)
-        ) # dunno why this is being weird
+        cx = int(clamp(cx + ransac_lower_x, 0, csx)) # dunno why this is being weird
         cy = int(clamp(cy + ransac_lower_y, 0, csy))

     # print(contours)
@@ -429,7 +418,6 @@ def RANSAC3D(self, hsrac_en):
             for item in self.blink_list:
                 file.write(str(item) + "\n")
-

     if self.eye_id in [EyeId.RIGHT]:
         with open("RANSAC_BLINK_RIGHT.cfg", "w") as file:
             for item in self.blink_list:
@@ -447,9 +435,7 @@ def RANSAC3D(self, hsrac_en):
         blink = 0.0

     try:
-        cv2.drawContours(
-            self.current_image_gray, contours, -1, (255, 0, 0), 1
-        ) # TODO: fix visualizations with HSRAC
+        cv2.drawContours(self.current_image_gray, contours, -1, (255, 0, 0), 1) # TODO: fix visualizations with HSRAC
         cv2.circle(self.current_image_gray, (int(cx), int(cy)), 2, (0, 0, 255), -1)
     except:
         pass
@@ -1,13 +1,16 @@
 import numpy as np
 from enum import IntEnum

+
 class EyeId(IntEnum):
     RIGHT = 0
     LEFT = 1
     BOTH = 2
     SETTINGS = 3

+
 def velocity_falloff(self, var, out_x, out_y):
+    print("call")
     # Calculate the distance between the two eyes
     dist = np.sqrt(np.square(var.l_eye_x - var.r_eye_x) + np.square(var.left_y - var.right_y))
     if self.eye_id == EyeId.LEFT:
@@ -18,7 +21,6 @@ def velocity_falloff(self, var, out_x, out_y):
         var.r_eye_x = out_x
         var.right_y = out_y
-

     # Check if the distance is greater than the threshold
     if dist > self.settings.gui_eye_dominant_diff_thresh:

@@ -40,5 +42,4 @@ def velocity_falloff(self, var, out_x, out_y):
         # If the distance is within the threshold, do not mirror the eyes
         pass
-

     return out_x, out_y
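Pulling the velocity_falloff hunks together: the function records this eye's latest point on var, measures how far apart the stored left-eye and right-eye points are, and only mirrors one eye onto the other when that distance exceeds the configured threshold. A rough skeleton under those assumptions; which branch updates which var fields, and the body of the over-threshold branch, are not fully shown in this diff, so they are placeholders here:

    def velocity_falloff(self, var, out_x, out_y):
        # distance between the most recent left-eye and right-eye points kept on var
        dist = np.sqrt(np.square(var.l_eye_x - var.r_eye_x) + np.square(var.left_y - var.right_y))
        if self.eye_id == EyeId.LEFT:
            var.l_eye_x, var.left_y = out_x, out_y    # assumed: store this eye's point
        else:
            var.r_eye_x, var.right_y = out_x, out_y   # shown in the hunk above
        # Check if the distance is greater than the threshold
        if dist > self.settings.gui_eye_dominant_diff_thresh:
            pass  # placeholder: mirroring/falloff behavior, body not shown in this diff
        else:
            # If the distance is within the threshold, do not mirror the eyes
            pass
        return out_x, out_y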