Mirror of https://github.com/EyeTrackVR/EyeTrackVR.git (synced 2025-11-04 14:39:42 +08:00)
Merge pull request #42 from ShyAssassin/Cleanup
Update deprecated dependency + small code cleanup
This commit is contained in: commit efa969fe44
@@ -9,6 +9,8 @@ from camera import Camera, CameraState
from osc import EyeId
import cv2
from winsound import PlaySound, SND_FILENAME, SND_ASYNC
import traceback


class CameraWidget:
    def __init__(self, widget_id: EyeId, main_config: EyeTrackConfig, osc_queue: Queue):
@@ -27,7 +29,6 @@ class CameraWidget:
        self.gui_recenter_eyes = f"-RECENTEREYES{widget_id}-"
        self.gui_mode_readout = f"-APPMODE{widget_id}-"
        self.gui_circular_crop = f"-CIRCLECROP{widget_id}-"
        # self.gui_show_color_image = f"-SHOWCOLORIMAGE{widget_id}-"
        self.gui_roi_message = f"-ROIMESSAGE{widget_id}-"

        self.osc_queue = osc_queue
@@ -41,7 +42,6 @@ class CameraWidget:
            self.config = main_config.right_eye
        elif self.eye_id == EyeId.LEFT:
            self.config = main_config.left_eye

        else:
            raise RuntimeError("Cannot have a camera widget represent both eyes!")

@@ -86,7 +86,9 @@ class CameraWidget:
                sg.Button("Recenter Eyes", key=self.gui_recenter_eyes, button_color='#6f4ca1'),

            ],
            [sg.Text("Mode:", background_color='#424042'), sg.Text("Calibrating", key=self.gui_mode_readout, background_color='#424042'),
            [
                sg.Text("Mode:", background_color='#424042'),
                sg.Text("Calibrating", key=self.gui_mode_readout, background_color='#424042'),
                sg.Checkbox(
                    "Circle crop:",
                    default=self.config.gui_circular_crop,
@@ -115,9 +117,7 @@ class CameraWidget:
                sg.InputText(self.config.capture_source, key=self.gui_camera_addr),
            ],
            [
                sg.Button(
                    "Save and Restart Tracking", key=self.gui_save_tracking_button, button_color = '#6f4ca1'
                ),
                sg.Button("Save and Restart Tracking", key=self.gui_save_tracking_button, button_color='#6f4ca1'),
            ],
            [
                sg.Button("Tracking Mode", key=self.gui_tracking_button, button_color='#6f4ca1'),
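The f-string keys defined in these hunks are later compared against the event string returned by PySimpleGUI. A minimal, self-contained sketch of that pattern, using hypothetical literal keys in place of the widget's per-eye keys:

import PySimpleGUI as sg

# Hypothetical literal keys standing in for the f"-RECENTEREYES{widget_id}-" style keys above.
layout = [
    [sg.Button("Recenter Eyes", key="-RECENTEREYES0-", button_color="#6f4ca1")],
    [sg.Text("Mode:"), sg.Text("Calibrating", key="-APPMODE0-")],
]
window = sg.Window("CameraWidget sketch", layout)
while True:
    event, values = window.read(timeout=100)
    if event in (sg.WIN_CLOSED, "Exit"):
        break
    if event == "-RECENTEREYES0-":
        # Same comparison style the widget's render loop uses for its keys.
        window["-APPMODE0-"].update("Recentering")
window.close()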
@@ -211,9 +211,9 @@ class CameraWidget:
            self.config.rotation_angle = int(values[self.gui_rotation_slider])
            changed = True

        # if self.config.show_color_image != values[self.gui_show_color_image]:
        # self.config.show_color_image = values[self.gui_show_color_image]
        # changed = True
        if self.config.gui_circular_crop != values[self.gui_circular_crop]:
            self.config.gui_circular_crop = values[self.gui_circular_crop]
            changed = True

        if changed:
            self.main_config.save()
@@ -224,13 +224,15 @@ class CameraWidget:
            self.camera.set_output_queue(self.capture_queue)
            window[self.gui_roi_layout].update(visible=False)
            window[self.gui_tracking_layout].update(visible=True)
        elif event == self.gui_roi_button:

        if event == self.gui_roi_button:
            print("Move to roi mode")
            self.in_roi_mode = True
            self.camera.set_output_queue(self.roi_queue)
            window[self.gui_roi_layout].update(visible=True)
            window[self.gui_tracking_layout].update(visible=False)
        elif event == "{}+UP".format(self.gui_roi_selection):

        if event == "{}+UP".format(self.gui_roi_selection):
            # Event for mouse button up in ROI mode
            self.is_mouse_up = True
            if abs(self.x0 - self.x1) != 0 and abs(self.y0 - self.y1) != 0:
@@ -239,24 +241,20 @@ class CameraWidget:
                self.config.roi_window_w = abs(self.x0 - self.x1)
                self.config.roi_window_h = abs(self.y0 - self.y1)
                self.main_config.save()
        elif event == self.gui_roi_selection:

        if event == self.gui_roi_selection:
            # Event for mouse button down or mouse drag in ROI mode
            if self.is_mouse_up:
                self.is_mouse_up = False
                self.x0, self.y0 = values[self.gui_roi_selection]
            self.x1, self.y1 = values[self.gui_roi_selection]
        elif event == self.gui_restart_calibration:

        if event == self.gui_restart_calibration:
            self.ransac.calibration_frame_counter = 300
            PlaySound('Audio/start.wav', SND_FILENAME | SND_ASYNC)

        elif event == self.gui_recenter_eyes:
        if event == self.gui_recenter_eyes:
            self.settings.gui_recenter_eyes = True
        if self.config.gui_circular_crop != values[self.gui_circular_crop]:
            self.config.gui_circular_crop = values[self.gui_circular_crop]
            changed = True

            #self.ransac.recenter_eye = True

        needs_roi_set = self.config.roi_window_h <= 0 or self.config.roi_window_w <= 0

@@ -311,14 +309,10 @@ class CameraWidget:
            graph = window[self.gui_output_graph]
            graph.erase()

            if (
                eye_info.info_type != InformationOrigin.FAILURE
                and not eye_info.blink
            ):
            if eye_info.info_type != InformationOrigin.FAILURE and not eye_info.blink:
                graph.update(background_color="white")

                try:

                    graph.draw_circle(
                        (eye_info.x * -100, eye_info.y * -100),
                        25,
@@ -333,7 +327,6 @@ class CameraWidget:
                graph.update(background_color="red")
            # Relay information to OSC
            if eye_info.info_type != InformationOrigin.FAILURE:

                self.osc_queue.put((self.eye_id, eye_info))
        except Empty:
            return
            pass

@@ -2,6 +2,7 @@ from operator import truth
from dataclasses import dataclass
import sys
import asyncio

sys.path.append(".")
from config import EyeTrackCameraConfig
from config import EyeTrackSettingsConfig
@@ -17,6 +18,8 @@ from one_euro_filter import OneEuroFilter
from sympy import symbols, Eq, solve
from winsound import PlaySound, SND_FILENAME, SND_ASYNC
import scipy.signal as sp


class InformationOrigin(Enum):
    RANSAC = 1
    BLOB = 2
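InformationOrigin tags where each sample came from, and later hunks construct EyeInformation values positionally, e.g. EyeInformation(InformationOrigin.BLOB, out_x, out_y, 0, False). A small sketch of that usage, assuming only fields and enum members that appear elsewhere in this diff:

# Hedged sketch: build and consume one EyeInformation sample.
sample = EyeInformation(InformationOrigin.BLOB, 0.25, -0.10, 0, False)
if sample.info_type != InformationOrigin.FAILURE and not sample.blink:
    print(sample.x, sample.y)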
@@ -30,13 +33,17 @@ class EyeInformation:
    y: float
    pupil_dialation: int
    blink: bool


lowb = np.array(0)


def run_once(f):
    def wrapper(*args, **kwargs):
        if not wrapper.has_run:
            wrapper.has_run = True
            return f(*args, **kwargs)

    wrapper.has_run = False
    return wrapper

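run_once above is a plain decorator: the wrapped function executes on its first call and becomes a no-op afterwards. A short usage sketch with a hypothetical function:

@run_once
def announce_start():
    # One-time work that must not repeat on every frame.
    print("starting up")

announce_start()  # prints "starting up"
announce_start()  # does nothing; wrapper.has_run is already True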
@@ -46,6 +53,7 @@ async def delayed_setting_change(setting, value):
    setting = value
    PlaySound('Audio/compleated.wav', SND_FILENAME | SND_ASYNC)


def fit_rotated_ellipse_ransac(
    data, iter=5, sample_num=10, offset=80  # 80.0, 10, 80
):  # before changing these values, please read up on the ransac algorithm
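fit_rotated_ellipse_ransac follows the usual RANSAC recipe: fit candidate ellipses to random subsets of the contour points and keep the candidate that the most points agree with. The sketch below is illustrative only, not the project's implementation; it leans on cv2.fitEllipse and an assumed inlier tolerance:

import numpy as np
import cv2


def ransac_ellipse_sketch(points, iterations=5, sample_num=10, tol=0.15):
    # points: (N, 2) contour coordinates; returns the best ((cx, cy), (MA, ma), angle) or None.
    points = np.asarray(points, dtype=np.float32).reshape(-1, 2)
    best, best_inliers = None, -1
    rng = np.random.default_rng()
    for _ in range(iterations):
        k = min(sample_num, len(points))
        if k < 5:  # cv2.fitEllipse needs at least 5 points
            break
        sample = points[rng.choice(len(points), size=k, replace=False)]
        (cx, cy), (MA, ma), angle = cv2.fitEllipse(sample)
        a, b = MA / 2.0, ma / 2.0
        if a == 0 or b == 0:
            continue
        # Rotate all points into the candidate ellipse frame and score the normalized equation.
        theta = np.deg2rad(angle)
        dx, dy = points[:, 0] - cx, points[:, 1] - cy
        xr = dx * np.cos(theta) + dy * np.sin(theta)
        yr = -dx * np.sin(theta) + dy * np.cos(theta)
        residual = np.abs((xr / a) ** 2 + (yr / b) ** 2 - 1.0)
        inliers = int(np.count_nonzero(residual < tol))
        if inliers > best_inliers:
            best, best_inliers = ((cx, cy), (MA, ma), angle), inliers
    return best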
@@ -151,7 +159,6 @@ class EyeProcessor:
        self.config = config
        self.settings = settings


        # Cross-thread communication management
        self.capture_queue_incoming = capture_queue_incoming
        self.image_queue_outgoing = image_queue_outgoing
@@ -160,7 +167,6 @@ class EyeProcessor:
        self.eye_id = eye_id

        # Cross algo state

        self.lkg_projected_sphere = None
        self.xc = None
        self.yc = None
@@ -204,32 +210,7 @@ class EyeProcessor:
            beta=beta
        )


    def output_images_and_update(
        self, threshold_image, output_information: EyeInformation
    ):
        # if self.config.show_color_image:
        # image_stack = np.concatenate(
        # (
        # self.current_image,
        # cv2.cvtColor(self.current_image_gray, cv2.COLOR_GRAY2BGR),
        # cv2.cvtColor(threshold_image, cv2.COLOR_GRAY2BGR),
        # ),
        # axis=1,
        # )
        # else:
    def output_images_and_update(self, threshold_image, output_information: EyeInformation):
        image_stack = np.concatenate(
            (
                cv2.cvtColor(self.current_image_gray, cv2.COLOR_GRAY2BGR),
@@ -275,11 +256,8 @@ class EyeProcessor:
        return True

    def blob_tracking_fallback(self):

        # define circle

        if self.config.gui_circular_crop == True:
        if self.config.gui_circular_crop:
            if self.cct == 0:
                try:
                    ht, wd = self.current_image_gray.shape[:2]
@@ -306,7 +284,6 @@ class EyeProcessor:
        else:
            self.cct = self.cct - 1

        # Increase our threshold value slightly, in order to have a better possibility of getting back
        # something to do blob tracking on.
        hist = cv2.calcHist([self.current_image_gray], [0], None, [256], [0, 256])
@@ -325,11 +302,6 @@ class EyeProcessor:
            )
            return

        try:
            # Try rebuilding our contours
            contours, _ = cv2.findContours(
@@ -354,19 +326,14 @@ class EyeProcessor:
            # TODO This should be scaled based on camera resolution.

            if not self.settings.gui_blob_minsize <= h <= self.settings.gui_blob_maxsize or not self.settings.gui_blob_minsize <= w <= self.settings.gui_blob_maxsize:

                continue

            cx = x + int(w / 2)

            cy = y + int(h / 2)

            xrlb = (
                cx - self.lkg_projected_sphere["center"][0]
            ) / self.lkg_projected_sphere["axes"][0]
            eyeyb = (
                cy - self.lkg_projected_sphere["center"][1]
            ) / self.lkg_projected_sphere["axes"][1]
            xrlb = (cx - self.lkg_projected_sphere["center"][0]) / self.lkg_projected_sphere["axes"][0]
            eyeyb = (cy - self.lkg_projected_sphere["center"][1]) / self.lkg_projected_sphere["axes"][1]
            cv2.line(
                self.current_image_gray,
                (x + int(w / 2), 0),
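The size gate above compares the bounding-box width and height of each contour against gui_blob_minsize/gui_blob_maxsize before treating it as a pupil candidate. A hedged restatement of that filtering step as a standalone helper (names are stand-ins):

import cv2

def candidate_blob_centres(threshold_image, min_size, max_size):
    # Find external contours and keep only those whose bounding box fits the size window.
    contours, _ = cv2.findContours(threshold_image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for contour in contours:
        x, y, w, h = cv2.boundingRect(contour)
        if not (min_size <= w <= max_size and min_size <= h <= max_size):
            continue
        yield x + w // 2, y + h // 2  # blob centre (cx, cy)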
@@ -413,27 +380,22 @@ class EyeProcessor:
            else:
                self.ts = 10

            xl = float(
                ((cx - self.xoff)) / (self.xmax - self.xoff)
                (cx - self.xoff) / (self.xmax - self.xoff)
            )
            xr = float(
                ((cx - self.xoff)) / (self.xmin - self.xoff)
                (cx - self.xoff) / (self.xmin - self.xoff)
            )
            yu = float(
                ((cy - self.yoff)) / (self.ymin - self.yoff)
                (cy - self.yoff) / (self.ymin - self.yoff)
            )
            yd = float(
                ((cy - self.yoff)) / (self.ymax - self.yoff)
                (cy - self.yoff) / (self.ymax - self.yoff)
            )

            # print(self.)
            out_x = 0
            out_y = 0
            if self.settings.gui_flip_y_axis == True:  #check config on flipped values settings and apply accordingly
            if self.settings.gui_flip_y_axis:  # check config on flipped values settings and apply accordingly
                if yd > 0:
                    out_y = max(0.0, min(1.0, yd))
                if yu > 0:
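The xl/xr/yu/yd expressions, simplified here from ((cx - self.xoff)) to (cx - self.xoff), normalise the pupil centre against the calibrated offset and extremes before clamping into [0, 1]; the sign and axis choice then follow the gui_flip_* settings handled around them. A tiny worked example with assumed calibration numbers:

# Assumed calibration values, for illustration only.
cx, xoff, xmin, xmax = 140.0, 120.0, 80.0, 200.0

xl = float((cx - xoff) / (xmax - xoff))  # 20 / 80  -> 0.25 (movement toward xmax)
xr = float((cx - xoff) / (xmin - xoff))  # 20 / -40 -> -0.5 (movement toward xmin)

out_x = 0.0
if xl > 0:
    out_x = max(0.0, min(1.0, xl))  # same clamp pattern as the diff
assert out_x == 0.25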
@@ -444,7 +406,7 @@ class EyeProcessor:
                if yu > 0:
                    out_y = max(0.0, min(1.0, yu))

            if self.settings.gui_flip_x_axis_right == True:
            if self.settings.gui_flip_x_axis_right:
                if xr > 0:
                    out_x = -abs(max(0.0, min(1.0, xr)))
                if xl > 0:
@@ -463,9 +425,6 @@ class EyeProcessor:
            except:
                pass

            self.output_images_and_update(
                larger_threshold,
                EyeInformation(InformationOrigin.BLOB, out_x, out_y, 0, False),
@@ -479,18 +438,14 @@ class EyeProcessor:
    def run(self):
        camera_model = None
        detector_3d = None

        out_pupil_dialation = 1

        if self.eye_id == "EyeId.RIGHT":
            flipx = self.settings.gui_flip_x_axis_right
        #elif self.eye_id == "EyeId.LEFT":
        # flipx = self.config.gui_flip_x_axis_left
        else:
            flipx = self.settings.gui_flip_x_axis_left
        while True:
            # oef = init_filter()

        while True:
            # Check to make sure we haven't been requested to close
            if self.cancellation_event.is_set():
                print("Exiting RANSAC thread")
@@ -504,11 +459,9 @@ class EyeProcessor:
                continue

            # If our ROI configuration has changed, reset our model and detector
            if (
                camera_model is None
            if (camera_model is None
                or detector_3d is None
                or camera_model.resolution
                != (
                or camera_model.resolution != (
                    self.config.roi_window_w,
                    self.config.roi_window_h,
                )
@@ -549,14 +502,11 @@ class EyeProcessor:
                self.current_image, cv2.COLOR_BGR2GRAY
            )

            #print(self.config.gui_circular_crop)
            # print(self.cct)
            if self.config.gui_circular_crop == True:
                if self.cct == 0:
                    try:
                        ht, wd = self.current_image_gray.shape[:2]

                        radius = int(float(self.lkg_projected_sphere["axes"][0]))
                        self.xc = int(float(self.lkg_projected_sphere["center"][0]))
                        self.yc = int(float(self.lkg_projected_sphere["center"][1]))
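This crop masks the frame to a circle centred on the last-known projected eyeball (self.xc, self.yc, radius). A hedged standalone version of that step:

import cv2
import numpy as np

def circular_crop(gray, xc, yc, radius):
    # Build a filled-circle mask around the projected eyeball centre and black out the rest.
    ht, wd = gray.shape[:2]
    mask = np.zeros((ht, wd), dtype=np.uint8)
    cv2.circle(mask, (int(xc), int(yc)), int(radius), 255, -1)  # -1 thickness = filled
    return cv2.bitwise_and(gray, gray, mask=mask)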
@@ -582,7 +532,6 @@ class EyeProcessor:
            else:
                self.cct = 300

            # Using Histogram based thresholding. Improves robustness insanely
            hist = cv2.calcHist([self.current_image_gray], [0], None, [256], [0, 256])
            histr = hist.ravel()
@@ -592,12 +541,6 @@ class EyeProcessor:
            thresh = cv2.inRange(self.current_image_gray, lowb, thresholdoptics)  # faster than cv2.threshold
            thresh = cv2.bitwise_not(thresh)

            # Set up morphological transforms, for smoothing and clearing the image we get out of the
            # thresholding operation. After this, we'd really like to just have a black blob in the middle
            # of a bunch of white area.
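The thresholding here picks a cut-off from the image histogram, and the morphology pass afterwards smooths the mask into a single dark blob. A sketch of that pipeline; the percentile choice and kernel size are assumptions for illustration:

import cv2
import numpy as np

def threshold_pupil(gray):
    hist = cv2.calcHist([gray], [0], None, [256], [0, 256])
    histr = hist.ravel()
    # Assume the pupil sits in the darkest few percent of pixels: pick the intensity
    # where the cumulative histogram reaches 5% of the total mass.
    cumulative = np.cumsum(histr)
    cutoff = int(np.searchsorted(cumulative, 0.05 * cumulative[-1]))
    lowb = np.array(0)
    thresh = cv2.inRange(gray, lowb, np.array(cutoff))  # faster than cv2.threshold
    thresh = cv2.bitwise_not(thresh)
    # Morphological open/close to clear speckle and close holes in the blob.
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
    thresh = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)
    thresh = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)
    return thresh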
@@ -618,9 +561,8 @@ class EyeProcessor:

            # If we have no convex maidens, we have no pupil, and can't progress from here. Dump back to
            # using blob tracking.
            #
            if len(convex_hulls) == 0:
                if self.settings.gui_blob_fallback == True:
                if self.settings.gui_blob_fallback:
                    self.blob_tracking_fallback()
                else:
                    print("[INFO] Blob fallback disabled. Assuming blink.")
@@ -639,7 +581,7 @@ class EyeProcessor:
                    largest_hull.reshape(-1, 2)
                )
            except:
                if self.settings.gui_blob_fallback == True:
                if self.settings.gui_blob_fallback:
                    self.blob_tracking_fallback()
                else:
                    print("[INFO] Blob fallback disabled. Assuming blink.")
@@ -675,11 +617,8 @@ class EyeProcessor:
            exm = ellipse_3d["center"][0]
            eym = ellipse_3d["center"][1]

            d = result_3d["diameter_3d"]

            if self.calibration_frame_counter == 0:
                self.calibration_frame_counter = None
                self.xoff = cx
@@ -695,7 +634,7 @@ class EyeProcessor:
                if eym < self.ymin:
                    self.ymin = eym
                self.calibration_frame_counter -= 1
            if self.settings.gui_recenter_eyes == True:
            if self.settings.gui_recenter_eyes:
                self.xoff = cx
                self.yoff = cy
                if self.ts == 0:
@@ -706,35 +645,23 @@ class EyeProcessor:
                else:
                    self.ts = 20

            #print(self.yoff)

            # noisy_point = np.array([cx, cy]) #fliter our values with a One Euro Filter
            # point_hat = self.one_euro_filter(noisy_point)
            # cx = point_hat[0]
            # cy = point_hat[1]

            xl = float(
                ((cx - self.xoff)) / (self.xmax - self.xoff)
                (cx - self.xoff) / (self.xmax - self.xoff)
            )
            xr = float(
                ((cx - self.xoff)) / (self.xmin - self.xoff)
                (cx - self.xoff) / (self.xmin - self.xoff)
            )
            yu = float(
                ((cy - self.yoff)) / (self.ymin - self.yoff)
                (cy - self.yoff) / (self.ymin - self.yoff)
            )
            yd = float(
                ((cy - self.yoff)) / (self.ymax - self.yoff)
                (cy - self.yoff) / (self.ymax - self.yoff)
            )

            out_x = 0
            out_y = 0

            if self.settings.gui_flip_y_axis == True:
            if self.settings.gui_flip_y_axis:
                if yd > 0:
                    out_y = max(0.0, min(1.0, yd))
                if yu > 0:
@@ -745,7 +672,7 @@ class EyeProcessor:
                if yu > 0:
                    out_y = max(0.0, min(1.0, yu))

            if flipx == True:
            if flipx:
                if xr > 0:
                    out_x = -abs(max(0.0, min(1.0, xr)))
                if xl > 0:
@@ -756,7 +683,6 @@ class EyeProcessor:
                if xl > 0:
                    out_x = -abs(max(0.0, min(1.0, xl)))

            try:
                noisy_point = np.array([out_x, out_y])  # fliter our values with a One Euro Filter
                point_hat = self.one_euro_filter(noisy_point)
@@ -765,21 +691,15 @@ class EyeProcessor:
            except:
                pass

            # print(cy, self.yoff, self.ymin, self.ymax, out_y)
            # print(out_y, yu, yd)

            output_info = EyeInformation(InformationOrigin.RANSAC, out_x, out_y, out_pupil_dialation, False)

            # Draw our image and stack it for visual output
            try:
                cv2.drawContours(self.current_image_gray, contours, -1, (255, 0, 0), 1)
                cv2.circle(self.current_image_gray, (int(cx), int(cy)), 2, (0, 0, 255), -1)
                # draw pupil
            except:
                pass

            try:
                cv2.ellipse(
                    self.current_image_gray,
@@ -795,7 +715,6 @@ class EyeProcessor:
                # validity beforehand, but for now just pass. It usually fixes itself on the next frame.
                pass

            try:
                # print(self.lkg_projected_sphere["angle"], self.lkg_projected_sphere["axes"], self.lkg_projected_sphere["center"])
                cv2.ellipse(
@@ -807,13 +726,9 @@ class EyeProcessor:
                    360,  # start/end angle for drawing
                    (0, 255, 0),  # color (BGR): red
                )

            except:
                pass

            # draw line from center of eyeball to center of pupil
            cv2.line(
                self.current_image_gray,
@@ -824,4 +739,3 @@ class EyeProcessor:

            # Shove a concatenated image out to the main GUI thread for rendering
            self.output_images_and_update(thresh, output_info)

@@ -21,6 +21,7 @@ RIGHT_EYE_RADIO_NAME = "-RIGHTEYERADIO-"
BOTH_EYE_RADIO_NAME = "-BOTHEYERADIO-"
SETTINGS_RADIO_NAME = '-SETTINGSRADIO-'


def main():
    # Get Configuration
    config: EyeTrackConfig = EyeTrackConfig.load()
@@ -40,16 +41,9 @@ def main():
    # start worker threads
    osc_thread.start()

    # t2s_queue: "queue.Queue[str | None]" = queue.Queue()
    # t2s_engine = SpeechEngine(t2s_queue)
    # t2s_thread = threading.Thread(target=t2s_engine.run)
    # t2s_thread.start()
    # t2s_queue.put("App Starting")

    eyes = [
        CameraWidget(EyeId.RIGHT, config, osc_queue),
        CameraWidget(EyeId.LEFT, config, osc_queue),
        # CameraWidget(EyeId.SETTINGS, config, osc_queue),
    ]

    settings = [
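main() wires the widgets and the OSC sender together through one shared queue: osc_thread is started first, and each CameraWidget receives the same osc_queue that camera_widget.py later feeds via self.osc_queue.put((self.eye_id, eye_info)). A stripped-down sketch of that wiring (the sender body is a placeholder):

import queue
import threading

osc_queue: "queue.Queue" = queue.Queue()

def osc_sender():
    while True:
        item = osc_queue.get()
        if item is None:  # assumed shutdown sentinel
            break
        eye_id, eye_info = item
        # ... relay eye_info for this eye_id over OSC ...

osc_thread = threading.Thread(target=osc_sender)
osc_thread.start()

# Widgets put (eye_id, eye_info) tuples on the queue, mirroring the camera_widget hunk.
# Shutdown: osc_queue.put(None); osc_thread.join()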
@@ -148,9 +142,6 @@ def main():
            # and then call the pythonosc shutdown function
            osc_receiver.shutdown()
            osc_receiver_thread.join()
            # t2s_engine.force_stop()
            # t2s_queue.put(None)
            # t2s_thread.join()
            print("Exiting EyeTrackApp")
            return

@@ -206,3 +197,4 @@ def main():

if __name__ == "__main__":
    main()

@@ -9,7 +9,7 @@ future==0.18.2
idna==3.3
importlib-metadata==4.8.3
joblib==1.1.0
msgpack-python==0.5.6
msgpack==1.0.4
numpy==1.19.5
opencv-python==4.5.3.56
pefile==2022.5.30
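This requirements change is the deprecated-dependency part of the PR: msgpack-python 0.5.6 has long been superseded by the msgpack package, and the importable module name stays msgpack, so existing callers should keep working. A quick round-trip check:

import msgpack

payload = msgpack.packb({"eye": "right", "x": 0.12})
assert msgpack.unpackb(payload) == {"eye": "right", "x": 0.12}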