Mirror of https://github.com/EyeTrackVR/EyeTrackVR.git — synced 2025-11-04 14:39:42 +08:00

commit 61b2c88960

Merge remote-tracking branch 'upstream/HSF-and-new-algos-feature-branch' into feature/config-rewrite-with-defualts-reset

# Conflicts:
#   EyeTrackApp/algo_settings_widget.py
#   EyeTrackApp/general_settings_widget.py
#   EyeTrackApp/osc.py
CHANGELOG.md — 20 additions

@@ -3,6 +3,26 @@

 [](https://semver.org)

 > All notable changes to this project will be documented in this file

+## [1.0.0-HSF-and-new-algos-feature-branch.10](https://github.com/EyeTrackVR/EyeTrackVR/compare/v1.0.0-HSF-and-new-algos-feature-branch.9...v1.0.0-HSF-and-new-algos-feature-branch.10) (2023-10-05)
+
+### 🍕 Features
+
+* pupil dilation initial imp ([6200ddf](https://github.com/EyeTrackVR/EyeTrackVR/commit/6200ddfcab2297e59c689edfa72ddb47fbd8faa1))
+* pupil dilation work start ([e694054](https://github.com/EyeTrackVR/EyeTrackVR/commit/e694054cb43d72977cdae746576bf45f02c4fc14))
+
+## [1.0.0-HSF-and-new-algos-feature-branch.9](https://github.com/EyeTrackVR/EyeTrackVR/compare/v1.0.0-HSF-and-new-algos-feature-branch.8...v1.0.0-HSF-and-new-algos-feature-branch.9) (2023-10-03)
+
+### 🍕 Features
+
+* stage one of new output formats ([8e28092](https://github.com/EyeTrackVR/EyeTrackVR/commit/8e2809260180f7e091ceba97c19eb52f883a3b91))
+
+### 🐛 Bug Fixes
+
+* temp disable leap low priority for MetalCanyon ([9bab5b4](https://github.com/EyeTrackVR/EyeTrackVR/commit/9bab5b45551f4744e79ee9a715221b4f2719e08f))
+
 ## [1.0.0-HSF-and-new-algos-feature-branch.8](https://github.com/EyeTrackVR/EyeTrackVR/compare/v1.0.0-HSF-and-new-algos-feature-branch.7...v1.0.0-HSF-and-new-algos-feature-branch.8) (2023-10-01)
EyeTrackApp/camera.py

@@ -16,8 +16,8 @@ WAIT_TIME = 0.1
 # header-type (2 bytes)
 # packet-size (2 bytes)
 # packet (packet-size bytes)
-ETVR_HEADER = b'\xff\xa0'
-ETVR_HEADER_FRAME = b'\xff\xa1'
+ETVR_HEADER = b"\xff\xa0"
+ETVR_HEADER_FRAME = b"\xff\xa1"
 ETVR_HEADER_LEN = 6

@@ -26,15 +26,16 @@ class CameraState(Enum):
     CONNECTED = 1
     DISCONNECTED = 2


 class Camera:
     def __init__(
         self,
         config: EyeTrackConfig,
         camera_index: int,
         cancellation_event: "threading.Event",
         capture_event: "threading.Event",
         camera_status_outgoing: "queue.Queue[CameraState]",
-        camera_output_outgoing: "queue.Queue",
+        camera_output_outgoing: "queue.Queue(maxsize=2)",
     ):
         self.camera_status = CameraState.CONNECTING
         self.config = config

@@ -53,7 +54,7 @@ class Camera:
         self.fps = 0
         self.bps = 0
         self.start = True
-        self.buffer = b''
+        self.buffer = b""
         self.pf_fps = 0
         self.prevft = 0
         self.newft = 0

@@ -70,8 +71,10 @@ class Camera:

     def run(self):
         OPENCV_PARAMS = [
-            cv2.CAP_PROP_OPEN_TIMEOUT_MSEC, 5000,
-            cv2.CAP_PROP_READ_TIMEOUT_MSEC, 5000,
+            cv2.CAP_PROP_OPEN_TIMEOUT_MSEC,
+            5000,
+            cv2.CAP_PROP_READ_TIMEOUT_MSEC,
+            5000,
         ]
         while True:
             if self.cancellation_event.is_set():

@@ -80,25 +83,23 @@ class Camera:
             should_push = True
             # If things aren't open, retry until they are. Don't let read requests come in any earlier
             # than this, otherwise we can deadlock ourselves.
-            if (
-                self.config.capture_source != None and self.config.capture_source != ""
-            ):
+            if self.config.capture_source != None and self.config.capture_source != "":
                 if "COM" in str(self.current_capture_source):
                     if (
                         self.serial_connection is None
                         or self.camera_status == CameraState.DISCONNECTED
                         or self.config.capture_source != self.current_capture_source
                     ):
                         port = self.config.capture_source
                         self.current_capture_source = port
                         self.start_serial_connection(port)
                 else:
                     if (
                         self.cv2_camera is None
                         or not self.cv2_camera.isOpened()
                         or self.camera_status == CameraState.DISCONNECTED
                         or self.config.capture_source != self.current_capture_source
                     ):
                         print(self.error_message.format(self.config.capture_source))
                         # This requires a wait, otherwise we can error and possible screw up the camera

@@ -106,7 +107,7 @@ class Camera:
                         if self.cancellation_event.wait(WAIT_TIME):
                             return
                         self.current_capture_source = self.config.capture_source
                         # self.cv2_camera = cv2.VideoCapture(self.current_capture_source)

                         self.cv2_camera = cv2.VideoCapture()
                         self.cv2_camera.setExceptionMode(True)

@@ -137,7 +138,9 @@ class Camera:
             ret, image = self.cv2_camera.read()
             height, width = image.shape[:2]  # Calculate the aspect ratio
             if int(width) > 680:
-                aspect_ratio = float(width) / float(height)  # Determine the new height based on the desired maximum width
+                aspect_ratio = float(width) / float(
+                    height
+                )  # Determine the new height based on the desired maximum width
                 new_height = int(680 / aspect_ratio)
                 image = cv2.resize(image, (680, new_height))
             if not ret:

@@ -162,11 +165,13 @@ class Camera:
                 self.fl.pop(0)
                 self.fl.append(self.fps)
                 self.fps = sum(self.fl) / len(self.fl)
-            #self.bps = image.nbytes
+            # self.bps = image.nbytes
             if should_push:
                 self.push_image_to_queue(image, frame_number, self.fps)
         except:
-            print(f"{Fore.YELLOW}[WARN] Capture source problem, assuming camera disconnected, waiting for reconnect.{Fore.RESET}")
+            print(
+                f"{Fore.YELLOW}[WARN] Capture source problem, assuming camera disconnected, waiting for reconnect.{Fore.RESET}"
+            )
             self.camera_status = CameraState.DISCONNECTED
             pass

@@ -186,8 +191,8 @@ class Camera:

     def get_next_jpeg_frame(self):
         beg, end = self.get_next_packet_bounds()
-        jpeg = self.buffer[beg+ETVR_HEADER_LEN:end+ETVR_HEADER_LEN]
-        self.buffer = self.buffer[end+ETVR_HEADER_LEN:]
+        jpeg = self.buffer[beg + ETVR_HEADER_LEN : end + ETVR_HEADER_LEN]
+        self.buffer = self.buffer[end + ETVR_HEADER_LEN :]
         return jpeg

     def get_serial_camera_picture(self, should_push):

@@ -199,16 +204,22 @@ class Camera:
             jpeg = self.get_next_jpeg_frame()
             if jpeg:
                 # Create jpeg frame from byte string
-                image = cv2.imdecode(np.fromstring(jpeg, dtype=np.uint8), cv2.IMREAD_UNCHANGED)
+                image = cv2.imdecode(
+                    np.fromstring(jpeg, dtype=np.uint8), cv2.IMREAD_UNCHANGED
+                )
                 if image is None:
-                    print(f"{Fore.YELLOW}[WARN] Frame drop. Corrupted JPEG.{Fore.RESET}")
+                    print(
+                        f"{Fore.YELLOW}[WARN] Frame drop. Corrupted JPEG.{Fore.RESET}"
+                    )
                     return
                 # Discard the serial buffer. This is due to the fact that it
                 # may build up some outdated frames. A bit of a workaround here tbh.
                 if conn.in_waiting >= 32768:
-                    print(f"{Fore.CYAN}[INFO] Discarding the serial buffer ({conn.in_waiting} bytes){Fore.RESET}")
+                    print(
+                        f"{Fore.CYAN}[INFO] Discarding the serial buffer ({conn.in_waiting} bytes){Fore.RESET}"
+                    )
                     conn.reset_input_buffer()
-                    self.buffer = b''
+                    self.buffer = b""
                 # Calculate the fps.
                 current_frame_time = time.time()
                 delta_time = current_frame_time - self.last_frame_time

@@ -230,7 +241,9 @@ class Camera:
                 if should_push:
                     self.push_image_to_queue(image, self.frame_number, self.fps)
         except Exception:
-            print(f"{Fore.YELLOW}[WARN] Serial capture source problem, assuming camera disconnected, waiting for reconnect.{Fore.RESET}")
+            print(
+                f"{Fore.YELLOW}[WARN] Serial capture source problem, assuming camera disconnected, waiting for reconnect.{Fore.RESET}"
+            )
             conn.close()
             self.camera_status = CameraState.DISCONNECTED
             pass

@@ -248,15 +261,14 @@ class Camera:
             return
         try:
             conn = serial.Serial(
-                baudrate = 3000000,
-                port = port,
-                xonxoff=False,
-                dsrdtr=False,
-                rtscts=False)
+                baudrate=3000000, port=port, xonxoff=False, dsrdtr=False, rtscts=False
+            )
             # Set explicit buffer size for serial.
-            conn.set_buffer_size(rx_size = 32768, tx_size = 32768)
+            conn.set_buffer_size(rx_size=32768, tx_size=32768)

-            print(f"{Fore.CYAN}[INFO] ETVR Serial Tracker device connected on {port}{Fore.RESET}")
+            print(
+                f"{Fore.CYAN}[INFO] ETVR Serial Tracker device connected on {port}{Fore.RESET}"
+            )
             self.serial_connection = conn
             self.camera_status = CameraState.CONNECTED
         except Exception:

@@ -269,6 +281,7 @@ class Camera:
         qsize = self.camera_output_outgoing.qsize()
         if qsize > 1:
             print(
-                f"{Fore.YELLOW}[WARN] CAPTURE QUEUE BACKPRESSURE OF {qsize}. CHECK FOR CRASH OR TIMING ISSUES IN ALGORITHM.{Fore.RESET}")
+                f"{Fore.YELLOW}[WARN] CAPTURE QUEUE BACKPRESSURE OF {qsize}. CHECK FOR CRASH OR TIMING ISSUES IN ALGORITHM.{Fore.RESET}"
+            )
         self.camera_output_outgoing.put((image, frame_number, fps))
         self.capture_event.clear()
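The first hunk above only normalizes quote style, but it is the part of camera.py that documents the serial framing: a header, a 2-byte packet size, then the JPEG payload, with ETVR_HEADER_LEN = 6 consumed before the payload in get_next_jpeg_frame. Below is a small, hypothetical parser sketch of that framing; the exact field layout (magic + header-type + little-endian u16 size) and the helper name split_etvr_frames are assumptions inferred from the constants and comments, not the project's own get_next_packet_bounds implementation.

import struct

ETVR_HEADER = b"\xff\xa0"
ETVR_HEADER_FRAME = b"\xff\xa1"
ETVR_HEADER_LEN = 6  # assumed: magic (2) + header-type (2) + packet-size (2)


def split_etvr_frames(buffer: bytes):
    """Yield complete JPEG payloads found in `buffer`; return (frames, leftover).

    Illustrative only: byte order and header layout are assumptions.
    """
    frames = []
    while True:
        beg = buffer.find(ETVR_HEADER + ETVR_HEADER_FRAME)
        if beg == -1 or len(buffer) < beg + ETVR_HEADER_LEN:
            break  # no complete header in the buffer yet
        (size,) = struct.unpack_from("<H", buffer, beg + 4)  # packet-size field
        end = beg + ETVR_HEADER_LEN + size
        if len(buffer) < end:
            break  # payload not fully received yet
        frames.append(buffer[beg + ETVR_HEADER_LEN : end])
        buffer = buffer[end:]  # drop the consumed frame, keep the remainder
    return frames, buffer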
EyeTrackApp/camera_widget.py

@@ -10,9 +10,11 @@ from camera import Camera, CameraState
 from osc import EyeId
 import cv2
 import sys
-from utils.misc_utils import PlaySound, SND_FILENAME,SND_ASYNC, resource_path
+from utils.misc_utils import PlaySound, SND_FILENAME, SND_ASYNC, resource_path
 import traceback
 import numpy as np
+
+
 class CameraWidget:
     def __init__(self, widget_id: EyeId, main_config: EyeTrackConfig, osc_queue: Queue):
         self.gui_camera_addr = f"-CAMERAADDR{widget_id}-"

@@ -46,7 +48,9 @@ class CameraWidget:
         elif self.eye_id == EyeId.LEFT:
             self.config = main_config.left_eye
         else:
-            raise RuntimeError("\033[91m[WARN] Cannot have a camera widget represent both eyes!\033[0m")
+            raise RuntimeError(
+                "\033[91m[WARN] Cannot have a camera widget represent both eyes!\033[0m"
+            )

         self.cancellation_event = Event()
         # Set the event until start is called, otherwise we can block if shutdown is called.

@@ -87,7 +91,7 @@ class CameraWidget:
                     key=self.gui_roi_selection,
                     drag_submits=True,
                     enable_events=True,
-                    background_color='#424042',
+                    background_color="#424042",
                 )
             ]
         ]

@@ -95,34 +99,50 @@ class CameraWidget:
         # Define the window's contents
         self.tracking_layout = [
             [
-                sg.Text("Rotation", background_color='#424042'),
+                sg.Text("Rotation", background_color="#424042"),
                 sg.Slider(
                     range=(0, 360),
                     default_value=self.config.rotation_angle,
                     orientation="h",
                     key=self.gui_rotation_slider,
-                    background_color='#424042',
-                    tooltip = "Adjust the rotation of your cameras, make them level.",
+                    background_color="#424042",
+                    tooltip="Adjust the rotation of your cameras, make them level.",
                 ),
             ],
             [
-                sg.Button("Start Calibration", key=self.gui_restart_calibration, button_color='#6f4ca1', tooltip = "Start eye calibration. Look all arround to all extreams without blinking until sound is heard.",),
-                sg.Button("Stop Calibration", key=self.gui_stop_calibration, button_color='#6f4ca1', tooltip = "Stop eye calibration manualy.",),
-                sg.Button("Recenter Eyes", key=self.gui_recenter_eyes, button_color='#6f4ca1', tooltip = "Make your eyes center again.",),
+                sg.Button(
+                    "Start Calibration",
+                    key=self.gui_restart_calibration,
+                    button_color="#6f4ca1",
+                    tooltip="Start eye calibration. Look all arround to all extreams without blinking until sound is heard.",
+                ),
+                sg.Button(
+                    "Stop Calibration",
+                    key=self.gui_stop_calibration,
+                    button_color="#6f4ca1",
+                    tooltip="Stop eye calibration manualy.",
+                ),
+                sg.Button(
+                    "Recenter Eyes",
+                    key=self.gui_recenter_eyes,
+                    button_color="#6f4ca1",
+                    tooltip="Make your eyes center again.",
+                ),
             ],
             [
-                sg.Text("Mode:", background_color='#424042'),
-                sg.Text("Calibrating", key=self.gui_mode_readout, background_color='#424042'),
-                sg.Text("", key=self.gui_tracking_fps, background_color='#424042'),
-                sg.Text("", key=self.gui_tracking_bps, background_color='#424042'),
+                sg.Text("Mode:", background_color="#424042"),
+                sg.Text(
+                    "Calibrating", key=self.gui_mode_readout, background_color="#424042"
+                ),
+                sg.Text("", key=self.gui_tracking_fps, background_color="#424042"),
+                sg.Text("", key=self.gui_tracking_bps, background_color="#424042"),
                 # sg.Checkbox(
                 #    "Circle crop:",
                 #    default=self.config.gui_circular_crop,
                 #    key=self.gui_circular_crop,
                 #    background_color='#424042',
                 #    tooltip = "Circle crop only applies to RANSAC3D and Blob.",
                 # ),
             ],
             [sg.Image(filename="", key=self.gui_tracking_image)],
             [

@@ -135,25 +155,57 @@ class CameraWidget:
                     drag_submits=True,
                     enable_events=True,
                 ),
-                sg.Text("Please set an Eye Cropping.", key=self.gui_roi_message, background_color='#424042', visible=False),
+                sg.Text(
+                    "Please set an Eye Cropping.",
+                    key=self.gui_roi_message,
+                    background_color="#424042",
+                    visible=False,
+                ),
             ],
         ]

         self.widget_layout = [
             [
-                sg.Text("Camera Address", background_color='#424042'),
-                sg.InputText(self.config.capture_source, key=self.gui_camera_addr, tooltip = "Enter the IP address or UVC port of your camera. (Include the 'http://')",),
+                sg.Text("Camera Address", background_color="#424042"),
+                sg.InputText(
+                    self.config.capture_source,
+                    key=self.gui_camera_addr,
+                    tooltip="Enter the IP address or UVC port of your camera. (Include the 'http://')",
+                ),
             ],
             [
-                sg.Button("Save and Restart Tracking", key=self.gui_save_tracking_button, button_color='#6f4ca1'),
+                sg.Button(
+                    "Save and Restart Tracking",
+                    key=self.gui_save_tracking_button,
+                    button_color="#6f4ca1",
+                ),
             ],
             [
-                sg.Button("Tracking Mode", key=self.gui_tracking_button, button_color='#6f4ca1', tooltip = "Go here to track your eye.",),
-                sg.Button("Cropping Mode", key=self.gui_roi_button, button_color='#6f4ca1', tooltip = "Go here to crop out your eye.",),
+                sg.Button(
+                    "Tracking Mode",
+                    key=self.gui_tracking_button,
+                    button_color="#6f4ca1",
+                    tooltip="Go here to track your eye.",
+                ),
+                sg.Button(
+                    "Cropping Mode",
+                    key=self.gui_roi_button,
+                    button_color="#6f4ca1",
+                    tooltip="Go here to crop out your eye.",
+                ),
             ],
             [
-                sg.Column(self.tracking_layout, key=self.gui_tracking_layout, background_color='#424042'),
-                sg.Column(self.roi_layout, key=self.gui_roi_layout, background_color='#424042', visible=False),
+                sg.Column(
+                    self.tracking_layout,
+                    key=self.gui_tracking_layout,
+                    background_color="#424042",
+                ),
+                sg.Column(
+                    self.roi_layout,
+                    key=self.gui_roi_layout,
+                    background_color="#424042",
+                    visible=False,
+                ),
             ],
         ]

@@ -203,7 +255,11 @@ class CameraWidget:
             event == self.gui_save_tracking_button
             and values[self.gui_camera_addr] != self.config.capture_source
         ):
-            print("\033[94m[INFO] New value: {}\033[0m".format(values[self.gui_camera_addr]))
+            print(
+                "\033[94m[INFO] New value: {}\033[0m".format(
+                    values[self.gui_camera_addr]
+                )
+            )
             try:
                 # Try storing ints as ints, for those using wired cameras.
                 self.config.capture_source = int(values[self.gui_camera_addr])

@@ -211,20 +267,24 @@ class CameraWidget:
                 if values[self.gui_camera_addr] == "":
                     self.config.capture_source = None
                 else:
-                    if len(values[self.gui_camera_addr]) > 5 and "http" not in values[self.gui_camera_addr] and ".mp4" not in values[self.gui_camera_addr]:  # If http is not in camera address, add it.
-                        self.config.capture_source = f"http://{values[self.gui_camera_addr]}/"
+                    if (
+                        len(values[self.gui_camera_addr]) > 5
+                        and "http" not in values[self.gui_camera_addr]
+                        and ".mp4" not in values[self.gui_camera_addr]
+                    ):  # If http is not in camera address, add it.
+                        self.config.capture_source = (
+                            f"http://{values[self.gui_camera_addr]}/"
+                        )
                     else:
                         self.config.capture_source = values[self.gui_camera_addr]
             changed = True

         if self.config.rotation_angle != values[self.gui_rotation_slider]:
             self.config.rotation_angle = int(values[self.gui_rotation_slider])
             changed = True

         # if self.config.gui_circular_crop != values[self.gui_circular_crop]:
         #    self.config.gui_circular_crop = values[self.gui_circular_crop]
         #    changed = True

         if changed:

@@ -248,9 +308,9 @@ class CameraWidget:
             # Event for mouse button up in ROI mode
             self.is_mouse_up = True
             if self.x1 < 0:
                 self.x1 = 0
             if self.y1 < 0:
                 self.y1 = 0
             if abs(self.x0 - self.x1) != 0 and abs(self.y0 - self.y1) != 0:
                 self.config.roi_window_x = min([self.x0, self.x1])
                 self.config.roi_window_y = min([self.y0, self.y1])

@@ -258,7 +318,6 @@ class CameraWidget:
             self.config.roi_window_h = abs(self.y0 - self.y1)
             self.main_config.save()
-
         if event == self.gui_roi_selection:
             # Event for mouse button down or mouse drag in ROI mode
             if self.is_mouse_up:

@@ -269,7 +328,7 @@ class CameraWidget:
         if event == self.gui_restart_calibration:
             self.ransac.calibration_frame_counter = self.settings.calibration_samples
             self.ransac.ibo.clear_filter()
-            PlaySound(resource_path('Audio/start.wav'), SND_FILENAME | SND_ASYNC)
+            PlaySound(resource_path("Audio/start.wav"), SND_FILENAME | SND_ASYNC)

         if event == self.gui_stop_calibration:
             self.ransac.calibration_frame_counter = 0

@@ -280,8 +339,8 @@ class CameraWidget:
         needs_roi_set = self.config.roi_window_h <= 0 or self.config.roi_window_w <= 0

         # TODO: Refactor if statements below...
-        window[self.gui_tracking_fps].update('')
-        window[self.gui_tracking_bps].update('')
+        window[self.gui_tracking_fps].update("")
+        window[self.gui_tracking_bps].update("")
         if self.config.capture_source is None or self.config.capture_source == "":
             window[self.gui_mode_readout].update("Waiting for camera address")
             window[self.gui_roi_message].update(visible=False)

@@ -329,6 +388,7 @@ class CameraWidget:
             window[self.gui_roi_message].update(visible=False)
             window[self.gui_output_graph].update(visible=True)
             (maybe_image, eye_info) = self.image_queue.get(block=False)
+
             imgbytes = cv2.imencode(".ppm", maybe_image)[1].tobytes()
             window[self.gui_tracking_image].update(data=imgbytes)

@@ -336,13 +396,15 @@ class CameraWidget:
             graph = window[self.gui_output_graph]
             graph.erase()

-            if eye_info.info_type != EyeInfoOrigin.FAILURE: #and not eye_info.blink:
+            if (
+                eye_info.info_type != EyeInfoOrigin.FAILURE
+            ):  # and not eye_info.blink:
                 graph.update(background_color="white")
                 if not np.isnan(eye_info.x) and not np.isnan(eye_info.y):

                     graph.draw_circle(
                         (eye_info.x * -100, eye_info.y * -100),
-                        20,
+                        eye_info.pupil_dilation * 25,
                         fill_color="black",
                         line_color="white",
                     )

@@ -356,12 +418,20 @@ class CameraWidget:
                 if not np.isnan(eye_info.blink):
-                    graph.draw_line((-100, eye_info.blink * 2 * 200), (-100, 100), color="#6f4ca1", width=10)
+                    graph.draw_line(
+                        (-100, abs(eye_info.blink) * 2 * 200),
+                        (-100, 100),
+                        color="#6f4ca1",
+                        width=10,
+                    )
                 else:
-                    graph.draw_line((-100, 0.5 * 200), (-100, 100), color="#6f4ca1", width=10)
+                    graph.draw_line(
+                        (-100, 0.5 * 200), (-100, 100), color="#6f4ca1", width=10
+                    )

                 if eye_info.blink <= 0.0:
                     graph.update(background_color="#6f4ca1")

             elif eye_info.info_type == EyeInfoOrigin.FAILURE:
                 graph.update(background_color="red")
             # Relay information to OSC
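The @@ -211,20 +267,24 @@ hunk is only a line-length re-wrap, but the rule it wraps is worth spelling out: a numeric entry is stored as an int (wired/UVC cameras), an empty string clears the capture source, and anything longer than five characters that contains neither "http" nor ".mp4" gets an "http://…/" prefix. A standalone sketch of that rule follows; the function name normalize_capture_source is made up for illustration and is not part of the widget.

from typing import Optional, Union


def normalize_capture_source(value: str) -> Optional[Union[int, str]]:
    """Pure-function mirror of the widget's save logic (illustrative only)."""
    try:
        # Try storing ints as ints, for those using wired cameras.
        return int(value)
    except ValueError:
        pass
    if value == "":
        return None
    if len(value) > 5 and "http" not in value and ".mp4" not in value:
        # If http is not in the camera address, add it.
        return f"http://{value}/"
    return value


assert normalize_capture_source("1") == 1
assert normalize_capture_source("") is None
assert normalize_capture_source("192.168.1.45:8080") == "http://192.168.1.45:8080/"
assert normalize_capture_source("http://openiristracker.local/") == "http://openiristracker.local/"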
EyeTrackApp/config.py

@@ -63,7 +63,6 @@ class EyeTrackSettingsConfig(BaseModel):
     gui_thresh_add: int = 11
     gui_update_check: bool = False
     gui_ROSC: bool = False
-    gui_vrc_native: bool = True
     gui_circular_crop_right: bool = False
     gui_circular_crop_left: bool = False
     ibo_filter_samples: int = 400

@@ -81,6 +80,10 @@ class EyeTrackSettingsConfig(BaseModel):
     gui_legacy_ransac_thresh_right: int = 80
     gui_legacy_ransac_thresh_left: int = 80
     gui_LEAP_lid: bool = False
+    gui_osc_vrcft_v1: bool = False
+    gui_osc_vrcft_v2: bool = False
+    gui_vrc_native: bool = True
+    gui_pupil_dilation: bool = True


 class EyeTrackConfig(BaseModel):
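For context, EyeTrackSettingsConfig is a pydantic BaseModel, so each flag added here ships with a default and round-trips through the saved JSON config without extra code. A reduced, hypothetical sketch showing only the fields this hunk touches (the class name and everything omitted are assumptions, not the real model):

from pydantic import BaseModel


class EyeTrackSettingsSketch(BaseModel):
    # Stand-in for EyeTrackSettingsConfig; field names match the diff above.
    gui_LEAP_lid: bool = False
    gui_osc_vrcft_v1: bool = False
    gui_osc_vrcft_v2: bool = False
    gui_vrc_native: bool = True      # moved down from the earlier block in this commit
    gui_pupil_dilation: bool = True  # gates the new EBPD pass


defaults = EyeTrackSettingsSketch()                      # all defaults
restored = EyeTrackSettingsSketch.parse_obj(             # e.g. values loaded from the saved config
    {"gui_osc_vrcft_v2": True}
)
print(restored.gui_pupil_dilation, restored.gui_osc_vrcft_v2)  # True True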
EyeTrackApp/ellipse_based_pupil_dilation.py — new file, 399 lines

"""
------------------------------------------------------------------------------------------------------

                              [EyeTrackVR ASCII logo banner]

Ellipse Based Pupil Dilation By: Prohurtz, PallasNeko (Optimization)
Algorithm App Implementations By: Prohurtz

Copyright (c) 2023 EyeTrackVR <3
------------------------------------------------------------------------------------------------------
"""
import numpy
import numpy as np
import time
import os
import cv2
from enums import EyeLR
from one_euro_filter import OneEuroFilter
from utils.img_utils import safe_crop
from enum import IntEnum
import psutil
import sys

process = psutil.Process(os.getpid())  # set process priority to low
try:  # medium chance this does absolutely nothing but eh
    sys.getwindowsversion()
except AttributeError:
    process.nice(0)  # UNIX: 0 low 10 high
    process.nice()
else:
    process.nice(psutil.BELOW_NORMAL_PRIORITY_CLASS)  # Windows
    process.nice()


class EyeId(IntEnum):
    RIGHT = 0
    LEFT = 1
    BOTH = 2
    SETTINGS = 3


# Note.
# OpenCV on Windows will generate an error if the file path contains non-ASCII characters when using cv2.imread(), cv2.imwrite(), etc.
# https://stackoverflow.com/questions/43185605/how-do-i-read-an-image-from-a-path-with-unicode-characters
# https://github.com/opencv/opencv/issues/18305


def csv2data(frameshape, filepath):
    # For data checking
    frameshape = (frameshape[0], frameshape[1] + 1)
    out = np.zeros(frameshape, dtype=np.uint32)
    xy_list = []
    val_list = []
    with open(filepath, mode="r", encoding="utf-8") as in_f:
        # Skip header.
        _ = in_f.readline()
        for s in in_f:
            xyval = [int(val) for val in s.strip().split(",")]
            xy_list.append((xyval[0], xyval[1]))
            val_list.append(xyval[2])
    xy_list = np.array(xy_list)
    val_list = np.array(val_list)
    out[xy_list[:, 1], xy_list[:, 0]] = val_list[:]
    return out


def data2csv(data_u32, filepath):
    # For data checking
    nonzero_index = np.nonzero(data_u32)  # (row,col)
    data_list = data_u32[nonzero_index].tolist()
    datalines = [
        "{},{},{}\n".format(x, y, val) for y, x, val in zip(*nonzero_index, data_list)
    ]
    with open(filepath, "w", encoding="utf-8") as out_f:
        out_f.write("x,y,eyedilation\n")
        out_f.writelines(datalines)
    return


def u32_1ch_to_u16_3ch(img):
    out = np.zeros((*img.shape[:2], 3), dtype=np.uint16)
    # https://github.com/numpy/numpy/issues/2524
    # https://stackoverflow.com/questions/52782511/why-is-numpy-slower-than-python-for-left-bit-shifts
    out[:, :, 0] = img & np.uint32(65535)
    out[:, :, 1] = (img >> np.uint32(16)) & np.uint32(65535)

    return out


def u16_3ch_to_u32_1ch(img):
    # The image format with the most bits that can be displayed on Windows without additional software and that opencv can handle is PNG's uint16
    out = img[:, :, 0].astype(np.float64)  # float64 = max 2^53
    cv2.add(
        out, img[:, :, 1].astype(np.float64) * np.float64(65536), dst=out
    )  # opencv did not have uint32 type
    return out.astype(np.uint32)  # cast


def newdata(frameshape):
    print("\033[94m[INFO] Initialise data for dilation.\033[0m")
    return np.zeros(frameshape, dtype=np.uint32)


# EBPD
class EllipseBasedPupilDilation:
    def __init__(self, eye_id):
        # todo: It is necessary to consider whether the filename can be changed in the configuration file, etc.
        if eye_id in [EyeId.LEFT]:
            self.imgfile = "EBPD_LEFT.png"
        else:
            pass
        if eye_id in [EyeId.RIGHT]:
            self.imgfile = "EBPD_RIGHT.png"
        else:
            pass
        # self.data[0, -1] = maxval, [1, -1] = rotation, [2, -1] = x, [3, -1] = y
        self.data = None
        self.lct = None
        self.maxval = 0
        # self.img_roi = self.now_roi == {"rotation": 0, "x": 0, "y": 0}
        self.img_roi = np.zeros(3, dtype=np.int32)
        self.now_roi = np.zeros(3, dtype=np.int32)
        self.prev_val = 0.5
        self.avg_dilation = 0.0
        self.old = []
        self.color = []
        self.x = []
        self.fc = 0
        self.filterlist = []
        self.averageList = []
        self.openlist = []
        self.eye_id = eye_id
        self.maxinten = 0
        self.tri_filter = []
        # try:
        #    min_cutoff = float(self.settings.gui_min_cutoff)  # 0.0004
        #    beta = float(self.settings.gui_speed_coefficient)  # 0.9
        # except:
        print("\033[93m[WARN] OneEuroFilter values must be a legal number.\033[0m")
        min_cutoff = 0.0004
        beta = 0.9
        noisy_point = np.array([1, 1])
        self.one_euro_filter = OneEuroFilter(
            noisy_point, min_cutoff=min_cutoff, beta=beta
        )

    def check(self, frameshape):
        # 0 in data is used as the initial value.
        # When assigning a value, +1 is added to the value to be assigned.
        self.load(frameshape)
        # self.maxval = self.data[0, -1]
        if self.lct is None:
            self.lct = time.time()

    def load(self, frameshape):
        req_newdata = False
        # Not very clever, but increase the width by 1px to save the maximum value.
        frameshape = (frameshape[0], frameshape[1] + 1)
        if self.data is None:
            print(
                f"\033[92m[INFO] Loaded data for pupil dilation: {self.imgfile}\033[0m"
            )
            if os.path.isfile(self.imgfile):
                try:
                    img = cv2.imread(self.imgfile, flags=cv2.IMREAD_UNCHANGED)
                    # check code: cv2.absdiff(img,u32_1ch_to_u16_3ch(u16_3ch_to_u32_1ch(img)))
                    if img.shape[:2] != frameshape:
                        print("[WARN] Size does not match the input frame.")
                        req_newdata = True
                    else:
                        self.data = u16_3ch_to_u32_1ch(img)
                        self.img_roi[:] = self.data[1:4, -1]
                        if not np.array_equal(self.img_roi, self.now_roi):
                            # If the ROI recorded in the image file differs from the current ROI
                            req_newdata = True
                        else:
                            self.maxval = self.data[0, -1]
                except:
                    print("[ERROR] File read error: {}".format(self.imgfile))
                    req_newdata = True
            else:
                print("\033[94m[INFO] File does not exist.\033[0m")
                req_newdata = True
        else:
            if self.data.shape != frameshape or not np.array_equal(
                self.img_roi, self.now_roi
            ):
                # If the ROI recorded in the image file differs from the current ROI
                # todo: Using the previous and current frame sizes and centre positions from the original, etc., the data can be ported to some extent, but there may be many areas where code changes are required.
                print("[INFO] \033[94mFrame size changed.\033[0m")
                req_newdata = True
        if req_newdata:
            self.data = newdata(frameshape)
            self.maxval = 0
            self.img_roi = self.now_roi.copy()
        # data2csv(self.data, "a.csv")
        # csv2data(frameshape,"a.csv")

    def save(self):
        self.data[0, -1] = self.maxval
        self.data[1:4, -1] = self.now_roi
        cv2.imwrite(self.imgfile, u32_1ch_to_u16_3ch(self.data))
        # print("SAVED: {}".format(self.imgfile))

    def change_roi(self, roiinfo: dict):
        self.now_roi[:] = [v for v in roiinfo.values()]

    def clear_filter(self):
        self.data = None
        self.filterlist.clear()
        self.averageList.clear()
        if os.path.exists(self.imgfile):
            os.remove(self.imgfile)

    def intense(self, w, h, x, y, frame, filterSamples, outputSamples):
        # x,y = 0~(frame.shape[1 or 0]-1), frame = 1-channel frame cropped by ROI
        self.check(frame.shape)
        int_x, int_y = int(x), int(y)
        if int_x < 0 or int_y < 0:
            return self.prev_val
        upper_x = min(
            int_x + 25, frame.shape[1] - 1
        )  # TODO make this a setting NEEDS TO BE BASED ON HSF RADIUS if possible
        lower_x = max(int_x - 25, 0)
        upper_y = min(int_y + 25, frame.shape[0] - 1)
        lower_y = max(int_y - 25, 0)

        # frame_crop = frame[lower_y:upper_y, lower_x:upper_x]
        # frame = safe_crop(frame, lower_x, lower_y, upper_x, upper_y, False)
        # ret_, th = cv2.threshold(frame_crop, 80, 1.0, cv2.THRESH_BINARY_INV, dst=frame_crop)
        frame_crop = frame

        # ret, f = cv2.threshold(frame, 80, 255, cv2.THRESH_BINARY)
        # ret, frame_crop = cv2.threshold(frame_crop, 80, 255, cv2.THRESH_BINARY)

        # The same can be done with cv2.integral, but since there is only one area of the rectangle for which we want to know the total value, there is no advantage in terms of computational complexity.
        pupil_area = numpy.pi * (w / 2) * (h / 2)
        # cv2.imshow('e', frame)
        # if cv2.waitKey(10) == 27:
        #     exit()
        if len(self.filterlist) < filterSamples:
            self.filterlist.append(pupil_area)
        else:
            self.filterlist.pop(0)
            self.filterlist.append(pupil_area)

        try:
            if pupil_area >= np.percentile(
                self.filterlist, 98
            ):  # filter abnormally high values
                # print('filter, assume blink')
                pupil_area = self.maxval

            # if intensity <= np.percentile(  # TODO test this
            #    self.filterlist, 0.3
            # ):  # filter abnormally low values
            #    print('filter, assume blink')
            #    intensity = self.data[int_y, int_x]
        except:
            pass
        # self.tri_filter.append(intensity)
        # if len(self.tri_filter) > 3:
        #    self.tri_filter.pop(0)
        #    intensity = sum(self.tri_filter) / 3
        # avg_color_per_row = np.average(frame_crop, axis=0)
        # avg_color = np.average(avg_color_per_row, axis=0)
        # ar, ag, ab = avg_color
        # intensity = int(ar * 8)  # higher = closed

        # if cv2.waitKey(1) & 0xFF == ord("q"):
        #    pass

        # numpy:np.sum(),ndarray.sum()
        # opencv:cv2.sumElems()
        # I don't know which is faster.
        changed = False
        newval_flg = False
        oob = False

        if int_x >= frame.shape[1]:
            int_x = frame.shape[1] - 1
            oob = True
            # print('CAUGHT X OUT OF BOUNDS')

        if int_x < 0:
            int_x = True
            oob = True
            # print('CAUGHT X UNDER BOUNDS')

        if int_y >= frame.shape[0]:
            int_y = frame.shape[0] - 1
            oob = True
            # print('CAUGHT Y OUT OF BOUNDS')

        if int_y < 0:
            int_y = 1
            oob = True
            # print('CAUGHT Y UNDER BOUNDS')

        if oob != True and self.data.any():
            data_val = self.data[int_y, int_x]
        else:
            data_val = 0

        # max pupil per cord
        if data_val == 0:
            # The value of the specified coordinates has not yet been recorded.
            self.data[int_y, int_x] = pupil_area
            changed = True
            newval_flg = True
        else:
            if (
                pupil_area < data_val
            ):  # if current intensity value is less (more pupil), save that
                self.data[int_y, int_x] = pupil_area  # set value
                changed = True
            else:
                pupil_areaa = max(
                    data_val + 5000, 1
                )  # if current intensity value is not less use this is an agressive adjust, test
                self.data[int_y, int_x] = pupil_areaa  # set value
                changed = True

        # min pupil global
        if self.maxval == 0:  # that value is not yet saved
            self.maxval = pupil_area  # set value at 0 index
        else:
            if (
                pupil_area > self.maxval
            ):  # if current intensity value is more (less pupil), save that NOTE: we have the
                self.maxval = pupil_area - 5  # set value at 0 index
            else:
                pupil_aread = max(
                    (self.maxval - 5), 1
                )  # continuously adjust closed intensity, will be set when user blink, used to allow eyes to close when lighting changes
                self.maxval = pupil_aread  # set value at 0 index
        # print(intensityd, intensity)
        if newval_flg:
            # Do the same thing as in the original version.
            eyedilation = self.prev_val  # 0.9
        else:
            maxp = float(self.data[int_y, int_x])
            minp = float(self.maxval)

            eyedilation = (pupil_area - maxp) / (
                minp - maxp
            )  # for whatever reason when input and maxp are too close it outputs high
            eyedilation = 1 - eyedilation

            if outputSamples > 0:
                if len(self.averageList) < outputSamples:
                    self.averageList.append(eyedilation)
                else:
                    self.averageList.pop(0)
                    self.averageList.append(eyedilation)
                    eyedilation = np.average(self.averageList)

            if eyedilation > 1:  # clamp values
                eyedilation = 1.0

            if eyedilation < 0:
                eyedilation = 0.0

        if changed and (
            (time.time() - self.lct) > 5
        ):  # save every 5 seconds if something changed to save disk usage
            self.save()
            self.lct = time.time()

        self.prev_val = eyedilation
        try:
            noisy_point = np.array(
                [float(eyedilation), float(eyedilation)]
            )  # fliter our values with a One Euro Filter
            point_hat = self.one_euro_filter(noisy_point)
            eyedilationx = point_hat[0]
            eyedilationy = point_hat[1]
            eyedilation = (eyedilationx + eyedilationy) / 2
        except:
            pass

        return eyedilation
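The core of intense() is a running calibration: the pupil is approximated as an ellipse of area π·(w/2)·(h/2), the smallest area seen at each gaze coordinate is stored in self.data, a global reference is kept in self.maxval, and the current area is normalized between the two, clamped to [0, 1], and smoothed with a OneEuroFilter. A small numeric walk-through under those assumptions is below; the numbers are invented, and the maxp/minp naming follows the file above (where it is effectively swapped relative to what the names suggest).

import numpy as np

w, h = 40.0, 32.0                        # ellipse axes from RANSAC3D, in pixels
pupil_area = np.pi * (w / 2) * (h / 2)   # ~1005.3

maxp = 900.0    # self.data[int_y, int_x]: smallest area recorded at this coordinate
minp = 1400.0   # self.maxval: largest area tracked so far

dilation = (pupil_area - maxp) / (minp - maxp)   # ~0.211
dilation = 1 - dilation                          # ~0.789, exactly as the file computes it
dilation = float(np.clip(dilation, 0.0, 1.0))    # clamp, as the file does
print(round(dilation, 3))                        # 0.789 with these example numbers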
EyeTrackApp/eye.py

@@ -1,6 +1,7 @@
 from dataclasses import dataclass
 from enum import Enum, IntEnum

+
 class EyeId(IntEnum):
     RIGHT = 0
     LEFT = 1

@@ -24,5 +25,5 @@ class EyeInfo:
     info_type: EyeInfoOrigin
     x: float
     y: float
-    pupil_dialation: float
+    pupil_dilation: float
     blink: float
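The second hunk fixes the field name (pupil_dialation → pupil_dilation), which matters because camera_widget.py now reads eye_info.pupil_dilation to size the drawn pupil. A minimal sketch of the corrected dataclass and its use; the EyeInfoOrigin stand-in below only lists members referenced elsewhere in this diff, and its values are assumptions.

from dataclasses import dataclass
from enum import IntEnum


class EyeInfoOrigin(IntEnum):  # stand-in; the real enum has more members
    HSRAC = 0
    LEAP = 1
    FAILURE = 2


@dataclass
class EyeInfo:
    info_type: EyeInfoOrigin
    x: float
    y: float
    pupil_dilation: float  # renamed from pupil_dialation in this commit
    blink: float


info = EyeInfo(EyeInfoOrigin.HSRAC, x=0.12, y=-0.05, pupil_dilation=0.6, blink=1.0)
radius = info.pupil_dilation * 25  # how camera_widget.py scales the preview circle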
EyeTrackApp/eye_processor.py

@@ -59,6 +59,7 @@ from blink import *
 from utils.img_utils import circle_crop
 from eye import EyeInfo, EyeInfoOrigin
 from intensity_based_openness import *
+from ellipse_based_pupil_dilation import *


 def run_once(f):

@@ -140,6 +141,7 @@ class EyeProcessor:
         self.er_daddy = None
         self.er_leap = None
         self.ibo = IntensityBasedOpeness(self.eye_id)
+        self.ebpd = EllipseBasedPupilDilation(self.eye_id)
         self.roi_include_set = {"rotation_angle", "roi_window_x", "roi_window_y"}
         self.failed = 0
         self.skip_blink_detect = False

@@ -164,6 +166,8 @@ class EyeProcessor:
         self.ran_blink_check_for_file = True
         self.bd_blink = False
         self.current_algo = EyeInfoOrigin.HSRAC
+        self.puipil_width = 0.0
+        self.pupil_height = 0.0

         try:
             min_cutoff = float(self.settings.gui_min_cutoff)  # 0.0004

@@ -249,7 +253,20 @@ class EyeProcessor:
         pass

     def UPDATE(self):
         # print(self.eyeopen)
+        if self.settings.gui_pupil_dilation:
+            self.pupil_dilation = self.ebpd.intense(
+                self.pupil_width,
+                self.pupil_height,
+                self.rawx,
+                self.rawy,
+                self.current_image_white,
+                self.settings.ibo_filter_samples,
+                self.settings.ibo_average_output_samples,
+            )
+        else:
+            self.pupil_dilation = 0.5
+
         if self.settings.gui_BLINK:
             self.eyeopen = BLINK(self)

@@ -288,7 +305,7 @@ class EyeProcessor:
             self.rawy,
             self.eyeopen,
         ) = self.er_leap.run(self.current_image_gray)
         # print(self.eyeopen)

         if (
             len(self.prev_y_list) >= 100

@@ -301,9 +318,9 @@ class EyeProcessor:
         # print(abs(self.eyeopen - self.past_blink))
         blink_vec = min(abs(self.eyeopen - self.past_blink), 1)  # clamp to 1

-        #if blink_vec >= 0.2:
+        # if blink_vec >= 0.2:
         if blink_vec >= 0.15 or blink_vec == 0.0 and (self.out_y - self.prev_y) < 0.0:
-            #self.out_x = sum(self.prev_x_list) / len(self.prev_x_list)
+            # self.out_x = sum(self.prev_x_list) / len(self.prev_x_list)
             self.out_y = sum(self.prev_y_list) / len(self.prev_y_list)
             # print('AVG', self.out_y, len(self.prev_y_list))

@@ -313,7 +330,13 @@ class EyeProcessor:

         self.output_images_and_update(
             self.thresh,
-            EyeInfo(self.current_algo, self.out_x, self.out_y, 0, self.eyeopen),
+            EyeInfo(
+                self.current_algo,
+                self.out_x,
+                self.out_y,
+                self.pupil_dilation,
+                self.eyeopen,
+            ),
         )
         if self.settings.gui_RANSACBLINK and self.eyeopen == 0.0:
             pass

@@ -331,7 +354,8 @@ class EyeProcessor:
         self.thresh = self.current_image_gray.copy()
         self.out_x, self.out_y = cal.cal_osc(self, self.rawx, self.rawy)
         self.current_algorithm = EyeInfoOrigin.LEAP
-        # print(self.eyeopen)
+
+        # print(self.eyeopen)

     def DADDYM(self):
         # todo: We should have a proper variable for drawing.

@@ -361,10 +385,17 @@ class EyeProcessor:
         self.rawx, self.rawy, self.thresh, self.radius = self.er_hsf.run(
             self.current_image_gray
         )
-        self.rawx, self.rawy, self.thresh, ranblink = RANSAC3D(self, True)
+        (
+            self.rawx,
+            self.rawy,
+            self.thresh,
+            ranblink,
+            self.pupil_width,
+            self.pupil_height,
+        ) = RANSAC3D(self, True)
         if self.settings.gui_RANSACBLINK:  # might be redundant
             self.eyeopen = ranblink
             # print("RANBLINK", ranblink)

         # print(self.radius)
         # if self.prev_x is None:
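The @@ -301,9 +318,9 @@ hunk only re-spaces comments, but the logic around it is the blink fallback this branch relies on: the change in openness since the last sample is clamped to 1, and if it jumps by at least 0.15 (or is exactly 0 while the eye is moving downward) the Y output is replaced by a rolling average to suppress blink-induced jitter. A hedged, self-contained sketch of that rule follows; smooth_y_during_blink and its arguments are illustrative names, not the project's API.

def smooth_y_during_blink(eyeopen, past_blink, out_y, prev_y, prev_y_list):
    """Return the Y value to report, holding the recent average while a blink is suspected."""
    blink_vec = min(abs(eyeopen - past_blink), 1)  # clamp to 1
    falling = (out_y - prev_y) < 0.0
    if blink_vec >= 0.15 or (blink_vec == 0.0 and falling):
        return sum(prev_y_list) / len(prev_y_list)  # hold the rolling average
    return out_y


history = [0.10, 0.11, 0.09, 0.10]
print(smooth_y_during_blink(eyeopen=0.2, past_blink=0.9, out_y=0.45, prev_y=0.10, prev_y_list=history))
# -> 0.1: the large openness jump is treated as a blink, so the averaged Y is reported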
EyeTrackApp/eyetrackapp.py

@@ -13,7 +13,7 @@ from utils.misc_utils import is_nt, resource_path

 if is_nt:
     from winotify import Notification
-    os.system('color')  # init ANSI color
+    os.system("color")  # init ANSI color

 # Random environment variable to speed up webcam opening on the MSMF backend.
 # https://github.com/opencv/opencv/issues/17687

@@ -30,7 +30,7 @@ SETTINGS_RADIO_NAME = "-SETTINGSRADIO-"
 ALGO_SETTINGS_RADIO_NAME = "-ALGOSETTINGSRADIO-"

 page_url = "https://github.com/RedHawk989/EyeTrackVR/releases/latest"
-appversion = "EyeTrackApp 0.2.0 BETA 7"
+appversion = "EyeTrackApp 0.2.0 BETA 8"


 def main():

@@ -49,7 +49,7 @@ def main():
         )
         latestversion = response.json()["name"]
         if (
             appversion == latestversion
         ):  # If what we scraped and hardcoded versions are same, assume we are up to date.
             print(f"\033[92m[INFO] App is the latest version! [{latestversion}]\033[0m")
         else:

@@ -59,7 +59,7 @@ def main():
             try:
                 if is_nt:
                     cwd = os.getcwd()
-                    #icon = cwd + "\Images\logo.ico"
+                    # icon = cwd + "\Images\logo.ico"
                     icon = resource_path("Images/logo.ico")
                     toast = Notification(
                         app_id="EyeTrackApp",

@@ -183,7 +183,10 @@ def main():

     # Create the window
     window = sg.Window(
-        f"{appversion}", layout, icon=resource_path("Images/logo.ico"), background_color="#292929"
+        f"{appversion}",
+        layout,
+        icon=resource_path("Images/logo.ico"),
+        background_color="#292929",
     )

     # GUI Render loop

@@ -259,8 +262,10 @@ def main():
             config.eye_display_id = EyeId.SETTINGS
             config.save()

-        elif values[ALGO_SETTINGS_RADIO_NAME] and config.eye_display_id != EyeId.ALGOSETTINGS:
+        elif (
+            values[ALGO_SETTINGS_RADIO_NAME]
+            and config.eye_display_id != EyeId.ALGOSETTINGS
+        ):
             eyes[0].stop()
             eyes[1].stop()
             settings[0].stop()
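The version bump above feeds the update check near the top of main(): the app fetches the latest release, reads its name, and compares it to the hardcoded appversion string. A hedged sketch of that check; the request URL and error handling are assumptions, since the diff only shows the response.json()["name"] comparison.

import requests

appversion = "EyeTrackApp 0.2.0 BETA 8"


def is_up_to_date(timeout: float = 5.0) -> bool:
    # Assumed endpoint: GitHub's "latest release" REST API for the project repo.
    url = "https://api.github.com/repos/EyeTrackVR/EyeTrackVR/releases/latest"
    try:
        response = requests.get(url, timeout=timeout)
        latestversion = response.json()["name"]
    except (requests.RequestException, KeyError, ValueError):
        return True  # fail open: do not nag the user if the check itself fails
    return appversion == latestversion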
@ -37,7 +37,7 @@ import psutil
|
|||||||
import sys
|
import sys
|
||||||
|
|
||||||
process = psutil.Process(os.getpid()) # set process priority to low
|
process = psutil.Process(os.getpid()) # set process priority to low
|
||||||
try: # medium chance this does absolutely nothing but eh
|
try: # medium chance this does absolutely nothing but eh
|
||||||
sys.getwindowsversion()
|
sys.getwindowsversion()
|
||||||
except AttributeError:
|
except AttributeError:
|
||||||
process.nice(0) # UNIX: 0 low 10 high
|
process.nice(0) # UNIX: 0 low 10 high
|
||||||
@ -46,6 +46,7 @@ else:
|
|||||||
process.nice(psutil.BELOW_NORMAL_PRIORITY_CLASS) # Windows
|
process.nice(psutil.BELOW_NORMAL_PRIORITY_CLASS) # Windows
|
||||||
process.nice()
|
process.nice()
|
||||||
|
|
||||||
|
|
||||||
class EyeId(IntEnum):
|
class EyeId(IntEnum):
|
||||||
RIGHT = 0
|
RIGHT = 0
|
||||||
LEFT = 1
|
LEFT = 1
|
||||||
@@ -247,7 +248,7 @@ class IntensityBasedOpeness:
upper_y = min(int_y + 25, frame.shape[0] - 1)
lower_y = max(int_y - 25, 0)

# frame_crop = frame[lower_y:upper_y, lower_x:upper_x]
# frame = safe_crop(frame, lower_x, lower_y, upper_x, upper_y, False)
# ret_, th = cv2.threshold(frame_crop, 80, 1.0, cv2.THRESH_BINARY_INV, dst=frame_crop)
frame_crop = frame
@@ -274,10 +275,10 @@ class IntensityBasedOpeness:
intensity = self.maxval

# if intensity <= np.percentile( # TODO test this
# self.filterlist, 0.3
# ): # filter abnormally low values
# print('filter, assume blink')
# intensity = self.data[int_y, int_x]
except:
pass
# self.tri_filter.append(intensity)
@@ -341,10 +342,9 @@ class IntensityBasedOpeness:
intensitya = max(
data_val + 5000, 1
) # if current intensity value is not less use this is an agressive adjust, test
self.data[int_y, int_x] = intensitya # set value
changed = True


# min pupil global
if self.maxval == 0: # that value is not yet saved
self.maxval = intensity # set value at 0 index
@@ -371,7 +371,6 @@ class IntensityBasedOpeness:
) # for whatever reason when input and maxp are too close it outputs high
eyeopen = 1 - eyeopen


if outputSamples > 0:
if len(self.averageList) < outputSamples:
self.averageList.append(eyeopen)
@@ -386,8 +385,6 @@ class IntensityBasedOpeness:
if eyeopen < 0:
eyeopen = 0.0



if changed and (
(time.time() - self.lct) > 5
): # save every 5 seconds if something changed to save disk usage
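The `IntensityBasedOpeness` hunks clamp the computed openness and, when `outputSamples` is greater than zero, average it over a bounded list of recent samples before output. A rough sketch of that smoothing step, with a `deque` standing in for the `averageList` bookkeeping:

```python
from collections import deque

class OpennessSmoother:
    def __init__(self, output_samples: int):
        self.output_samples = output_samples
        self.average_list = deque(maxlen=max(output_samples, 1))

    def smooth(self, eyeopen: float) -> float:
        eyeopen = max(eyeopen, 0.0)  # clamp negative values to fully closed
        if self.output_samples <= 0:
            return eyeopen  # smoothing disabled
        self.average_list.append(eyeopen)  # rolling window of recent samples
        return sum(self.average_list) / len(self.average_list)
```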
@@ -85,8 +85,8 @@ class LEAP_C(object):
else:
self.model_path = resource_path("Models/mommy072623.onnx")
self.interval = 1 # FPS print update rate
-self.low_priority = True # set process priority to low
-self.print_fps = True
+self.low_priority = True # set process priority to low (may cause issues when unfocusing? reported by one, not reproducable)
+self.print_fps = False
# Init variables
self.frames = 0
self.queues = []
@@ -106,9 +106,6 @@ class LEAP_C(object):
onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL
)
opts.optimized_model_filepath = ""
-self.ort_session = onnxruntime.InferenceSession(
-self.model_path, opts, providers=["CPUExecutionProvider"]
-)

if self.low_priority:
process = psutil.Process(os.getpid()) # set process priority to low
@@ -128,9 +125,9 @@ class LEAP_C(object):
self.one_euro_filter = OneEuroFilter(
np.random.rand(7, 2), min_cutoff=min_cutoff, beta=beta
)
-#self.one_euro_filter_open = OneEuroFilter(
+# self.one_euro_filter_open = OneEuroFilter(
# np.random.rand(1, 2), min_cutoff=0.01, beta=0.04
-#)
+# )
self.dmax = 0
self.dmin = 0
self.openlist = []
@@ -227,11 +224,11 @@ class LEAP_C(object):
if len(self.openlist) < 5000: # TODO expose as setting?
self.openlist.append(d)
else:
# if d >= np.percentile(self.openlist, 99) or d <= np.percentile(
# self.openlist, 1
# ):
# pass
-#else:
+# else:
self.openlist.pop(0)
self.openlist.append(d)

@@ -243,18 +240,18 @@ class LEAP_C(object):
except:
per = 0.7
pass
# print(d, per)
x = pre_landmark[6][0]
y = pre_landmark[6][1]
frame = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)

# per = d - 0.1
self.last_lid = per
# pera = np.array([per, per])
-#self.one_euro_filter_open(pera)
-if per <= 0.2: #TODO: EXPOSE AS SETTING
+# self.one_euro_filter_open(pera)
+if per <= 0.2: # TODO: EXPOSE AS SETTING
per == 0.0
# print(per)
return frame, float(x), float(y), per

frame = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
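`LEAP_C` keeps a rolling history of eyelid distances (`openlist`, capped at 5000 samples) and turns the current distance into a 0-1 openness value `per` via percentiles of that history, treating anything at or below 0.2 as a blink. A simplified sketch of the idea; the exact percentile mapping here is an assumption, not the file's maths:

```python
import numpy as np

class EyelidNormalizer:
    def __init__(self, max_samples: int = 5000):
        self.max_samples = max_samples
        self.openlist: list[float] = []

    def update(self, d: float) -> float:
        # Keep a bounded history of recent eyelid distances.
        if len(self.openlist) < self.max_samples:
            self.openlist.append(d)
        else:
            self.openlist.pop(0)
            self.openlist.append(d)
        try:
            # Map the current distance into the 1st..99th percentile range of the history.
            lo = np.percentile(self.openlist, 1)
            hi = np.percentile(self.openlist, 99)
            per = float(np.clip((d - lo) / max(hi - lo, 1e-6), 0.0, 1.0))
        except Exception:
            per = 0.7  # fall back if the percentile cannot be computed
        if per <= 0.2:  # treat very low values as a full blink
            per = 0.0
        return per
```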
@@ -1,9 +1,8 @@

from pythonosc import udp_client
from pythonosc import osc_server
from pythonosc import dispatcher
from config import EyeTrackConfig
-from utils.misc_utils import PlaySound,SND_FILENAME,SND_ASYNC
+from utils.misc_utils import PlaySound, SND_FILENAME, SND_ASYNC
import queue
import threading
from enum import IntEnum
@@ -18,7 +17,9 @@ class EyeId(IntEnum):
ALGOSETTINGS = 4


-def eyelid_transformer(self,eye_blink):
+def eyelid_transformer(self, eye_blink):
if self.config.osc_invert_eye_close:
return float(1 - eye_blink)
else:
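`eyelid_transformer` only flips the openness value when `osc_invert_eye_close` is enabled, for avatars that expect 1.0 to mean closed. The same logic as a standalone helper (hypothetical signature, without the widget's `self`):

```python
def eyelid_transformer(eye_blink: float, invert_eye_close: bool) -> float:
    # Some avatar setups treat 1.0 as closed rather than open, so optionally invert.
    return float(1 - eye_blink) if invert_eye_close else float(eye_blink)
```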
@@ -26,158 +27,261 @@ def eyelid_transformer(self,eye_blink):


se = False
-def output_osc(eye_x, eye_y, eye_blink, last_blink, self):
-global se

-if not self.config.gui_vrc_native:

-if self.main_config.eye_display_id in [EyeId.RIGHT, EyeId.LEFT]: #we are in single eye mode
-se = True

-self.client.send_message(self.config.osc_left_eye_x_address, eye_x)
-self.client.send_message(self.config.osc_right_eye_x_address, eye_x)
-self.client.send_message(self.config.osc_eyes_y_address, eye_y)

-self.config.osc_left_eye_close_address

-self.client.send_message(self.config.osc_right_eye_close_address, eyelid_transformer(self,eye_blink))
-self.client.send_message(self.config.osc_left_eye_close_address, eyelid_transformer(self,eye_blink))
-else:
-se = False

-if self.eye_id in [EyeId.LEFT] and not se: #left eye, send data to left
-self.l_eye_x = eye_x
-self.l_eye_blink = eye_blink

-if self.l_eye_blink == 0.0:
-if last_blink > 0.15: #when binary blink is on, blinks may be too fast for OSC so we repeat them.
-for i in range(4):
-self.client.send_message(self.config.osc_left_eye_close_address, eyelid_transformer(self,self.l_eye_blink))
-last_blink = time.time() - last_blink
-if self.config.gui_eye_falloff:
-if self.r_eye_blink == 0.0: #if both eyes closed and DEF is enables, blink
-self.client.send_message(self.config.osc_left_eye_close_address, eyelid_transformer(self,self.l_eye_blink))
-self.client.send_message(self.config.osc_right_eye_close_address, eyelid_transformer(self,self.l_eye_blink))
-self.l_eye_x = self.r_eye_x

-self.client.send_message(self.config.osc_left_eye_x_address, self.l_eye_x)
-self.left_y = eye_y

-self.client.send_message(self.config.osc_left_eye_close_address, eyelid_transformer(self,self.l_eye_blink))


-elif self.eye_id in [EyeId.RIGHT] and not se: #Right eye, send data to right
-self.r_eye_x = eye_x
-self.r_eye_blink = eye_blink
+def output_osc(eye_x, eye_y, eye_blink, last_blink, pupil_dilation, self):
+print(pupil_dilation)
+global se
+# self.config.gui_osc_vrcft_v2
+# self.config.gui_osc_vrcft_v1
+# self.config.gui_vrc_native

-if self.r_eye_blink == 0.0:
-if last_blink > 0.15: #when binary blink is on, blinks may be too fast for OSC so we repeat them.
-print("REPEATING R BLINK")
-for i in range(4):
-self.client.send_message(self.config.osc_right_eye_close_address, eyelid_transformer(self,self.r_eye_blink))
-last_blink = time.time() - last_blink
-if self.config.gui_eye_falloff:
-if self.l_eye_blink == 0.0: #if both eyes closed and DEF is enables, blink
-self.client.send_message(self.config.osc_left_eye_close_address, eyelid_transformer(self,self.r_eye_blink))
-self.client.send_message(self.config.osc_right_eye_close_address, eyelid_transformer(self,self.r_eye_blink))
+if self.config.gui_osc_vrcft_v1:

-self.r_eye_x = self.l_eye_x
+if self.main_config.eye_display_id in [
+EyeId.RIGHT,
+EyeId.LEFT,
+]: # we are in single eye mode
+se = True

-self.client.send_message(self.config.osc_right_eye_x_address, eye_x)
-self.right_y = eye_y
+self.client.send_message(self.config.osc_left_eye_x_address, eye_x)
+self.client.send_message(self.config.osc_right_eye_x_address, eye_x)
+self.client.send_message(self.config.osc_eyes_y_address, eye_y)

-self.client.send_message(self.config.osc_right_eye_close_address, eyelid_transformer(self,self.r_eye_blink))
+self.config.osc_left_eye_close_address

-if self.main_config.eye_display_id in [EyeId.BOTH] and self.right_y != 621 and self.left_y != 621:
-y = (self.right_y + self.left_y) / 2
-self.client.send_message(self.config.osc_eyes_y_address, y)
+self.client.send_message(
+self.config.osc_right_eye_close_address,
+eyelid_transformer(self, eye_blink),
+)
+self.client.send_message(
+self.config.osc_left_eye_close_address,
+eyelid_transformer(self, eye_blink),
+)
+else:
+se = False

+if self.eye_id in [EyeId.LEFT] and not se: # left eye, send data to left
+self.l_eye_x = eye_x
+self.l_eye_blink = eye_blink

+if self.l_eye_blink == 0.0:
+if (
+last_blink > 0.15
+): # when binary blink is on, blinks may be too fast for OSC so we repeat them.
+for i in range(4):
+self.client.send_message(
+self.config.osc_left_eye_close_address,
+eyelid_transformer(self, self.l_eye_blink),
+)
+last_blink = time.time() - last_blink
+if self.config.gui_eye_falloff:
+if (
+self.r_eye_blink == 0.0
+): # if both eyes closed and DEF is enables, blink
+self.client.send_message(
+self.config.osc_left_eye_close_address,
+eyelid_transformer(self, self.l_eye_blink),
+)
+self.client.send_message(
+self.config.osc_right_eye_close_address,
+eyelid_transformer(self, self.l_eye_blink),
+)
+self.l_eye_x = self.r_eye_x

-else: # VRC NATIVE
+self.client.send_message(self.config.osc_left_eye_x_address, self.l_eye_x)
+self.left_y = eye_y

-if self.main_config.eye_display_id in [EyeId.RIGHT, EyeId.LEFT]: # we are in single eye mode
-se = True
-if eye_blink == 0.0:
-if last_blink > 0.2: # when binary blink is on, blinks may be too fast for OSC so we repeat them.
-for i in range(5):
-self.client.send_message("/tracking/eye/EyesClosedAmount",
-float(1 - eye_blink))
-eye_blink += 0.02 #TODO finish tuning value
-last_blink = time.time() - last_blink
+self.client.send_message(
+self.config.osc_left_eye_close_address,
+eyelid_transformer(self, self.l_eye_blink),
+)

-else:
-self.client.send_message("/tracking/eye/EyesClosedAmount", float(1 - eye_blink))
-self.client.send_message("/tracking/eye/LeftRightVec", [float(eye_x), float(eye_y), 1.0, float(eye_x), float(eye_y), 1.0]) # vrc native ET
+elif self.eye_id in [EyeId.RIGHT] and not se: # Right eye, send data to right
+self.r_eye_x = eye_x
+self.r_eye_blink = eye_blink

+if self.r_eye_blink == 0.0:
+if (
+last_blink > 0.15
+): # when binary blink is on, blinks may be too fast for OSC so we repeat them.
+print("REPEATING R BLINK")
+for i in range(4):
+self.client.send_message(
+self.config.osc_right_eye_close_address,
+eyelid_transformer(self, self.r_eye_blink),
+)
+last_blink = time.time() - last_blink
+if self.config.gui_eye_falloff:
+if (
+self.l_eye_blink == 0.0
+): # if both eyes closed and DEF is enables, blink
+self.client.send_message(
+self.config.osc_left_eye_close_address,
+eyelid_transformer(self, self.r_eye_blink),
+)
+self.client.send_message(
+self.config.osc_right_eye_close_address,
+eyelid_transformer(self, self.r_eye_blink),
+)

+self.r_eye_x = self.l_eye_x

+self.client.send_message(self.config.osc_right_eye_x_address, eye_x)
+self.right_y = eye_y

+self.client.send_message(
+self.config.osc_right_eye_close_address,
+eyelid_transformer(self, self.r_eye_blink),
+)

+if (
+self.main_config.eye_display_id in [EyeId.BOTH]
+and self.right_y != 621
+and self.left_y != 621
+):
+y = (self.right_y + self.left_y) / 2
+self.client.send_message(self.config.osc_eyes_y_address, y)

+if self.config.gui_vrc_native: # VRC NATIVE

+if self.main_config.eye_display_id in [
+EyeId.RIGHT,
+EyeId.LEFT,
+]: # we are in single eye mode
+se = True
+if eye_blink == 0.0:
+if (
+last_blink > 0.2
+): # when binary blink is on, blinks may be too fast for OSC so we repeat them.
+for i in range(5):
+self.client.send_message(
+"/tracking/eye/EyesClosedAmount", float(1 - eye_blink)
+)
+eye_blink += 0.02 # TODO finish tuning value
+last_blink = time.time() - last_blink

else:
-se = False
+self.client.send_message(
+"/tracking/eye/EyesClosedAmount", float(1 - eye_blink)
+)
+self.client.send_message(
+"/tracking/eye/LeftRightVec",
+[float(eye_x), float(eye_y), 1.0, float(eye_x), float(eye_y), 1.0],
+) # vrc native ET

-if self.eye_id in [EyeId.LEFT] and not se: # left eye, send data to left
-self.l_eye_x = eye_x
-self.l_eye_blink = eye_blink
-self.left_y = eye_y
-self.client.send_message(self.config.osc_left_eye_close_address,eyelid_transformer(self,eye_blink))
+else:
+se = False

-if self.l_eye_blink == 0.0:
-if last_blink > 0.2: # when binary blink is on, blinks may be too fast for OSC so we repeat them.
-for i in range(5):
-self.client.send_message("/tracking/eye/EyesClosedAmount",
-float(1 - eye_blink))
-last_blink = time.time() - last_blink
-if self.config.gui_eye_falloff:
-if self.r_eye_blink == 0.0: # if both eyes closed and DEF is enables, blink
-self.client.send_message("/tracking/eye/EyesClosedAmount",
-float(1 - eye_blink))
-self.l_eye_x = self.r_eye_x
+if self.eye_id in [EyeId.LEFT] and not se: # left eye, send data to left
+self.l_eye_x = eye_x
+self.l_eye_blink = eye_blink
+self.left_y = eye_y
+self.client.send_message(
+self.config.osc_left_eye_close_address,
+eyelid_transformer(self, eye_blink),
+)

+if self.l_eye_blink == 0.0:
+if (
+last_blink > 0.2
+): # when binary blink is on, blinks may be too fast for OSC so we repeat them.
+for i in range(5):
+self.client.send_message(
+"/tracking/eye/EyesClosedAmount", float(1 - eye_blink)
+)
+last_blink = time.time() - last_blink
+if self.config.gui_eye_falloff:
+if (
+self.r_eye_blink == 0.0
+): # if both eyes closed and DEF is enables, blink
+self.client.send_message(
+"/tracking/eye/EyesClosedAmount", float(1 - eye_blink)
+)
+self.l_eye_x = self.r_eye_x

elif self.eye_id in [EyeId.RIGHT] and not se: # Right eye, send data to right
self.r_eye_x = eye_x
self.r_eye_blink = eye_blink
self.right_y = eye_y
-self.client.send_message(self.config.osc_right_eye_close_address,eyelid_transformer(self,eye_blink))
+self.client.send_message(
+self.config.osc_right_eye_close_address,
+eyelid_transformer(self, eye_blink),
+)

if self.r_eye_blink == 0.0:
-if last_blink > 0.2: # when binary blink is on, blinks may be too fast for OSC so we repeat them.
-for i in range(5):
-self.client.send_message("/tracking/eye/EyesClosedAmount",
-float(1 - eye_blink))
-last_blink = time.time() - last_blink
-if self.config.gui_eye_falloff:
-if self.l_eye_blink == 0.0: # if both eyes closed and DEF is enables, blink
-self.client.send_message("/tracking/eye/EyesClosedAmount",
-float(0))
+if (
+last_blink > 0.2
+): # when binary blink is on, blinks may be too fast for OSC so we repeat them.
+for i in range(5):
+self.client.send_message(
+"/tracking/eye/EyesClosedAmount", float(1 - eye_blink)
+)
+last_blink = time.time() - last_blink
+if self.config.gui_eye_falloff:
+if (
+self.l_eye_blink == 0.0
+): # if both eyes closed and DEF is enables, blink
+self.client.send_message(
+"/tracking/eye/EyesClosedAmount", float(0)
+)

self.r_eye_x = self.l_eye_x

+if (
+self.main_config.eye_display_id in [EyeId.BOTH]
+and self.r_eye_blink != 621
+and self.r_eye_blink != 621
+):
+if self.r_eye_blink == 0.0 or self.l_eye_blink == 0.0:
+if (
+last_blink > 0.2
+): # when binary blink is on, blinks may be too fast for OSC so we repeat them.
+for i in range(5):
+self.client.send_message(
+"/tracking/eye/EyesClosedAmount", float(1)
+)
+last_blink = time.time() - last_blink
+eye_blink = (self.r_eye_blink + self.l_eye_blink) / 2
+self.client.send_message(
+"/tracking/eye/EyesClosedAmount", float(1 - eye_blink)
+)

-if self.main_config.eye_display_id in [EyeId.BOTH] and self.r_eye_blink != 621 and self.r_eye_blink != 621:
-if self.r_eye_blink == 0.0 or self.l_eye_blink == 0.0:
-if last_blink > 0.2: # when binary blink is on, blinks may be too fast for OSC so we repeat them.
-for i in range(5):
-self.client.send_message("/tracking/eye/EyesClosedAmount",
-float(1))
-last_blink = time.time() - last_blink
-eye_blink = (self.r_eye_blink + self.l_eye_blink) / 2
-self.client.send_message("/tracking/eye/EyesClosedAmount",
-float(1 - eye_blink))
+if (
+self.main_config.eye_display_id in [EyeId.BOTH]
+and self.right_y != 621
+and self.left_y != 621
+):
+eye_y = (self.right_y + self.left_y) / 2

-if self.main_config.eye_display_id in [EyeId.BOTH] and self.right_y != 621 and self.left_y != 621:
-eye_y = (self.right_y + self.left_y) / 2
-if not se:
-# vrc native ET (z values may need tweaking, they act like a scalar)
-self.client.send_message("/tracking/eye/LeftRightVec",[float(self.l_eye_x), float(self.left_y), 1.0, float(self.r_eye_x), float(self.right_y), 1.0])
+if not se:
+# vrc native ET (z values may need tweaking, they act like a scalar)
+self.client.send_message(
+"/tracking/eye/LeftRightVec",
+[
+float(self.l_eye_x),
+float(self.left_y),
+1.0,
+float(self.r_eye_x),
+float(self.right_y),
+1.0,
+],
+)


class VRChatOSC:
# Use a tuple of blink (true, blinking, false, not), x, y for now.
-def __init__(self, cancellation_event: threading.Event, msg_queue: queue.Queue[tuple[bool, int, int]], main_config: EyeTrackConfig,):
+def __init__(
+self,
+cancellation_event: threading.Event,
+msg_queue: queue.Queue[tuple[bool, int, int]],
+main_config: EyeTrackConfig,
+):
self.main_config = main_config
self.config = main_config.settings
-self.client = udp_client.SimpleUDPClient(self.config.gui_osc_address, int(self.config.gui_osc_port)) # use OSC port and address that was set in the config
+self.client = udp_client.SimpleUDPClient(
+self.config.gui_osc_address, int(self.config.gui_osc_port)
+) # use OSC port and address that was set in the config
self.cancellation_event = cancellation_event
self.msg_queue = msg_queue
self.eye_id = EyeId.RIGHT
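Several branches of the rewritten `output_osc` resend the eye-closed message a few times when a binary blink arrives, since a one-frame blink can be dropped by the OSC consumer. A minimal python-osc sketch of that repeat pattern (address, port, and repeat count are illustrative):

```python
import time

from pythonosc import udp_client

client = udp_client.SimpleUDPClient("127.0.0.1", 9000)

def send_blink(eye_blink: float, last_blink: float, repeats: int = 4) -> float:
    if eye_blink == 0.0 and last_blink > 0.15:
        # Binary blinks can be shorter than the OSC consumer notices, so resend them.
        for _ in range(repeats):
            client.send_message("/tracking/eye/EyesClosedAmount", float(1 - eye_blink))
        # Mirror the commit's bookkeeping of the last blink timestamp.
        last_blink = time.time() - last_blink
    return last_blink
```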
@@ -188,7 +292,6 @@ class VRChatOSC:
self.r_eye_blink = 0.7
self.l_eye_blink = 0.7


def run(self):
start = time.time()
last_blink = time.time()
@@ -201,19 +304,33 @@ class VRChatOSC:
except:
continue

-output_osc(eye_info.x, eye_info.y, eye_info.blink, last_blink, self)
+output_osc(
+eye_info.x,
+eye_info.y,
+eye_info.blink,
+last_blink,
+eye_info.pupil_dilation,
+self,
+)


class VRChatOSCReceiver:
-def __init__(self, cancellation_event: threading.Event, main_config: EyeTrackConfig, eyes: []):
+def __init__(
+self, cancellation_event: threading.Event, main_config: EyeTrackConfig, eyes: []
+):
self.config = main_config.settings
self.cancellation_event = cancellation_event
self.dispatcher = dispatcher.Dispatcher()
self.eyes = eyes # we cant import CameraWidget so any type it is
try:
-self.server = osc_server.OSCUDPServer((self.config.gui_osc_address, int(self.config.gui_osc_receiver_port)), self.dispatcher)
+self.server = osc_server.OSCUDPServer(
+(self.config.gui_osc_address, int(self.config.gui_osc_receiver_port)),
+self.dispatcher,
+)
except:
-print(f"\033[91m[ERROR] OSC Receive port: {self.config.gui_osc_receiver_port} occupied.\033[0m")
+print(
+f"\033[91m[ERROR] OSC Receive port: {self.config.gui_osc_receiver_port} occupied.\033[0m"
+)

def shutdown(self):
print("\033[94m[INFO] Exiting OSC Receiver\033[0m")
@@ -223,28 +340,40 @@ class VRChatOSCReceiver:
pass

def recenter_eyes(self, address, osc_value):
-if type(osc_value) != bool: return # just incase we get anything other than bool
+if type(osc_value) != bool:
+return # just incase we get anything other than bool
if osc_value:
for eye in self.eyes:
eye.settings.gui_recenter_eyes = True

def recalibrate_eyes(self, address, osc_value):
-if type(osc_value) != bool: return # just incase we get anything other than bool
+if type(osc_value) != bool:
+return # just incase we get anything other than bool
if osc_value:
for eye in self.eyes:
eye.ransac.ibo.clear_filter()
eye.ransac.calibration_frame_counter = self.config.calibration_samples
-PlaySound('Audio/start.wav', SND_FILENAME | SND_ASYNC)
+PlaySound("Audio/start.wav", SND_FILENAME | SND_ASYNC)

def run(self):

# bind what function to run when specified OSC message is received
try:
-self.dispatcher.map(self.config.gui_osc_recalibrate_address, self.recalibrate_eyes)
-self.dispatcher.map(self.config.gui_osc_recenter_address, self.recenter_eyes)
+self.dispatcher.map(
+self.config.gui_osc_recalibrate_address, self.recalibrate_eyes
+)
+self.dispatcher.map(
+self.config.gui_osc_recenter_address, self.recenter_eyes
+)
# start the server
-print("\033[92m[INFO] VRChatOSCReceiver serving on {}\033[0m".format(self.server.server_address))
+print(
+"\033[92m[INFO] VRChatOSCReceiver serving on {}\033[0m".format(
+self.server.server_address
+)
+)
self.server.serve_forever()

except:
-print(f"\033[91m[ERROR] OSC Receive port: {self.config.gui_osc_receiver_port} occupied.\033[0m")
+print(
+f"\033[91m[ERROR] OSC Receive port: {self.config.gui_osc_receiver_port} occupied.\033[0m"
+)
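`VRChatOSCReceiver` maps the recenter and recalibrate addresses to handlers on a dispatcher and then serves until shutdown. A minimal python-osc receiver along the same lines, using `BlockingOSCUDPServer` and a made-up address and port:

```python
from pythonosc import dispatcher, osc_server

def recenter_eyes(address, osc_value):
    if type(osc_value) != bool:
        return  # ignore anything that is not a bool
    if osc_value:
        print("recenter requested via", address)

disp = dispatcher.Dispatcher()
disp.map("/avatar/parameters/etvr_recenter", recenter_eyes)  # address is illustrative

server = osc_server.BlockingOSCUDPServer(("127.0.0.1", 9001), disp)
print("serving on", server.server_address)
server.serve_forever()
```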
@@ -427,7 +427,7 @@ def RANSAC3D(self, hsrac_en):
with open("RANSAC_BLINK_RIGHT.cfg", "w") as file:
for item in self.blink_list:
file.write(str(item) + "\n")
# print("SAVE")

# self.blink_list.pop(0)
self.blink_list.append(abs(perscalarw - perscalarh))
@@ -438,7 +438,6 @@ def RANSAC3D(self, hsrac_en):
if abs(perscalarw - perscalarh) >= np.percentile(self.blink_list, 92):
blink = 0.0


try:
cv2.drawContours(
self.current_image_gray, contours, -1, (255, 0, 0), 1
@@ -490,7 +489,7 @@ def RANSAC3D(self, hsrac_en):
thresh = cv2.resize(thresh, (x, y))
try:
self.failed = 0 # we have succeded, continue with this
-return cx, cy, thresh, blink
+return cx, cy, thresh, blink, w, h
except:
self.failed = self.failed + 1 # we have failed, move onto next algo
-return 0, 0, thresh, blink
+return 0, 0, thresh, blink, 0, 0
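The RANSAC3D hunks flag a blink when the gap between the pupil ellipse's width and height scalars is large relative to recent history (at or above the 92nd percentile), and the function now also returns `w` and `h` for the pupil-dilation work. A rough sketch of that percentile check (the nominal open value and the warm-up guard are assumptions):

```python
import numpy as np

blink_list: list[float] = []

def update_blink(perscalarw: float, perscalarh: float) -> float:
    gap = abs(perscalarw - perscalarh)
    blink_list.append(gap)
    blink = 0.7  # nominal "eye open" value
    if len(blink_list) > 10 and gap >= np.percentile(blink_list, 92):
        blink = 0.0  # unusually squashed ellipse, treat as a blink
    return blink
```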