mirror of https://github.com/EyeTrackVR/EyeTrackVR.git
synced 2025-09-26 23:09:28 +08:00

remove single cam

This commit is contained in:
parent 1b1100c559
commit 5d91ce5963

EyeTrackApp-SingleCam/.gitignore (vendored): 4 deletions
@@ -1,4 +0,0 @@
build
dist
__pycache__
*.json
@@ -1,42 +0,0 @@
# How to spin up a dev environment

First, ensure that the virtualenv module is installed on your PC:

pip install virtualenv

Next, cd into the RANSACApp directory and run:

python -m virtualenv venv

On Windows, next we run:

venv\Scripts\activate

On Linux we run:

source venv/bin/activate

Next, we install the dependencies:

pip install -r requirements.txt

When that is complete, move on to building:

pyinstaller eyetrackapp.spec

Now we can run the executable:

cd dist/eyetrackapp
./eyetrackapp

***DISCLAIMER: I DO NOT OWN THE LICENSE TO ALL OF THIS CODE. Part of it is from Pupil Labs, distributed under the terms of the GNU Lesser General Public License (LGPL v3.0). See COPYING and COPYING.LESSER for license details.***

Copyright (C) 2018 Pupil Labs

All Rights Reserved.

This is pye3d by Pupil Labs.

The one Euro filter base comes from [https://github.com/jaantollander/OneEuroFilter](https://github.com/jaantollander/OneEuroFilter)
@@ -1,131 +0,0 @@
from time import sleep

import numpy

from config import RansacConfig
import requests
from enum import Enum
import threading
import queue
import runpy
import cv2


class CameraState(Enum):
    CONNECTED = 1
    DISCONNECTED = 2


WAIT_TIME = 0.1


class Camera:
    def __init__(
        self,
        config: RansacConfig,
        camera_index: int,
        cancellation_event: "threading.Event",
        capture_event: "threading.Event",
        camera_status_outgoing: "queue.Queue[CameraState]",
        camera_output_outgoing: "queue.Queue",
    ):
        self.config = config
        self.camera_index = camera_index
        self.camera_address = config.capture_source
        self.camera_status_outgoing = camera_status_outgoing
        self.camera_output_outgoing = camera_output_outgoing
        self.capture_event = capture_event
        self.cancellation_event = cancellation_event
        self.current_capture_source = config.capture_source
        self.wired_camera: "cv2.VideoCapture" = cv2.VideoCapture(config.capture_source)
        self.stream = None
        self.stream_frame_number = 0
        self.stream_bytes = bytes()
        self.previous_frame = None
        self.error_message = "Capture source {} not found, retrying in 500ms"

    def set_output_queue(self, camera_output_outgoing: "queue.Queue"):
        self.camera_output_outgoing = camera_output_outgoing

    def run(self):
        while True:
            if self.cancellation_event.is_set():
                print("Exiting capture thread")
                return
            should_push = True
            # If things aren't open, retry until they are. Don't let read requests come in any earlier
            # than this, otherwise we can deadlock ourselves.
            if self.config.capture_source is not None and self.config.capture_source != "":
                if (
                    self.wired_camera is None
                    or not self.wired_camera.isOpened()
                    or self.config.capture_source != self.current_capture_source
                ):
                    print(self.error_message.format(self.config.capture_source))
                    # This requires a wait, otherwise we can error and possibly screw up the camera
                    # firmware. Fickle things.
                    if self.cancellation_event.wait(WAIT_TIME):
                        return
                    self.current_capture_source = self.config.capture_source
                    self.wired_camera = cv2.VideoCapture(self.current_capture_source)

            # We don't have a capture source to try yet, wait for one to show up in the GUI.

            # Assuming we can access our capture source, wait for another thread to request a capture.
            # Cycle every so often to see if our cancellation token has fired. This basically uses a
            # python event as a contextless, resettable one-shot channel.
            if should_push and not self.capture_event.wait(timeout=0.02):
                continue

            self.get_wired_camera_picture()
            # if not should_push:
            #     if we get all the way down here, consider ourselves connected

    def get_wired_camera_picture(self):
        try:
            ret, image = self.wired_camera.read()
            if not ret:
                self.wired_camera.set(cv2.CAP_PROP_POS_FRAMES, 0)
                raise RuntimeError("Problem while getting frame")
            frame_number = self.wired_camera.get(cv2.CAP_PROP_POS_FRAMES)
            fps = self.wired_camera.get(cv2.CAP_PROP_FPS)
            self.push_image_to_queue(image, frame_number, fps)
        except Exception:
            # print("Capture source problem, assuming camera disconnected, waiting for reconnect.")
            pass

    def cleanup_stream(self):
        if self.stream:
            self.stream.close()
        self.stream_bytes = None
        self.previous_frame = None
        self.stream_frame_number = 0

    @staticmethod
    def check_is_image_valid(image):
        try:
            return bool(len(image))
        except TypeError:
            return False

    @staticmethod
    def decode_image(data):
        try:
            return cv2.imdecode(data, cv2.IMREAD_COLOR)
        except Exception:
            return None

    def push_image_to_queue(self, image, frame_number, fps):
        # If there's backpressure, just yell. We really shouldn't have this unless we start getting
        # some sort of capture event conflict though.
        qsize = self.camera_output_outgoing.qsize()
        if qsize > 1:
            print(
                f"CAPTURE QUEUE BACKPRESSURE OF {qsize}. CHECK FOR CRASH OR TIMING ISSUES IN ALGORITHM."
            )
        self.camera_output_outgoing.put((image, frame_number, fps))
        self.capture_event.clear()
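Beyond the deleted file itself, here is a minimal sketch (hypothetical code, not from this commit) of the request/response handshake the comments above describe: a consumer sets `capture_event` to ask for one frame, and `push_image_to_queue` clears the event after answering.

```python
import queue
import threading


def consume_frames(capture_event: threading.Event,
                   capture_queue: "queue.Queue",
                   cancellation_event: threading.Event) -> None:
    """Hypothetical consumer loop for the Camera worker above."""
    while not cancellation_event.is_set():
        capture_event.set()  # one-shot "please capture" signal; Camera clears it
        try:
            image, frame_number, fps = capture_queue.get(timeout=0.5)
        except queue.Empty:
            continue  # camera not ready yet; ask again
        print(f"frame {frame_number} at {fps} fps, shape={image.shape}")
```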
@@ -1,37 +0,0 @@
from dataclasses import dataclass
from typing import Union
from dacite import from_dict
import os.path
import json


# TODO Who even needs synchronization? (We do.)

@dataclass
class RansacConfig:
    threshold: "int" = 0
    rotation_angle: "int" = 0
    roi_window_x: "int" = 0
    roi_window_y: "int" = 0
    roi_window_w: "int" = 640
    roi_window_h: "int" = 480
    focal_length: "int" = 30
    capture_source: "Union[int, str, None]" = None
    vrc_eye_position_scalar: "int" = 3000
    show_color_image: "bool" = False

    @staticmethod
    def load():
        if not os.path.exists("ransac_settings.json"):
            print("No settings file, using base settings")
            return RansacConfig()
        with open("ransac_settings.json", 'r') as settings_file:
            try:
                return from_dict(data_class=RansacConfig, data=json.load(settings_file))
            except Exception:
                print("Configuration invalid, creating new config")
                return RansacConfig()

    def save(self):
        with open("ransac_settings.json", 'w+') as settings_file:
            json.dump(self.__dict__, settings_file)
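For reference, a short round-trip sketch of the config API above (module name `config` as imported elsewhere in this commit; the settings file lands in the working directory):

```python
from config import RansacConfig

config = RansacConfig.load()   # falls back to defaults if ransac_settings.json is absent
config.threshold = 42
config.save()                  # writes ransac_settings.json via json.dump
print(RansacConfig.load().threshold)  # -> 42
```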
File diff suppressed because one or more lines are too long
@@ -1,44 +0,0 @@
# -*- mode: py -3.6 ; coding: utf-8 -*-


block_cipher = None


a = Analysis(['main.py'],
             pathex=[],
             binaries=[("pye3d.libs/*", "pye3d.libs"), ("pye3d.libs/.*", "pye3d.libs")],
             datas=[("pye3dcustom/refraction_models/*", "pye3dcustom/refraction_models")],
             hiddenimports=[],
             hookspath=[],
             hooksconfig={},
             runtime_hooks=[],
             excludes=[],
             win_no_prefer_redirects=False,
             win_private_assemblies=False,
             cipher=block_cipher,
             noarchive=False)
pyz = PYZ(a.pure, a.zipped_data,
          cipher=block_cipher)

exe = EXE(pyz,
          a.scripts,
          [],
          exclude_binaries=True,
          name='EyeTrackApp',
          debug=False,
          bootloader_ignore_signals=False,
          strip=False,
          upx=True,
          console=True,
          disable_windowed_traceback=False,
          target_arch=None,
          codesign_identity=None,
          entitlements_file=None)
coll = COLLECT(exe,
               a.binaries,
               a.zipfiles,
               a.datas,
               strip=False,
               upx=True,
               upx_exclude=[],
               name='eyetrackapp')
@@ -1,233 +0,0 @@
from osc import VRChatOSC
from config import RansacConfig
from speech_engine import SpeechEngine
from ransac import Ransac, InformationOrigin
import queue
import threading
import cv2
import camera
import PySimpleGUI as sg

WINDOW_NAME = "RANSACApp"
CAMERA_ADDR_NAME = "-CAMERAADDR-"
THRESHOLD_SLIDER_NAME = "-THREADHOLDSLIDER-"
ROTATION_SLIDER_NAME = "-ROTATIONSLIDER-"
SCALAR_SLIDER_NAME = "-EYESCALARSLIDER-"
ROI_BUTTON_NAME = "-ROIMODE-"
ROI_LAYOUT_NAME = "-ROILAYOUT-"
ROI_SELECTION_NAME = "-GRAPH-"
TRACKING_BUTTON_NAME = "-TRACKINGMODE-"
SAVE_TRACKING_BUTTON_NAME = "-SAVETRACKINGBUTTON-"
TRACKING_LAYOUT_NAME = "-TRACKINGLAYOUT-"
TRACKING_IMAGE_NAME = "-IMAGE-"
OUTPUT_GRAPH_NAME = "-OUTPUTGRAPH-"
RESTART_CALIBRATION_NAME = "-RESTARTCALIBRATION-"
RECENTER_EYE_NAME = "-RECENTEREYE-"
MODE_READOUT_NAME = "-APPMODE-"
SHOW_COLOR_IMAGE_NAME = "-SHOWCOLORIMAGE-"


def main():
    in_roi_mode = False

    # Get Configuration
    config: RansacConfig = RansacConfig.load()
    config.save()

    roi_layout = [
        [sg.Graph((640, 480), (0, 480), (640, 0), key=ROI_SELECTION_NAME, drag_submits=True, enable_events=True)]
    ]

    # Define the window's contents
    tracking_layout = [
        [sg.Text("Threshold"),
         sg.Slider(range=(0, 100), default_value=config.threshold, orientation='h', key=THRESHOLD_SLIDER_NAME)],
        [sg.Text("Rotation"),
         sg.Slider(range=(0, 360), default_value=config.rotation_angle, orientation='h', key=ROTATION_SLIDER_NAME)],
        [sg.Text("Eye Position Scalar"),
         sg.Slider(range=(0, 5000), default_value=config.vrc_eye_position_scalar, orientation='h',
                   key=SCALAR_SLIDER_NAME)],
        [sg.Button("Restart Calibration", key=RESTART_CALIBRATION_NAME),
         sg.Button("Recenter Eye", key=RECENTER_EYE_NAME),
         sg.Checkbox('Show Color Image:', default=config.show_color_image, key=SHOW_COLOR_IMAGE_NAME)],
        [sg.Text("Mode:"), sg.Text("Calibrating", key=MODE_READOUT_NAME)],
        [sg.Image(filename="", key=TRACKING_IMAGE_NAME)],
        [sg.Graph((200, 200), (-100, 100), (100, -100), background_color='white', key=OUTPUT_GRAPH_NAME,
                  drag_submits=True, enable_events=True)]
    ]

    layout = [[[sg.Text("Camera Address"), sg.InputText(config.capture_source, key=CAMERA_ADDR_NAME),
                sg.Button("Save and Restart Tracking", key=SAVE_TRACKING_BUTTON_NAME)]],
              [sg.Button("Tracking Mode", key=TRACKING_BUTTON_NAME), sg.Button("ROI Mode", key=ROI_BUTTON_NAME)],
              [sg.Column(tracking_layout, key=TRACKING_LAYOUT_NAME),
               sg.Column(roi_layout, key=ROI_LAYOUT_NAME, visible=False)]]

    # Create the window
    window = sg.Window('EyeTrackVR v0.0.10', layout)

    cancellation_event = threading.Event()

    # Check to see if we can connect to our video source first. If not, bring up camera finding
    # dialog.

    # Check to see if we have an ROI. If not, bring up ROI finder GUI.

    # Spawn worker threads
    osc_queue: "queue.Queue[tuple[bool, int, int]]" = queue.Queue()
    osc = VRChatOSC(cancellation_event, osc_queue)
    osc_thread = threading.Thread(target=osc.run)
    osc_thread.start()

    # t2s_queue: "queue.Queue[str | None]" = queue.Queue()
    # t2s_engine = SpeechEngine(t2s_queue)
    # t2s_thread = threading.Thread(target=t2s_engine.run)
    # t2s_thread.start()
    # t2s_queue.put("App Starting")

    capture_event = threading.Event()
    capture_queue = queue.Queue()
    roi_queue = queue.Queue()

    image_queue: queue.Queue = queue.Queue()
    ransac = Ransac(config, cancellation_event, capture_event, capture_queue, image_queue)
    ransac_thread = threading.Thread(target=ransac.run)
    ransac_thread.start()

    # Only start our camera AFTER we've brought up the RANSAC thread, otherwise we'll have no consumer
    camera_status_queue = queue.Queue()
    camera_0 = camera.Camera(config, 0, cancellation_event, capture_event, camera_status_queue, capture_queue)
    camera_0_thread = threading.Thread(target=camera_0.run)
    camera_0_thread.start()

    x0, y0 = None, None
    x1, y1 = None, None
    figure = None
    is_mouse_up = True

    # GUI render loop
    while True:
        # First off, check for any events from the GUI
        event, values = window.read(timeout=1)

        # If we're in either mode and someone hits q, quit immediately
        if event == "Exit" or event == sg.WIN_CLOSED:
            cancellation_event.set()
            osc_thread.join()
            ransac_thread.join()
            # t2s_engine.force_stop()
            # t2s_queue.put(None)
            # t2s_thread.join()
            print("Exiting RANSAC App")
            return

        changed = False
        # If anything has changed in our configuration settings, change/update those.
        if event == SAVE_TRACKING_BUTTON_NAME and values[CAMERA_ADDR_NAME] != config.capture_source:
            try:
                # Try storing ints as ints, for those using wired cameras.
                config.capture_source = int(values[CAMERA_ADDR_NAME])
            except ValueError:
                config.capture_source = values[CAMERA_ADDR_NAME]
            changed = True

        if config.threshold != values[THRESHOLD_SLIDER_NAME]:
            config.threshold = int(values[THRESHOLD_SLIDER_NAME])
            changed = True

        if config.rotation_angle != values[ROTATION_SLIDER_NAME]:
            config.rotation_angle = int(values[ROTATION_SLIDER_NAME])
            changed = True

        if config.vrc_eye_position_scalar != values[SCALAR_SLIDER_NAME]:
            config.vrc_eye_position_scalar = int(values[SCALAR_SLIDER_NAME])
            changed = True

        if config.show_color_image != values[SHOW_COLOR_IMAGE_NAME]:
            config.show_color_image = values[SHOW_COLOR_IMAGE_NAME]
            changed = True

        if changed:
            config.save()

        if event == TRACKING_BUTTON_NAME:
            print("Moving to tracking mode")
            in_roi_mode = False
            camera_0.set_output_queue(capture_queue)
            window[ROI_LAYOUT_NAME].update(visible=False)
            window[TRACKING_LAYOUT_NAME].update(visible=True)
        elif event == ROI_BUTTON_NAME:
            print("Moving to ROI mode")
            in_roi_mode = True
            camera_0.set_output_queue(roi_queue)
            window[ROI_LAYOUT_NAME].update(visible=True)
            window[TRACKING_LAYOUT_NAME].update(visible=False)
        elif event == '-GRAPH-+UP':
            # Event for mouse button up in ROI mode
            is_mouse_up = True
            if abs(x0 - x1) != 0 and abs(y0 - y1) != 0:
                config.roi_window_x = min([x0, x1])
                config.roi_window_y = min([y0, y1])
                config.roi_window_w = abs(x0 - x1)
                config.roi_window_h = abs(y0 - y1)
                config.save()
        elif event == '-GRAPH-':
            # Event for mouse button down or mouse drag in ROI mode
            if is_mouse_up:
                is_mouse_up = False
                x0, y0 = values['-GRAPH-']
            x1, y1 = values['-GRAPH-']
        elif event == RESTART_CALIBRATION_NAME:
            ransac.calibration_frame_counter = 300
        elif event == RECENTER_EYE_NAME:
            ransac.recenter_eye = True

        if ransac.calibration_frame_counter is not None:
            window[MODE_READOUT_NAME].update("Calibration")
        else:
            window[MODE_READOUT_NAME].update("Tracking")

        if in_roi_mode:
            try:
                if roi_queue.empty():
                    capture_event.set()
                maybe_image = roi_queue.get(block=False)
                imgbytes = cv2.imencode(".ppm", maybe_image[0])[1].tobytes()
                graph = window[ROI_SELECTION_NAME]
                if figure:
                    graph.delete_figure(figure)
                # INCREDIBLY IMPORTANT ERASE. Drawing images does NOT overwrite the buffer, the fucking
                # graph keeps every image fed in until you call this. Therefore we have to make sure we
                # erase before we redraw, otherwise we'll leak memory *very* quickly.
                graph.erase()
                graph.draw_image(data=imgbytes, location=(0, 0))
                if None not in (x0, y0, x1, y1):
                    figure = graph.draw_rectangle((x0, y0), (x1, y1), line_color='blue')
            except queue.Empty:
                pass
        else:
            try:
                (maybe_image, eye_info) = image_queue.get(block=False)
                imgbytes = cv2.imencode(".ppm", maybe_image)[1].tobytes()
                window[TRACKING_IMAGE_NAME].update(data=imgbytes)

                # Update the GUI
                graph = window[OUTPUT_GRAPH_NAME]
                graph.erase()

                if eye_info.info_type != InformationOrigin.FAILURE and not eye_info.blink:
                    graph.update(background_color='white')
                    graph.draw_circle((eye_info.x * -100, eye_info.y * -100), 25, fill_color='black', line_color='white')
                elif eye_info.blink:
                    graph.update(background_color='blue')
                elif eye_info.info_type == InformationOrigin.FAILURE:
                    graph.update(background_color='red')

                # Relay information to OSC
                if eye_info.info_type != InformationOrigin.FAILURE:
                    osc_queue.put(eye_info)
            except queue.Empty:
                pass


if __name__ == "__main__":
    main()
@@ -1,50 +0,0 @@
import numpy as np
from time import time


def smoothing_factor(t_e, cutoff):
    r = 2 * np.pi * cutoff * t_e
    return r / (r + 1)


def exponential_smoothing(a, x, x_prev):
    return a * x + (1 - a) * x_prev


class OneEuroFilter:
    def __init__(self, x0, dx0=0.0, min_cutoff=1.0, beta=0.0,
                 d_cutoff=1.0):
        """Initialize the one euro filter."""
        # The parameters.
        self.data_shape = x0.shape
        self.min_cutoff = np.full(x0.shape, min_cutoff)
        self.beta = np.full(x0.shape, beta)
        self.d_cutoff = np.full(x0.shape, d_cutoff)
        # Previous values.
        self.x_prev = x0.astype(float)  # np.float is removed in modern NumPy; plain float is equivalent here
        self.dx_prev = np.full(x0.shape, dx0)
        self.t_prev = time()

    def __call__(self, x):
        """Compute the filtered signal."""
        assert x.shape == self.data_shape

        t = time()
        t_e = t - self.t_prev
        t_e = np.full(x.shape, t_e)

        # The filtered derivative of the signal.
        a_d = smoothing_factor(t_e, self.d_cutoff)
        dx = (x - self.x_prev) / t_e
        dx_hat = exponential_smoothing(a_d, dx, self.dx_prev)

        # The filtered signal.
        cutoff = self.min_cutoff + self.beta * np.abs(dx_hat)
        a = smoothing_factor(t_e, cutoff)
        x_hat = exponential_smoothing(a, x, self.x_prev)

        # Memorize the previous values.
        self.x_prev = x_hat
        self.dx_prev = dx_hat
        self.t_prev = t

        return x_hat
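With the `OneEuroFilter` class above in scope, a small usage sketch (the `min_cutoff`/`beta` values are illustrative, not taken from this repository); note that `__call__` derives the timestep from wall-clock time between calls:

```python
import numpy as np
from time import sleep

point = np.zeros(2)
filt = OneEuroFilter(point, min_cutoff=1.0, beta=0.007)  # illustrative tuning

for _ in range(5):
    noisy = point + np.random.normal(scale=0.05, size=2)
    sleep(1 / 60)       # simulate a 60 Hz update loop
    print(filt(noisy))  # smoothed estimate stays close to `point`
```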
@@ -1,42 +0,0 @@
from pythonosc import udp_client
import queue
import threading


class VRChatOSC:
    # VRChat OSC Networking Info. For now, we'll assume it's always local.
    OSC_IP = "127.0.0.1"
    OSC_PORT = 9000  # VRChat OSC port

    # Use a tuple of blink (true, blinking, false, not), x, y for now. Probably clearer as a class but
    # we're stuck in python 3.6 so still no dataclasses. God I hate python.
    def __init__(self, cancellation_event: "threading.Event", msg_queue: "queue.Queue[tuple[bool, int, int]]"):
        self.client = udp_client.SimpleUDPClient(VRChatOSC.OSC_IP, VRChatOSC.OSC_PORT)
        self.cancellation_event = cancellation_event
        self.msg_queue = msg_queue

    def run(self):
        # Set blinking status to true when we start, just so we make sure we get to an eyelid open state
        # no matter what.
        was_blinking = True
        while True:
            if self.cancellation_event.is_set():
                print("Exiting OSC Queue")
                return
            try:
                eye_info = self.msg_queue.get(block=True, timeout=0.1)
            except queue.Empty:
                continue
            # If we're not blinking, set position
            if not eye_info.blink:
                self.client.send_message("/avatar/parameters/RightEyeX", eye_info.x)
                self.client.send_message("/avatar/parameters/LeftEyeX", eye_info.x)
                self.client.send_message("/avatar/parameters/EyesY", eye_info.y)
                if was_blinking:
                    self.client.send_message("/avatar/parameters/LeftEyeLid", float(0))
                    self.client.send_message("/avatar/parameters/RightEyeLid", float(0))
                    was_blinking = False
            else:
                self.client.send_message("/avatar/parameters/LeftEyeLid", float(1))
                self.client.send_message("/avatar/parameters/RightEyeLid", float(1))
                was_blinking = True
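A minimal driving sketch (hypothetical; `run()` only reads `.blink`, `.x`, and `.y` off each queued item, so a `SimpleNamespace` stands in for the real eye-info object defined elsewhere in the app):

```python
import queue
import threading
from types import SimpleNamespace

from osc import VRChatOSC

cancellation = threading.Event()
messages: "queue.Queue" = queue.Queue()
osc = VRChatOSC(cancellation, messages)
osc_thread = threading.Thread(target=osc.run)
osc_thread.start()

messages.put(SimpleNamespace(blink=False, x=0.25, y=-0.1))  # one fake gaze sample

cancellation.set()   # run() checks this flag on every loop iteration
osc_thread.join()
```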
@@ -1 +0,0 @@
vcruntime140_1.dll
Binary file not shown.
@@ -1,34 +0,0 @@
"""""" # start delvewheel patch
def _delvewheel_init_patch_0_0_15():
    import os
    import sys
    libs_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, 'pye3d.libs'))
    if sys.version_info[:2] >= (3, 8):
        if os.path.exists(os.path.join(sys.base_prefix, 'conda-meta')):
            # backup the state of the environment variable CONDA_DLL_SEARCH_MODIFICATION_ENABLE
            conda_dll_search_modification_enable = os.environ.get("CONDA_DLL_SEARCH_MODIFICATION_ENABLE")
            os.environ['CONDA_DLL_SEARCH_MODIFICATION_ENABLE'] = '1'

        os.add_dll_directory(libs_dir)

        if os.path.exists(os.path.join(sys.base_prefix, 'conda-meta')):
            # restore the state of the environment variable CONDA_DLL_SEARCH_MODIFICATION_ENABLE
            if conda_dll_search_modification_enable is None:
                os.environ.pop("CONDA_DLL_SEARCH_MODIFICATION_ENABLE", None)
            else:
                os.environ["CONDA_DLL_SEARCH_MODIFICATION_ENABLE"] = conda_dll_search_modification_enable
    else:
        from ctypes import WinDLL
        with open(os.path.join(libs_dir, '.load-order-pye3d-0.3.0')) as file:
            load_order = file.read().split()
        for lib in load_order:
            WinDLL(os.path.join(libs_dir, lib))


_delvewheel_init_patch_0_0_15()
del _delvewheel_init_patch_0_0_15
# end delvewheel patch

__version__ = "0.3.0"
@@ -1,6 +0,0 @@
from typing import Tuple, NamedTuple


class CameraModel(NamedTuple):
    focal_length: float
    resolution: Tuple[float, float]
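With focal length expressed in pixels, these two fields pin down the camera's field of view under the usual pinhole model; a small illustrative calculation (example numbers, not values from this repository):

```python
import math

cam = CameraModel(focal_length=283.0, resolution=(192.0, 192.0))  # example values
fov_h = 2 * math.degrees(math.atan(cam.resolution[0] / (2 * cam.focal_length)))
print(f"horizontal FOV is about {fov_h:.1f} degrees")  # ~37.5 for these numbers
```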
@@ -1,4 +0,0 @@
import typing as T

_EYE_RADIUS_DEFAULT: float = 10.392304845413264
DEFAULT_SPHERE_CENTER: T.Tuple[float, float, float] = (0.0, 0.0, 35.0)
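As a side note (an observation about the literal, not something stated in the source), the default eye radius is numerically exactly 6√3 mm:

```python
import math

print(6 * math.sqrt(3))  # 10.392304845413264 == _EYE_RADIUS_DEFAULT
```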
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -1,725 +0,0 @@
"""
(*)~---------------------------------------------------------------------------
Pupil - eye tracking platform
Copyright (C) 2012-2019 Pupil Labs

Distributed under the terms of the GNU
Lesser General Public License (LGPL v3.0).
See COPYING and COPYING.LESSER for license details.
---------------------------------------------------------------------------~(*)
"""
import enum
import logging
import traceback
from typing import Dict, NamedTuple, Type

import numpy as np
import cv2  # Todo: DELETE
from .geometry.projections import (
    unproject_edges_to_sphere,
    project_point_into_image_plane,
)  # Todo: DELETE

from .camera import CameraModel
from .constants import _EYE_RADIUS_DEFAULT
from .cpp.pupil_detection_3d import get_edges
from .cpp.pupil_detection_3d import search_on_sphere as search_on_sphere
from .geometry.primitives import Circle, Ellipse, Sphere
from .geometry.projections import (
    project_circle_into_image_plane,
    project_sphere_into_image_plane,
)
from .geometry.utilities import cart2sph, sph2cart
from .kalman import KalmanFilter
from .observation import (
    BinBufferedObservationStorage,
    BufferedObservationStorage,
    Observation,
)
from .eye_model import (
    SphereCenterEstimates,
    TwoSphereModelAbstract,
    TwoSphereModel,
    TwoSphereModelAsync,
)

logger = logging.getLogger(__name__)


class DetectorMode(enum.Enum):
    blocking = TwoSphereModel
    asynchronous = TwoSphereModelAsync

    @classmethod
    def from_name(cls, mode_name: str):
        return {mode.name: mode for mode in cls}[mode_name]


def ellipse2dict(ellipse: Ellipse) -> Dict:
    return {
        "center": (
            ellipse.center[0],
            ellipse.center[1],
        ),
        "axes": (
            ellipse.minor_radius,
            ellipse.major_radius,
        ),
        "angle": ellipse.angle,
    }


def circle2dict(circle: Circle) -> Dict:
    return {
        "center": (
            circle.center[0],
            circle.center[1],
            circle.center[2],
        ),
        "normal": (
            circle.normal[0],
            circle.normal[1],
            circle.normal[2],
        ),
        "radius": float(circle.radius),
    }


class Prediction(NamedTuple):
    sphere_center: np.ndarray
    pupil_circle: Circle


class Search3DResult(NamedTuple):
    circle: Circle
    confidence: float


def sigmoid(x, baseline=0.1, amplitude=500.0, center=0.99, width=0.02):
    return baseline + amplitude * 1.0 / (1.0 + np.exp(-(x - center) / width))


class Detector3D(object):
    def __init__(
        self,
        camera: CameraModel,
        threshold_swirski=0.7,
        threshold_kalman=0.98,
        threshold_short_term=0.8,
        threshold_long_term=0.98,
        long_term_buffer_size=30,
        long_term_forget_time=5,
        long_term_forget_observations=300,
        long_term_mode: DetectorMode = DetectorMode.blocking,
        model_update_interval_long_term=1.0,
        model_update_interval_ult_long_term=10.0,
        model_warmup_duration=5.0,
        calculate_rms_residual=False,
    ):
        self._camera = camera
        self._long_term_mode = long_term_mode
        self._calculate_rms_residual = calculate_rms_residual
        # NOTE: changing settings after initialization can lead to inconsistent behavior
        # if .reset() is not called.
        self._settings = {
            "threshold_swirski": threshold_swirski,
            "threshold_kalman": threshold_kalman,
            "threshold_short_term": threshold_short_term,
            "threshold_long_term": threshold_long_term,
            "long_term_buffer_size": long_term_buffer_size,
            "long_term_forget_time": long_term_forget_time,
            "long_term_forget_observations": long_term_forget_observations,
            "model_update_interval_long_term": model_update_interval_long_term,
            "model_update_interval_ult_long_term": model_update_interval_ult_long_term,
            "model_warmup_duration": model_warmup_duration,
        }
        self.reset()
        logger.debug(
            f"{type(self)} initialized with "
            f"long_term_mode={long_term_mode} "
            f"calculate_rms_residual={calculate_rms_residual} "
            f"settings={self._settings}"
        )

    @property
    def camera(self) -> CameraModel:
        return self._camera

    @property
    def long_term_mode(self) -> DetectorMode:
        return self._long_term_mode

    @long_term_mode.setter
    def long_term_mode(self, mode: DetectorMode):
        needs_reset = mode != self._long_term_mode
        self._long_term_mode = mode
        if needs_reset:
            self.reset()

    @property
    def is_long_term_model_frozen(self) -> bool:
        # Whether _ult_long_term_schedule is paused or not does not actually matter. The
        # _ult_long_term_model is only used for fitting the _long_term_model. If the
        # _long_term_schedule is paused, the _long_term_model is not being fitted and
        # therefore the state of _ult_long_term_model will be ignored.
        return self._long_term_schedule.is_paused

    @is_long_term_model_frozen.setter
    def is_long_term_model_frozen(self, should_be_frozen: bool) -> None:
        # We pause/resume _ult_long_term_schedule here as well to save CPU resources
        # while the _long_term_model is frozen.
        if should_be_frozen:
            self._long_term_schedule.pause()
            self._ult_long_term_schedule.pause()
        else:
            self._long_term_schedule.resume()
            self._ult_long_term_schedule.resume()

    def reset_camera(self, camera: CameraModel):
        """Change camera model and reset detector state."""
        self._camera = camera
        self.reset()

    def reset(self):
        self._cleanup_models()
        self._initialize_models(
            long_term_model_cls=self._long_term_mode.value,
            ultra_long_term_model_cls=self._long_term_mode.value,
        )
        self._long_term_schedule = _ModelUpdateSchedule(
            update_interval=self._settings["model_update_interval_long_term"],
            warmup_duration=self._settings["model_warmup_duration"],
        )
        self._ult_long_term_schedule = _ModelUpdateSchedule(
            update_interval=self._settings["model_update_interval_ult_long_term"],
            warmup_duration=self._settings["model_warmup_duration"],
        )

        self.kalman_filter = KalmanFilter()

    def _initialize_models(
        self,
        short_term_model_cls: Type[TwoSphereModelAbstract] = TwoSphereModel,
        long_term_model_cls: Type[TwoSphereModelAbstract] = TwoSphereModel,
        ultra_long_term_model_cls: Type[TwoSphereModelAbstract] = TwoSphereModel,
    ):
        # Recreate all models. This is required in case any of the settings (incl.
        # camera) changed in the meantime.
        self.short_term_model = short_term_model_cls(
            camera=self.camera,
            storage_cls=BufferedObservationStorage,
            storage_kwargs=dict(
                confidence_threshold=self._settings["threshold_short_term"],
                buffer_length=10,
            ),
        )
        self.long_term_model = long_term_model_cls(
            camera=self.camera,
            storage_cls=BinBufferedObservationStorage,
            storage_kwargs=dict(
                camera=self.camera,
                confidence_threshold=self._settings["threshold_long_term"],
                n_bins_horizontal=10,
                bin_buffer_length=self._settings["long_term_buffer_size"],
                forget_min_observations=self._settings["long_term_forget_observations"],
                forget_min_time=self._settings["long_term_forget_time"],
            ),
        )
        self.ultra_long_term_model = ultra_long_term_model_cls(
            camera=self.camera,
            storage_cls=BinBufferedObservationStorage,
            storage_kwargs=dict(
                camera=self.camera,
                confidence_threshold=self._settings["threshold_long_term"],
                n_bins_horizontal=10,
                bin_buffer_length=self._settings["long_term_buffer_size"],
                forget_min_observations=(
                    2 * self._settings["long_term_forget_observations"]
                ),
                forget_min_time=60,
            ),
        )

    def _cleanup_models(self):
        try:
            self.short_term_model.cleanup()
            self.long_term_model.cleanup()
            self.ultra_long_term_model.cleanup()
        except AttributeError:
            pass  # models have not been initialized yet

    def update_and_detect(
        self,
        pupil_datum: Dict,
        frame: np.ndarray,
        apply_refraction_correction: bool = True,
        debug: bool = False,
    ):
        # update models
        observation = self._extract_observation(pupil_datum)
        self.update_models(observation)

        # predict target variables
        sphere_center = self.long_term_model.sphere_center
        pupil_circle = self._predict_pupil_circle(observation, frame)
        prediction_uncorrected = Prediction(sphere_center, pupil_circle)

        # apply refraction correction
        if apply_refraction_correction:
            pupil_circle = self.long_term_model.apply_refraction_correction(
                pupil_circle
            )
            sphere_center = self.long_term_model.corrected_sphere_center
        # Falls back to uncorrected version if correction is disabled
        prediction_corrected = Prediction(sphere_center, pupil_circle)

        result = self._prepare_result(
            observation,
            prediction_uncorrected=prediction_uncorrected,
            prediction_corrected=prediction_corrected,
        )

        if debug:
            result["debug_info"] = self._collect_debug_info()

        return result

    def update_models(self, observation: Observation):
        self.short_term_model.add_observation(observation)
        self.long_term_model.add_observation(observation)
        self.ultra_long_term_model.add_observation(observation)

        if (
            self.short_term_model.n_observations <= 0
            or self.long_term_model.n_observations <= 0
            or self.ultra_long_term_model.n_observations <= 0
        ):
            return

        try:
            if self._ult_long_term_schedule.is_update_due(observation.timestamp):
                self.ultra_long_term_model.estimate_sphere_center(
                    calculate_rms_residual=self._calculate_rms_residual
                )

            if self._long_term_schedule.is_update_due(observation.timestamp):
                # update long term model with ultra long term bias
                long_term_estimate = self.long_term_model.estimate_sphere_center(
                    prior_3d=self.ultra_long_term_model.sphere_center,
                    prior_strength=0.1,
                    calculate_rms_residual=self._calculate_rms_residual,
                )
            else:
                # use existing sphere center estimates
                long_term_estimate = SphereCenterEstimates(
                    projected=self.long_term_model.projected_sphere_center,
                    three_dim=self.long_term_model.sphere_center,
                    rms_residual=self.long_term_model.rms_residual,
                )

            # update short term model with help of long-term model
            # using 2d center for disambiguation and 3d center as prior bias
            # prior strength is set as a function of circularity of the 2D pupil
            # when frozen: do not update
            if not self.is_long_term_model_frozen:
                circularity_mean = self.short_term_model.mean_observation_circularity()
                self.short_term_model.estimate_sphere_center(
                    from_2d=long_term_estimate.projected,
                    prior_3d=long_term_estimate.three_dim,
                    prior_strength=sigmoid(circularity_mean),
                    calculate_rms_residual=self._calculate_rms_residual,
                )
        except Exception:
            # Known issues:
            # - Can raise numpy.linalg.LinAlgError: SVD did not converge
            logger.error("Error updating models:")
            logger.debug(traceback.format_exc())

    def _extract_observation(self, pupil_datum: Dict) -> Observation:
        width, height = self.camera.resolution
        center = (
            pupil_datum["ellipse"]["center"][0] - width / 2,
            pupil_datum["ellipse"]["center"][1] - height / 2,
        )
        minor_radius = pupil_datum["ellipse"]["axes"][0] / 2.0
        major_radius = pupil_datum["ellipse"]["axes"][1] / 2.0
        angle = (pupil_datum["ellipse"]["angle"] - 90.0) * np.pi / 180.0
        ellipse = Ellipse(center, minor_radius, major_radius, angle)

        return Observation(
            ellipse,
            pupil_datum["confidence"],
            pupil_datum["timestamp"],
            self.camera.focal_length,
        )

    def _predict_pupil_circle(
        self, observation: Observation, frame: np.ndarray
    ) -> Circle:
        # NOTE: General idea: predict pupil circle from long and short term models based
        # on current observation. Filter results with a kalman filter.

        # Kalman filter needs to be queried every timestamp to update it internally.
        pupil_circle_kalman = self._predict_from_kalman_filter(observation.timestamp)

        if observation.confidence > self._settings["threshold_swirski"]:
            # high-confidence observation, use to construct pupil circle from models

            # The short-term model is best for estimating gaze direction (circle normal) if
            # one needs to assume slippage. The long-term model is more stable for
            # positions (center and radius).
            long_term = self.long_term_model.predict_pupil_circle(observation)
            if self.is_long_term_model_frozen:
                normal = long_term.normal
            else:
                short_term = self.short_term_model.predict_pupil_circle(observation)
                normal = short_term.normal
            pupil_circle = Circle(
                normal=normal,
                center=long_term.center,
                radius=long_term.radius,
            )

        else:
            # low confidence: use kalman prediction to search for circles in image
            pupil_circle, confidence_3d_search = self._predict_from_3d_search(
                frame, best_guess=pupil_circle_kalman
            )
            observation.confidence = confidence_3d_search

        if observation.confidence > self._settings["threshold_kalman"]:
            # very-high-confidence: correct kalman filter
            self._correct_kalman_filter(pupil_circle)

        return pupil_circle

    def _predict_from_kalman_filter(self, timestamp):
        phi, theta, pupil_radius_kalman = self.kalman_filter.predict(timestamp)
        gaze_vector_kalman = sph2cart(phi, theta)
        pupil_center_kalman = (
            self.short_term_model.sphere_center
            + _EYE_RADIUS_DEFAULT * gaze_vector_kalman
        )
        pupil_circle_kalman = Circle(
            pupil_center_kalman, gaze_vector_kalman, pupil_radius_kalman
        )
        return pupil_circle_kalman

    def _correct_kalman_filter(self, observed_pupil_circle: Circle):
        if observed_pupil_circle.is_null():
            return

        phi, theta, r = observed_pupil_circle.spherical_representation()
        self.kalman_filter.correct(phi, theta, r)

    def _predict_from_3d_search(
        # TODO: Remove debug code
        self,
        frame: np.ndarray,
        best_guess: Circle,
        debug=False,
    ) -> Search3DResult:
        no_result = Search3DResult(Circle.null(), 0.0)

        if best_guess.is_null():
            return no_result

        frame, frame_roi, edge_frame, edges, roi = get_edges(
            frame,
            best_guess.normal,
            best_guess.radius,
            self.long_term_model.sphere_center,
            _EYE_RADIUS_DEFAULT,
            self.camera.focal_length,
            self.camera.resolution,
            major_axis_factor=2.5,
        )

        if len(edges) <= 0:
            return no_result

        (gaze_vector, pupil_radius, final_edges, edges_on_sphere) = search_on_sphere(
            edges,
            best_guess.normal,
            best_guess.radius,
            self.long_term_model.sphere_center,
            _EYE_RADIUS_DEFAULT,
            self.camera.focal_length,
            self.camera.resolution,
        )

        if debug:
            frame_ = cv2.cvtColor(frame, cv2.COLOR_GRAY2BGR)
            try:
                for edge in edges_on_sphere:
                    edge = project_point_into_image_plane(
                        edge, self.camera.focal_length
                    ).astype(int)  # np.int is removed in modern NumPy; int behaves the same here
                    edge[0] += self.camera.resolution[0] / 2
                    edge[1] += self.camera.resolution[1] / 2
                    cv2.rectangle(
                        frame_,
                        (edge[0] - roi[2], edge[1] - roi[0]),
                        (edge[0] + 1 - roi[2], edge[1] + 1 - roi[0]),
                        (255, 0, 0),
                        2,
                    )

                for edge in final_edges:
                    edge = project_point_into_image_plane(
                        edge, self.camera.focal_length
                    ).astype(int)
                    edge[0] += self.camera.resolution[0] / 2
                    edge[1] += self.camera.resolution[1] / 2
                    cv2.rectangle(
                        frame_,
                        (edge[0] - roi[2], edge[1] - roi[0]),
                        (edge[0] + 1 - roi[2], edge[1] + 1 - roi[0]),
                        (255, 255, 255),
                        1,
                    )

                cv2.imshow("", frame_)
                cv2.waitKey(1)
            except Exception as e:
                print(e)

        pupil_center = (
            self.long_term_model.sphere_center + _EYE_RADIUS_DEFAULT * gaze_vector
        )
        pupil_circle = Circle(pupil_center, gaze_vector, pupil_radius)

        if pupil_circle.is_null():
            confidence_3d_search = 0.0
        else:
            ellipse_2d = project_circle_into_image_plane(
                pupil_circle,
                focal_length=self.camera.focal_length,
                transform=False,
                width=self.camera.resolution[0],
                height=self.camera.resolution[1],
            )
            if ellipse_2d:
                circumference = ellipse_2d.circumference()
                confidence_3d_search = np.clip(
                    len(final_edges) / circumference, 0.0, 1.0
                )
            else:
                confidence_3d_search = 0.0

        return Search3DResult(pupil_circle, confidence_3d_search * 0.6)

    def _prepare_result(
        self,
        observation: Observation,
        prediction_uncorrected: Prediction,
        prediction_corrected: Prediction,
    ) -> Dict:
        """[summary]

        Args:
            observation (Observation): [description]
            prediction_uncorrected (Prediction): Used for 2d projections
            prediction_corrected (Prediction): Used for 3d data

        Returns:
            Dict: pye3d pupil detection result
        """

        result = {
            "timestamp": observation.timestamp,
            "sphere": {
                "center": (
                    prediction_corrected.sphere_center[0],
                    prediction_corrected.sphere_center[1],
                    prediction_corrected.sphere_center[2],
                ),
                "radius": _EYE_RADIUS_DEFAULT,
            },
        }

        eye_sphere_projected = project_sphere_into_image_plane(
            Sphere(prediction_uncorrected.sphere_center, _EYE_RADIUS_DEFAULT),
            transform=True,
            focal_length=self.camera.focal_length,
            width=self.camera.resolution[0],
            height=self.camera.resolution[1],
        )
        result["projected_sphere"] = ellipse2dict(eye_sphere_projected)

        result["circle_3d"] = circle2dict(prediction_corrected.pupil_circle)

        result["diameter_3d"] = prediction_corrected.pupil_circle.radius * 2

        projected_pupil_circle = project_circle_into_image_plane(
            prediction_uncorrected.pupil_circle,
            focal_length=self.camera.focal_length,
            transform=True,
            width=self.camera.resolution[0],
            height=self.camera.resolution[1],
        )
        if not projected_pupil_circle:
            projected_pupil_circle = Ellipse(np.asarray([0.0, 0.0]), 0.0, 0.0, 0.0)

        result["ellipse"] = ellipse2dict(projected_pupil_circle)
        result["location"] = result["ellipse"]["center"]  # pupil center in pixels

        # projected_pupil_circle is an OpenCV ellipse, i.e. major_radius is the major diameter
        result["diameter"] = projected_pupil_circle.major_radius

        result["confidence"] = observation.confidence

        # Model confidence:
        # - Prior to version 0.1.0, model_confidence was fixed to 1.0 as there was no
        #   way to estimate it
        # - Starting with version 0.1.0, model_confidence is 1.0 by default but set to
        #   0.1 if at least one model output exceeds its physiologically reasonable
        #   range. These ranges also inform the input range for the refraction
        #   correction function.
        #   If the ranges are exceeded, it is likely that the model is either not fit
        #   well or the 2d input ellipse was a false detection.
        model_confidence_default = 1.0
        model_confidence_out_of_range = 0.1
        model_confidence_phi_theta_nan = 0.0

        result["model_confidence"] = model_confidence_default

        phi, theta = cart2sph(prediction_corrected.pupil_circle.normal)
        if not np.any(np.isnan([phi, theta])):
            result["theta"] = theta
            result["phi"] = phi

            is_phi_in_range = -80 <= np.rad2deg(phi) + 90.0 <= 80
            is_theta_in_range = -80 <= np.rad2deg(theta) - 90.0 <= 80
            if not is_phi_in_range or not is_theta_in_range:
                result["model_confidence"] = model_confidence_out_of_range
        else:
            result["theta"] = 0.0
            result["phi"] = 0.0
            result["model_confidence"] = model_confidence_phi_theta_nan

        is_center_x_in_range = -10 <= prediction_corrected.sphere_center[0] <= 10
        is_center_y_in_range = -10 <= prediction_corrected.sphere_center[1] <= 10
        is_center_z_in_range = 20 <= prediction_corrected.sphere_center[2] <= 75
        is_diameter_in_range = 1.0 <= result["diameter_3d"] <= 9.0
        parameters_in_range = (
            is_center_x_in_range,
            is_center_y_in_range,
            is_center_z_in_range,
            is_diameter_in_range,
        )
        if not all(parameters_in_range):
            result["model_confidence"] = model_confidence_out_of_range

        return result

    def _collect_debug_info(self):
        debug_info = {}

        projected_short_term = project_sphere_into_image_plane(
            Sphere(self.short_term_model.sphere_center, _EYE_RADIUS_DEFAULT),
            transform=True,
            focal_length=self.camera.focal_length,
            width=self.camera.resolution[0],
            height=self.camera.resolution[1],
        )
        projected_long_term = project_sphere_into_image_plane(
            Sphere(self.long_term_model.sphere_center, _EYE_RADIUS_DEFAULT),
            transform=True,
            focal_length=self.camera.focal_length,
            width=self.camera.resolution[0],
            height=self.camera.resolution[1],
        )
        projected_ultra_long_term = project_sphere_into_image_plane(
            Sphere(self.ultra_long_term_model.sphere_center, _EYE_RADIUS_DEFAULT),
            transform=True,
            focal_length=self.camera.focal_length,
            width=self.camera.resolution[0],
            height=self.camera.resolution[1],
        )
        debug_info["projected_short_term"] = ellipse2dict(projected_short_term)
        debug_info["projected_long_term"] = ellipse2dict(projected_long_term)
        debug_info["projected_ultra_long_term"] = ellipse2dict(
            projected_ultra_long_term
        )

        try:
            bin_data = self.long_term_model.storage.get_bin_counts()
            max_bin_level = np.max(bin_data)
            if max_bin_level >= 0:
                bin_data = bin_data / max_bin_level
            bin_data = np.flip(bin_data, axis=0)
            debug_info["bin_data"] = bin_data.tolist()
        except AttributeError:
            debug_info["bin_data"] = []

        # TODO: Pupil visualizer_pye3d.py attempts to draw Dierkes lines. Currently we
        # don't calculate them here, we could probably do that again. Based on which
        # model? Might be hard to do when things run in the background. We might have to
        # remove this from the visualizer_pye3d.py
        debug_info["Dierkes_lines"] = []

        return debug_info

    # pupil-detector interface: See base class implementation as reference:
    # https://github.com/pupil-labs/pupil-detectors/blob/master/src/pupil_detectors/detector_base.pyx

    PUBLIC_PROPERTY_NAMES = ("is_long_term_model_frozen",)

    def get_properties(self):
        return {
            property_name: getattr(self, property_name)
            for property_name in self.PUBLIC_PROPERTY_NAMES
            if hasattr(self, property_name)
        }

    def update_properties(self, properties):
        keys_to_update = set(self.PUBLIC_PROPERTY_NAMES)
        keys_to_update.intersection_update(properties.keys())
        for key in keys_to_update:
            expected_type = type(getattr(self, key))
            value = properties[key]
            try:
                value = expected_type(value)
            except ValueError as e:
                raise ValueError(
                    f"Value `{repr(value)}` for key `{key}` could not be converted to"
                    f" expected type: {expected_type}"
                ) from e
            setattr(self, key, value)


class _ModelUpdateSchedule:
    def __init__(self, update_interval: float, warmup_duration: float = 5.0) -> None:
        self._update_interval = update_interval
        self._warmup_duration = warmup_duration
        self._warmup_start = None
        self._paused = False
        self._last_update = None

    @property
    def is_paused(self) -> bool:
        return self._paused

    def pause(self) -> None:
        self._paused = True

    def resume(self) -> None:
        self._paused = False
        self._last_update = None

    def is_update_due(self, current_time: float):
        if self._paused:
            return False
        if self._warmup_start is None:
            self._warmup_start = current_time
            return True
        if current_time - self._warmup_start < self._warmup_duration:
            return True
        if self._last_update is None:
            self._last_update = current_time
            return True
        if current_time - self._last_update > self._update_interval:
            self._last_update = current_time
            return True
        return False
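A minimal sketch of driving `Detector3D` (hypothetical wiring: the `pye3dcustom` package name is taken from eyetrackapp.spec, the `pupil_datum` layout from `_extract_observation` above, and the numbers are placeholders; the package's compiled `cpp` extensions must be importable):

```python
import numpy as np

from pye3dcustom.camera import CameraModel
from pye3dcustom.detector_3d import Detector3D

camera = CameraModel(focal_length=283.0, resolution=(192.0, 192.0))  # placeholder intrinsics
detector = Detector3D(camera=camera)

pupil_datum = {
    "ellipse": {"center": (96.0, 96.0), "axes": (40.0, 48.0), "angle": 90.0},
    "confidence": 0.99,   # above threshold_swirski, so the model path is used
    "timestamp": 0.0,
}
gray_frame = np.zeros((192, 192), dtype=np.uint8)

result = detector.update_and_detect(pupil_datum, gray_frame)
print(result["sphere"]["center"], result["confidence"])
```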
@@ -1,22 +0,0 @@
"""
(*)~---------------------------------------------------------------------------
Pupil - eye tracking platform
Copyright (C) 2012-2019 Pupil Labs

Distributed under the terms of the GNU
Lesser General Public License (LGPL v3.0).
See COPYING and COPYING.LESSER for license details.
---------------------------------------------------------------------------~(*)
"""

from .abstract import TwoSphereModelAbstract, SphereCenterEstimates
from .base import TwoSphereModel
from .asynchronous import TwoSphereModelAsync


__all__ = [
    "TwoSphereModelAbstract",
    "TwoSphereModel",
    "TwoSphereModelAsync",
    "SphereCenterEstimates",
]
@@ -1,116 +0,0 @@
"""
(*)~---------------------------------------------------------------------------
Pupil - eye tracking platform
Copyright (C) 2012-2019 Pupil Labs

Distributed under the terms of the GNU
Lesser General Public License (LGPL v3.0).
See COPYING and COPYING.LESSER for license details.
---------------------------------------------------------------------------~(*)
"""
import abc
import typing as T

import numpy as np

from ..geometry.primitives import Circle
from ..observation import Observation, ObservationStorage
from ..camera import CameraModel


class SphereCenterEstimates(T.NamedTuple):
    projected: np.ndarray
    three_dim: np.ndarray
    rms_residual: T.Optional[float] = None


class TwoSphereModelAbstract(abc.ABC):
    @abc.abstractmethod
    def __init__(
        self,
        camera: CameraModel,
        storage_cls: T.Type[ObservationStorage] = None,
        storage_kwargs: T.Dict = None,
    ):
        raise NotImplementedError

    @abc.abstractmethod
    def add_observation(self, observation: Observation):
        raise NotImplementedError

    @property
    @abc.abstractmethod
    def n_observations(self) -> int:
        raise NotImplementedError

    @property
    @abc.abstractmethod
    def sphere_center(self) -> np.ndarray:
        raise NotImplementedError

    @property
    @abc.abstractmethod
    def corrected_sphere_center(self) -> np.ndarray:
        raise NotImplementedError

    @property
    @abc.abstractmethod
    def projected_sphere_center(self) -> np.ndarray:
        raise NotImplementedError

    @abc.abstractmethod
    def set_sphere_center(self, new_sphere_center: np.ndarray):
        raise NotImplementedError

    @abc.abstractmethod
    def estimate_sphere_center(
        self,
        from_2d: T.Optional[np.ndarray] = None,
        prior_3d: T.Optional[np.ndarray] = None,
        prior_strength: float = 0.0,
        calculate_rms_residual: bool = False,
    ) -> SphereCenterEstimates:
        raise NotImplementedError

    @abc.abstractmethod
    def estimate_sphere_center_2d(self) -> np.ndarray:
        raise NotImplementedError

    @abc.abstractmethod
    def estimate_sphere_center_3d(
        self,
        sphere_center_2d: np.ndarray,
        prior_3d: T.Optional[np.ndarray] = None,
        prior_strength: float = 0.0,
        calculate_rms_residual: bool = False,
    ) -> T.Tuple[np.array, T.Optional[float]]:
        raise NotImplementedError

    # GAZE PREDICTION
    @abc.abstractmethod
    def _extract_unproject_disambiguate(self, pupil_datum: T.Dict) -> Circle:
        raise NotImplementedError

    @abc.abstractmethod
    def _disambiguate_circle_3d_pair(
        self, circle_3d_pair: T.Tuple[Circle, Circle]
    ) -> Circle:
        raise NotImplementedError

    @abc.abstractmethod
    def predict_pupil_circle(
        self, observation: Observation, use_unprojection: bool = False
    ) -> Circle:
        raise NotImplementedError

    @abc.abstractmethod
    def apply_refraction_correction(self, pupil_circle: Circle) -> Circle:
        raise NotImplementedError

    @abc.abstractmethod
    def mean_observation_circularity(self) -> float:
        raise NotImplementedError

    @abc.abstractmethod
    def cleanup(self):
        raise NotImplementedError
@ -1,320 +0,0 @@
"""
(*)~---------------------------------------------------------------------------
Pupil - eye tracking platform
Copyright (C) 2012-2019 Pupil Labs

Distributed under the terms of the GNU
Lesser General Public License (LGPL v3.0).
See COPYING and COPYING.LESSER for license details.
---------------------------------------------------------------------------~(*)
"""

import ctypes
import logging
import typing as T

import numpy as np

from ..constants import DEFAULT_SPHERE_CENTER
from .abstract import (
    TwoSphereModelAbstract,
    CameraModel,
    Circle,
    Observation,
    ObservationStorage,
    SphereCenterEstimates,
)
from .background_helper import BackgroundProcess, mp
from .base import TwoSphereModel

logger = logging.getLogger(__name__)


class TwoSphereModelAsync(TwoSphereModelAbstract):
    def __init__(
        self,
        camera: CameraModel,
        storage_cls: T.Type[ObservationStorage] = None,
        storage_kwargs: T.Dict = None,
    ):
        synced_sphere_center = mp.Array(ctypes.c_double, 3)
        synced_corrected_sphere_center = mp.Array(ctypes.c_double, 3)
        synced_projected_sphere_center = mp.Array(ctypes.c_double, 2)
        synced_observation_count = mp.Value(ctypes.c_long)
        synced_rms_residual = mp.Value(ctypes.c_double)
        is_estimation_ongoing_flag = mp.Event()

        self._frontend = _TwoSphereModelSyncedFrontend(
            synced_sphere_center,
            synced_corrected_sphere_center,
            synced_projected_sphere_center,
            synced_observation_count,
            synced_rms_residual,
            is_estimation_ongoing_flag,
            camera=camera,
        )
        self._backend_process = BackgroundProcess(
            function=self._process_relayed_commands,
            setup=self._setup_backend,
            setup_args=(
                synced_sphere_center,
                synced_corrected_sphere_center,
                synced_projected_sphere_center,
                synced_observation_count,
                synced_rms_residual,
                is_estimation_ongoing_flag,
            ),
            setup_kwargs=dict(
                camera=camera,
                storage_cls=storage_cls,
                storage_kwargs=storage_kwargs,
            ),
            cleanup=self._cleanup_backend,
            log_handlers=logging.getLogger().handlers,
        )

    @property
    def sphere_center(self) -> np.ndarray:
        return self._frontend.sphere_center

    @property
    def corrected_sphere_center(self) -> np.ndarray:
        return self._frontend.corrected_sphere_center

    @property
    def projected_sphere_center(self) -> np.ndarray:
        return self._frontend.projected_sphere_center

    @property
    def rms_residual(self) -> float:
        return self._frontend.rms_residual

    def relay_command(self, function_name: str, *args, **kwargs):
        self._backend_process.send(function_name, *args, **kwargs)

    @staticmethod
    def _process_relayed_commands(
        backend: "_TwoSphereModelSyncedBackend", function_name: str, *args, **kwargs
    ):
        function = getattr(backend, function_name)
        return function(*args, **kwargs)

    @staticmethod
    def _setup_backend(*args, **kwargs) -> "_TwoSphereModelSyncedBackend":
        logger = logging.getLogger(__name__)
        logger.debug(f"Setting up backend: {args}, {kwargs}")
        return _TwoSphereModelSyncedBackend(*args, **kwargs)

    @staticmethod
    def _cleanup_backend(backend: "_TwoSphereModelSyncedBackend"):
        backend.cleanup()
        logger = logging.getLogger(__name__)
        logger.debug("Backend cleaned")

    def add_observation(self, observation: Observation):
        self.relay_command("add_observation", observation)

    @property
    def n_observations(self) -> int:
        return self._frontend.n_observations

    def set_sphere_center(self, new_sphere_center: np.ndarray):
        raise NotImplementedError

    def estimate_sphere_center(
        self,
        from_2d: T.Optional[np.ndarray] = None,
        prior_3d: T.Optional[np.ndarray] = None,
        prior_strength: float = 0.0,
        calculate_rms_residual=False,
    ) -> SphereCenterEstimates:
        if not self._frontend._is_estimation_ongoing_flag.is_set():
            self.relay_command(
                "estimate_sphere_center",
                from_2d,
                prior_3d,
                prior_strength,
                calculate_rms_residual,
            )
            self._frontend._is_estimation_ongoing_flag.set()
        projected_sphere_center = self._frontend.projected_sphere_center
        sphere_center = self._frontend.sphere_center
        rms_residual = self._frontend.rms_residual
        return SphereCenterEstimates(
            projected_sphere_center, sphere_center, rms_residual
        )

    def estimate_sphere_center_2d(self) -> np.ndarray:
        raise NotImplementedError

    def estimate_sphere_center_3d(
        self,
        sphere_center_2d: np.ndarray,
        prior_3d: T.Optional[np.ndarray] = None,
        prior_strength: float = 0.0,
        calculate_rms_residual: bool = False,
    ) -> T.Tuple[np.array, T.Optional[float]]:
        raise NotImplementedError

    # GAZE PREDICTION
    def _extract_unproject_disambiguate(self, pupil_datum: T.Dict) -> Circle:
        return self._frontend._extract_unproject_disambiguate(pupil_datum)

    def _disambiguate_circle_3d_pair(
        self, circle_3d_pair: T.Tuple[Circle, Circle]
    ) -> Circle:
        return self._frontend._disambiguate_circle_3d_pair(circle_3d_pair)

    def predict_pupil_circle(
        self, observation: Observation, use_unprojection: bool = False
    ) -> Circle:
        return self._frontend.predict_pupil_circle(observation, use_unprojection)

    def apply_refraction_correction(self, pupil_circle: Circle) -> Circle:
        return self._frontend.apply_refraction_correction(pupil_circle)

    def cleanup(self):
        logger.debug("Cancelling backend process")
        self._backend_process.cancel()
        self._frontend.cleanup()

    def mean_observation_circularity(self) -> float:
        raise NotImplementedError


class _TwoSphereModelSyncedAbstract(TwoSphereModel):
    def __init__(
        self,
        synced_sphere_center: mp.Array,  # c_double_Array_3
        synced_corrected_sphere_center: mp.Array,  # c_double_Array_3
        synced_projected_sphere_center: mp.Array,  # c_double_Array_2
        synced_observation_count: mp.Value,  # c_long
        synced_rms_residual: mp.Value,  # c_double
        flag_is_estimation_ongoing: mp.Event,
        **kwargs,
    ):
        self._synced_sphere_center = synced_sphere_center
        self._synced_corrected_sphere_center = synced_corrected_sphere_center
        self._synced_projected_sphere_center = synced_projected_sphere_center
        self._synced_observation_count = synced_observation_count
        self._synced_rms_residual = synced_rms_residual
        self._is_estimation_ongoing_flag = flag_is_estimation_ongoing
        super().__init__(**kwargs)

    @property
    def sphere_center(self):
        with self._synced_sphere_center:
            return np.array(self._synced_sphere_center.get_obj())

    @sphere_center.setter
    def sphere_center(self, coordinates: np.array):
        raise NotImplementedError

    @property
    def corrected_sphere_center(self):
        with self._synced_corrected_sphere_center:
            return np.array(self._synced_corrected_sphere_center.get_obj())

    @corrected_sphere_center.setter
    def corrected_sphere_center(self, coordinates: np.array):
        raise NotImplementedError

    @property
    def projected_sphere_center(self):
        with self._synced_projected_sphere_center:
            return np.array(self._synced_projected_sphere_center.get_obj())

    @projected_sphere_center.setter
    def projected_sphere_center(self, coordinates: np.array):
        raise NotImplementedError

    def mean_observation_circularity(self) -> float:
        raise NotImplementedError

    @property
    def rms_residual(self) -> float:
        with self._synced_rms_residual:
            return self._synced_rms_residual.value

    @rms_residual.setter
    def rms_residual(self, residual: float):
        raise NotImplementedError


class _TwoSphereModelSyncedFrontend(_TwoSphereModelSyncedAbstract):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        del self.storage  # There is no storage in the frontend

    def _set_default_model_params(self):
        with self._synced_sphere_center:
            self._synced_sphere_center[:] = DEFAULT_SPHERE_CENTER

        corrected_sphere_center = self.refractionizer.correct_sphere_center(
            np.asarray([[*self.sphere_center]])
        )[0]
        with self._synced_corrected_sphere_center:
            self._synced_corrected_sphere_center[:] = corrected_sphere_center

    @property
    def n_observations(self) -> int:
        return self._synced_observation_count.value


class _TwoSphereModelSyncedBackend(_TwoSphereModelSyncedAbstract):
    @property
    def sphere_center(self):
        return super().sphere_center

    @sphere_center.setter
    def sphere_center(self, coordinates: np.array):
        with self._synced_sphere_center:
            self._synced_sphere_center[:] = coordinates

    @property
    def corrected_sphere_center(self):
        return super().corrected_sphere_center

    @corrected_sphere_center.setter
    def corrected_sphere_center(self, coordinates: np.array):
        with self._synced_corrected_sphere_center:
            self._synced_corrected_sphere_center[:] = coordinates

    @property
    def projected_sphere_center(self):
        return super().projected_sphere_center

    @projected_sphere_center.setter
    def projected_sphere_center(self, coordinates: np.array):
        with self._synced_projected_sphere_center:
            self._synced_projected_sphere_center[:] = coordinates

    def add_observation(self, observation: Observation):
        super().add_observation(observation=observation)
        n_observations = super().n_observations
        with self._synced_observation_count:
            self._synced_observation_count.value = n_observations

    @property
    def n_observations(self) -> int:
        return self._synced_observation_count.value

    def estimate_sphere_center(self, *args, **kwargs):
        result = super().estimate_sphere_center(*args, **kwargs)
        self._is_estimation_ongoing_flag.clear()
        return result

    def estimate_sphere_center_2d(self) -> np.ndarray:
        estimated: np.ndarray = super().estimate_sphere_center_2d()
        self.projected_sphere_center = estimated
        return estimated

    @property
    def rms_residual(self) -> float:
        with self._synced_rms_residual:
            return self._synced_rms_residual.value

    @rms_residual.setter
    def rms_residual(self, residual: float):
        with self._synced_rms_residual:
            self._synced_rms_residual.value = residual
@ -1,164 +0,0 @@
"""
(*)~---------------------------------------------------------------------------
Pupil - eye tracking platform
Copyright (C) 2012-2020 Pupil Labs

Distributed under the terms of the GNU
Lesser General Public License (LGPL v3.0).
See COPYING and COPYING.LESSER for license details.
---------------------------------------------------------------------------~(*)
"""

import logging
import multiprocessing as mp
import queue
import signal
import time
import traceback
from ctypes import c_bool
from logging import Handler
from logging.handlers import QueueHandler, QueueListener
from typing import Any, Callable, Dict, Iterable, Optional, Tuple, TypeVar

logger = logging.getLogger(__name__)
WorkerSetupResult = TypeVar("WorkerSetupResult")
WorkerFunctionResult = TypeVar("WorkerFunctionResult")


class BackgroundProcess:
    class StoppedError(Exception):
        """Interaction with a BackgroundProcess that was stopped."""

    class NothingToReceiveError(Exception):
        """Trying to receive data from BackgroundProcess without sending input first."""

    class MultipleSendError(Exception):
        """Trying to send data without first receiving previous output."""

    def __init__(
        self,
        setup: Callable[..., WorkerSetupResult],
        function: Callable[[WorkerSetupResult], WorkerFunctionResult],
        cleanup: Callable[[WorkerSetupResult], None],
        setup_args: Optional[Tuple] = None,
        setup_kwargs: Optional[Dict] = None,
        log_handlers: Iterable[Handler] = (),
    ):
        self._running = True

        self._task_queue = mp.Queue(maxsize=500)  # TODO: figure out good value

        logging_queue = mp.Queue()
        self._log_listener = QueueListener(logging_queue, *log_handlers)
        self._log_listener.start()

        self._should_terminate_flag = mp.Value(c_bool, 0)

        self._process = mp.Process(
            name="Pye3D Background Process",
            daemon=True,
            target=BackgroundProcess._worker,
            kwargs=dict(
                setup=setup,
                function=function,
                cleanup=cleanup,
                task_queue=self._task_queue,
                should_terminate_flag=self._should_terminate_flag,
                logging_queue=logging_queue,
                setup_args=setup_args if setup_args else (),
                setup_kwargs=setup_kwargs if setup_kwargs else {},
            ),
        )
        self._process.start()

    @property
    def running(self) -> bool:
        """Whether background task is running (not necessarily doing work)."""
        return self._running and self._process.is_alive()

    def send(self, *args: Tuple[Any], **kwargs: Dict[Any, Any]):
        """Send data to background process for processing.

        Raises StoppedError when called on a stopped process.
        """
        if not self.running:
            logger.error("Background process was closed previously!")
            raise BackgroundProcess.StoppedError()

        try:
            self._task_queue.put_nowait({"args": args, "kwargs": kwargs})
        except queue.Full:
            logger.debug(f"Dropping task! args: {args}, kwargs: {kwargs}")

    def cancel(self, timeout=-1):
        """Stop process as soon as current task is finished."""
        self._should_terminate_flag.value = 1
        if self.running:
            self._task_queue.close()
            self._task_queue.cancel_join_thread()
            self._task_queue.join_thread()
        self._process.join(timeout)
        self._running = False
        self._log_listener.stop()

    @staticmethod
    def _install_sigint_interception():
        def interrupt_handler(sig, frame):
            trace = traceback.format_stack(f=frame)
            logger.debug(f"Caught (and dropping) signal {sig} in:\n" + "".join(trace))

        signal.signal(signal.SIGINT, interrupt_handler)

    @staticmethod
    def _worker(
        setup: Callable[..., WorkerSetupResult],
        function: Callable[[WorkerSetupResult], Any],
        cleanup: Callable[[WorkerSetupResult], None],
        task_queue: mp.Queue,
        should_terminate_flag: mp.Value,
        logging_queue: mp.Queue,
        setup_args: Tuple,
        setup_kwargs: Dict,
    ):
        log_queue_handler = QueueHandler(logging_queue)
        logger = logging.getLogger()
        logger.setLevel(logging.NOTSET)
        logger.addHandler(log_queue_handler)

        # Intercept SIGINT (ctrl-c), do required cleanup in foreground process!
        BackgroundProcess._install_sigint_interception()

        setup_result: WorkerSetupResult = setup(*setup_args, **setup_kwargs)

        while not should_terminate_flag.value:
            try:
                params = task_queue.get(block=True, timeout=0.1)
                args = params["args"]
                kwargs = params["kwargs"]
            except queue.Empty:
                continue
            # except EOFError:
            #     logger.info("Pipe was closed from foreground process.")
            #     break

            try:
                t0 = time.perf_counter()
                function(setup_result, *args, **kwargs)
                t1 = time.perf_counter()
                # logger.debug(f"Finished background calculation in {(t1 - t0):.2}s")
            except Exception as e:
                logger.error(
                    f"Error executing background process with parameters {params}:\n{e}"
                )
                logger.debug(traceback.format_exc())
                break
        else:
            logger.info("Background process received termination signal.")

        cleanup(setup_result)

        logger.info("Stopping background process.")
        logger.removeHandler(log_queue_handler)
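
# --- Illustrative sketch, not part of the original module ---
# BackgroundProcess wires `setup`/`function`/`cleanup` into a daemon worker
# process fed through a bounded task queue; send() silently drops work when
# the queue is full, and cancel() stops the worker after the current task.
# A minimal, hypothetical standalone use; note that on spawn-based platforms
# the callables must be picklable, i.e. defined at module level:
#
#   def _setup():
#       return {"handled": 0}          # runs once inside the worker
#
#   def _work(state, value):
#       state["handled"] += 1          # runs once per send()
#
#   def _teardown(state):
#       pass                           # runs when the worker shuts down
#
#   proc = BackgroundProcess(setup=_setup, function=_work, cleanup=_teardown)
#   proc.send(42)
#   proc.cancel()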
@ -1,297 +0,0 @@
"""
(*)~---------------------------------------------------------------------------
Pupil - eye tracking platform
Copyright (C) 2012-2019 Pupil Labs

Distributed under the terms of the GNU
Lesser General Public License (LGPL v3.0).
See COPYING and COPYING.LESSER for license details.
---------------------------------------------------------------------------~(*)
"""
import logging
import typing as T

import numpy as np

from .abstract import TwoSphereModelAbstract, SphereCenterEstimates
from ..camera import CameraModel
from ..constants import _EYE_RADIUS_DEFAULT, DEFAULT_SPHERE_CENTER
from ..geometry.intersections import nearest_point_on_sphere_to_line
from ..geometry.primitives import Circle, Line
from ..geometry.projections import (
    project_line_into_image_plane,
    project_point_into_image_plane,
    unproject_ellipse,
)
from ..geometry.utilities import normalize
from ..observation import BasicStorage, Observation, ObservationStorage
from ..refraction import Refractionizer

logger = logging.getLogger(__name__)


class TwoSphereModel(TwoSphereModelAbstract):
    def __init__(
        self,
        camera: CameraModel,
        storage_cls: T.Type[ObservationStorage] = None,
        storage_kwargs: T.Dict = None,
    ):
        if storage_cls:
            kwargs = storage_kwargs if storage_kwargs is not None else {}
            self.storage = storage_cls(**kwargs)
        else:
            self.storage = BasicStorage()
        self.camera = camera

        self.refractionizer = Refractionizer()
        self._set_default_model_params()

    @property
    def sphere_center(self) -> np.ndarray:
        return self._sphere_center

    @sphere_center.setter
    def sphere_center(self, coordinates: np.ndarray):
        self._sphere_center = coordinates

    @property
    def corrected_sphere_center(self) -> np.ndarray:
        return self._corrected_sphere_center

    @corrected_sphere_center.setter
    def corrected_sphere_center(self, coordinates: np.ndarray):
        self._corrected_sphere_center = coordinates

    @property
    def projected_sphere_center(self) -> np.ndarray:
        return self._projected_sphere_center

    @projected_sphere_center.setter
    def projected_sphere_center(self, projected_sphere_center: np.ndarray):
        self._projected_sphere_center = projected_sphere_center

    def _set_default_model_params(self):
        # Overwrite in subclasses that do not allow setting these attributes
        self._sphere_center = np.asarray(DEFAULT_SPHERE_CENTER)
        self._corrected_sphere_center = self.refractionizer.correct_sphere_center(
            np.asarray([[*self.sphere_center]])
        )[0]
        self.rms_residual = np.nan

    def add_observation(self, observation: Observation):
        self.storage.add(observation)

    @property
    def n_observations(self) -> int:
        return self.storage.count()

    def set_sphere_center(self, new_sphere_center):
        self.sphere_center = new_sphere_center
        self.corrected_sphere_center = self.refractionizer.correct_sphere_center(
            np.asarray([[*self.sphere_center]])
        )[0]

    def estimate_sphere_center(
        self,
        from_2d=None,
        prior_3d=None,
        prior_strength=0.0,
        calculate_rms_residual=False,
    ):
        self.projected_sphere_center = (
            from_2d if from_2d is not None else self.estimate_sphere_center_2d()
        )
        sphere_center, rms_residual = self.estimate_sphere_center_3d(
            self.projected_sphere_center,
            prior_3d,
            prior_strength,
            calculate_rms_residual=calculate_rms_residual,
        )
        self.set_sphere_center(sphere_center)
        self.rms_residual = rms_residual if rms_residual is not None else float("nan")
        return SphereCenterEstimates(
            self.projected_sphere_center, sphere_center, rms_residual
        )

    def estimate_sphere_center_2d(self):
        observations = self.storage.observations

        # slightly faster than np.array
        aux_2d = np.concatenate([obs.aux_2d for obs in observations])
        aux_2d.shape = -1, 2, 3

        # Estimate projected sphere center by nearest intersection of 2d gaze lines
        sum_aux_2d = aux_2d.sum(axis=0)
        projected_sphere_center = np.linalg.pinv(sum_aux_2d[:2, :2]) @ sum_aux_2d[:2, 2]

        return projected_sphere_center
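
    # --- Illustrative sketch, not part of the original module ---
    # estimate_sphere_center_2d solves a least-squares intersection of all 2d
    # gaze lines: each observation caches a projector P = I - v v^T and the
    # vector P @ origin (see Observation.aux_2d), and summing the normal
    # equations sum(P_i) x = sum(P_i o_i) yields the point closest to every
    # line. A standalone check with the lines y = 1 and x = 1, which meet at
    # (1, 1):
    #
    #   origins = [np.array([0.0, 1.0]), np.array([1.0, 0.0])]
    #   directions = [np.array([1.0, 0.0]), np.array([0.0, 1.0])]
    #   R, q = np.zeros((2, 2)), np.zeros(2)
    #   for o, d in zip(origins, directions):
    #       P = np.eye(2) - np.outer(d, d)
    #       R += P
    #       q += P @ o
    #   np.linalg.pinv(R) @ q   # -> array([1., 1.])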

    def estimate_sphere_center_3d(
        self,
        sphere_center_2d,
        prior_3d=None,
        prior_strength=0.0,
        calculate_rms_residual=False,
    ) -> T.Tuple[np.array, T.Optional[float]]:
        observations, aux_3d, gaze_2d = self._prep_data()
        sum_aux_3d, disamb_indices, aux_3d_disamb = self._disambiguate_dierkes_lines(
            aux_3d, gaze_2d, sphere_center_2d
        )
        sphere_center = self._calc_sphere_center(sum_aux_3d, prior_3d, prior_strength)

        rms_residual = (
            self._calc_rms_residual(
                observations, disamb_indices, sphere_center, aux_3d_disamb
            )
            if calculate_rms_residual
            else None
        )

        return sphere_center, rms_residual

    def _prep_data(self):
        observations = self.storage.observations
        aux_3d = np.concatenate([obs.aux_3d for obs in observations])
        aux_3d.shape = -1, 2, 3, 4
        gaze_2d = np.concatenate([obs.gaze_2d_line for obs in observations])
        gaze_2d.shape = -1, 4
        return observations, aux_3d, gaze_2d

    def _disambiguate_dierkes_lines(self, aux_3d, gaze_2d, sphere_center_2d):
        # Disambiguate Dierkes lines
        # We want gaze_2d to point towards the sphere center. gaze_2d was
        # collected from Dierkes[0]. If it points in the correct direction, we
        # know that Dierkes[0] is the correct one to use, otherwise we need to
        # use Dierkes[1]. We can check that with the sign of the dot product.
        gaze_2d_origins = gaze_2d[:, :2]
        gaze_2d_directions = gaze_2d[:, 2:]
        gaze_2d_towards_center = gaze_2d_origins - sphere_center_2d

        dot_products = np.sum(gaze_2d_towards_center * gaze_2d_directions, axis=1)
        disambiguation_indices = np.where(dot_products < 0, 1, 0)

        obs_idc = np.arange(disambiguation_indices.shape[0])
        aux_3d_disambiguated = aux_3d[obs_idc, disambiguation_indices, :, :]

        # Estimate sphere center by nearest intersection of Dierkes lines
        sum_aux_3d = aux_3d_disambiguated.sum(axis=0)
        return sum_aux_3d, disambiguation_indices, aux_3d_disambiguated
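
    # --- Illustrative sketch, not part of the original module ---
    # The sign test above for a single observation: if the 2d gaze direction
    # points away from the projected sphere center (negative dot product),
    # the second candidate of the Dierkes-line pair is selected:
    #
    #   origin, direction = np.array([10.0, 0.0]), np.array([-1.0, 0.0])
    #   towards_center = origin - sphere_center_2d   # with center at (0, 0)
    #   index = 1 if np.dot(towards_center, direction) < 0 else 0   # -> 1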

    def _calc_sphere_center(self, sum_aux_3d, prior_3d=None, prior_strength=0.0):
        matrix = sum_aux_3d[:3, :3]
        try:
            if prior_3d is None:
                return np.linalg.pinv(matrix) @ sum_aux_3d[:3, 3]
            else:
                return np.linalg.pinv(matrix + prior_strength * np.eye(3)) @ (
                    sum_aux_3d[:3, 3] + prior_strength * prior_3d
                )
        except np.linalg.LinAlgError:
            # happens if lines are parallel, very rare
            return DEFAULT_SPHERE_CENTER

    def _calc_rms_residual(
        self, observations, disamb_indices, sphere_center, aux_3d_disamb
    ):
        # Here we use eq. (10) in https://docplayer.net/21072949-Least-squares-intersection-of-lines.html.
        origins_dierkes_lines = np.array(
            [
                obs.get_Dierkes_line(idx).origin
                for obs, idx in zip(observations, disamb_indices)
            ]
        )
        origins_dierkes_lines.shape = -1, 3, 1
        deltas = origins_dierkes_lines - sphere_center[:, np.newaxis]
        tmp = np.einsum("ijk,ikl->ijl", aux_3d_disamb[:, :3, :3], deltas)
        squared_residuals = np.einsum(
            "ikj,ijk->i", np.transpose(deltas, (0, 2, 1)), tmp
        )
        rms_residual = np.clip(squared_residuals, 0.0, None)
        rms_residual = np.mean(np.sqrt(rms_residual))
        return rms_residual

    # GAZE PREDICTION
    def _extract_unproject_disambiguate(self, pupil_datum):
        ellipse = self._extract_ellipse(pupil_datum)
        circle_3d_pair = unproject_ellipse(ellipse, self.camera.focal_length)
        if circle_3d_pair:
            circle_3d = self._disambiguate_circle_3d_pair(circle_3d_pair)
        else:
            circle_3d = Circle([0.0, 0.0, 0.0], [0.0, 0.0, -1.0], 0.0)
        return circle_3d

    def _disambiguate_circle_3d_pair(self, circle_3d_pair):
        circle_center_2d = project_point_into_image_plane(
            circle_3d_pair[0].center, self.camera.focal_length
        )
        circle_normal_2d = normalize(
            project_line_into_image_plane(
                Line(circle_3d_pair[0].center, circle_3d_pair[0].normal),
                self.camera.focal_length,
            ).direction
        )
        sphere_center_2d = project_point_into_image_plane(
            self.sphere_center, self.camera.focal_length
        )

        if np.dot(circle_center_2d - sphere_center_2d, circle_normal_2d) >= 0:
            return circle_3d_pair[0]
        else:
            return circle_3d_pair[1]

    def predict_pupil_circle(
        self, observation: Observation, use_unprojection: bool = False
    ) -> Circle:
        if observation.invalid:
            return Circle.null()

        circle_3d = self._disambiguate_circle_3d_pair(observation.circle_3d_pair)
        unprojection_depth = np.linalg.norm(circle_3d.center)
        direction = circle_3d.center / unprojection_depth

        nearest_point_on_sphere = nearest_point_on_sphere_to_line(
            self.sphere_center, _EYE_RADIUS_DEFAULT, [0.0, 0.0, 0.0], direction
        )

        if use_unprojection:
            gaze_vector = circle_3d.normal
        else:
            gaze_vector = normalize(nearest_point_on_sphere - self.sphere_center)

        radius = np.linalg.norm(nearest_point_on_sphere) / unprojection_depth
        pupil_circle = Circle(nearest_point_on_sphere, gaze_vector, radius)
        return pupil_circle

    def apply_refraction_correction(self, pupil_circle):
        input_features = np.asarray(
            [[*self.sphere_center, *pupil_circle.normal, pupil_circle.radius]]
        )
        refraction_corrected_params = self.refractionizer.correct_pupil_circle(
            input_features
        )[0]

        refraction_corrected_gaze_vector = normalize(refraction_corrected_params[:3])
        refraction_corrected_radius = refraction_corrected_params[-1]
        refraction_corrected_pupil_center = (
            self.corrected_sphere_center
            + _EYE_RADIUS_DEFAULT * refraction_corrected_gaze_vector
        )

        refraction_corrected_pupil_circle = Circle(
            refraction_corrected_pupil_center,
            refraction_corrected_gaze_vector,
            refraction_corrected_radius,
        )

        return refraction_corrected_pupil_circle

    def mean_observation_circularity(self):
        observation_circularities = [
            observation.ellipse.circularity()
            for observation in self.storage.observations
        ]
        return np.mean(observation_circularities)

    def cleanup(self):
        pass
@ -1,161 +0,0 @@
"""
(*)~---------------------------------------------------------------------------
Pupil - eye tracking platform
Copyright (C) 2012-2019 Pupil Labs

Distributed under the terms of the GNU
Lesser General Public License (LGPL v3.0).
See COPYING and COPYING.LESSER for license details.
---------------------------------------------------------------------------~(*)
"""
import numpy as np


def intersect_line_line(p11, p12, p21, p22, internal=False):
    x1, y1 = p11
    x2, y2 = p12
    x3, y3 = p21
    x4, y4 = p22

    det = (x1 - x2) * (y3 - y4) - (y1 - y2) * (x3 - x4)
    if det != 0:
        Px = ((x1 * y2 - y1 * x2) * (x3 - x4) - (x1 - x2) * (x3 * y4 - y3 * x4)) / det
        Py = ((x1 * y2 - y1 * x2) * (y3 - y4) - (y1 - y2) * (x3 * y4 - y3 * x4)) / det
        if internal:
            if x1 != x2:
                lam = (Px - x2) / (x1 - x2)
            else:
                lam = (Py - y2) / (y1 - y2)
            if 0 <= lam <= 1:
                return [True, Px, Py]
            else:
                return [False]
        else:
            return [True, Px, Py]
    else:
        return [False]


def intersect_sphere_multiple_lines(sphere_center, radius, points, directions):
    # Note: Directions need to be normalized!
    intermediate = np.einsum("ij,ij->i", directions, points - sphere_center)
    discriminant = (
        intermediate ** 2 - np.sum((points - sphere_center) ** 2, axis=1) + radius ** 2
    )
    idx = discriminant > 0
    sqr = np.sqrt(discriminant[idx])
    d1 = -intermediate[idx] + sqr
    d2 = -intermediate[idx] - sqr
    d_final = np.expand_dims(np.minimum(d1, d2), axis=1)
    intersections_on_sphere = points[idx] + d_final * directions[idx]

    return intersections_on_sphere, idx


def intersect_sphere_line(sphere_center, radius, point, direction):
    temp = np.dot(direction, point - sphere_center)
    discriminant = temp ** 2 - np.linalg.norm(point - sphere_center) ** 2 + radius ** 2
    if discriminant >= 0.0:
        sqr = np.sqrt(discriminant)
        d1 = -temp + sqr
        d2 = -temp - sqr
        return [True, d1, d2]
    else:
        return [False, 0.0, 0.0]
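
# --- Illustrative sketch, not part of the original module ---
# intersect_sphere_line solves |o + t*d - c|^2 = r^2 for a unit direction d,
# i.e. t = -b +/- sqrt(b^2 - |o - c|^2 + r^2) with b = d . (o - c). For the
# unit sphere at the origin and a ray from (-2, 0, 0) along +x:
#
#   hit, d1, d2 = intersect_sphere_line(
#       np.zeros(3), 1.0, np.array([-2.0, 0.0, 0.0]), np.array([1.0, 0.0, 0.0])
#   )
#   # hit is True, d1 == 3.0 and d2 == 1.0: the sphere is entered at
#   # (-1, 0, 0) and exited at (1, 0, 0)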


def intersect_plane_line(p_plane, n_plane, p_line, l_line, radius=-1):
    if np.dot(n_plane, l_line) == 0 or np.dot(p_plane - p_line, n_plane) == 0:
        return [False]
    else:
        d = np.dot(p_plane - p_line, n_plane) / np.dot(l_line, n_plane)
        p_intersect = p_line + d * l_line
        if radius > 0:
            # radius is treated as a scalar here; the original indexed it as
            # radius[0], which would fail for the scalar default
            if np.linalg.norm(p_plane - p_intersect) <= radius:
                return [True, p_intersect[0], p_intersect[1], p_intersect[2]]
            else:
                return [False, 0.0, 0.0, 0.0]
        else:
            return [True, p_intersect[0], p_intersect[1], p_intersect[2]]


def nearest_point_on_sphere_to_line(center, radius, origin, direction):
    intersection = intersect_sphere_line(center, radius, origin, direction)
    if intersection[0]:
        d = np.min(intersection[1:])
        return origin + d * direction
    else:
        temp = np.dot(direction, center - origin)
        origin_prime = origin + temp * direction
        direction_prime = center - origin_prime
        direction_prime /= np.linalg.norm(direction_prime)
        success, d1, d2 = intersect_sphere_line(
            center, radius, origin_prime, direction_prime
        )
        if success:
            d = min(d1, d2)
            return origin_prime + d * direction_prime
        else:
            # the original fell through here without returning anything;
            # return the zero vector explicitly
            return np.zeros(3)


def nearest_intersection_points(p1, p2, p3, p4):
    """Calculates the two nearest points, and their distance to each other,
    on two lines defined by (p1, p2) and (p3, p4) respectively.
    """

    def mag(p):
        return np.sqrt(p.dot(p))

    def normalise(p1, p2):
        p = p2 - p1
        m = mag(p)
        if m == 0:
            return [0.0, 0.0, 0.0]
        else:
            return p / m

    d1 = normalise(p1, p2)
    d2 = normalise(p3, p4)

    diff = p1 - p3
    a01 = -d1.dot(d2)
    b0 = diff.dot(d1)

    if np.abs(a01) < 1.0:
        # Lines are not parallel.
        det = 1.0 - a01 * a01
        b1 = -diff.dot(d2)
        s0 = (a01 * b1 - b0) / det
        s1 = (a01 * b0 - b1) / det
    else:
        # Lines are parallel, select any pair of closest points.
        s0 = -b0
        s1 = 0

    closestPoint1 = p1 + s0 * d1
    closestPoint2 = p3 + s1 * d2
    dist = mag(closestPoint2 - closestPoint1)

    return closestPoint1, closestPoint2, dist


def nearest_intersection_lines(lines):
    dim = len(lines[0].origin)

    R = np.zeros((dim, dim))
    q = np.zeros(dim)

    for line in lines:
        v = np.reshape(line.direction, (dim, 1))
        A = np.eye(dim) - v @ v.T
        R += A
        q += A @ line.origin

    return np.linalg.pinv(R) @ q
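
# --- Illustrative sketch, not part of the original module ---
# nearest_intersection_lines accumulates the same projector blocks as the
# model code and solves once. Using the package's Line primitive (from
# .primitives, not imported in this module) with two lines meeting at
# (1, 1, 0):
#
#   lines = [
#       Line([0.0, 1.0, 0.0], [1.0, 0.0, 0.0]),  # y = 1 in the z = 0 plane
#       Line([1.0, 0.0, 0.0], [0.0, 1.0, 0.0]),  # x = 1 in the z = 0 plane
#   ]
#   nearest_intersection_lines(lines)            # -> array([1., 1., 0.])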
@ -1,188 +0,0 @@
"""
(*)~---------------------------------------------------------------------------
Pupil - eye tracking platform
Copyright (C) 2012-2019 Pupil Labs

Distributed under the terms of the GNU
Lesser General Public License (LGPL v3.0).
See COPYING and COPYING.LESSER for license details.
---------------------------------------------------------------------------~(*)
"""
import abc

import numpy as np

from .utilities import cart2sph, normalize


class Primitive(abc.ABC):
    __slots__ = ()

    def __repr__(self):
        klass = "{}.{}".format(self.__class__.__module__, self.__class__.__name__)
        attributes = " ".join(
            "{}={}".format(k, v.__repr__()) for k, v in self.__dict__.items()
        )
        return "<{klass} at {id}: {attributes}>".format(
            klass=klass, id=id(self), attributes=attributes
        )

    def __str__(self):
        def to_str(obj, float_fmt="{:f}") -> str:
            if isinstance(obj, float) or isinstance(obj, int):
                return float_fmt.format(obj)
            if isinstance(obj, np.ndarray):
                if obj.dtype != np.object:
                    return ", ".join(float_fmt.format(x) for x in obj)
            return str(obj)

        klass = self.__class__.__name__
        attributes = " - ".join(
            "{}: {}".format(k, to_str(v)) for k, v in self.__dict__.items()
        )
        return "{klass} -> {attributes}".format(klass=klass, attributes=attributes)


class Line(Primitive):
    __slots__ = ("origin", "direction", "dim")

    def __init__(self, origin, direction):
        self.origin = np.asarray(origin)
        self.direction = normalize(np.asarray(direction))
        self.dim = self.origin.shape[0]


class Circle(Primitive):
    __slots__ = ("center", "normal", "radius")

    def __init__(self, center=[0.0, 0.0, 0.0], normal=[0.0, 0.0, -1.0], radius=0.0):
        self.center = np.asarray(center, dtype=float)
        self.normal = np.asarray(normal, dtype=float)
        self.radius = radius

    def spherical_representation(self):
        phi, theta = cart2sph(self.normal)
        return phi, theta, self.radius

    def is_null(self):
        return self.radius <= 0.0

    @staticmethod
    def null() -> "Circle":
        return Circle(radius=0.0)


class Ellipse(Primitive):
    __slots__ = ("center", "major_radius", "minor_radius", "angle")

    def __init__(self, center, minor_radius, major_radius, angle):
        self.center = center
        self.major_radius = major_radius
        self.minor_radius = minor_radius
        self.angle = angle

        if self.minor_radius > self.major_radius:
            current_minor_radius = self.minor_radius
            self.minor_radius = self.major_radius
            self.major_radius = current_minor_radius
            self.angle = self.angle + np.pi / 2

    def circumference(self):
        a = self.minor_radius
        b = self.major_radius
        return np.pi * (3.0 * (a + b) - np.sqrt((3.0 * a + b) * (a + 3.0 * b)))

    def area(self):
        return np.pi * self.minor_radius * self.major_radius

    def circularity(self):
        return self.minor_radius / self.major_radius

    def parameters(self):
        return (
            self.center[0],
            self.center[1],
            self.minor_radius,
            self.major_radius,
            self.angle,
        )
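
    # --- Illustrative note, not part of the original module ---
    # circumference() above is Ramanujan's first approximation
    # pi * (3(a + b) - sqrt((3a + b)(a + 3b))); for a circle (a == b == r) it
    # reduces exactly to 2*pi*r:
    #
    #   e = Ellipse(np.array([0.0, 0.0]), 2.0, 2.0, 0.0)
    #   e.circumference()   # -> 12.566..., i.e. 2 * pi * 2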


class Sphere(Primitive):
    __slots__ = ("center", "radius")

    def __init__(self, center, radius):
        self.center = center
        self.radius = radius

    def __bool__(self):
        return self.radius > 0


class Conicoid(Primitive):
    """
    Coefficients of the general equation (implicit form) of a cone, given its vertex and base (ellipse/conic).
    Formulae follow equations (1)-(3) of:
    Safaee-Rad, R. et al.: "Three-Dimensional Location Estimation of Circular Features for Machine Vision",
    IEEE Transactions on Robotics and Automation, Vol. 8(5), 1992, pp. 624-640.
    """

    __slots__ = tuple("ABCFGHUVWD")

    def __init__(self, conic, vertex):
        alpha = vertex[0]
        beta = vertex[1]
        gamma = vertex[2]
        self.A = (gamma ** 2) * conic.A
        self.B = (gamma ** 2) * conic.C
        self.C = (
            conic.A * (alpha ** 2)
            + conic.B * alpha * beta
            + conic.C * (beta ** 2)
            + conic.D * alpha
            + conic.E * beta
            + conic.F
        )
        self.F = -gamma * (conic.C * beta + conic.B / 2 * alpha + conic.E / 2)
        self.G = -gamma * (conic.B / 2 * beta + conic.A * alpha + conic.D / 2)
        self.H = (gamma ** 2) * conic.B / 2
        self.U = (gamma ** 2) * conic.D / 2
        self.V = (gamma ** 2) * conic.E / 2
        self.W = -gamma * (conic.E / 2 * beta + conic.D / 2 * alpha + conic.F)
        self.D = (gamma ** 2) * conic.F


class Conic(Primitive):
    """
    Coefficients A-F of the general equation (implicit form) of a conic
    Ax² + Bxy + Cy² + Dx + Ey + F = 0
    calculated from 5 ellipse parameters, see https://en.wikipedia.org/wiki/Ellipse#General_ellipse
    """

    __slots__ = tuple("ABCDEF")

    def __init__(self, *args):
        if len(args) == 1:
            ellipse = args[0]
            ax = np.cos(ellipse.angle)
            ay = np.sin(ellipse.angle)
            a2 = ellipse.major_radius ** 2
            b2 = ellipse.minor_radius ** 2

            self.A = a2 * ay * ay + b2 * ax * ax
            self.B = 2.0 * (b2 - a2) * ax * ay
            self.C = a2 * ax * ax + b2 * ay * ay
            self.D = -2.0 * self.A * ellipse.center[0] - self.B * ellipse.center[1]
            self.E = -self.B * ellipse.center[0] - 2.0 * self.C * ellipse.center[1]
            self.F = (
                self.A * ellipse.center[0] * ellipse.center[0]
                + self.B * ellipse.center[0] * ellipse.center[1]
                + self.C * ellipse.center[1] * ellipse.center[1]
                - a2 * b2
            )

        if len(args) == 6:
            self.A, self.B, self.C, self.D, self.E, self.F = args

    def discriminant(self):
        return self.B ** 2 - 4 * self.A * self.C
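
# --- Illustrative sketch, not part of the original module ---
# For any real ellipse the conic discriminant B^2 - 4AC is negative:
#
#   e = Ellipse(np.array([0.0, 0.0]), 1.0, 2.0, 0.0)
#   Conic(e).discriminant()   # -> -16.0, i.e. the conic is an ellipse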
@ -1,123 +0,0 @@
"""
(*)~---------------------------------------------------------------------------
Pupil - eye tracking platform
Copyright (C) 2012-2019 Pupil Labs

Distributed under the terms of the GNU
Lesser General Public License (LGPL v3.0).
See COPYING and COPYING.LESSER for license details.
---------------------------------------------------------------------------~(*)
"""
import logging
import warnings

import numpy as np

from .intersections import intersect_sphere_multiple_lines
from .primitives import Circle, Conic, Conicoid, Ellipse, Line
from .utilities import normalize
from ..cpp.projections import unproject_ellipse

logger = logging.getLogger(__name__)


def unproject_edges_to_sphere(
    edges, focal_length, sphere_center, sphere_radius, width=640, height=480
):
    n_edges = edges.shape[0]

    directions = edges - np.asarray([width / 2.0, height / 2.0])
    directions = np.hstack((directions, focal_length * np.ones((n_edges, 1))))
    directions = directions / np.linalg.norm(directions, axis=1, keepdims=1)

    origins = np.zeros((n_edges, 3))

    edges_on_sphere, idxs = intersect_sphere_multiple_lines(
        sphere_center, sphere_radius, origins, directions
    )

    return edges_on_sphere, idxs


def project_point_into_image_plane(point, focal_length):
    scale = focal_length / point[2]
    point_projected = scale * np.asarray(point)
    return point_projected[:2]


def project_line_into_image_plane(line, focal_length):
    p1 = line.origin
    p2 = line.origin + line.direction

    p1_projected = project_point_into_image_plane(p1, focal_length)
    p2_projected = project_point_into_image_plane(p2, focal_length)

    return Line(p1_projected, p2_projected - p1_projected)


def project_circle_into_image_plane(
    circle, focal_length, transform=True, width=0, height=0
):
    c = circle.center
    n = circle.normal
    r = circle.radius
    f = focal_length

    cn = np.dot(c, n)
    c2r2 = np.dot(c, c) - r ** 2
    ABC = cn ** 2 - 2.0 * cn * (c * n) + c2r2 * (n ** 2)
    F = 2.0 * (c2r2 * n[1] * n[2] - cn * (n[1] * c[2] + n[2] * c[1]))
    G = 2.0 * (c2r2 * n[2] * n[0] - cn * (n[2] * c[0] + n[0] * c[2]))
    H = 2.0 * (c2r2 * n[0] * n[1] - cn * (n[0] * c[1] + n[1] * c[0]))
    conic = Conic(ABC[0], H, ABC[1], G * f, F * f, ABC[2] * f ** 2)

    disc_ = conic.discriminant()

    if disc_ < 0:
        A, B, C, D, E, F = conic.A, conic.B, conic.C, conic.D, conic.E, conic.F
        center_x = (2 * C * D - B * E) / disc_
        center_y = (2 * A * E - B * D) / disc_
        temp_ = 2 * (A * E ** 2 + C * D ** 2 - B * D * E + disc_ * F)
        minor_axis = (
            -np.sqrt(np.abs(temp_ * (A + C - np.sqrt((A - C) ** 2 + B ** 2)))) / disc_
        )  # TODO: absolute value?
        major_axis = (
            -np.sqrt(np.abs(temp_ * (A + C + np.sqrt((A - C) ** 2 + B ** 2)))) / disc_
        )

        if B == 0 and A < C:
            angle = 0
        elif B == 0 and A >= C:
            angle = np.pi / 2.0
        else:
            angle = np.arctan((C - A - np.sqrt((A - C) ** 2 + B ** 2)) / B)

        # to be consistent with Pupil
        if transform:
            center_x = center_x + width / 2.0
            center_y = center_y + height / 2.0
            minor_axis, major_axis = 2.0 * minor_axis, 2.0 * major_axis
            angle = angle * 180.0 / np.pi + 90.0

        return Ellipse(np.asarray([center_x, center_y]), minor_axis, major_axis, angle)

    else:
        return False


def project_sphere_into_image_plane(
    sphere, focal_length, transform=True, width=0, height=0
):
    scale = focal_length / sphere.center[2]

    projected_sphere_center = scale * sphere.center
    projected_radius = scale * sphere.radius

    if transform:
        projected_sphere_center[0] += width / 2.0
        projected_sphere_center[1] += height / 2.0
        projected_radius *= 2.0

    return Ellipse(projected_sphere_center[:2], projected_radius, projected_radius, 0.0)
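
# --- Illustrative sketch, not part of the original module ---
# Projecting a model eyeball (assuming a Sphere-like object with .center and
# .radius, e.g. geometry.primitives.Sphere, which this module does not
# import) with an example focal length:
#
#   sphere = Sphere(center=np.asarray([0.0, 0.0, 35.0]), radius=12.0)
#   ellipse = project_sphere_into_image_plane(
#       sphere, focal_length=283.0, transform=True, width=640, height=480
#   )
#   # ellipse.center is (320.0, 240.0); both radii are the projected eyeball
#   # radius (doubled by the transform step, as in the circle projection)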
@ -1,92 +0,0 @@
"""
(*)~---------------------------------------------------------------------------
Pupil - eye tracking platform
Copyright (C) 2012-2019 Pupil Labs

Distributed under the terms of the GNU
Lesser General Public License (LGPL v3.0).
See COPYING and COPYING.LESSER for license details.
---------------------------------------------------------------------------~(*)
"""
import numpy as np


def cart2sph(x):
    phi = np.arctan2(x[2], x[0])
    theta = np.arccos(x[1] / np.linalg.norm(x))

    return phi, theta


def sph2cart(phi, theta):
    result = np.empty(3)

    result[0] = np.sin(theta) * np.cos(phi)
    result[1] = np.cos(theta)
    result[2] = np.sin(theta) * np.sin(phi)

    return result


def normalize(v, axis=-1):
    return v / np.linalg.norm(v, axis=axis)


def enclosed_angle(v1, v2, unit="deg", axis=-1):
    v1 = normalize(v1, axis=axis)
    v2 = normalize(v2, axis=axis)

    alpha = np.arccos(np.clip(np.dot(v1.T, v2), -1, 1))

    if unit == "deg":
        return 180.0 / np.pi * alpha
    else:
        return alpha


def make_homogeneous_vector(v):
    return np.hstack((v, [0.0]))


def make_homogeneous_point(p):
    return np.hstack((p, [1.0]))


def transform_as_homogeneous_point(p, trafo):
    p = make_homogeneous_point(p)
    return (trafo @ p)[:3]


def transform_as_homogeneous_vector(v, trafo):
    v = make_homogeneous_vector(v)
    return (trafo @ v)[:3]


def rotate_v1_on_v2(v1, v2):
    v1 = normalize(v1)
    v2 = normalize(v2)
    cos_angle = np.dot(v1, v2)

    if not np.allclose(np.abs(cos_angle), 1):
        u = np.cross(v1, v2)
        s = np.linalg.norm(u)
        c = np.dot(v1, v2)

        I = np.eye(3)
        ux = np.asarray([[0, -u[2], u[1]], [u[2], 0, -u[0]], [-u[1], u[0], 0]])

        R = I + ux + np.dot(ux, ux) * (1 - c) / s ** 2

    elif np.allclose(cos_angle, 1):
        R = np.eye(3)

    elif np.allclose(cos_angle, -1):
        R = -np.eye(3)

    return R
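
# --- Illustrative sketch, not part of the original module ---
# Note the coordinate convention: theta is measured from the +y axis and phi
# lies in the x-z plane, so cart2sph/sph2cart round-trip unit vectors:
#
#   v = normalize(np.array([1.0, 1.0, 1.0]))
#   phi, theta = cart2sph(v)
#   np.allclose(sph2cart(phi, theta), v)   # -> True (unit vectors only)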
@ -1,58 +0,0 @@
"""
(*)~---------------------------------------------------------------------------
Pupil - eye tracking platform
Copyright (C) 2012-2019 Pupil Labs

Distributed under the terms of the GNU
Lesser General Public License (LGPL v3.0).
See COPYING and COPYING.LESSER for license details.
---------------------------------------------------------------------------~(*)
"""
import cv2
import numpy as np


class KalmanFilter(object):
    def __init__(self):
        self.filter = cv2.KalmanFilter(7, 3, 0, cv2.CV_32F)
        self.filter.measurementMatrix = np.asarray(
            [[1, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1]],
            dtype=np.float32,
        )
        self.filter.processNoiseCov = 1e-4 * np.eye(7, dtype=np.float32)
        self.filter.measurementNoiseCov = 1e-5 * np.eye(3, dtype=np.float32)
        self.filter.measurementNoiseCov[2][2] = 0.1
        self.filter.statePost = np.asarray([0, 0, 0, 0, 0, 0, 2.0], dtype=np.float32)
        self.filter.errorCovPost = np.eye(7, dtype=np.float32)
        self.last_call = -1

    def predict(self, t):
        if self.last_call != -1 and t > self.last_call:
            dt = t - self.last_call
            self.filter.transitionMatrix = np.asarray(
                [
                    [1, 0, dt, 0, 0.5 * dt * dt, 0, 0],
                    [0, 1, 0, dt, 0, 0.5 * dt * dt, 0],
                    [0, 0, 1, 0, dt, 0, 0],
                    [0, 0, 0, 1, 0, dt, 0],
                    [0, 0, 0, 0, 1, 0, 0],
                    [0, 0, 0, 0, 0, 1, 0],
                    [0, 0, 0, 0, 0, 0, 1],
                ],
                dtype=np.float32,
            )
            prediction = self.filter.predict()
            phi, theta, pupil_radius = (
                prediction[0][0],
                prediction[1][0],
                prediction[6][0],
            )
        else:
            phi, theta, pupil_radius = -np.pi / 2, np.pi / 2, 0

        self.last_call = t

        return phi, theta, pupil_radius

    def correct(self, phi, theta, radius):
        self.filter.correct(np.asarray([phi, theta, radius], dtype=np.float32))
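
# --- Illustrative sketch, not part of the original module ---
# The 7d state is (phi, theta, their velocities and accelerations, pupil
# radius); predict(t) rebuilds the constant-acceleration transition matrix
# from the elapsed time and correct() feeds in a new measurement. A minimal
# loop over hypothetical `measurements` of (phi, theta, radius) tuples:
#
#   kf = KalmanFilter()
#   for t, (phi_obs, theta_obs, radius_obs) in enumerate(measurements):
#       phi_pred, theta_pred, radius_pred = kf.predict(float(t))
#       kf.correct(phi_obs, theta_obs, radius_obs)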
@ -1,232 +0,0 @@
"""
(*)~---------------------------------------------------------------------------
Pupil - eye tracking platform
Copyright (C) 2012-2019 Pupil Labs

Distributed under the terms of the GNU
Lesser General Public License (LGPL v3.0).
See COPYING and COPYING.LESSER for license details.
---------------------------------------------------------------------------~(*)
"""
from abc import abstractmethod, abstractproperty
from collections import deque
from math import floor
from typing import Sequence, Optional

import numpy as np
from sortedcontainers import SortedList

from .camera import CameraModel
from .constants import _EYE_RADIUS_DEFAULT
from .geometry.primitives import Ellipse, Line
from .geometry.projections import project_line_into_image_plane, unproject_ellipse


class Observation(object):
    def __init__(
        self, ellipse: Ellipse, confidence: float, timestamp: float, focal_length: float
    ):
        self.ellipse = ellipse
        self.confidence_2d = confidence
        self.confidence = 0.0
        self.timestamp = timestamp

        self.circle_3d_pair = None
        self.gaze_3d_pair = None
        self.gaze_2d = None
        self.aux_2d = None
        self.aux_3d = None
        self.invalid = True

        circle_3d_pair = unproject_ellipse(ellipse, focal_length)
        if not circle_3d_pair:
            # unprojecting ellipse failed, invalid observation!
            return

        self.invalid = False
        self.confidence = self.confidence_2d
        self.circle_3d_pair = circle_3d_pair

        self.gaze_3d_pair = [
            Line(
                circle_3d_pair[i].center,
                circle_3d_pair[i].center + circle_3d_pair[i].normal,
            )
            for i in [0, 1]
        ]
        self.gaze_2d = project_line_into_image_plane(self.gaze_3d_pair[0], focal_length)
        self.gaze_2d_line = np.array([*self.gaze_2d.origin, *self.gaze_2d.direction])

        self.aux_2d = np.empty((2, 3))
        v = np.reshape(self.gaze_2d.direction, (2, 1))
        self.aux_2d[:, :2] = np.eye(2) - v @ v.T
        self.aux_2d[:, 2] = (np.eye(2) - v @ v.T) @ self.gaze_2d.origin

        self.aux_3d = np.empty((2, 3, 4))
        for i in range(2):
            Dierkes_line = self.get_Dierkes_line(i)
            v = np.reshape(Dierkes_line.direction, (3, 1))
            self.aux_3d[i, :3, :3] = np.eye(3) - v @ v.T
            self.aux_3d[i, :3, 3] = (np.eye(3) - v @ v.T) @ Dierkes_line.origin

    def get_Dierkes_line(self, i):
        origin = (
            self.circle_3d_pair[i].center
            - _EYE_RADIUS_DEFAULT * self.circle_3d_pair[i].normal
        )
        direction = self.circle_3d_pair[i].center
        return Line(origin, direction)
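
    # --- Illustrative note, not part of the original module ---
    # aux_2d and aux_3d cache least-squares building blocks: for a line with
    # unit direction v, the projector P = I - v v^T maps any vector onto the
    # component perpendicular to the line, so minimizing
    # sum_i |P_i (x - o_i)|^2 over x gives the normal equations
    # (sum_i P_i) x = sum_i (P_i o_i). Storing P and P @ origin per
    # observation lets the model's estimate_sphere_center_* routines simply
    # sum the cached blocks over all observations and solve once.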


class ObservationStorage:
    @abstractmethod
    def add(self, observation: Observation):
        pass

    @abstractproperty
    def observations(self) -> Sequence[Observation]:
        pass

    @abstractmethod
    def clear(self):
        pass

    @abstractmethod
    def count(self) -> int:
        pass


class BasicStorage(ObservationStorage):
    def __init__(self):
        self._storage = []

    def add(self, observation: Observation):
        if observation.invalid:
            return
        self._storage.append(observation)

    @property
    def observations(self) -> Sequence[Observation]:
        return self._storage

    def clear(self):
        self._storage.clear()

    def count(self) -> int:
        return len(self._storage)


class BufferedObservationStorage(ObservationStorage):
    def __init__(self, confidence_threshold: float, buffer_length: int):
        self.confidence_threshold = confidence_threshold
        self._storage = deque(maxlen=buffer_length)

    def add(self, observation: Observation):
        if observation.invalid:
            return
        if observation.confidence < self.confidence_threshold:
            return

        self._storage.append(observation)

    @property
    def observations(self) -> Sequence[Observation]:
        return list(self._storage)

    def clear(self):
        self._storage.clear()

    def count(self) -> int:
        return len(self._storage)


class BinBufferedObservationStorage(ObservationStorage):
    def __init__(
        self,
        camera: CameraModel,
        confidence_threshold: float,
        n_bins_horizontal: int,
        bin_buffer_length: int,
        forget_min_observations: Optional[int] = None,
        forget_min_time: Optional[float] = None,
    ):
        self.camera = camera
        self.confidence_threshold = confidence_threshold
        self.bin_buffer_length = bin_buffer_length
        self.forget_min_observations = forget_min_observations
        self.forget_min_time = forget_min_time
        self.pixels_per_bin = self.camera.resolution[0] / n_bins_horizontal
        self.w = n_bins_horizontal
        self.h = int(round(self.camera.resolution[1] / self.pixels_per_bin))

        self._by_time = SortedList(key=lambda obs: obs.timestamp)
        self._by_bin = dict()

    def add(self, observation: Observation):
        if observation.invalid:
            return
        if observation.confidence < self.confidence_threshold:
            return

        idx = self._get_bin(observation)
        if idx < 0 or idx >= self.w * self.h:
            print(f"INDEX OUT OF BOUNDS: {idx}")
            return

        if idx not in self._by_bin:
            self._by_bin[idx] = SortedList(key=lambda obs: obs.timestamp)

        # add to both lookup structures
        _bin: SortedList = self._by_bin[idx]
        _bin.add(observation)
        self._by_time.add(observation)

        # manage within-bin forgetting
        while len(_bin) > self.bin_buffer_length:
            old = _bin.pop(0)
            self._by_time.remove(old)

        # manage across-bin forgetting
        if self.forget_min_observations is None or self.forget_min_time is None:
            return

        while self.count() > self.forget_min_observations:
            oldest_age = observation.timestamp - self._by_time[0].timestamp
            if oldest_age < self.forget_min_time:
                break

            # forget oldest entry
            old = self._by_time.pop(0)
            idx = self._get_bin(old)
            _bin = self._by_bin[idx]
            _bin.remove(old)
            # make sure to remove bin if empty for bin-counting to work
            if len(_bin) == 0:
                self._by_bin.pop(idx)

    @property
    def observations(self) -> Sequence[Observation]:
        return list(self._by_time)

    def clear(self):
        self._by_time.clear()
        self._by_bin.clear()

    def count(self) -> int:
        return len(self._by_time)

    def get_bin_counts(self) -> np.ndarray:
        dense_1d = np.zeros((self.w * self.h,))
        for idx, _bin in self._by_bin.items():
            dense_1d[idx] = len(_bin)
        return np.reshape(dense_1d, (self.w, self.h))

    def _get_bin(self, observation: Observation) -> int:
        x, y = (
            floor((ellipse_center + resolution / 2) / self.pixels_per_bin)
            for ellipse_center, resolution in zip(
                observation.ellipse.center, self.camera.resolution
            )
        )
        # convert to 1D bin index
        return x + y * self.h
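
# --- Illustrative sketch, not part of the original module ---
# The bin grid spreads the stored observations across the image so a single
# gaze region cannot dominate the model fit. Assuming a CameraModel exposing
# focal_length and resolution:
#
#   camera = CameraModel(focal_length=283.0, resolution=(640, 480))
#   storage = BinBufferedObservationStorage(
#       camera,
#       confidence_threshold=0.8,
#       n_bins_horizontal=10,
#       bin_buffer_length=30,
#   )
#   storage.add(observation)          # keeps at most 30 entries per bin
#   storage.get_bin_counts().shape    # -> (10, 8) for 640x480 input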
@@ -1,141 +0,0 @@
import itertools
from pathlib import Path

from .cpp.refraction_correction import apply_correction_pipeline

import numpy as np
import msgpack


LOAD_DIR = Path(__file__).parent / "refraction_models"
LOAD_VERSION = 1


class ModelDeserializationError(Exception):
    pass


class Refractionizer:
    def __init__(self, degree=3, type_="default", custom_load_dir=None):
        self.pipeline_radius_as_list = self.load_config_from_msgpack(
            "radius", type_, degree, custom_load_dir
        )

        self.pipeline_gaze_vector_as_list = self.load_config_from_msgpack(
            "gaze_vector", type_, degree, custom_load_dir
        )

        self.pipeline_sphere_center_as_list = self.load_config_from_msgpack(
            "sphere_center", type_, degree, custom_load_dir
        )

        self.pipeline_pupil_circle_as_list = self.load_config_from_msgpack(
            "pupil_circle", type_, degree, custom_load_dir
        )

    @staticmethod
    def load_config_from_msgpack(feature, type_, degree, custom_load_dir=None):
        load_dir = Path(custom_load_dir or LOAD_DIR).resolve()
        name = f"{type_}_refraction_model_{feature}_degree_{degree}.msgpack"
        path = load_dir / name
        with path.open("rb") as file:
            config_model = msgpack.unpack(file)
            Refractionizer._validate_loaded_model_config(config_model)
            try:
                return list(
                    itertools.chain(
                        Refractionizer._polynomial_features_from_config(config_model),
                        Refractionizer._standard_scaler_from_config(config_model),
                        Refractionizer._linear_regression_from_config(config_model),
                    )
                )
            except KeyError as err:
                raise ModelDeserializationError from err

    @staticmethod
    def _validate_loaded_model_config(config_model):
        if not isinstance(config_model, dict) or "version" not in config_model:
            raise ModelDeserializationError("Unrecognized format")
        if config_model["version"] != LOAD_VERSION:
            raise ModelDeserializationError(
                f"Unexpected version `{config_model['version']}` "
                f"(expected `{LOAD_VERSION}`)"
            )

    @staticmethod
    def _polynomial_features_from_config(config_model):
        yield np.array(config_model["steps"]["PolynomialFeatures"]["powers"])

    @staticmethod
    def _standard_scaler_from_config(config_model):
        config_scaler = config_model["steps"]["StandardScaler"]
        yield np.array(config_scaler["mean"])
        yield np.array(config_scaler["var"])

    @staticmethod
    def _linear_regression_from_config(config_model):
        config_lin_reg = config_model["steps"]["LinearRegression"]
        yield np.array(config_lin_reg["coef"])
        yield np.array(config_lin_reg["intercept"])

    @staticmethod
    def _apply_correction_pipeline(X, pipeline_arrays):
        return apply_correction_pipeline(np.asarray(X).T, *pipeline_arrays)

    def correct_radius(self, X):
        return self._apply_correction_pipeline(X, self.pipeline_radius_as_list)

    def correct_gaze_vector(self, X):
        return self._apply_correction_pipeline(X, self.pipeline_gaze_vector_as_list)

    def correct_sphere_center(self, X):
        return self._apply_correction_pipeline(X, self.pipeline_sphere_center_as_list)

    def correct_pupil_circle(self, X):
        return self._apply_correction_pipeline(X, self.pipeline_pupil_circle_as_list)

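For reference, the arrays collected by load_config_from_msgpack describe a standard sklearn-style pipeline: a PolynomialFeatures powers matrix, StandardScaler mean/var, and LinearRegression coef/intercept. The actual math runs in the native apply_correction_pipeline; the sketch below is a hypothetical pure-NumPy equivalent, written only to document the data layout, not the shipped implementation (the function name and the (n_samples, n_features) input layout are assumptions):

def _apply_correction_pipeline_numpy(X, powers, mean, var, coef, intercept):
    X = np.asarray(X, dtype=float)  # (n_samples, n_features)
    # Polynomial expansion: term t of each sample is prod_j X[:, j] ** powers[t, j].
    terms = np.prod(X[:, None, :] ** powers[None, :, :], axis=2)
    # Standardize the expanded terms, then apply the linear model.
    scaled = (terms - np.asarray(mean)) / np.sqrt(np.asarray(var))
    return scaled @ np.asarray(coef).T + np.asarray(intercept)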
class SklearnRefractionizer(Refractionizer):
    def __init__(self, degree=3, type_="default", custom_load_dir=None):
        self.correct_radius = self.load_predict_fn_from_joblib_pickle(
            "radius", type_, degree, custom_load_dir
        )

        self.correct_gaze_vector = self.load_predict_fn_from_joblib_pickle(
            "gaze_vector", type_, degree, custom_load_dir
        )

        self.correct_sphere_center = self.load_predict_fn_from_joblib_pickle(
            "sphere_center", type_, degree, custom_load_dir
        )

        self.correct_pupil_circle = self.load_predict_fn_from_joblib_pickle(
            "pupil_circle", type_, degree, custom_load_dir
        )

    @staticmethod
    def load_predict_fn_from_joblib_pickle(feature, type_, degree, custom_load_dir=None):
        import joblib

        load_dir = Path(custom_load_dir or LOAD_DIR).resolve()
        name = f"{type_}_refraction_model_{feature}_degree_{degree}.save"
        path = load_dir / name
        try:
            pipeline = joblib.load(path)
        except FileNotFoundError:
            raise
        except Exception as exc:
            raise ModelDeserializationError(
                f"Failed to load pickled model from {path}"
            ) from exc
        return pipeline.predict


if __name__ == "__main__":
    refractionizer = Refractionizer()

    print(refractionizer.correct_sphere_center([[0.0, 0.0, 35.0]]))
    print(refractionizer.correct_radius([[0.0, 0.0, 35.0, 0.0, 0.0, -1.0, 2.0]]))
    print(refractionizer.correct_gaze_vector([[0.0, 0.0, 35.0, 0.0, 0.0, -1.0, 2.0]]))
    print(refractionizer.correct_pupil_circle([[0.0, 0.0, 35.0, 0.0, 0.0, -1.0, 2.0]]))
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -1,515 +0,0 @@
from dataclasses import dataclass
import sys

sys.path.append(".")
from config import RansacConfig
from pye3dcustom.detector_3d import CameraModel, Detector3D, DetectorMode
import queue
import threading
import numpy as np
import cv2
from enum import Enum
from one_euro_filter import OneEuroFilter


class InformationOrigin(Enum):
    RANSAC = 1
    BLOB = 2
    FAILURE = 3


@dataclass
class EyeInformation:
    info_type: InformationOrigin
    x: float
    y: float
    blink: bool


def fit_rotated_ellipse_ransac(
    data, iter=5, sample_num=10, offset=80  # 80.0, 10, 80
):  # Before changing these values, please read up on the RANSAC algorithm.
    # If you do want to change any value, just know that higher iteration counts will make processing frames slower.
    count_max = 0
    effective_sample = None

    # TODO This iteration is extremely slow.
    #
    # Either we need to keep the iteration number low, or we need to keep a worker pool specifically
    # for handling this calculation. It's parallelizable, so just throwing something like joblib at
    # it would be fine.
    for i in range(iter):
        sample = np.random.choice(len(data), sample_num, replace=False)

        xs = data[sample][:, 0].reshape(-1, 1)
        ys = data[sample][:, 1].reshape(-1, 1)

        J = np.mat(
            np.hstack((xs * ys, ys**2, xs, ys, np.ones_like(xs, dtype=float)))
        )
        Y = np.mat(-1 * xs**2)
        P = (J.T * J).I * J.T * Y

        # fit a*x**2 + b*x*y + c*y**2 + d*x + e*y + f = 0
        a = 1.0
        b = P[0, 0]
        c = P[1, 0]
        d = P[2, 0]
        e = P[3, 0]
        f = P[4, 0]
        ellipse_model = (
            lambda x, y: a * x**2 + b * x * y + c * y**2 + d * x + e * y + f
        )

        # Threshold on the algebraic residual to collect this sample's inliers.
        ran_sample = np.array(
            [[x, y] for (x, y) in data if np.abs(ellipse_model(x, y)) < offset]
        )

        if len(ran_sample) > count_max:
            count_max = len(ran_sample)
            effective_sample = ran_sample

    return fit_rotated_ellipse(effective_sample)


def fit_rotated_ellipse(data):
    xs = data[:, 0].reshape(-1, 1)
    ys = data[:, 1].reshape(-1, 1)

    J = np.mat(np.hstack((xs * ys, ys**2, xs, ys, np.ones_like(xs, dtype=float))))
    Y = np.mat(-1 * xs**2)
    P = (J.T * J).I * J.T * Y

    a = 1.0
    b = P[0, 0]
    c = P[1, 0]
    d = P[2, 0]
    e = P[3, 0]
    f = P[4, 0]
    theta = 0.5 * np.arctan(b / (a - c))

    cx = (2 * c * d - b * e) / (b**2 - 4 * a * c)
    cy = (2 * a * e - b * d) / (b**2 - 4 * a * c)

    cu = a * cx**2 + b * cx * cy + c * cy**2 - f
    w = np.sqrt(
        cu
        / (
            a * np.cos(theta) ** 2
            + b * np.cos(theta) * np.sin(theta)
            + c * np.sin(theta) ** 2
        )
    )
    h = np.sqrt(
        cu
        / (
            a * np.sin(theta) ** 2
            - b * np.cos(theta) * np.sin(theta)
            + c * np.cos(theta) ** 2
        )
    )

    ellipse_model = lambda x, y: a * x**2 + b * x * y + c * y**2 + d * x + e * y + f

    error_sum = np.sum([ellipse_model(x, y) for x, y in data])  # residual sum (currently unused)
    return (cx, cy, w, h, theta)

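A quick synthetic check of the fit (an assumed usage sketch, not part of the original file): sampling noisy points from a known axis-aligned ellipse should recover roughly the same parameters.

rng = np.random.default_rng(0)
t = rng.uniform(0, 2 * np.pi, 200)
pts = np.column_stack((100 + 40 * np.cos(t), 80 + 25 * np.sin(t)))  # center (100, 80), semi-axes 40 and 25
pts += rng.normal(0, 0.5, pts.shape)  # add a little pixel noise
cx, cy, w, h, theta = fit_rotated_ellipse_ransac(pts)
# Expect roughly cx ~ 100, cy ~ 80, w ~ 40, h ~ 25, theta ~ 0.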
class Ransac:
    def __init__(
        self,
        config: "RansacConfig",
        cancellation_event: "threading.Event",
        capture_event: "threading.Event",
        capture_queue_incoming: "queue.Queue",
        image_queue_outgoing: "queue.Queue",
    ):
        self.config = config

        # Cross-thread communication management
        self.capture_queue_incoming = capture_queue_incoming
        self.image_queue_outgoing = image_queue_outgoing
        self.cancellation_event = cancellation_event
        self.capture_event = capture_event

        # Cross-algorithm state
        self.lkg_projected_sphere = None

        # Image state
        self.previous_image = None
        self.current_image = None
        self.current_image_gray = None
        self.current_frame_number = None
        self.current_fps = None
        self.threshold_image = None

        # Calibration values
        self.xoff = 1
        self.yoff = 1
        self.calibration_frame_counter = 300  # Keep large in order to recenter correctly
        self.eyeoffx = 1

        self.xmax = 69420
        self.xmin = -69420
        self.ymax = 69420
        self.ymin = -69420
        self.previous_rotation = self.config.rotation_angle
        self.recenter_eye = False

        min_cutoff = 0.0004
        beta = 0.7
        noisy_point = np.array([1, 1])
        self.one_euro_filter = OneEuroFilter(
            noisy_point,
            min_cutoff=min_cutoff,
            beta=beta,
        )

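    # Note on the filter parameters above: min_cutoff trades smoothness against
    # responsiveness at low speeds (lower = steadier at rest), while beta scales
    # the cutoff up with speed so fast eye movement is not lagged. A minimal
    # usage sketch, assuming the call pattern used in this file (seed point at
    # construction, then one call per frame):
    #
    #   smoother = OneEuroFilter(np.array([0.0, 0.0]), min_cutoff=0.0004, beta=0.7)
    #   smooth_point = smoother(np.array([12.0, 7.5]))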
    def output_images_and_update(self, threshold_image, output_information: EyeInformation):
        if self.config.show_color_image:
            image_stack = np.concatenate(
                (
                    self.current_image,
                    cv2.cvtColor(self.current_image_gray, cv2.COLOR_GRAY2BGR),
                    cv2.cvtColor(threshold_image, cv2.COLOR_GRAY2BGR),
                ),
                axis=1,
            )
        else:
            image_stack = np.concatenate(
                (
                    cv2.cvtColor(self.current_image_gray, cv2.COLOR_GRAY2BGR),
                    cv2.cvtColor(threshold_image, cv2.COLOR_GRAY2BGR),
                ),
                axis=1,
            )
        self.image_queue_outgoing.put((image_stack, output_information))
        self.previous_image = self.current_image
        self.previous_rotation = self.config.rotation_angle

    def capture_crop_rotate_image(self):
        # Get our current frame
        try:
            # Get frame from capture source, crop to ROI
            self.current_image = self.current_image[
                int(self.config.roi_window_y): int(self.config.roi_window_y + self.config.roi_window_h),
                int(self.config.roi_window_x): int(self.config.roi_window_x + self.config.roi_window_w),
            ]
        except Exception:
            # Failure to process frame, reuse previous frame.
            self.current_image = self.previous_image
            print('[ERROR] Frame capture issue detected.')

        # Apply rotation to cropped area. For any rotation area outside of the bounds of the image,
        # fill with white.
        rows, cols, _ = self.current_image.shape
        img_center = (cols / 2, rows / 2)
        rotation_matrix = cv2.getRotationMatrix2D(img_center, self.config.rotation_angle, 1)
        self.current_image = cv2.warpAffine(
            self.current_image,
            rotation_matrix,
            (cols, rows),
            borderMode=cv2.BORDER_CONSTANT,
            borderValue=(255, 255, 255),
        )
        return True

    def blob_tracking_fallback(self):
        # Increase our threshold value slightly, in order to have a better possibility of getting back
        # something to do blob tracking on.
        _, larger_threshold = cv2.threshold(
            self.current_image_gray, int(self.config.threshold + 5), 255, cv2.THRESH_BINARY
        )

        # Blob tracking requires that we have a vague idea of where the eye may be at the moment. This
        # means we need to have had at least one successful runthrough of the Pupil Labs algorithm in
        # order to have a projected sphere.
        if self.lkg_projected_sphere is None:
            self.output_images_and_update(larger_threshold, EyeInformation(InformationOrigin.FAILURE, 0, 0, False))
            return

        # define circle for "cropping"
        try:
            ht, wd = self.current_image_gray.shape

            radius = int(float(self.lkg_projected_sphere["axes"][0]))

            xc = int(self.lkg_projected_sphere["center"][0])
            yc = int(self.lkg_projected_sphere["center"][1])

            # draw filled circle in white on black background as mask
            mask = np.zeros((ht, wd), dtype=np.uint8)
            mask = cv2.circle(mask, (xc, yc), radius, 255, -1)

            # create white colored background
            color = np.full_like(self.current_image_gray, (255))

            # apply mask to image
            masked_img = cv2.bitwise_and(self.current_image_gray, self.current_image_gray, mask=mask)

            # apply inverse mask to colored image
            masked_color = cv2.bitwise_and(color, color, mask=255 - mask)

            # combine the two masked images
            self.current_image_gray = cv2.add(masked_img, masked_color)
        except Exception:
            pass

        try:
            # Try rebuilding our contours
            contours, _ = cv2.findContours(larger_threshold, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
            contours = sorted(contours, key=lambda x: cv2.contourArea(x), reverse=True)
            # If we have no contours, we have nothing to blob track. Fail here.
            if len(contours) == 0:
                raise RuntimeError("No contours found for image")
        except Exception:
            self.output_images_and_update(larger_threshold, EyeInformation(InformationOrigin.FAILURE, 0, 0, False))
            return

        rows, cols = larger_threshold.shape
        for cnt in contours:
            (x, y, w, h) = cv2.boundingRect(cnt)

            # If our blob width/height are within suitable (yet arbitrary) boundaries, call that good.
            #
            # TODO This should be scaled based on camera resolution.
            if not 8 <= h <= 30 or not 8 <= w <= 30:
                continue
            xt = x + int(w / 2)
            yt = y + int(h / 2)
            xrlb = (xt - self.lkg_projected_sphere["center"][0]) / self.lkg_projected_sphere["axes"][0]
            eyeyb = (yt - self.lkg_projected_sphere["center"][1]) / self.lkg_projected_sphere["axes"][1]
            cv2.line(self.current_image_gray, (x + int(w / 2), 0), (x + int(w / 2), rows), (255, 0, 0), 1)  # visualizes eye tracking on the threshold view
            cv2.line(self.current_image_gray, (0, y + int(h / 2)), (cols, y + int(h / 2)), (255, 0, 0), 1)
            cv2.drawContours(self.current_image_gray, [cnt], -1, (255, 0, 0), 3)
            cv2.rectangle(self.current_image_gray, (x, y), (x + w, y + h), (255, 0, 0), 2)

            try:
                # Filter our blob center with the One Euro Filter.
                noisy_point = np.array([xt, yt])
                point_hat = self.one_euro_filter(noisy_point)
                xt = point_hat[0]
                yt = point_hat[1]
            except Exception:
                pass

            eye_position_scalar = self.config.vrc_eye_position_scalar

            xl = float(((xt - self.xoff) * eye_position_scalar) / (self.xmax - self.xoff))
            xr = float(((xt - self.xoff) * eye_position_scalar) / (self.xmin - self.xoff))
            yu = float(((yt - self.yoff) * eye_position_scalar) / (self.ymax - self.yoff))
            yd = float(((yt - self.yoff) * eye_position_scalar) / (self.ymin - self.yoff))
            # print(f"{xl} {xr} {yu} {yd}")

            out_x = 0
            out_y = 0
            if xr > 0:
                out_x = max(0.0, min(1.0, xr))
            if xl > 0:
                out_x = -abs(max(0.0, min(1.0, xl)))
            if yd > 0:
                out_y = -abs(max(0.0, min(1.0, yd)))
            if yu > 0:
                out_y = max(0.0, min(1.0, yu))
            # print(xt, yt, out_x, out_y, 'BLOB')
            self.output_images_and_update(larger_threshold, EyeInformation(InformationOrigin.BLOB, out_x, out_y, False))
            return
        self.output_images_and_update(larger_threshold, EyeInformation(InformationOrigin.BLOB, 0, 0, True))
        print('[INFO] BLINK Detected.')

    def run(self):
        camera_model = CameraModel(focal_length=self.config.focal_length, resolution=[self.config.roi_window_w, self.config.roi_window_h])
        detector_3d = Detector3D(camera=camera_model, long_term_mode=DetectorMode.blocking)

        while True:
            # Check to make sure we haven't been requested to close
            if self.cancellation_event.is_set():
                print("Exiting RANSAC thread")
                return

            # If our ROI configuration has changed, reset our model and detector
            if camera_model.resolution != [self.config.roi_window_w, self.config.roi_window_h]:
                camera_model = CameraModel(focal_length=self.config.focal_length, resolution=[self.config.roi_window_w, self.config.roi_window_h])
                detector_3d = Detector3D(camera=camera_model, long_term_mode=DetectorMode.blocking)

            try:
                if self.capture_queue_incoming.empty():
                    self.capture_event.set()
                # Wait a bit for images here. If we don't get one, just try again.
                (self.current_image, self.current_frame_number, self.current_fps) = self.capture_queue_incoming.get(block=True, timeout=0.2)
            except queue.Empty:
                # print("No image available")
                continue

            if not self.capture_crop_rotate_image():
                continue

            # Convert the image to grayscale, and set up thresholding. Thresholds here are basically a
            # low-pass filter that will set any pixel < the threshold value to 0. Thresholding is user
            # configurable in this utility as we're dealing with variable lighting amounts/placement, as
            # well as camera positioning and lensing. Therefore everyone's cutoff may be different.
            #
            # The goal of thresholding settings is to make sure we can ONLY see the pupil. This is why we
            # crop the image earlier; it gives us less possible dark area to get confused about in the
            # next step.
            self.current_image_gray = cv2.cvtColor(self.current_image, cv2.COLOR_BGR2GRAY)
            _, thresh = cv2.threshold(
                self.current_image_gray, int(self.config.threshold), 255, cv2.THRESH_BINARY
            )

            # define circle for "cropping"
            try:
                ht, wd = self.current_image_gray.shape

                radius = int(float(self.lkg_projected_sphere["axes"][0]))

                xc = int(self.lkg_projected_sphere["center"][0])
                yc = int(self.lkg_projected_sphere["center"][1])

                # draw filled circle in white on black background as mask
                mask = np.zeros((ht, wd), dtype=np.uint8)
                mask = cv2.circle(mask, (xc, yc), radius, 255, -1)

                # create white colored background
                color = np.full_like(self.current_image_gray, (255))

                # apply mask to image
                masked_img = cv2.bitwise_and(self.current_image_gray, self.current_image_gray, mask=mask)

                # apply inverse mask to colored image
                masked_color = cv2.bitwise_and(color, color, mask=255 - mask)

                # combine the two masked images
                self.current_image_gray = cv2.add(masked_img, masked_color)
            except Exception:
                pass

            # Set up morphological transforms, for smoothing and clearing the image we get out of the
            # thresholding operation. After this, we'd really like to just have a black blob in the middle
            # of a bunch of white area.
            kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
            opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)
            closing = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, kernel)
            image = 255 - closing

            # Now that the image is relatively clean, run contour finding in order to get us our pupil
            # boundaries in the 2D context. Ideally, we just get one border.
            contours, _ = cv2.findContours(
                image, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE
            )

            # Find the convex shape based on each contour, and sort the list of them from smallest to
            # largest area.
            convex_hulls = []
            for i in range(len(contours)):
                convex_hulls.append(cv2.convexHull(contours[i], False))

            # If we have no convex maidens, we have no pupil, and can't progress from here. Dump back to
            # using blob tracking.
            if len(convex_hulls) == 0:
                self.blob_tracking_fallback()
                continue

            # Find our largest hull, which we expect will probably be the ellipse that represents the 2d
            # area for the pupil, which we can use as the search area for the eye in general.
            largest_hull = sorted(convex_hulls, key=cv2.contourArea)[-1]

            # However, eyes are annoyingly three dimensional, so we need to take this ellipse and turn it
            # into a curve patch on the surface of a sphere (the eye itself). If it's not a sphere, see your
            # ophthalmologist about possible issues with astigmatism.
            try:
                cx, cy, w, h, theta = fit_rotated_ellipse_ransac(largest_hull.reshape(-1, 2))
            except Exception:
                self.blob_tracking_fallback()
                continue

            # Get axis and angle of the ellipse, using Pupil Labs' 2D algorithms. The next bit of code ranges
            # from somewhat to completely magic, as most of it happens in native libraries (hence passing
            # via dicts).
            result_2d = {}
            result_2d_final = {}

            result_2d["center"] = (cx, cy)
            result_2d["axes"] = (w, h)
            result_2d["angle"] = theta * 180.0 / np.pi
            result_2d_final["ellipse"] = result_2d
            result_2d_final["diameter"] = w
            result_2d_final["location"] = (cx, cy)
            result_2d_final["confidence"] = 0.99
            result_2d_final["timestamp"] = self.current_frame_number / self.current_fps
            # Black magic happens here, but after this we have our reprojected pupil/eye, and all we had
            # to do was sell our soul to satan and/or C++.
            result_3d = detector_3d.update_and_detect(result_2d_final, self.current_image_gray)

            # Now we have our pupil
            ellipse_3d = result_3d["ellipse"]
            # And our eyeball that the pupil is on the surface of
            self.lkg_projected_sphere = result_3d["projected_sphere"]

            # Record our pupil center
            exm = ellipse_3d["center"][0]
            eym = ellipse_3d["center"][1]

            if self.calibration_frame_counter == 0 or self.recenter_eye:
                self.calibration_frame_counter = None
                self.recenter_eye = False
                self.xoff = exm
                self.yoff = eym
            elif self.calibration_frame_counter is not None:
                if exm > self.xmax:
                    self.xmax = exm
                if exm < self.xmin:
                    self.xmin = exm
                if eym > self.ymax:
                    self.ymax = eym
                if eym < self.ymin:
                    self.ymin = eym
                self.calibration_frame_counter -= 1
            eye_position_scalar = self.config.vrc_eye_position_scalar

            # Filter our values with a One Euro Filter
            noisy_point = np.array([cx, cy])
            point_hat = self.one_euro_filter(noisy_point)
            cx = point_hat[0]
            cy = point_hat[1]

            xl = float(((cx - self.xoff) * eye_position_scalar) / (self.xmax - self.xoff))
            xr = float(((cx - self.xoff) * eye_position_scalar) / (self.xmin - self.xoff))
            yu = float(((cy - self.yoff) * eye_position_scalar) / (self.ymax - self.yoff))
            yd = float(((cy - self.yoff) * eye_position_scalar) / (self.ymin - self.yoff))

            out_x = 0
            out_y = 0
            if xr > 0:
                out_x = max(0.0, min(1.0, xr))
            if xl > 0:
                out_x = -abs(max(0.0, min(1.0, xl)))
            if yd > 0:
                out_y = -abs(max(0.0, min(1.0, yd)))
            if yu > 0:
                out_y = max(0.0, min(1.0, yu))
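            # Worked example of the normalization above (hypothetical numbers,
            # eye_position_scalar = 1): with xoff = 50, xmin = 20, xmax = 80 and
            # a filtered center cx = 65,
            #   xl = (65 - 50) / (80 - 50) = 0.5, xr = (65 - 50) / (20 - 50) = -0.5,
            # so only xl is positive and out_x becomes -0.5: gaze right of center
            # maps to a negative out_x under this code's sign convention.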

            # print(cx, cy, out_x, out_y, 'RANSAC 3D')

            output_info = EyeInformation(InformationOrigin.RANSAC, out_x, out_y, False)

            # Draw our image and stack it for visual output
            cv2.drawContours(self.current_image_gray, contours, -1, (255, 0, 0), 1)

            # draw pupil
            try:
                cv2.ellipse(
                    self.current_image_gray,
                    tuple(int(v) for v in ellipse_3d["center"]),
                    tuple(int(v) for v in ellipse_3d["axes"]),
                    ellipse_3d["angle"],
                    0,
                    360,  # start/end angle for drawing
                    (0, 255, 0),  # color (BGR): green
                )
            except Exception:
                # Sometimes we get bogus axes and trying to draw this throws. Ideally we should check for
                # validity beforehand, but for now just pass. It usually fixes itself on the next frame.
                pass
            # draw line from center of eyeball to center of pupil
            cv2.line(
                self.current_image_gray,
                tuple(int(v) for v in self.lkg_projected_sphere["center"]),
                tuple(int(v) for v in ellipse_3d["center"]),
                (0, 255, 0),  # color (BGR): green
            )

            # Shove a concatenated image out to the main GUI thread for rendering
            self.output_images_and_update(thresh, output_info)
Binary file not shown.
@@ -1,26 +0,0 @@
import pyttsx3
import queue
import threading


class SpeechEngine:
    def __init__(self, queue: "queue.Queue[str | None]"):
        self.engine = pyttsx3.init()
        self.queue = queue

    def say(self, item):
        self.engine.say(item)

    def force_stop(self):
        self.engine.stop()

    def run(self):
        while True:
            print("Waiting for speech item")
            item = self.queue.get()
            if item is None:
                print("Stopping speech engine")
                self.engine.stop()
                return
            self.engine.say(item)
            self.engine.runAndWait()
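A minimal usage sketch for the engine above (the wiring is an assumption, mirroring the queue-and-thread pattern used elsewhere in the app): run it on its own thread and feed it strings, with None as the shutdown sentinel.

speech_queue = queue.Queue()
engine = SpeechEngine(speech_queue)
threading.Thread(target=engine.run, daemon=True).start()
speech_queue.put("Calibration complete")
speech_queue.put(None)  # sentinel: run() stops the engine and returns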
BIN
EyeTrackApp/Images/logo.ico
Normal file
Binary file not shown.
After Width: | Height: | Size: 174 KiB
@@ -106,7 +106,7 @@ class CameraWidget:
                drag_submits=True,
                enable_events=True,
            ),
            sg.Text("Please set an Eye Cropping.", key=self.gui_roi_message, visible=False),
            sg.Text("Please set an Eye Cropping.", key=self.gui_roi_message, background_color='#424042', visible=False),
        ],
    ]
@@ -96,7 +96,7 @@ def main():
    eyes[1].start()

    # Create the window
    window = sg.Window("EyeTrackVR v0.1.1", layout, icon='logo.ico', background_color='#292929')
    window = sg.Window("EyeTrackVR v0.1.1", layout, icon='Images/logo.ico', background_color='#292929')

    # GUI Render loop
    while True: