Start building new RANSAC App with multiple modules and unified GUI
Divide the utilities out of the main algorithm and run each utility on its own thread. Reference binaries in the original directory so we don't have to duplicate them in the repo.
parent 7f9927823a
commit 71abcd4a06
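Every module added here follows the same worker shape: an object whose run() method drains a queue.Queue until it sees a None sentinel, which is how main.py shuts the threads down. A minimal standalone sketch of that pattern (Worker and work_queue are hypothetical names, not part of this commit):

import queue
import threading


class Worker:
    def __init__(self, msg_queue: "queue.Queue"):
        self.msg_queue = msg_queue

    def run(self):
        while True:
            item = self.msg_queue.get()
            if item is None:  # sentinel: time to exit
                print("Worker exiting")
                return
            print("Worker got", item)


work_queue: "queue.Queue" = queue.Queue()
worker_thread = threading.Thread(target=Worker(work_queue).run)
worker_thread.start()
work_queue.put("some work")
work_queue.put(None)  # ask the worker to stop
worker_thread.join()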
RANSACApp/config.py (new file)
@@ -0,0 +1,22 @@
import json
import os.path


class RansacConfig:
    def __init__(self):
        self.threshhold = 0
        self.rotation_angle = 0
        self.roi_window_x = 0
        self.roi_window_y = 0
        self.roi_window_w = 640
        self.roi_window_h = 480

    def load(self):
        if not os.path.exists("ransac_settings.json"):
            print("No settings file, using base settings")
            return
        with open("ransac_settings.json", 'r') as settings_file:
            # Apply the saved values onto this instance; a bare json.load
            # would parse the file and then discard the result.
            self.__dict__.update(json.load(settings_file))

    def save(self):
        with open("ransac_settings.json", 'w+') as settings_file:
            json.dump(self.__dict__, settings_file)
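A quick round-trip of the config, as a usage sketch (nothing below is in this commit; it just exercises save() and load() as defined above, run from the RANSACApp directory):

from config import RansacConfig

config = RansacConfig()
config.rotation_angle = 90
config.save()                 # writes ransac_settings.json

fresh = RansacConfig()
fresh.load()                  # picks the saved values back up
print(fresh.rotation_angle)   # 90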
RANSACApp/main.py (new file)
@@ -0,0 +1,73 @@
from osc import VRChatOSC
from config import RansacConfig
from speech_engine import SpeechEngine
from ransac import Ransac
import queue
import threading
import cv2

WINDOW_NAME = "RANSACApp"


def main():
    # Get configuration
    config = RansacConfig()
    config.load()

    # Set up basic cv2 window with our GUI
    def update_threshold(val: "int"):
        config.threshhold = val

    def update_rot(val: "int"):
        config.rotation_angle = val

    cv2.namedWindow(WINDOW_NAME)
    # Seed the trackbars from the loaded config so saved settings actually
    # show up instead of always starting at 0.
    cv2.createTrackbar("Threshold", WINDOW_NAME, config.threshhold, 100, update_threshold)
    cv2.createTrackbar("Rotation", WINDOW_NAME, config.rotation_angle, 360, update_rot)

    # Spawn worker threads
    osc_queue: "queue.Queue[tuple[bool, int, int] | None]" = queue.Queue()
    osc = VRChatOSC(osc_queue)
    osc_thread = threading.Thread(target=osc.run)
    osc_thread.start()

    # t2s_queue: "queue.Queue[str | None]" = queue.Queue()
    # t2s_engine = SpeechEngine(t2s_queue)
    # t2s_thread = threading.Thread(target=t2s_engine.run)
    # t2s_thread.start()
    # t2s_queue.put("App Starting")

    ransac_queue = queue.Queue()
    image_queue = queue.Queue()
    ransac = Ransac(config, ransac_queue, image_queue)
    ransac_thread = threading.Thread(target=ransac.run)
    ransac_thread.start()

    # GUI render loop
    while True:
        # If we're in ROI mode, show current video and allow markup.

        # If we're in tracking mode, bring up the tracking thread, let it do
        # all of its work, then update ourselves whenever it pushes out an
        # image into its buffer.
        try:
            maybe_image = image_queue.get(block=False)
            cv2.imshow(WINDOW_NAME, maybe_image)
        except queue.Empty:
            pass

        # If we're in either mode and someone hits q, quit immediately
        if cv2.waitKey(10) & 0xFF == ord("q"):
            cv2.destroyAllWindows()
            osc_queue.put(None)
            osc_thread.join()
            ransac_queue.put(None)
            ransac_thread.join()
            # t2s_engine.force_stop()
            # t2s_queue.put(None)
            # t2s_thread.join()
            print("Exiting RANSAC App")
            return


if __name__ == "__main__":
    main()
RANSACApp/osc.py (new file)
@@ -0,0 +1,36 @@
from pythonosc import udp_client
import queue


class VRChatOSC:
    # VRChat OSC networking info. For now, we'll assume it's always local.
    OSC_IP = "127.0.0.1"
    OSC_PORT = 9000  # VRChat OSC port

    # Use a tuple of blink (True = blinking, False = not), x, y for now.
    # Probably clearer as a class, but we're stuck on Python 3.6, so still no
    # dataclasses. God I hate python.
    def __init__(self, msg_queue: "queue.Queue[tuple[bool, int, int] | None]"):
        self.client = udp_client.SimpleUDPClient(VRChatOSC.OSC_IP, VRChatOSC.OSC_PORT)
        self.msg_queue = msg_queue

    def run(self):
        # Set blinking status to true when we start, just so we make sure we
        # get to an eyelid-open state no matter what.
        was_blinking = True
        while True:
            item = self.msg_queue.get()
            if item is None:
                print("Exiting OSC Queue")
                return
            # If we're not blinking, set position
            if not item[0]:
                self.client.send_message("/avatar/parameters/RightEyeX", item[1])
                self.client.send_message("/avatar/parameters/LeftEyeX", item[1])
                self.client.send_message("/avatar/parameters/EyesY", item[2])
                if was_blinking:
                    # A blink just ended; reopen the eyelids.
                    self.client.send_message("/avatar/parameters/LeftEyeLid", 1.0)
                    self.client.send_message("/avatar/parameters/RightEyeLid", 1.0)
                    was_blinking = False
            else:
                self.client.send_message("/avatar/parameters/LeftEyeLid", 0.0)
                self.client.send_message("/avatar/parameters/RightEyeLid", 0.0)
                was_blinking = True
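Driving the OSC worker from another thread, as a usage sketch: the avatar parameter paths are the ones hard-coded above, but the gaze numbers here are made up.

import queue
import threading
from osc import VRChatOSC

msgs: "queue.Queue[tuple[bool, int, int] | None]" = queue.Queue()
osc = VRChatOSC(msgs)
osc_thread = threading.Thread(target=osc.run)
osc_thread.start()

msgs.put((False, 120, 80))  # eyes open, look toward (120, 80)
msgs.put((True, 0, 0))      # blinking; position fields are ignored
msgs.put(None)              # sentinel: shut the worker down
osc_thread.join()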
RANSACApp/ransac.py (new file)
@@ -0,0 +1,161 @@
import sys
sys.path.append("../RANSAC3d")  # pull in pye3dcustom from the original directory instead of duplicating it
from config import RansacConfig
from pye3dcustom.detector_3d import CameraModel, Detector3D, DetectorMode
import queue
import numpy as np
import cv2


def fit_rotated_ellipse_ransac(
    data, iter=80, sample_num=10, offset=80  # 80.0, 10, 80
):  # before changing these values, please read up on the RANSAC algorithm
    # However, if you do want to change any value, just know that higher
    # iteration counts will make processing frames slower.
    count_max = 0
    effective_sample = None

    for i in range(iter):
        sample = np.random.choice(len(data), sample_num, replace=False)

        xs = data[sample][:, 0].reshape(-1, 1)
        ys = data[sample][:, 1].reshape(-1, 1)

        # np.float was removed from NumPy; use the concrete float64 dtype.
        J = np.mat(
            np.hstack((xs * ys, ys**2, xs, ys, np.ones_like(xs, dtype=np.float64)))
        )
        Y = np.mat(-1 * xs**2)
        P = (J.T * J).I * J.T * Y

        # fit a*x**2 + b*x*y + c*y**2 + d*x + e*y + f = 0 with a fixed at 1
        a = 1.0
        b = P[0, 0]
        c = P[1, 0]
        d = P[2, 0]
        e = P[3, 0]
        f = P[4, 0]
        ellipse_model = (
            lambda x, y: a * x**2 + b * x * y + c * y**2 + d * x + e * y + f
        )

        # Threshold: keep the points whose residual is under the offset.
        ran_sample = np.array(
            [[x, y] for (x, y) in data if np.abs(ellipse_model(x, y)) < offset]
        )

        if len(ran_sample) > count_max:
            count_max = len(ran_sample)
            effective_sample = ran_sample

    # Refit on the largest inlier set found across all iterations (assumes at
    # least one iteration produced inliers).
    return fit_rotated_ellipse(effective_sample)

def fit_rotated_ellipse(data):
    xs = data[:, 0].reshape(-1, 1)
    ys = data[:, 1].reshape(-1, 1)

    J = np.mat(np.hstack((xs * ys, ys**2, xs, ys, np.ones_like(xs, dtype=np.float64))))
    Y = np.mat(-1 * xs**2)
    P = (J.T * J).I * J.T * Y

    a = 1.0
    b = P[0, 0]
    c = P[1, 0]
    d = P[2, 0]
    e = P[3, 0]
    f = P[4, 0]
    theta = 0.5 * np.arctan(b / (a - c))

    cx = (2 * c * d - b * e) / (b**2 - 4 * a * c)
    cy = (2 * a * e - b * d) / (b**2 - 4 * a * c)

    cu = a * cx**2 + b * cx * cy + c * cy**2 - f
    w = np.sqrt(
        cu
        / (
            a * np.cos(theta) ** 2
            + b * np.cos(theta) * np.sin(theta)
            + c * np.sin(theta) ** 2
        )
    )
    h = np.sqrt(
        cu
        / (
            a * np.sin(theta) ** 2
            - b * np.cos(theta) * np.sin(theta)
            + c * np.cos(theta) ** 2
        )
    )

    ellipse_model = lambda x, y: a * x**2 + b * x * y + c * y**2 + d * x + e * y + f

    # Residual sum over all points; currently unused, kept around for debugging.
    error_sum = np.sum([ellipse_model(x, y) for x, y in data])

    return (cx, cy, w, h, theta)
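
# Sanity notes on the parameter recovery above (my reading of the math, with
# a fixed at 1 in the conic a*x^2 + b*x*y + c*y^2 + d*x + e*y + f = 0):
#   - theta = 0.5 * arctan(b / (a - c)) undoes the cross term, i.e. it is the
#     ellipse's rotation angle.
#   - (cx, cy) solves the zero-gradient system 2*a*x + b*y + d = 0 and
#     b*x + 2*c*y + e = 0, which yields exactly the two formulas used above.
#   - cu is the conic evaluated at the center (sign flipped); dividing it by
#     the quadratic form along each rotated axis gives the squared semi-axes,
#     hence the two np.sqrt calls for w and h.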

class Ransac:
    def __init__(self, config: "RansacConfig", msg_queue: "queue.Queue[None]", img_queue):
        self.config = config
        self.img_queue = img_queue
        self.msg_queue = msg_queue

        self.roicheck = 1

        self.xoff = 1
        self.yoff = 1
        self.eyeoffset = 300  # Keep large in order to recenter correctly
        self.eyeoffx = 1
        self.setoff = 1
        self.x = config.roi_window_x
        self.y = config.roi_window_y
        self.w = config.roi_window_w
        self.h = config.roi_window_h

        self.xmax = 69420
        self.xmin = -69420
        self.ymax = 69420
        self.ymin = -69420

    def run(self):
        cap = cv2.VideoCapture(2)  # change this to the video you want to test
        # Get an initial image to get our settings for this run
        ret, img = cap.read()
        imgo = img  # keep the last good frame around as a fallback
        frame_number = cap.get(cv2.CAP_PROP_POS_FRAMES)
        fps = cap.get(cv2.CAP_PROP_FPS)
        width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
        height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
        # print(cv2.selectROI("image", img, fromCenter=False, showCrosshair=True))

        # TODO Read focal length from config
        camera = CameraModel(focal_length=60, resolution=[self.w, self.h])
        detector_3d = Detector3D(camera=camera, long_term_mode=DetectorMode.blocking)
        while cap.isOpened():
            try:
                self.msg_queue.get(block=False)
                print("Exiting RANSAC thread")
                return
            except queue.Empty:
                pass

            result_2d = {}
            result_2d_final = {}

            # Get our current frame, cropped to the ROI window.
            try:
                ret, img = cap.read()
                if ret:
                    imgo = img  # remember the last good frame
                img = img[int(self.y): int(self.y + self.h), int(self.x): int(self.x + self.w)]
            except Exception:
                # Fall back to cropping the previous good frame.
                img = imgo[int(self.y): int(self.y + self.h), int(self.x): int(self.x + self.w)]
                print('[SEVERE WARN] Frame Issue Detected.')

            frame_number = cap.get(cv2.CAP_PROP_POS_FRAMES)
            fps = cap.get(cv2.CAP_PROP_FPS)

            if not ret:
                print("Error fetching frame, bailing")
                return
            # image_stack = np.concatenate((img, cv2.cvtColor(image_gray, cv2.COLOR_GRAY2BGR), cv2.cvtColor(thresh, cv2.COLOR_GRAY2BGR), cv2.cvtColor(backupthresh, cv2.COLOR_GRAY2BGR)), axis=1)
            image_stack = img
            self.img_queue.put(image_stack)
            # Initial image will be huge, resize by half.
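A quick way to sanity-check the fitter in isolation, as a sketch: generate noisy points on a known rotated ellipse and see whether fit_rotated_ellipse_ransac recovers it. Importing from ransac assumes the module-level pye3dcustom import can be satisfied; otherwise copy the two fit_* functions out. All numbers here are made up.

import numpy as np
from ransac import fit_rotated_ellipse_ransac

# Points on an ellipse centered at (320, 240) with semi-axes 120 and 60,
# rotated by 0.3 rad, plus a little Gaussian noise.
t = np.linspace(0, 2 * np.pi, 300)
ct, st = np.cos(0.3), np.sin(0.3)
x = 320 + 120 * np.cos(t) * ct - 60 * np.sin(t) * st
y = 240 + 120 * np.cos(t) * st + 60 * np.sin(t) * ct
pts = np.column_stack((x, y)) + np.random.normal(scale=0.5, size=(300, 2))

cx, cy, w, h, theta = fit_rotated_ellipse_ransac(pts)
print(f"center=({cx:.1f}, {cy:.1f}), axes=({w:.1f}, {h:.1f}), theta={theta:.3f}")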
RANSACApp/speech_engine.py (new file)
@@ -0,0 +1,25 @@
import pyttsx3
import queue


class SpeechEngine:
    def __init__(self, queue: "queue.Queue[str | None]"):
        self.engine = pyttsx3.init()
        self.queue = queue

    def say(self, item):
        self.engine.say(item)

    def force_stop(self):
        self.engine.stop()

    def run(self):
        while True:
            print("Waiting for speech item")
            item = self.queue.get()
            if item is None:
                print("Stopping speech engine")
                self.engine.stop()
                return
            self.engine.say(item)
            self.engine.runAndWait()
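And the matching usage, as a sketch mirroring the commented-out wiring in main.py:

import queue
import threading
from speech_engine import SpeechEngine

t2s_queue: "queue.Queue[str | None]" = queue.Queue()
t2s_engine = SpeechEngine(t2s_queue)
t2s_thread = threading.Thread(target=t2s_engine.run)
t2s_thread.start()

t2s_queue.put("App Starting")
t2s_queue.put(None)  # sentinel: stop the engine thread
t2s_thread.join()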