Add eye position output graph

Kyle Machulis 2022-06-04 16:01:25 -07:00
parent 484057eb91
commit cd37a6c7ef
3 changed files with 23 additions and 12 deletions


@@ -45,8 +45,11 @@ class Camera:
                 raise RuntimeError("Problem while getting frame")
             frame_number = self.capture_source.get(cv2.CAP_PROP_POS_FRAMES)
             fps = self.capture_source.get(cv2.CAP_PROP_FPS)
-            if self.camera_output_outgoing.qsize() > 0:
+            if self.camera_output_outgoing.qsize() > 1:
                 print("CAPTURE QUEUE BACKPRESSURE. CHECK FOR TIMING ISSUES IN ALGORITHM.")
+                while self.camera_output_outgoing.qsize() > 0:
+                    time.sleep(10)
+                print("BACKPRESSURE CLEAR, CONTINUING")
             self.camera_output_outgoing.put((image, frame_number, fps))
         except:
             print("Capture source problem, assuming camera disconnected, waiting for reconnect.")


@@ -18,6 +18,7 @@ ROI_SELECTION_NAME = "-GRAPH-"
 TRACKING_BUTTON_NAME = "-TRACKINGMODE-"
 TRACKING_LAYOUT_NAME = "-TRACKINGLAYOUT-"
 TRACKING_IMAGE_NAME = "-IMAGE-"
+OUTPUT_GRAPH_NAME = "-OUTPUTGRAPH-"
 
 def main():
     in_roi_mode = False
@@ -35,6 +36,7 @@ def main():
         [sg.Text("Threshold"), sg.Slider(range=(0, 100), default_value=config.threshold, orientation = 'h', key=THRESHOLD_SLIDER_NAME)],
         [sg.Text("Rotation"), sg.Slider(range=(0, 360), default_value=config.rotation_angle, orientation = 'h', key=ROTATION_SLIDER_NAME)],
         [sg.Image(filename="", key=TRACKING_IMAGE_NAME)],
+        [sg.Graph((200,200), (-100, 100), (100, -100), key=OUTPUT_GRAPH_NAME,drag_submits=True, enable_events=True)]
     ]
 
     layout = [[sg.Text("Camera Address"), sg.InputText(config.capture_source, key=CAMERA_ADDR_NAME)],
@@ -166,9 +168,15 @@ def main():
             pass
         else:
             try:
-                maybe_image = image_queue.get(block = False)
+                (maybe_image, eye_info) = image_queue.get(block = False)
+                print(eye_info)
                 imgbytes = cv2.imencode(".ppm", maybe_image)[1].tobytes()
                 window[TRACKING_IMAGE_NAME].update(data=imgbytes)
+                graph = window[OUTPUT_GRAPH_NAME]
+                graph.erase()
+                if eye_info[0] and not eye_info[3]:
+                    graph.draw_circle((eye_info[1] * -100, eye_info[2] * -100), 25, fill_color='black',line_color='white')
             except queue.Empty:
                 pass
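
The GUI event loop now receives (image, eye_info) pairs from the worker, and the 200x200 sg.Graph added above spans graph coordinates (-100, 100) to (100, -100), so the eye position is scaled by -100 before drawing. A minimal consumer-side sketch of this step, assuming eye_info is laid out as (found, x, y, blink) and adding a guard for the None tuples that the RANSAC early-return paths below send:

    import queue
    import cv2

    TRACKING_IMAGE_NAME = "-IMAGE-"
    OUTPUT_GRAPH_NAME = "-OUTPUTGRAPH-"

    def render_queue_entry(window, image_queue):
        # Non-blocking read; the queue now carries (image, eye_info) pairs.
        try:
            maybe_image, eye_info = image_queue.get(block=False)
        except queue.Empty:
            return
        imgbytes = cv2.imencode(".ppm", maybe_image)[1].tobytes()
        window[TRACKING_IMAGE_NAME].update(data=imgbytes)
        graph = window[OUTPUT_GRAPH_NAME]
        graph.erase()
        # eye_info may be None when the worker had nothing to report; only draw a
        # pupil marker for a valid, non-blink frame.
        if eye_info is not None and eye_info[0] and not eye_info[3]:
            graph.draw_circle((eye_info[1] * -100, eye_info[2] * -100), 25,
                              fill_color='black', line_color='white')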


@@ -130,9 +130,9 @@ class Ransac:
         self.ymin = -69420
         self.previous_rotation = self.config.rotation_angle
 
-    def output_images_and_update(self, threshold_image):
+    def output_images_and_update(self, threshold_image, output_tuple):
         image_stack = np.concatenate((self.current_image, cv2.cvtColor(self.current_image_gray, cv2.COLOR_GRAY2BGR), cv2.cvtColor(threshold_image, cv2.COLOR_GRAY2BGR)), axis=1)
-        self.image_queue_outgoing.put(image_stack)
+        self.image_queue_outgoing.put((image_stack, output_tuple))
         self.previous_image = self.current_image
         self.previous_rotation = self.config.rotation_angle
@@ -167,7 +167,7 @@ class Ransac:
         # means we need to have had at least one successful runthrough of the Pupil Labs algorithm in
         # order to have a projected sphere.
         if self.lkg_projected_sphere == None:
-            self.output_images_and_update(larger_threshold)
+            self.output_images_and_update(larger_threshold, None)
             return
 
         try:
@@ -178,7 +178,7 @@ class Ransac:
             if len(contours) == 0:
                 raise RuntimeError("No contours found for image")
         except:
-            self.output_images_and_update(larger_threshold)
+            self.output_images_and_update(larger_threshold, None)
             return
 
         rows, cols = larger_threshold.shape
@@ -199,7 +199,6 @@ class Ransac:
             cv2.drawContours(self.current_image_gray, [cnt], -1, (255, 0, 0), 3)
             cv2.rectangle(self.current_image_gray, (x, y), (x + w, y + h), (255, 0, 0), 2)
-
             if xrlb >= 0:
                 pass
                 # client.send_message("/avatar/parameters/RightEyeX", -abs(xrl))
                 # client.send_message("/avatar/parameters/LeftEyeX", -abs(xrl))
@@ -207,13 +206,13 @@ class Ransac:
                 pass
                 # client.send_message("/avatar/parameters/RightEyeX", -abs(xrl))
                 # client.send_message("/avatar/parameters/LeftEyeX", -abs(xrl))
-            self.output_images_and_update(larger_threshold)
+            self.output_images_and_update(larger_threshold, (True, -abs(xrlb) if xrlb >= 0 else abs(xrlb), -abs(xrlb) if eyeyb <= 0 else abs(xrlb), False))
             # If we've sent something, just return.
             return
 
         # If we haven't returned yet, consider this blinking
         # client.send_message("/avatar/parameters/LeftEyeLid", float(0))
         # client.send_message("/avatar/parameters/RightEyeLid", float(0))
-        self.output_images_and_update(larger_threshold)
+        self.output_images_and_update(larger_threshold, (True, 0, 0, True))
         print('[INFO] BLINK Detected.')
 
     def run(self):
@@ -333,9 +332,10 @@ class Ransac:
             # TODO Reimplement Prohurtz's Center Calibration and Calculations
 
             # Pack our base info to send to VRChat
-            output_tuple = (-abs(xrl) if xrl >= 0 else abs(xrl),
+            output_tuple = (True,
+                            -abs(xrl) if xrl >= 0 else abs(xrl),
                             -abs(eyey) if eyey >= 0 else abs(eyey),
-                            0)
+                            False)
 
             # print(output_tuple)
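
Both this hunk and the blink branch above pack a four-element tuple that the GUI reads positionally, which looks like a (found, x, y, blink) layout. Note that "-abs(v) if v >= 0 else abs(v)" evaluates to -v in both branches, so the packing reduces to a plain sign flip. A small illustrative sketch of that layout (the NamedTuple and helper below are not part of the commit):

    from typing import NamedTuple

    class EyeInfo(NamedTuple):
        found: bool   # a pupil was located this frame
        x: float      # horizontal position, sign-flipped from the raw value
        y: float      # vertical position, sign-flipped from the raw value
        blink: bool   # True when the frame is treated as a blink

    def pack_eye_info(xrl: float, eyey: float) -> EyeInfo:
        # "-abs(v) if v >= 0 else abs(v)" always negates v, i.e. it is just -v.
        return EyeInfo(found=True, x=-xrl, y=-eyey, blink=False)

    BLINK = EyeInfo(found=True, x=0.0, y=0.0, blink=True)  # matches the blink branch
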
@@ -366,5 +366,5 @@ class Ransac:
             )
 
             # Shove a concatenated image out to the main GUI thread for rendering
-            self.output_images_and_update(thresh)
+            self.output_images_and_update(thresh, output_tuple)