Prohurtz 2023-03-19 16:46:10 -05:00
parent 7836a5022b
commit 5c0b1b45fd
3 changed files with 9 additions and 18 deletions

View File

@@ -115,9 +115,7 @@ class Camera:
if should_push:
self.push_image_to_queue(image, frame_number, fps)
except:
print(
"Capture source problem, assuming camera disconnected, waiting for reconnect."
)
print("\033[93m[INFO] Capture source problem, assuming camera disconnected, waiting for reconnect.\033[0m")
self.camera_status = CameraState.DISCONNECTED
pass
@@ -160,8 +158,7 @@ class Camera:
print(ex)
except Exception as ex:
print(ex.__class__)
print(
"Serial capture source problem, assuming camera disconnected, waiting for reconnect.")
print("\033[93m[INFO]Serial capture source problem, assuming camera disconnected, waiting for reconnect.\033[0m")
self.camera_status = CameraState.DISCONNECTED
pass
@@ -188,6 +185,6 @@ class Camera:
# some sort of capture event conflict though.
qsize = self.camera_output_outgoing.qsize()
if qsize > 1:
print(f"\033[91mCAPTURE QUEUE BACKPRESSURE OF {qsize}. CHECK FOR CRASH OR TIMING ISSUES IN ALGORITHM.\033[0m")
print(f"\033[91m[WARN] CAPTURE QUEUE BACKPRESSURE OF {qsize}. CHECK FOR CRASH OR TIMING ISSUES IN ALGORITHM.\033[0m")
self.camera_output_outgoing.put((image, frame_number, fps))
self.capture_event.clear()
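
For context, the strings added in this file wrap messages in ANSI escape sequences: \033[93m and \033[91m switch the terminal foreground colour to yellow and red, and \033[0m resets it. A minimal standalone sketch of the same convention follows; the helper name and the level-to-colour mapping are illustrative, not part of this commit.

# Illustrative sketch only: tag and colour console messages the way the prints above do.
RESET = "\033[0m"
LEVEL_COLORS = {
    "INFO": "\033[93m",   # yellow
    "WARN": "\033[91m",   # red
    "ERROR": "\033[91m",  # red
}

def log(level, message):
    colour = LEVEL_COLORS.get(level, "")  # unknown levels fall back to uncoloured output
    print(f"{colour}[{level}] {message}{RESET}")

log("INFO", "Capture source problem, assuming camera disconnected, waiting for reconnect.")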

View File

@@ -69,7 +69,7 @@ def u16_3ch_to_u32_1ch(img):
def newdata(frameshape):
print("Initialise data for blinking.")
print("\033[94m[INFO] Initialise data for blinking.\033[0m")
return np.zeros(frameshape, dtype=np.uint32)
@@ -99,7 +99,7 @@ class IntensityBasedOpeness:
# Not very clever, but increase the width by 1px to save the maximum value.
frameshape = (frameshape[0], frameshape[1] + 1)
if self.data is None:
print("Load data for blinking: {}".format(self.imgfile))
print(f"\033[92m[INFO] Loaded data for blinking: {self.imgfile}\033[0m")
if os.path.isfile(self.imgfile):
try:
img = cv2.imread(self.imgfile, flags=cv2.IMREAD_UNCHANGED)
@@ -116,16 +116,16 @@ class IntensityBasedOpeness:
else:
self.maxval = self.data[0, -1]
except:
print("File read error: {}".format(self.imgfile))
print("[ERROR] File read error: {}".format(self.imgfile))
req_newdata = True
else:
print("File does not exist.")
print("\033[94m[INFO] File does not exist.\033[0m")
req_newdata = True
else:
if self.data.shape != frameshape or not np.array_equal(self.img_roi, self.now_roi):
# If the ROI recorded in the image file differs from the current ROI
# TODO: Using the previous and current frame sizes and centre positions from the original, the data could be ported to some extent, but that would likely require code changes in many places.
print("Frame size changed.")
print("[INFO] \033[94mFrame size changed.\033[0m")
req_newdata = True
if req_newdata:
self.data = newdata(frameshape)
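
As the comment above notes, the blink data is kept as a uint32 image whose width is padded by one column so that a single scalar (the running maximum) can be stored alongside the per-pixel values and survive being read back with cv2.imread(..., flags=cv2.IMREAD_UNCHANGED). A rough sketch of that layout with made-up sizes, not the project's actual API:

import numpy as np

# Illustrative sketch: per-pixel data lives in columns 0..w-1, the spare last column holds metadata.
frame_h, frame_w = 240, 320
data = np.zeros((frame_h, frame_w + 1), dtype=np.uint32)  # +1 column, as in newdata()

data[:, :frame_w] += 1                   # accumulate per-pixel intensity statistics
data[0, -1] = data[:, :frame_w].max()    # stash the current maximum in the extra column

maxval = data[0, -1]                     # recovered later, mirroring self.maxval = self.data[0, -1]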

View File

@@ -2,7 +2,6 @@
from pythonosc import udp_client
from pythonosc import osc_server
from pythonosc import dispatcher
import sys
from utils.misc_utils import PlaySound,SND_FILENAME,SND_ASYNC
import queue
import threading
@@ -35,7 +34,6 @@ def output_osc(eye_x, eye_y, eye_blink, last_blink, self):
for i in range(5):
self.client.send_message("/avatar/parameters/LeftEyeLidExpandedSqueeze", float(self.l_eye_blink))
last_blink = time.time() - last_blink
if self.config.gui_eye_falloff:
if self.r_eye_blink == 0.0: # if both eyes are closed and DEF is enabled, blink
self.client.send_message("/avatar/parameters/LeftEyeLidExpandedSqueeze", float(self.l_eye_blink))
@@ -57,7 +55,6 @@ def output_osc(eye_x, eye_y, eye_blink, last_blink, self):
for i in range(5):
self.client.send_message("/avatar/parameters/LeftEyeLidExpandedSqueeze", float(self.r_eye_blink))
last_blink = time.time() - last_blink
if self.config.gui_eye_falloff:
if self.l_eye_blink == 0.0: # if both eyes are closed and DEF is enabled, blink
self.client.send_message("/avatar/parameters/LeftEyeLidExpandedSqueeze", float(self.r_eye_blink))
@@ -75,7 +72,6 @@ def output_osc(eye_x, eye_y, eye_blink, last_blink, self):
self.client.send_message("/avatar/parameters/EyesY", y)
class VRChatOSC:
# Use a tuple of blink (true, blinking, false, not), x, y for now.
def __init__(self, cancellation_event: threading.Event, msg_queue: queue.Queue[tuple[bool, int, int]], main_config: EyeTrackConfig,):
@@ -93,7 +89,6 @@ class VRChatOSC:
self.l_eye_blink = 0.7
def run(self):
start = time.time()
last_blink = time.time()
@@ -109,7 +104,6 @@ class VRChatOSC:
output_osc(eye_info.x, eye_info.y, eye_info.blink, last_blink, self)
class VRChatOSCReceiver:
def __init__(self, cancellation_event: threading.Event, main_config: EyeTrackConfig, eyes: list):
self.config = main_config.settings
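
For reference, the send_message calls above go through python-osc's UDP client. Below is a minimal standalone example of publishing one of these avatar parameters; the host, port, and value are placeholders (VRChat listens for OSC input on port 9000 by default).

from pythonosc import udp_client

# Illustrative only: create a client pointed at VRChat's default OSC input port.
client = udp_client.SimpleUDPClient("127.0.0.1", 9000)

# Same address used in the diff; 0.7 mirrors the default l_eye_blink value above.
client.send_message("/avatar/parameters/LeftEyeLidExpandedSqueeze", 0.7)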