Mirror of https://github.com/EyeTrackVR/EyeTrackVR.git (synced 2025-11-04 14:39:42 +08:00)

Commit a763098bad (parent 25d7417c62): fix algos, ransac crash fix
@@ -287,7 +287,7 @@ class EyeProcessor:

     def DADDYM(self):
         # todo: We should have a proper variable for drawing.
-        self.thresh=self.current_image_gray.copy()
+        self.thresh = self.current_image_gray.copy()
         self.rawx, self.rawy, self.eyeopen = self.er_daddy.run(self.current_image_gray)
         # Daddy also uses a one euro filter, so I'll have to use it twice, but I'm not going to think too much about it.
         self.out_x, self.out_y = cal.cal_osc(self, self.rawx, self.rawy)
@@ -295,8 +295,7 @@ class EyeProcessor:

     def HSRACM(self):
         # todo: added process to initialise er_hsrac when resolution changes
-        self.rawx, self.rawy, self.thresh, gray_frame, self.bd_blink = self.er_hsrac.run(self.current_image_gray)
-        self.current_image_gray = gray_frame
+        self.rawx, self.rawy, self.thresh, self.current_image_gray, self.bd_blink = self.er_hsrac.run(self.current_image_gray)
         if self.prev_x is None:
             self.prev_x = self.rawx
             self.prev_y = self.rawy
@@ -313,13 +312,11 @@ class EyeProcessor:
     def RANSAC3DM(self):
         current_image_gray_copy = self.current_image_gray.copy()  # Duplicate before overwriting in RANSAC3D.
         self.rawx, self.rawy, self.thresh = RANSAC3D(self)
-        self.eyeopen = self.ibo.intense(self.rawx, self.rawy, current_image_gray_copy)
         self.out_x, self.out_y = cal.cal_osc(self, self.rawx, self.rawy)
         self.current_algorithm = InformationOrigin.RANSAC

     def BLOBM(self):
         self.rawx, self.rawy, self.thresh = BLOB(self)
-        self.eyeopen = self.ibo.intense(self.rawx, self.rawy, self.current_image_gray)
         self.out_x, self.out_y = cal.cal_osc(self, self.rawx, self.rawy)
         self.current_algorithm = InformationOrigin.BLOB

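Note on the two removed lines above: after this commit the RANSAC and BLOB wrappers no longer call the intensity-based openness estimator, so each wrapper reduces to the same three-step shape. A minimal sketch of that shape, assuming the names shown in the diff (RANSAC3D, cal, InformationOrigin) are available in the real module; this is an illustration of the flow, not the file's verbatim contents:

    def RANSAC3DM(self):
        # 1. run the detector on the current gray frame
        self.rawx, self.rawy, self.thresh = RANSAC3D(self)
        # 2. pass the raw pupil position through the calibration / one euro filter
        self.out_x, self.out_y = cal.cal_osc(self, self.rawx, self.rawy)
        # 3. record which algorithm produced the result
        self.current_algorithm = InformationOrigin.RANSAC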
@@ -168,9 +168,9 @@ class IntensityBasedOpeness:
         avg_color = np.average(avg_color_per_row, axis=0)
         ar, ag, ab = avg_color
         intensity = ar
-        cv2.imshow("IBO", frame_crop)
-        if cv2.waitKey(1) & 0xFF == ord("q"):
-            pass
+        #cv2.imshow("IBO", frame_crop)
+        #if cv2.waitKey(1) & 0xFF == ord("q"):
+            # pass



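For reference, the intensity value this class derives (and which the now-commented cv2.imshow window used to display) is just the mean brightness of the cropped eye region. A self-contained sketch, with a random placeholder array standing in for frame_crop and the first averaging line assumed from the usual pattern:

    import numpy as np

    # Placeholder crop; in IntensityBasedOpeness this is the cropped eye image.
    frame_crop = np.random.randint(0, 255, (32, 32, 3), dtype=np.uint8)

    avg_color_per_row = np.average(frame_crop, axis=0)  # collapse the height axis -> (width, 3)
    avg_color = np.average(avg_color_per_row, axis=0)   # collapse again -> (3,)
    ar, ag, ab = avg_color
    intensity = ar                                      # first channel used as the openness intensity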
@@ -95,6 +95,7 @@ def output_osc(eye_x, eye_y, eye_blink, last_blink, self):
     if self.eye_id in [EyeId.LEFT] and not se:  # left eye, send data to left
         self.l_eye_x = eye_x
         self.l_eye_blink = eye_blink
+        self.left_y = eye_y

         if self.l_eye_blink == 0.0:
             if last_blink > 0.7:  # when binary blink is on, blinks may be too fast for OSC so we repeat them.
@@ -108,13 +109,14 @@ def output_osc(eye_x, eye_y, eye_blink, last_blink, self):
                                      float(1 - eye_blink))
             self.l_eye_x = self.r_eye_x

-        self.left_y = eye_y
+



     elif self.eye_id in [EyeId.RIGHT] and not se:  # Right eye, send data to right
         self.r_eye_x = eye_x
         self.r_eye_blink = eye_blink
+        self.right_y = eye_y

         if self.r_eye_blink == 0.0:
             if last_blink > 0.7:  # when binary blink is on, blinks may be too fast for OSC so we repeat them.
@@ -129,8 +131,6 @@ def output_osc(eye_x, eye_y, eye_blink, last_blink, self):

             self.r_eye_x = self.l_eye_x

-        self.right_y = eye_y
-

     if self.main_config.eye_display_id in [EyeId.BOTH] and self.r_eye_blink != 621 and self.r_eye_blink != 621:
         if self.r_eye_blink == 0.0 or self.l_eye_blink == 0.0:
@@ -138,6 +138,7 @@ def output_osc(eye_x, eye_y, eye_blink, last_blink, self):
             for i in range(5):
                 self.client.send_message("/tracking/eye/EyesClosedAmount",
                                          float(1))
+            last_blink = time.time() - last_blink
             eye_blink = (self.r_eye_blink + self.l_eye_blink) / 2
             self.client.send_message("/tracking/eye/EyesClosedAmount",
                                      float(1 - eye_blink))
@@ -147,6 +148,7 @@ def output_osc(eye_x, eye_y, eye_blink, last_blink, self):
         eye_y = (self.right_y + self.left_y) / 2

         if not se:
+
             self.client.send_message("/tracking/eye/LeftRightVec",
                                      [float(self.l_eye_x), float(eye_y), 0.8, float(self.r_eye_x), float(eye_y),
                                       0.8])  # vrc native ET (z values may need tweaking, they act like a scalar)
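The new per-eye left_y / right_y fields added above feed this branch: both eyes share one averaged Y when the native VRChat message is sent. A minimal, self-contained sketch of that send using python-osc; the host, port, and example coordinates are placeholders, not values from the repository:

    from pythonosc import udp_client

    # Placeholder endpoint; EyeTrackVR takes its OSC address and port from its config.
    client = udp_client.SimpleUDPClient("127.0.0.1", 9000)

    l_eye_x, r_eye_x = -0.1, 0.1      # example horizontal gaze values
    left_y, right_y = 0.05, 0.07      # per-eye vertical values, as stored by this commit
    eye_y = (right_y + left_y) / 2    # both eyes share one averaged Y

    # vrc native ET (z values may need tweaking, they act like a scalar)
    client.send_message(
        "/tracking/eye/LeftRightVec",
        [float(l_eye_x), float(eye_y), 0.8, float(r_eye_x), float(eye_y), 0.8],
    )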
@@ -121,7 +121,10 @@ def fit_rotated_ellipse(data, P):
     # I just want to clear things up around here.
     cu = a * cx ** 2 + b * cx * cy + c * cy ** 2 - f
     cu_r = np.array([(a * tc2 + b_tcs + c * ts2), (a * ts2 - b_tcs + c * tc2)])
-    wh = np.sqrt(cu / cu_r)
+    if cu > 1: #negatives can get thrown which cause errors, just ignore them
+        wh = np.sqrt(cu / cu_r)
+    else:
+        pass

     w, h = wh[0], wh[1]

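This hunk is the "ransac crash fix" from the commit title: when the fitted conic yields a non-positive cu, np.sqrt(cu / cu_r) produces NaNs (with a RuntimeWarning), and the NaN width/height then break the downstream ellipse handling. A small self-contained sketch of the failure mode and the guard, with made-up numbers standing in for the fitted parameters:

    import numpy as np

    cu_r = np.array([2.0, 3.0])  # illustrative values; in fit_rotated_ellipse these come from the conic fit

    for cu in (5.0, -1.5):
        if cu > 1:  # the guard added in the hunk above
            w, h = np.sqrt(cu / cu_r)
            print("axes:", w, h)
        else:
            # Without the guard, np.sqrt(-1.5 / cu_r) would return NaNs (and warn),
            # and later integer conversion of those NaNs crashes the frame.
            print("skipping frame, cu =", cu)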
@@ -177,7 +180,7 @@ def RANSAC3D(self):
     frame = self.current_image_gray
     # For measuring processing time of image processing
     # Crop first to reduce the amount of data to process.
-    frame = frame[0:len(frame) - 5, :]
+    # frame = frame[0:len(frame) - 5, :]
     # To reduce the processing data, first convert to 1-channel and then blur.
     # The processing results were the same when I swapped the order of blurring and 1-channelization.
     frame_gray = cv2.GaussianBlur(frame, (5, 5), 0)
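For context, the remaining lines in this hunk are RANSAC3D's preprocessing: the frame is already single-channel, it is blurred, and with the bottom-crop commented out the full frame now goes through this step. A standalone sketch of that preprocessing; the image path and the follow-up threshold are illustrative assumptions, not taken from the repository:

    import cv2

    # Stand-in eye image loaded as grayscale ("eye.png" is a placeholder path).
    frame = cv2.imread("eye.png", cv2.IMREAD_GRAYSCALE)

    # Same blur as in the hunk above.
    frame_gray = cv2.GaussianBlur(frame, (5, 5), 0)

    # Assumed follow-up step for pupil detection (threshold of 70 is illustrative).
    _, frame_thresh = cv2.threshold(frame_gray, 70, 255, cv2.THRESH_BINARY_INV)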