mirror of https://github.com/EyeTrackVR/EyeTrackVR.git
synced 2025-11-04 14:39:42 +08:00

v0.0.10
add blob filter, circular cropping, adjust blob fallback settings

This commit is contained in:
parent 6cbc310dae
commit 5578da7c99
@@ -62,7 +62,7 @@ def main():
sg.Column(roi_layout, key=ROI_LAYOUT_NAME, visible=False)]]

# Create the window
-window = sg.Window('Eye Tracking', layout)
+window = sg.Window('EyeTrackVR v0.0.10', layout)

cancellation_event = threading.Event()
@@ -145,6 +145,16 @@ class Ransac:
self.previous_rotation = self.config.rotation_angle
+self.recenter_eye = False
+
+min_cutoff = 0.0004
+beta = 0.7
+noisy_point = np.array([1, 1])
+self.one_euro_filter = OneEuroFilter(
+noisy_point,
+min_cutoff=min_cutoff,
+beta=beta
+)

def output_images_and_update(self, threshold_image, output_information: EyeInformation):
if self.config.show_color_image:
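Note on the filter wired in above: OneEuroFilter is the adaptive low-pass filter of Casiez et al., constructed once with an initial point and then called with each new noisy (cx, cy). The project bundles its own implementation; the sketch below only illustrates that call pattern, and its wall-clock dt handling and default d_cutoff are assumptions rather than the repository's code.

import time
import numpy as np

def _alpha(t_e, cutoff):
    # Smoothing factor derived from the cutoff frequency and elapsed time.
    r = 2 * np.pi * cutoff * t_e
    return r / (r + 1)

class OneEuroFilter:
    def __init__(self, x0, min_cutoff=1.0, beta=0.0, d_cutoff=1.0):
        self.min_cutoff, self.beta, self.d_cutoff = min_cutoff, beta, d_cutoff
        self.x_prev = np.asarray(x0, dtype=float)
        self.dx_prev = np.zeros_like(self.x_prev)
        self.t_prev = time.time()

    def __call__(self, x):
        t = time.time()
        t_e = max(t - self.t_prev, 1e-6)
        x = np.asarray(x, dtype=float)

        # Smooth the derivative, then adapt the cutoff to the speed:
        # slow motion -> low cutoff (kills jitter), fast motion -> high cutoff (low lag).
        a_d = _alpha(t_e, self.d_cutoff)
        dx_hat = a_d * (x - self.x_prev) / t_e + (1 - a_d) * self.dx_prev

        cutoff = self.min_cutoff + self.beta * np.abs(dx_hat)
        a = _alpha(t_e, cutoff)
        x_hat = a * x + (1 - a) * self.x_prev

        self.x_prev, self.dx_prev, self.t_prev = x_hat, dx_hat, t
        return x_hat

With min_cutoff = 0.0004 and beta = 0.7 as in the diff, such a filter is very stiff for a stationary gaze but loosens quickly during fast eye movement.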
@@ -190,6 +200,37 @@ class Ransac:
self.output_images_and_update(larger_threshold, EyeInformation(InformationOrigin.FAILURE, 0, 0, False))
return

+# define circle for "cropping"
+try:
+ht, wd = self.current_image_gray.shape
+
+radius = int(float(self.lkg_projected_sphere["axes"][0]))
+
+xc = int(self.lkg_projected_sphere["center"][0])
+yc = int(self.lkg_projected_sphere["center"][1])
+
+# draw filled circle in white on black background as mask
+mask = np.zeros((ht,wd), dtype=np.uint8)
+mask = cv2.circle(mask, (xc,yc), radius, 255, -1)
+
+# create white colored background
+color = np.full_like(self.current_image_gray, (255))
+
+# apply mask to image
+masked_img = cv2.bitwise_and(self.current_image_gray, self.current_image_gray, mask=mask)
+
+# apply inverse mask to colored image
+masked_color = cv2.bitwise_and(color, color, mask=255-mask)
+
+# combine the two masked images
+self.current_image_gray = cv2.add(masked_img, masked_color)
+
+except:
+pass

try:
# Try rebuilding our contours
contours, _ = cv2.findContours(larger_threshold, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
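The circular "cropping" block added above (and repeated in the other code paths further down) is a mask-and-composite: everything outside a circle centred on the last-known projected eyeball is painted white, so the later threshold and blob steps only ever see the eye region. A minimal standalone sketch of the same idea, with illustrative names not taken from the codebase:

import cv2
import numpy as np

def crop_to_circle(gray, center, radius, fill=255):
    """Return a copy of `gray` with everything outside the circle set to `fill`."""
    ht, wd = gray.shape[:2]
    mask = np.zeros((ht, wd), dtype=np.uint8)
    cv2.circle(mask, (int(center[0]), int(center[1])), int(radius), 255, -1)
    inside = cv2.bitwise_and(gray, gray, mask=mask)                  # keep the circle
    background = np.full_like(gray, fill)
    outside = cv2.bitwise_and(background, background, mask=255 - mask)
    return cv2.add(inside, outside)                                  # composite

Filling with white rather than black matters here because the pupil is tracked as a dark blob, so the masked-out surround cannot be mistaken for the pupil after thresholding.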
@@ -219,6 +260,16 @@ class Ransac:
cv2.drawContours(self.current_image_gray, [cnt], -1, (255, 0, 0), 3)
cv2.rectangle(self.current_image_gray, (x, y), (x + w, y + h), (255, 0, 0), 2)

+try:
+noisy_point = np.array([cx, cy]) #fliter our values with a One Euro Filter
+point_hat = self.one_euro_filter(noisy_point)
+cx = point_hat[0]
+cy = point_hat[1]
+except:
+pass

eye_position_scalar = self.config.vrc_eye_position_scalar
@@ -240,7 +291,7 @@ class Ransac:
out_y = -abs(max(0.0, min(1.0, yd)))
if yu < 0:
out_y = max(0.0, min(1.0, yu))

#print(xt, yt, out_x, out_y, 'BLOB')
self.output_images_and_update(larger_threshold, EyeInformation(InformationOrigin.BLOB, out_x, out_y, False))
return
self.output_images_and_update(larger_threshold, EyeInformation(InformationOrigin.BLOB, 0, 0, True))
@@ -251,15 +302,7 @@ class Ransac:
detector_3d = Detector3D(camera=camera_model, long_term_mode=DetectorMode.blocking)

-min_cutoff = 0.0004
-beta = 0.7
-noisy_point = np.array([1, 1])
-
-one_euro_filter = OneEuroFilter(
-noisy_point,
-min_cutoff=min_cutoff,
-beta=beta
-)
while True:
# Check to make sure we haven't been requested to close
if self.cancellation_event.is_set():
@@ -295,6 +338,41 @@ class Ransac:
_, thresh = cv2.threshold(
self.current_image_gray, int(self.config.threshold), 255, cv2.THRESH_BINARY
)

+# define circle for "cropping"
+try:
+ht, wd = self.current_image_gray.shape
+
+radius = int(float(self.lkg_projected_sphere["axes"][0]))
+
+xc = int(self.lkg_projected_sphere["center"][0])
+yc = int(self.lkg_projected_sphere["center"][1])
+
+# draw filled circle in white on black background as mask
+mask = np.zeros((ht,wd), dtype=np.uint8)
+mask = cv2.circle(mask, (xc,yc), radius, 255, -1)
+
+# create white colored background
+color = np.full_like(self.current_image_gray, (255))
+
+# apply mask to image
+masked_img = cv2.bitwise_and(self.current_image_gray, self.current_image_gray, mask=mask)
+
+# apply inverse mask to colored image
+masked_color = cv2.bitwise_and(color, color, mask=255-mask)
+
+# combine the two masked images
+self.current_image_gray = cv2.add(masked_img, masked_color)
+
+except:
+pass

# Set up morphological transforms, for smoothing and clearing the image we get out of the
# thresholding operation. After this, we'd really like to just have a black blob in the middle
# of a bunch of white area.
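As context for the "morphological transforms" comment above: the usual recipe is an opening to delete small speckles followed by a closing to fill pinholes, leaving a single clean dark blob on white. A hedged sketch; the kernel size and open/close order are illustrative assumptions, not taken from this file:

import cv2
import numpy as np

def clean_threshold(binary):
    # 3x3 structuring element; larger kernels smooth more but can eat a small pupil.
    kernel = np.ones((3, 3), np.uint8)
    opened = cv2.morphologyEx(binary, cv2.MORPH_OPEN, kernel)   # drop speckles
    closed = cv2.morphologyEx(opened, cv2.MORPH_CLOSE, kernel)  # fill pinholes
    return closed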
@@ -381,7 +459,7 @@ class Ransac:
eye_position_scalar = self.config.vrc_eye_position_scalar

noisy_point = np.array([cx, cy]) #fliter our values with a One Euro Filter
-point_hat = one_euro_filter(noisy_point)
+point_hat = self.one_euro_filter(noisy_point)
cx = point_hat[0]
cy = point_hat[1]
@@ -401,7 +479,7 @@ class Ransac:
if yu > 0:
out_y = max(0.0, min(1.0, yu))

#print(cx, cy, out_x, out_y, 'RANSAC 3D')

output_info = EyeInformation(InformationOrigin.RANSAC, out_x, out_y, False)
@@ -174,9 +174,14 @@ class EyeProcessor:
self.calibration_frame_counter

min_cutoff = 0.0004
beta = 0.7
noisy_point = np.array([1, 1])
self.one_euro_filter = OneEuroFilter(
noisy_point,
min_cutoff=min_cutoff,
beta=beta
)
@@ -248,12 +253,12 @@ class EyeProcessor:
return True

def blob_tracking_fallback(self):
st = time.time()

# Increase our threshold value slightly, in order to have a better possibility of getting back
# something to do blob tracking on.
_, larger_threshold = cv2.threshold(
self.current_image_gray,
-int(self.config.threshold + 5),
+int(self.config.threshold + 25),
255,
cv2.THRESH_BINARY,
)
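The change above widens the blob fallback's threshold offset from +5 to +25 over the user-configured value, making the fallback much more permissive about what still counts as pupil when the primary pass has failed. A standalone sketch of that relationship; the function and argument names are illustrative, not from the codebase:

import cv2

def blob_fallback_threshold(gray, base_threshold, offset=25):
    # Pixels brighter than (base_threshold + offset) go to 255, the rest to 0,
    # so a larger offset leaves more of the dark pupil intact as a candidate blob.
    _, binary = cv2.threshold(gray, int(base_threshold + offset), 255, cv2.THRESH_BINARY)
    return binary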
@@ -267,6 +272,33 @@ class EyeProcessor:
)
return

+# define circle
+try:
+ht, wd = self.current_image_gray.shape[:2]
+radius = int(float(self.lkg_projected_sphere["axes"][0]))
+xc = yc = radius
+
+# draw filled circle in white on black background as mask
+mask = np.zeros((ht,wd), dtype=np.uint8)
+mask = cv2.circle(mask, (xc,yc), radius, 255, -1)
+
+# create white colored background
+color = np.full_like(self.current_image_gray, (255))
+
+# apply mask to image
+masked_img = cv2.bitwise_and(self.current_image_gray, self.current_image_gray, mask=mask)
+
+# apply inverse mask to colored image
+masked_color = cv2.bitwise_and(color, color, mask=255-mask)
+
+# combine the two masked images
+self.current_image_gray = cv2.add(masked_img, masked_color)
+except:
+pass

try:
# Try rebuilding our contours
contours, _ = cv2.findContours(
@@ -319,10 +351,15 @@ class EyeProcessor:
)

eye_position_scalar = self.config.vrc_eye_position_scalar
# initfilter(cx, cy)
# fd = filter_smooth(cx, cy, st)
#cx = fd[0]
# cy = fd[1]

try:
noisy_point = np.array([cx, cy]) #fliter our values with a One Euro Filter
point_hat = self.one_euro_filter(noisy_point)
cx = point_hat[0]
cy = point_hat[1]
except:
pass

xl = float(
((cx - self.xoff) * eye_position_scalar) / (self.xmax - self.xoff)
@@ -369,16 +406,11 @@ class EyeProcessor:
xf = []
yf = []
pd = []
out_pupil_dialation = 1
-min_cutoff = 0.0004
-beta = 0.7
-noisy_point = np.array([1, 1])
-
-one_euro_filter = OneEuroFilter(
-noisy_point,
-min_cutoff=min_cutoff,
-beta=beta
-)
out_pupil_dialation = 1

while True:
# oef = init_filter()
@@ -445,6 +477,43 @@ class EyeProcessor:
255,
cv2.THRESH_BINARY,
)

+# define circle for "cropping"
+try:
+ht, wd = self.current_image_gray.shape
+
+radius = int(float(self.lkg_projected_sphere["axes"][0]))
+
+xc = int(self.lkg_projected_sphere["center"][0])
+yc = int(self.lkg_projected_sphere["center"][1])
+
+# draw filled circle in white on black background as mask
+mask = np.zeros((ht,wd), dtype=np.uint8)
+mask = cv2.circle(mask, (xc,yc), radius, 255, -1)
+
+# create white colored background
+color = np.full_like(self.current_image_gray, (255))
+
+# apply mask to image
+masked_img = cv2.bitwise_and(self.current_image_gray, self.current_image_gray, mask=mask)
+
+# apply inverse mask to colored image
+masked_color = cv2.bitwise_and(color, color, mask=255-mask)
+
+# combine the two masked images
+self.current_image_gray = cv2.add(masked_img, masked_color)
+
+except:
+pass

# Set up morphological transforms, for smoothing and clearing the image we get out of the
# thresholding operation. After this, we'd really like to just have a black blob in the middle
# of a bunch of white area.
@@ -520,6 +589,8 @@ class EyeProcessor:

if self.calibration_frame_counter == 0 or self.recenter_eye:
self.calibration_frame_counter = None
self.recenter_eye = False
@@ -541,7 +612,7 @@ class EyeProcessor:

noisy_point = np.array([cx, cy]) #fliter our values with a One Euro Filter
-point_hat = one_euro_filter(noisy_point)
+point_hat = self.one_euro_filter(noisy_point)
cx = point_hat[0]
cy = point_hat[1]
@@ -553,10 +624,10 @@ class EyeProcessor:
((cx - self.xoff) * eye_position_scalar) / (self.xmin - self.xoff)
)
yu = float(
-((cy - self.yoff) * eye_position_scalar) / (self.ymax - self.yoff)
+((cy - self.yoff) * eye_position_scalar) / (self.ymin - self.yoff)
)
yd = float(
-((cy - self.yoff) * eye_position_scalar) / (self.ymin - self.yoff)
+((cy - self.yoff) * eye_position_scalar) / (self.ymax - self.yoff)
)
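The swap above makes each vertical component normalise against its matching calibrated extreme: yu against ymin and yd against ymax. A small sketch of the corrected scaling; variable names are copied from the diff, but the helper itself is only illustrative:

def vertical_components(cy, yoff, ymin, ymax, scalar):
    # Re-centre on the calibration offset, scale, and divide by the distance to
    # the matching calibrated extreme; each result is later clamped to [0, 1].
    yu = ((cy - yoff) * scalar) / (ymin - yoff)
    yd = ((cy - yoff) * scalar) / (ymax - yoff)
    return yu, yd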
@@ -581,9 +652,15 @@ class EyeProcessor:
output_info = EyeInformation(InformationOrigin.RANSAC, out_x, out_y, out_pupil_dialation, False)

# Draw our image and stack it for visual output
-cv2.drawContours(self.current_image_gray, contours, -1, (255, 0, 0), 1)
+try:
+cv2.drawContours(self.current_image_gray, contours, -1, (255, 0, 0), 1)
+cv2.circle(self.current_image_gray, (int(cx), int(cy)), 2, (0, 0, 255), -1)
# draw pupil
+except:
+pass

# draw pupil
try:
cv2.ellipse(
self.current_image_gray,
@@ -598,6 +675,25 @@ class EyeProcessor:
# Sometimes we get bogus axes and trying to draw this throws. Ideally we should check for
# validity beforehand, but for now just pass. It usually fixes itself on the next frame.
pass

+try:
+# print(self.lkg_projected_sphere["angle"], self.lkg_projected_sphere["axes"], self.lkg_projected_sphere["center"])
+cv2.ellipse(
+self.current_image_gray,
+tuple(int(v) for v in self.lkg_projected_sphere["center"]),
+tuple(int(v) for v in self.lkg_projected_sphere["axes"]),
+self.lkg_projected_sphere["angle"],
+0,
+360, # start/end angle for drawing
+(0, 255, 0), # color (BGR): red
+)
+except:
+pass

# draw line from center of eyeball to center of pupil
cv2.line(
self.current_image_gray,
@@ -91,7 +91,7 @@ def main():
eyes[1].start()

# Create the window
-window = sg.Window("Eye Tracking", layout)
+window = sg.Window("EyeTrackVR v0.0.10", layout)

# GUI Render loop
while True: