small update, flow POC

Prohurtz 2022-12-28 10:23:01 -08:00
parent 77b2389a03
commit bda5047752
2 changed files with 132 additions and 245 deletions

View File

@@ -197,17 +197,19 @@ class EyeProcessor:
         )
     def output_images_and_update(self, threshold_image, output_information: EyeInformation):
-        image_stack = np.concatenate(
-            (
-                cv2.cvtColor(self.current_image_gray, cv2.COLOR_GRAY2BGR),
-                cv2.cvtColor(threshold_image, cv2.COLOR_GRAY2BGR),
-            ),
-            axis=1,
-        )
-        self.image_queue_outgoing.put((image_stack, output_information))
-        self.previous_image = self.current_image
-        self.previous_rotation = self.config.rotation_angle
+        try:
+            image_stack = np.concatenate(
+                (
+                    cv2.cvtColor(self.current_image_gray, cv2.COLOR_GRAY2BGR),
+                    cv2.cvtColor(threshold_image, cv2.COLOR_GRAY2BGR),
+                ),
+                axis=1,
+            )
+            self.image_queue_outgoing.put((image_stack, output_information))
+            self.previous_image = self.current_image
+            self.previous_rotation = self.config.rotation_angle
+        except:
+            print("E")
 
     def capture_crop_rotate_image(self):
         # Get our current frame
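
Editor's note on the hunk above: the new bare except prints only "E", which hides the actual failure. A narrower variant is sketched below; it assumes image_queue_outgoing is a standard queue.Queue and that the realistic failure points are the OpenCV conversion and a full queue, neither of which is confirmed by the commit itself.

import queue
import cv2
import numpy as np

def output_images_and_update(self, threshold_image, output_information):
    try:
        image_stack = np.concatenate(
            (
                cv2.cvtColor(self.current_image_gray, cv2.COLOR_GRAY2BGR),
                cv2.cvtColor(threshold_image, cv2.COLOR_GRAY2BGR),
            ),
            axis=1,
        )
    except cv2.error as exc:
        # Conversion or stacking failed (e.g. an empty frame); skip this update.
        print(f"[WARN] output_images_and_update: {exc}")
        return
    try:
        # Drop the frame instead of blocking when the GUI is not consuming fast enough.
        self.image_queue_outgoing.put_nowait((image_stack, output_information))
    except queue.Full:
        pass
    self.previous_image = self.current_image
    self.previous_rotation = self.config.rotation_angle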
@@ -249,151 +251,44 @@ class EyeProcessor:
         except:
             pass
 
-    def HSF(self):
-
-        frame = self.current_image_gray
-        if self.now_mode == self.cv_mode[1]:
-            prev_res_len = len(self.response_list)
-            # adjustment of radius
-            if prev_res_len == 1:
-                # len==1==self.response_list==[self.settings.gui_HSF_radius]
-                self.cvparam.radius = self.auto_radius_range[0]
-            elif prev_res_len == 2:
-                # len==2==self.response_list==[self.settings.gui_HSF_radius, self.auto_radius_range[0]]
-                self.cvparam.radius = self.auto_radius_range[1]
-            elif prev_res_len == 3:
-                # len==3==self.response_list==[self.settings.gui_HSF_radius,self.auto_radius_range[0],self.auto_radius_range[1]]
-                sort_res = sorted(self.response_list, key=lambda x: x[1])[0]
-                # Extract the radius with the lowest response value
-                if sort_res[0] == self.settings.gui_HSF_radius:
-                    # If the default value is best, change self.now_mode to init after setting radius to the default value.
-                    self.cvparam.radius = self.settings.gui_HSF_radius
-                    self.now_mode = self.cv_mode[2] if not self.skip_blink_detect else self.cv_mode[3]
-                    self.response_list = []
-                elif sort_res[0] == self.auto_radius_range[0]:
-                    self.radius_cand_list = [i for i in range(self.auto_radius_range[0], self.settings.gui_HSF_radius, self.default_step[0])][1:]
-                    # self.default_step is defined separately for xy, but radius is shared by xy, so it may be buggy
-                    # It should be no problem to set it to anything other than self.default_step
-                    self.cvparam.radius = self.radius_cand_list.pop()
-                else:
-                    self.radius_cand_list = [i for i in range(self.settings.gui_HSF_radius, self.auto_radius_range[1], self.default_step[0])][1:]
-                    # self.default_step is defined separately for xy, but radius is shared by xy, so it may be buggy
-                    # It should be no problem to set it to anything other than self.default_step
-                    self.cvparam.radius = self.radius_cand_list.pop()
-            else:
-                # Try the contents of the self.radius_cand_list in order until the self.radius_cand_list runs out
-                # Better make it a binary search.
-                if len(self.radius_cand_list) == 0:
-                    sort_res = sorted(self.response_list, key=lambda x: x[1])[0]
-                    self.cvparam.radius = sort_res[0]
-                    self.now_mode = self.cv_mode[2] if not self.skip_blink_detect else self.cv_mode[3]
-                    self.response_list = []
-                else:
-                    self.cvparam.radius = self.radius_cand_list.pop()
-
-        radius, pad, step, hsf = self.cvparam.get_rpsh()
-        # For measuring processing time of image processing
-        cv_start_time = timeit.default_timer()
-        gray_frame = frame
-        # Calculate the integral image of the frame
-        int_start_time = timeit.default_timer()
-        # BORDER_CONSTANT is faster than BORDER_REPLICATE There seems to be almost no negative impact when BORDER_CONSTANT is used.
-        frame_pad = cv2.copyMakeBorder(gray_frame, pad, pad, pad, pad, cv2.BORDER_CONSTANT)
-        frame_int = cv2.integral(frame_pad)
-        # Convolve the feature with the integral image
-        conv_int_start_time = timeit.default_timer()
-        xy_step = frameint_get_xy_step(frame_int.shape, step, pad, start_offset=None, end_offset=None)
-        frame_conv, response, center_xy = conv_int(frame_int, hsf, step, pad, xy_step)
-        crop_start_time = timeit.default_timer()
-        # Define the center point and radius
-        center_x, center_y = center_xy
-        upper_x = center_x + 25 #TODO make this a setting
-        lower_x = center_x - 25
-        upper_y = center_y + 25
-        lower_y = center_y - 25
-        # Crop the image using the calculated bounds
-        cropped_image = gray_frame[lower_y:upper_y, lower_x:upper_x] # y is 50px, x is 45? why?
-        if self.now_mode == self.cv_mode[0] or self.now_mode == self.cv_mode[1]:
-            # If mode is first_frame or radius_adjust, record current radius and response
-            self.response_list.append((radius, response))
-        elif self.now_mode == self.cv_mode[2]:
-            # Statistics for blink detection
-            if len(self.response_list) < self.blink_init_frames:
-                # Record the average value of cropped_image
-                self.response_list.append(cv2.mean(cropped_image)[0])
-            else:
-                # Calculate self.response_max by computing interquartile range, IQR
-                # Change self.cv_mode to normal
-                self.response_list = np.array(self.response_list)
-                # 25%,75%
-                # This value may need to be adjusted depending on the environment.
-                quartile_1, quartile_3 = np.percentile(self.response_list, [25, 75])
-                iqr = quartile_3 - quartile_1
-                # response_min = quartile_1 - (iqr * 1.5)
-                self.response_max = quartile_3 + (iqr * 1.5)
-                self.now_mode = self.cv_mode[3]
-        else:
-            if 0 in cropped_image.shape:
-                # If shape contains 0, it is not detected well.
-                print("[WARN] HSF: Something's wrong.")
-            else:
-                # If the average value of cropped_image is greater than self.response_max
-                # (i.e., if the cropimage is whitish
-                if self.response_max is not None and cv2.mean(cropped_image)[0] > self.response_max:
-                    # blink
-                    cv2.circle(frame, (center_x, center_y), 10, (0, 0, 255), -1)
-        # If you want to update self.response_max. it may be more cost-effective to rewrite self.response_list in the following way
-        # https://stackoverflow.com/questions/42771110/fastest-way-to-left-cycle-a-numpy-array-like-pop-push-for-a-queue
-
-        out_x, out_y = cal_osc(self, center_x, center_y)
-        cv2.circle(frame, (center_x, center_y), 10, (0, 0, 255), -1)
-        # print(center_x, center_y)
-        try:
-            if self.settings.gui_BLINK: #tbh this is redundant, the algo already has blink detection built in
-                self.output_images_and_update(frame, EyeInformation(InformationOrigin.HSF, out_x, out_y, 0, self.blinkvalue))
-            else:
-                self.output_images_and_update(frame, EyeInformation(InformationOrigin.HSF, out_x, out_y, 0, False))
-            self.failed = 0
-        except:
-            if self.settings.gui_BLINK: #tbh this is redundant, the algo already has blink detection built in
-                self.output_images_and_update(frame, EyeInformation(InformationOrigin.HSF, 0, 0, 0, self.blinkvalue))
-            else:
-                self.output_images_and_update(frame, EyeInformation(InformationOrigin.HSF, 0, 0, 0, False))
-            self.failed = self.failed + 1
-        if self.now_mode != self.cv_mode[0] and self.now_mode != self.cv_mode[1]:
-            if cropped_image.size < 400:
-                pass
-        if self.now_mode == self.cv_mode[0]:
-            self.now_mode = self.cv_mode[1]
-        return
-        #self.output_images_and_update(thresh, EyeInformation(InformationOrigin.FAILURE, 0, 0, 0, False))
-        # return
-        #self.output_images_and_update(larger_threshold,EyeInformation(InformationOrigin.HSF, out_x, out_y, 0, False),)
-        # return
-        #self.output_images_and_update(larger_threshold, EyeInformation(InformationOrigin.HSF, 0, 0, 0, True))
+    def HSRACM(self):
+        cx, cy, thresh = HSRAC(self)
+        out_x, out_y = cal_osc(self, cx, cy)
+        if cx == 0:
+            self.output_images_and_update(thresh, EyeInformation(InformationOrigin.HSRAC, out_x, out_y, 0, True)) #update app
+        else:
+            self.output_images_and_update(thresh, EyeInformation(InformationOrigin.HSRAC, out_x, out_y, 0, self.blinkvalue))
+
+    def HSFM(self):
+        cx, cy, frame = HSF(self)
+        out_x, out_y = cal_osc(self, cx, cy)
+        if cx == 0:
+            self.output_images_and_update(frame, EyeInformation(InformationOrigin.HSF, out_x, out_y, 0, True)) #update app
+        else:
+            self.output_images_and_update(frame, EyeInformation(InformationOrigin.HSF, out_x, out_y, 0, self.blinkvalue))
+
+    def RANSAC3DM(self):
+        cx, cy, thresh = RANSAC3D(self)
+        out_x, out_y = cal_osc(self, cx, cy)
+        if cx == 0:
+            self.output_images_and_update(thresh, EyeInformation(InformationOrigin.RANSAC, out_x, out_y, 0, True)) #update app
+        else:
+            self.output_images_and_update(thresh, EyeInformation(InformationOrigin.RANSAC, out_x, out_y, 0, self.blinkvalue))
+
+    def BLOBM(self):
+        cx, cy, thresh = BLOB(self)
+        out_x, out_y = cal_osc(self, cx, cy)
+        if cx == 0:
+            self.output_images_and_update(thresh, EyeInformation(InformationOrigin.HSRAC, out_x, out_y, 0, True)) #update app
+        else:
+            self.output_images_and_update(thresh, EyeInformation(InformationOrigin.HSRAC, out_x, out_y, 0, self.blinkvalue))
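
Editor's note on the hunk above: the four new *M wrappers share one shape, namely run an algorithm, smooth the result with cal_osc, and publish an EyeInformation whose blink flag is forced to True when cx comes back as 0. A hypothetical consolidation, not part of this commit and with a made-up helper name, could look like:

def _run_and_publish(self, algo, origin):
    # algo is a module-level function such as HSRAC, HSF, RANSAC3D or BLOB.
    cx, cy, image = algo(self)
    out_x, out_y = cal_osc(self, cx, cy)          # filter / calibrate the raw centre
    blink = True if cx == 0 else self.blinkvalue  # cx == 0 is treated as "nothing found"
    self.output_images_and_update(image, EyeInformation(origin, out_x, out_y, 0, blink))

def HSRACM(self):
    self._run_and_publish(HSRAC, InformationOrigin.HSRAC)

def BLOBM(self):
    # Note: in the commit, BLOBM also reports InformationOrigin.HSRAC rather than a BLOB-specific origin.
    self._run_and_publish(BLOB, InformationOrigin.HSRAC)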
@@ -403,14 +298,14 @@ class EyeProcessor:
         if self.failed == 0 and self.firstalgo != None:
             print('first')
             self.firstalgo()
         else:
             self.failed = self.failed + 1
 
         if self.failed == 1 and self.secondalgo != None:
-            print('2nd')
-            self.secondalgo() #send the tracking algos previous fail number, in algo if we pass set to 0, if fail, + 1
+            print('2nd') #send the tracking algos previous fail number, in algo if we pass set to 0, if fail, + 1
+            self.secondalgo()
         else:
             self.failed = self.failed + 1
@@ -427,7 +322,7 @@ class EyeProcessor:
         else:
             self.failed = 0 # we have reached last possible algo and it is disabled, move to first algo
+            print(self.failed)
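
Taken together, the failed counter and the per-slot checks above implement a priority fallback: each algorithm is expected to reset self.failed to 0 when it succeeds and bump it when it does not, so the next pass falls through to the next slot. A rough sketch of that intent, with a hypothetical loop in place of the unrolled ifs (names and structure are illustrative, not the committed implementation):

def _algoselect_sketch(self):
    algos = (self.firstalgo, self.secondalgo, self.thirdalgo, self.fourthalgo)
    for i, algo in enumerate(algos):
        if self.failed == i and algo is not None:
            algo()      # the algo itself sets self.failed to 0 on success or i + 1 on failure
            return
    self.failed = 0     # the last enabled slot already failed or nothing matched; wrap back to the first algorithm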
@@ -438,47 +333,42 @@ class EyeProcessor:
         self.thirdalgo = None
         self.fourthalgo = None
         #set algo priorities
+        """"
         if self.settings.gui_HSF and self.settings.gui_HSFP == 1: #I feel like this is super innefficient though it only runs at startup and no solution is coming to me atm
-            self.firstalgo = self.HSF
+            self.firstalgo = self.HSFM
         elif self.settings.gui_HSF and self.settings.gui_HSFP == 2:
-            self.secondalgo = self.HSF
+            self.secondalgo = self.HSFM
         elif self.settings.gui_HSF and self.settings.gui_HSFP == 3:
-            self.thirdalgo = self.HSF
+            self.thirdalgo = self.HSFM
         elif self.settings.gui_HSF and self.settings.gui_HSFP == 4:
-            self.fourthalgo = self.HSF
+            self.fourthalgo = self.HSFM
         if self.settings.gui_RANSAC3D and self.settings.gui_RANSAC3DP == 1:
-            self.firstalgo = self.RANSAC3D
+            self.firstalgo = self.RANSAC3DM
         elif self.settings.gui_RANSAC3D and self.settings.gui_RANSAC3DP == 2:
-            self.secondalgo = self.RANSAC3D
+            self.secondalgo = self.RANSAC3DM
         elif self.settings.gui_RANSAC3D and self.settings.gui_RANSAC3DP == 3:
-            self.thirdalgo = self.RANSAC3D
+            self.thirdalgo = self.RANSAC3DM
         elif self.settings.gui_RANSAC3D and self.settings.gui_RANSAC3DP == 4:
-            self.fourthalgo = self.RANSAC3D
+            self.fourthalgo = self.RANSAC3DM
         if self.settings.gui_HSRAC and self.settings.gui_HSRACP == 1:
-            self.firstalgo = self.HSRAC
+            self.firstalgo = self.HSRACM
         elif self.settings.gui_HSRAC and self.settings.gui_HSRACP == 2:
-            self.secondalgo = self.HSRAC
+            self.secondalgo = self.HSRACM
         elif self.settings.gui_HSRAC and self.settings.gui_HSRACP == 3:
-            self.thirdalgo = self.HSRAC
+            self.thirdalgo = self.HSRACM
         elif self.settings.gui_HSRAC and self.settings.gui_HSRACP == 4:
-            self.fourthalgo = self.HSRAC
+            self.fourthalgo = self.HSRACM
         if self.settings.gui_BLOB and self.settings.gui_BLOBP == 1:
-            self.firstalgo = self.BLOB
+            self.firstalgo = self.BLOBM
         elif self.settings.gui_BLOB and self.settings.gui_BLOBP == 2:
-            self.secondalgo = self.BLOB
+            self.secondalgo = self.BLOBM
         elif self.settings.gui_BLOB and self.settings.gui_BLOBP == 3:
-            self.thirdalgo = self.BLOB
+            self.thirdalgo = self.BLOBM
         elif self.settings.gui_BLOB and self.settings.gui_BLOBP == 4:
-            self.fourthalgo = self.BLOB
+            self.fourthalgo = self.BLOBM
+        """
+        # if self.settings.gui_BLOBP
+        # if self.settings.gui_HSFP
+        # if self.settings.gui_RANSAC3DP
 
         f = True
         while True:
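
The if/elif ladder above (now fenced off behind the triple-quoted string) is the part the inline comment calls inefficient. One hypothetical replacement, using the same gui_* settings that appear in the diff, would fill the four priority slots from a table; the structure below is illustrative only and is not part of the commit:

candidates = [
    (self.settings.gui_HSFP,      self.HSFM,      self.settings.gui_HSF),
    (self.settings.gui_RANSAC3DP, self.RANSAC3DM, self.settings.gui_RANSAC3D),
    (self.settings.gui_HSRACP,    self.HSRACM,    self.settings.gui_HSRAC),
    (self.settings.gui_BLOBP,     self.BLOBM,     self.settings.gui_BLOB),
]
slots = [None, None, None, None]
for priority, algo, enabled in candidates:
    if enabled and 1 <= priority <= 4:
        slots[priority - 1] = algo
self.firstalgo, self.secondalgo, self.thirdalgo, self.fourthalgo = slots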
@@ -536,14 +426,14 @@ class EyeProcessor:
             self.current_image_gray_clean = self.current_image_gray.copy() #copy this frame to have a clean image for blink algo
             # print(self.settings.gui_RANSAC3D)
 
-            BLINK(self)
-            cx, cy, thresh = HSRAC(self)
-            out_x, out_y = cal_osc(self, cx, cy)
-            if cx == 0:
-                self.output_images_and_update(thresh, EyeInformation(InformationOrigin.HSRAC, out_x, out_y, 0, True)) #update app
-            else:
-                self.output_images_and_update(thresh, EyeInformation(InformationOrigin.HSRAC, out_x, out_y, 0, self.blinkvalue))
+            # BLINK(self)
+            # cx, cy, thresh = HSRAC(self)
+            # out_x, out_y = cal_osc(self, cx, cy)
+            # if cx == 0:
+            #     self.output_images_and_update(thresh, EyeInformation(InformationOrigin.HSRAC, out_x, out_y, 0, True)) #update app
+            # else:
+            #     self.output_images_and_update(thresh, EyeInformation(InformationOrigin.HSRAC, out_x, out_y, 0, self.blinkvalue))
 
             # cx, cy, thresh = RANSAC3D(self)
@@ -559,7 +449,7 @@ class EyeProcessor:
             #out_x, out_y = cal_osc(self, center_x, center_y) #filter and calibrate
             #self.output_images_and_update(frame, EyeInformation(InformationOrigin.HSF, out_x, out_y, 0, False)) #update app
 
-            # self.ALGOSELECT() #run our algos in priority order set in settings
+            self.ALGOSELECT() #run our algos in priority order set in settings
View File

@@ -14,8 +14,8 @@ lru_maxsize_vvs = 16
 lru_maxsize_vs = 64
 # CV param
-default_radius = 15
-auto_radius_range = (default_radius - 10, default_radius + 10) # (10,30)
+#default_radius = 15
+#auto_radius_range = (default_radius - 10, default_radius + 10) # (10,30)
 blink_init_frames = 60 * 3 # 60fps*3sec,Number of blink statistical frames
 # step==(x,y)
 default_step = (5, 5) # bigger the steps,lower the processing time! ofc acc also takes an impact
@@ -205,6 +205,7 @@ class CvParameters:
         # self.prev_step=step
         self._step = step
         self._hsf = HaarSurroundFeature(radius)
 
     def get_rpsh(self):
         return self._radius, self.pad, self._step, self._hsf
@@ -570,67 +571,63 @@ def HSRAC(self):
     radius, pad, step, hsf = self.cvparam.get_rpsh()
-    # For measuring processing time of image processing
-    cv_start_time = timeit.default_timer()
     gray_frame = frame
 
-    # Calculate the integral image of the frame
-    int_start_time = timeit.default_timer()
-    # BORDER_CONSTANT is faster than BORDER_REPLICATE There seems to be almost no negative impact when BORDER_CONSTANT is used.
-    frame_pad = cv2.copyMakeBorder(gray_frame, pad, pad, pad, pad, cv2.BORDER_CONSTANT)
-    frame_int = cv2.integral(frame_pad)
-
-    # Convolve the feature with the integral image
-    conv_int_start_time = timeit.default_timer()
-    xy_step = frameint_get_xy_step(frame_int.shape, step, pad, start_offset=None, end_offset=None)
-    frame_conv, response, center_xy = conv_int(frame_int, hsf, step, pad, xy_step)
-
-    crop_start_time = timeit.default_timer()
-    # Define the center point and radius
-    center_x, center_y = center_xy
-    upper_x = center_x + 25 #TODO make this a setting
-    lower_x = center_x - 25
-    upper_y = center_y + 25
-    lower_y = center_y - 25
-
-    # Crop the image using the calculated bounds
-    cropped_image = gray_frame[lower_y:upper_y, lower_x:upper_x] # y is 50px, x is 45? why?
-
-    if self.now_mode == self.cv_mode[0] or self.now_mode == self.cv_mode[1]:
-        # If mode is first_frame or radius_adjust, record current radius and response
-        self.response_list.append((radius, response))
-    elif self.now_mode == self.cv_mode[2]:
-        # Statistics for blink detection
-        if len(self.response_list) < self.blink_init_frames:
-            # Record the average value of cropped_image
-            self.response_list.append(cv2.mean(cropped_image)[0])
-        else:
-            # Calculate self.response_max by computing interquartile range, IQR
-            # Change self.cv_mode to normal
-            self.response_list = np.array(self.response_list)
-            # 25%,75%
-            # This value may need to be adjusted depending on the environment.
-            quartile_1, quartile_3 = np.percentile(self.response_list, [25, 75])
-            iqr = quartile_3 - quartile_1
-            # response_min = quartile_1 - (iqr * 1.5)
-            self.response_max = quartile_3 + (iqr * 1.5)
-            self.now_mode = self.cv_mode[3]
-    else:
-        if 0 in cropped_image.shape:
-            # If shape contains 0, it is not detected well.
-            print("Something's wrong.")
-        else:
-            # If the average value of cropped_image is greater than self.response_max
-            # (i.e., if the cropimage is whitish
-            if self.response_max is not None and cv2.mean(cropped_image)[0] > self.response_max:
-                # blink
-                cv2.circle(frame, (center_x, center_y), 10, (0, 0, 255), -1)
-    # If you want to update self.response_max. it may be more cost-effective to rewrite self.response_list in the following way
-    # https://stackoverflow.com/questions/42771110/fastest-way-to-left-cycle-a-numpy-array-like-pop-push-for-a-queue
+    try:
+        # BORDER_CONSTANT is faster than BORDER_REPLICATE There seems to be almost no negative impact when BORDER_CONSTANT is used.
+        frame_pad = cv2.copyMakeBorder(gray_frame, pad, pad, pad, pad, cv2.BORDER_CONSTANT)
+        frame_int = cv2.integral(frame_pad)
+
+        # Convolve the feature with the integral image
+        conv_int_start_time = timeit.default_timer()
+        xy_step = frameint_get_xy_step(frame_int.shape, step, pad, start_offset=None, end_offset=None)
+        frame_conv, response, center_xy = conv_int(frame_int, hsf, step, pad, xy_step)
+
+        crop_start_time = timeit.default_timer()
+        # Define the center point and radius
+        center_x, center_y = center_xy
+        upper_x = center_x + 25 #TODO make this a setting
+        lower_x = center_x - 25
+        upper_y = center_y + 25
+        lower_y = center_y - 25
+
+        # Crop the image using the calculated bounds
+        cropped_image = gray_frame[lower_y:upper_y, lower_x:upper_x] # y is 50px, x is 45? why?
+
+        if self.now_mode == self.cv_mode[0] or self.now_mode == self.cv_mode[1]:
+            # If mode is first_frame or radius_adjust, record current radius and response
+            self.response_list.append((radius, response))
+        elif self.now_mode == self.cv_mode[2]:
+            # Statistics for blink detection
+            if len(self.response_list) < self.blink_init_frames:
+                # Record the average value of cropped_image
+                self.response_list.append(cv2.mean(cropped_image)[0])
+            else:
+                # Calculate self.response_max by computing interquartile range, IQR
+                # Change self.cv_mode to normal
+                self.response_list = np.array(self.response_list)
+                # 25%,75%
+                # This value may need to be adjusted depending on the environment.
+                quartile_1, quartile_3 = np.percentile(self.response_list, [25, 75])
+                iqr = quartile_3 - quartile_1
+                # response_min = quartile_1 - (iqr * 1.5)
+                self.response_max = quartile_3 + (iqr * 1.5)
+                self.now_mode = self.cv_mode[3]
+        else:
+            if 0 in cropped_image.shape:
+                # If shape contains 0, it is not detected well.
+                print("Something's wrong.")
+            else:
+                # If the average value of cropped_image is greater than self.response_max
+                # (i.e., if the cropimage is whitish
+                if self.response_max is not None and cv2.mean(cropped_image)[0] > self.response_max:
+                    # blink
+                    cv2.circle(frame, (center_x, center_y), 10, (0, 0, 255), -1)
+        # If you want to update self.response_max. it may be more cost-effective to rewrite self.response_list in the following way
+        # https://stackoverflow.com/questions/42771110/fastest-way-to-left-cycle-a-numpy-array-like-pop-push-for-a-queue
+    except:
+        return 0, 0, frame
 
     #run ransac on the HSF crop\
     try:
         kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
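
For reference, the blink logic kept inside the new try block works as follows: while in the statistics mode it records the mean brightness of the cropped pupil region for blink_init_frames frames, then fixes response_max at Q3 + 1.5*IQR of those samples; afterwards any crop whose mean exceeds that threshold is treated as a blink (per the existing comments, a whitish crop means the eyelid covers the pupil). A minimal standalone sketch of that calculation, for illustration only:

import numpy as np

def blink_threshold(samples):
    # samples: per-frame mean brightness of the cropped pupil region
    q1, q3 = np.percentile(np.asarray(samples), [25, 75])
    return q3 + 1.5 * (q3 - q1)

def is_blink(crop_mean, response_max):
    return response_max is not None and crop_mean > response_max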
@@ -724,7 +721,7 @@ def HSRAC(self):
         cv2.circle(self.current_image_gray, min_loc, 2, (0, 0, 255),
                    -1) # the point of the darkest area in the image
     try:
-        print(radius)
+        # print(radius)
         return out_x, out_y, thresh
     except: