Merge pull request #72 from PallasNeko/feature/HSRAC-imp

HSF and HSRAC improvements
Prohurtz 2023-03-16 10:00:55 -05:00 committed by GitHub
commit 0527bddc1b
5 changed files with 1793 additions and 412 deletions

.gitignore (vendored): 4 changes

@@ -3,9 +3,11 @@
**/__pycache__/**
**/*.pyc
*.cfg
*.log
EyeTrackApp/eyetrack_settings.json
EyeTrackApp/eyetrack_settings.backup
EyeTrackApp/build
EyeTrackApp/dist
EyeTrackApp/IBO_LEFT.png
EyeTrackApp/IBO_RIGHT.png
EyeTrackApp/eyetrack_settings.backup

File diff suppressed because it is too large


@@ -1 +0,0 @@
{"version": 1, "right_eye": {"rotation_angle": 168, "roi_window_x": 225, "roi_window_y": 161, "roi_window_w": 101, "roi_window_h": 68, "focal_length": 30, "capture_source": "http://192.168.1.43:4747/video", "gui_circular_crop": false}, "left_eye": {"rotation_angle": 37, "roi_window_x": 146, "roi_window_y": 108, "roi_window_w": 115, "roi_window_h": 110, "focal_length": 30, "capture_source": "http://192.168.0.62", "gui_circular_crop": false}, "settings": {"gui_flip_x_axis_left": false, "gui_flip_x_axis_right": false, "gui_flip_y_axis": false, "gui_RANSAC3D": true, "gui_HSF": true, "gui_BLOB": false, "gui_BLINK": true, "gui_HSRAC": true, "gui_HSF_radius": 15, "gui_min_cutoff": "0.0004", "gui_speed_coefficient": "0.9", "gui_osc_address": "127.0.0.1", "gui_osc_port": 9000, "gui_osc_receiver_port": 9001, "gui_osc_recenter_address": "/avatar/parameters/etvr_recenter", "gui_osc_recalibrate_address": "/avatar/parameters/etvr_recalibrate", "gui_blob_maxsize": 25.0, "gui_blob_minsize": 10.0, "gui_recenter_eyes": false, "gui_eye_falloff": false, "tracker_single_eye": 2, "gui_blink_sync": false, "gui_threshold": 65, "gui_HSRACP": 1, "gui_RANSAC3DP": 2, "gui_HSFP": 3, "gui_BLOBP": 4, "gui_skip_autoradius": true, "gui_thresh_add": 20, "gui_update_check": true}, "eye_display_id": 0}


@@ -13,7 +13,7 @@ from utils.img_utils import safe_crop
video_path = "ezgif.com-gif-maker.avi"
imshow_enable = False
calc_print_enable = True
calc_print_enable = False
save_video = False
skip_autoradius = False
skip_blink_detect = False
@@ -21,6 +21,7 @@ skip_blink_detect = False
# cache param
lru_maxsize_vvs = 16
lru_maxsize_vs = 64
lru_maxsize_s = 128
# CV param
default_radius = 20
auto_radius_range = (default_radius - 10, default_radius + 10) # (10,30)
@@ -91,8 +92,8 @@ class HaarSurroundFeature:
val_inner = val[0]
val_outer = val[1]
self.val_in = np.array(val_inner, dtype=np.float64)
self.val_out = np.array(val_outer, dtype=np.float64)
self.val_in = float(val_inner) # np.array(val_inner, dtype=np.float64)
self.val_out = float(val_outer) # np.array(val_outer, dtype=np.float64)
self.r_in = r_inner
self.r_out = r_outer
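
For intuition: a Haar surround feature's inner and outer weights are typically chosen so the kernel sums to zero over a flat region; a dark pupil inside a brighter surround then drives the response strongly negative, which is why the search later looks for the minimum response. A minimal sketch of one such weighting (the exact values HaarSurroundFeature computes may differ; this normalization is an assumption for illustration):

import numpy as np

def surround_weights(r_in, r_out):
    # Hypothetical zero-sum weighting over the axis-aligned boxes the
    # integral image sums: inner box positive, surrounding ring negative,
    # so a flat image yields a response of ~0.
    area_in = (2 * r_in) ** 2
    area_ring = (2 * r_out) ** 2 - area_in
    return 1.0 / area_in, -1.0 / area_ring

val_in, val_out = surround_weights(r_in=20, r_out=60)
# response = val_in * inner_sum + val_out * outer_sum is ~0 on flat
# regions and strongly negative on a dark blob surrounded by bright pixels.
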
@@ -115,135 +116,101 @@ def to_gray(frame):
return cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
@lru_cache(maxsize=lru_maxsize_vs)
def frameint_get_xy_step(imageshape, xysteps, pad, start_offset=None, end_offset=None):
"""
:param imageshape: (height(row),width(col)). row==y,col==x
:param xysteps: (x,y)
:param pad: int
:param start_offset: (x,y) or None
:param end_offset: (x,y) or None
:return: xy_np:tuple(x,y)
"""
row, col = imageshape
row -= 1
col -= 1
x_step, y_step = xysteps
# This is not beautiful.
start_pad_x = start_pad_y = end_pad_x = end_pad_y = pad
if start_offset is not None:
start_pad_x += start_offset[0]
start_pad_y += start_offset[1]
if end_offset is not None:
end_pad_x += end_offset[0]
end_pad_y += end_offset[1]
y_np = np.arange(start_pad_y, row - end_pad_y, y_step)
x_np = np.arange(start_pad_x, col - end_pad_x, x_step)
xy_np = (x_np, y_np)
return xy_np
@lru_cache(maxsize=lru_maxsize_vvs)
def get_hsf_empty_array(len_syx, frameint_x, frame_int_dtype, fcshape):
# Reduces per-frame array allocation by handing out preallocated empty arrays and recycling them via lru_cache
def get_frameint_empty_array(frame_shape, pad, x_step, y_step, r_in, r_out):
frame_int_dtype = np.intc
frame_pad = np.empty((frame_shape[0] + (pad * 2), frame_shape[1] + (pad * 2)), dtype=np.uint8)
row, col = frame_pad.shape
frame_int = np.empty((row + 1, col + 1), dtype=frame_int_dtype)
y_steps_arr = np.arange(pad, row - pad, y_step, dtype=np.int16)
x_steps_arr = np.arange(pad, col - pad, x_step, dtype=np.int16)
len_sx, len_sy = len(x_steps_arr), len(y_steps_arr)
len_syx = (len_sy, len_sx)
y_end = pad + (y_step * (len_sy - 1))
x_end = pad + (x_step * (len_sx - 1))
y_rin_m = slice(pad - r_in, y_end - r_in + 1, y_step)
y_rin_p = slice(pad + r_in, y_end + r_in + 1, y_step)
x_rin_m = slice(pad - r_in, x_end - r_in + 1, x_step)
x_rin_p = slice(pad + r_in, x_end + r_in + 1, x_step)
in_p00 = frame_int[y_rin_m, x_rin_m]
in_p11 = frame_int[y_rin_p, x_rin_p]
in_p01 = frame_int[y_rin_m, x_rin_p]
in_p10 = frame_int[y_rin_p, x_rin_m]
y_ro_m = np.maximum(y_steps_arr - r_out, 0) # [:,np.newaxis]
x_ro_m = np.maximum(x_steps_arr - r_out, 0) # [np.newaxis,:]
y_ro_p = np.minimum(row, y_steps_arr + r_out) # [:,np.newaxis]
x_ro_p = np.minimum(col, x_steps_arr + r_out) # [np.newaxis,:]
inner_sum = np.empty(len_syx, dtype=frame_int_dtype)
outer_sum = np.empty(len_syx, dtype=frame_int_dtype)
p_temp = np.empty((len_syx[0], frameint_x), dtype=frame_int_dtype)
p00 = np.empty(len_syx, dtype=frame_int_dtype)
p11 = np.empty(len_syx, dtype=frame_int_dtype)
p01 = np.empty(len_syx, dtype=frame_int_dtype)
p10 = np.empty(len_syx, dtype=frame_int_dtype)
response_list = np.empty(len_syx, dtype=np.float64)
frame_conv = np.zeros(shape=fcshape[0], dtype=np.uint8)
frame_conv_stride = frame_conv[::fcshape[1], ::fcshape[2]]
return (inner_sum, outer_sum), p_temp, (p00, p11, p01, p10), response_list, (frame_conv, frame_conv_stride)
out_p_temp = np.empty((len_sy, col + 1), dtype=frame_int_dtype)
out_p00 = np.empty(len_syx, dtype=frame_int_dtype)
out_p11 = np.empty(len_syx, dtype=frame_int_dtype)
out_p01 = np.empty(len_syx, dtype=frame_int_dtype)
out_p10 = np.empty(len_syx, dtype=frame_int_dtype)
response_list = np.empty(len_syx, dtype=np.float64) # or np.int32
frame_conv = np.zeros(shape=(row - 2 * pad, col - 2 * pad), dtype=np.uint8) # or np.float64
frame_conv_stride = frame_conv[::y_step, ::x_step]
return frame_pad, frame_int, inner_sum, in_p00, in_p11, in_p01, in_p10, y_ro_m, x_ro_m, y_ro_p, x_ro_p, outer_sum, out_p_temp, out_p00, out_p11, out_p01, out_p10, response_list, frame_conv, frame_conv_stride
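
The allocation trick above works because lru_cache keys on the hashable geometry arguments: every frame with the same shape, pad, step, and radii gets back the same preallocated buffers and views, so the hot path allocates nothing. A stripped-down sketch of the pattern (names are illustrative, not from this codebase):

from functools import lru_cache
import numpy as np

@lru_cache(maxsize=8)
def get_scratch(shape):  # illustrative name
    # Allocated once per distinct shape, then recycled on every call.
    # Contents are stale between calls, so callers must fully overwrite
    # the buffer (e.g. via dst=/out= arguments) before reading it.
    return np.empty(shape, dtype=np.float64)

a = get_scratch((480, 640))
b = get_scratch((480, 640))
assert a is b  # same object both times: no allocation on the hot path
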
# @profile
def conv_int(frame_int, kernel, xy_step, padding, xy_steps_list):
"""
:param frame_int:
:param kernel: hsf
:param step: (x,y)
:param padding: int
:return:
"""
row, col = frame_int.shape
row -= 1
col -= 1
x_step, y_step = xy_step
# padding2 = 2 * padding
f_shape = row - 2 * padding, col - 2 * padding
r_in = kernel.r_in
def conv_int(frame_int, kernel, inner_sum, in_p00, in_p11, in_p01, in_p10, y_ro_m, x_ro_m, y_ro_p, x_ro_p, outer_sum, out_p_temp,
out_p00, out_p11, out_p01, out_p10, response_list, frame_conv_stride):
# inner_sum[:, :] = in_p00 + in_p11 - in_p01 - in_p10
cv2.add(in_p00, in_p11, dst=inner_sum)
cv2.subtract(inner_sum, in_p01, dst=inner_sum)
cv2.subtract(inner_sum, in_p10, dst=inner_sum)
len_sx, len_sy = len(xy_steps_list[0]), len(xy_steps_list[1])
inout_sum, p_temp, p_list, response_list, frameconvlist = get_hsf_empty_array((len_sy, len_sx), col + 1,
frame_int.dtype, (f_shape, y_step, x_step))
inner_sum, outer_sum = inout_sum
p00, p11, p01, p10 = p_list
frame_conv, frame_conv_stride = frameconvlist
y_rin_m = xy_steps_list[1] - r_in
x_rin_m = xy_steps_list[0] - r_in
y_rin_p = xy_steps_list[1] + r_in
x_rin_p = xy_steps_list[0] + r_in
# xx==(y,x),m==MINUS,p==PLUS, ex: mm==(y-,x-)
inarr_mm = frame_int[y_rin_m[0]:y_rin_m[-1] + 1:y_step, x_rin_m[0]:x_rin_m[-1] + 1:x_step]
inarr_mp = frame_int[y_rin_m[0]:y_rin_m[-1] + 1:y_step, x_rin_p[0]:x_rin_p[-1] + 1:x_step]
inarr_pm = frame_int[y_rin_p[0]:y_rin_p[-1] + 1:y_step, x_rin_m[0]:x_rin_m[-1] + 1:x_step]
inarr_pp = frame_int[y_rin_p[0]:y_rin_p[-1] + 1:y_step, x_rin_p[0]:x_rin_p[-1] + 1:x_step]
# == inarr_mm + inarr_pp - inarr_mp - inarr_pm
inner_sum[:, :] = inarr_mm
inner_sum += inarr_pp
inner_sum -= inarr_mp
inner_sum -= inarr_pm
# Bottleneck here, I want to make it smarter. Someone do it.
# (y,x)
# p00=max(y_ro_m,0),max(x_ro_m,0)
# p11=min(y_ro_p,ylim),min(x_ro_p,xlim)
# p01=max(y_ro_m,0),min(x_ro_p,xlim)
# p10=min(y_ro_p,ylim),max(x_ro_m,0)
y_ro_m = xy_steps_list[1] - kernel.r_out
x_ro_m = xy_steps_list[0] - kernel.r_out
y_ro_p = xy_steps_list[1] + kernel.r_out
x_ro_p = xy_steps_list[0] + kernel.r_out
# p00 calc
np.take(frame_int, y_ro_m, axis=0, mode="clip", out=p_temp)
np.take(p_temp, x_ro_m, axis=1, mode="clip", out=p00)
frame_int.take(y_ro_m, axis=0, mode="clip", out=out_p_temp)
out_p_temp.take(x_ro_m, axis=1, mode="clip", out=out_p00)
# p01 calc
np.take(p_temp, x_ro_p, axis=1, mode="clip", out=p01)
out_p_temp.take(x_ro_p, axis=1, mode="clip", out=out_p01)
# p11 calc
np.take(frame_int, y_ro_p, axis=0, mode="clip", out=p_temp)
np.take(p_temp, x_ro_p, axis=1, mode="clip", out=p11)
frame_int.take(y_ro_p, axis=0, mode="clip", out=out_p_temp)
out_p_temp.take(x_ro_p, axis=1, mode="clip", out=out_p11)
# p10 calc
np.take(p_temp, x_ro_m, axis=1, mode="clip", out=p10)
# the point is this
# p00=np.take(np.take(frame_int, y_ro_m, axis=0, mode="clip"), x_ro_m, axis=1, mode="clip")
# p11=np.take(np.take(frame_int, y_ro_p, axis=0, mode="clip"), x_ro_p, axis=1, mode="clip")
# p01=np.take(np.take(frame_int, y_ro_m, axis=0, mode="clip"), x_ro_p, axis=1, mode="clip")
# p10=np.take(np.take(frame_int, y_ro_p, axis=0, mode="clip"), x_ro_m, axis=1, mode="clip")
out_p_temp.take(x_ro_m, axis=1, mode="clip", out=out_p10)
outer_sum[:, :] = p00 + p11 - p01 - p10 - inner_sum
# outer_sum[:, :] = out_p00 + out_p11 - out_p01 - out_p10 - inner_sum
cv2.add(out_p00, out_p11, dst=outer_sum)
cv2.subtract(outer_sum, out_p01, dst=outer_sum)
cv2.subtract(outer_sum, out_p10, dst=outer_sum)
cv2.subtract(outer_sum, inner_sum, dst=outer_sum)
# cv2.transform(np.asarray([p00, p11, -p01, -p10, -inner_sum]).transpose((1, 2, 0)), np.ones((1, 5)),
# dst=outer_sum) # https://answers.opencv.org/question/3120/how-to-sum-a-3-channel-matrix-to-a-one-channel-matrix/
np.multiply(kernel.val_in, inner_sum, dtype=np.float64, out=response_list)
response_list += kernel.val_out * outer_sum
# np.multiply(kernel.val_in, inner_sum, dtype=np.float64, out=response_list)
# response_list += kernel.val_out * outer_sum
cv2.addWeighted(inner_sum,
kernel.val_in,
outer_sum, # or p00 + p11 - p01 - p10 - inner_sum
kernel.val_out,
0.0,
dtype=cv2.CV_64F, # or cv2.CV_32S
dst=response_list)
# min_response, max_val, min_loc, max_loc = cv2.minMaxLoc(response_list)
min_response, _, min_loc, _ = cv2.minMaxLoc(response_list)
center = ((xy_steps_list[0][min_loc[0]] - padding), (xy_steps_list[1][min_loc[1]] - padding))
frame_conv_stride[:, :] = response_list
# or
# frame_conv_stride[:, :] = response_list.astype(np.uint8)
return frame_conv, min_response, center
return min_response, min_loc
@lru_cache(maxsize=lru_maxsize_s)
def get_hsf_center(padding, x_step, y_step, min_loc): # min_x,min_y):
return padding + (x_step * min_loc[0]) - padding, padding + (y_step * min_loc[1]) - padding
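
All of the p00/p11/p01/p10 arithmetic in conv_int is the classic summed-area-table identity: once cv2.integral has run, the sum over any axis-aligned box costs four lookups, box = I[y1, x1] + I[y0, x0] - I[y0, x1] - I[y1, x0], and the surround ring comes free as the enclosing box minus the inner box. A quick check of the identity:

import cv2
import numpy as np

img = np.random.randint(0, 256, (8, 8), dtype=np.uint8)
I = cv2.integral(img)  # shape (9, 9); I[y, x] == img[:y, :x].sum()

y0, y1, x0, x1 = 2, 6, 1, 5  # box covering rows y0..y1-1, cols x0..x1-1
box = int(I[y1, x1]) + int(I[y0, x0]) - int(I[y0, x1]) - int(I[y1, x0])
assert box == int(img[y0:y1, x0:x1].sum())
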
class AutoRadiusCalc(object):
@@ -567,15 +534,22 @@ class HSF_cls(object):
# Calculate the integral image of the frame
int_start_time = timeit.default_timer()
frame_pad, frame_int, inner_sum, in_p00, in_p11, in_p01, in_p10, y_ro_m, x_ro_m, y_ro_p, x_ro_p, outer_sum, out_p_temp, out_p00, out_p11, out_p01, out_p10, response_list, frame_conv, frame_conv_stride = get_frameint_empty_array(
gray_frame.shape, pad, step[0], step[1], hsf.r_in, hsf.r_out)
# BORDER_CONSTANT is faster than BORDER_REPLICATE. There seems to be almost no negative impact when BORDER_CONSTANT is used.
frame_pad = cv2.copyMakeBorder(gray_frame, pad, pad, pad, pad, cv2.BORDER_CONSTANT)
frame_int = cv2.integral(frame_pad)
cv2.copyMakeBorder(gray_frame, pad, pad, pad, pad, cv2.BORDER_CONSTANT, dst=frame_pad)
cv2.integral(frame_pad, sum=frame_int, sdepth=cv2.CV_32S)
self.timedict["int_img"].append(timeit.default_timer() - int_start_time)
# Convolve the feature with the integral image
conv_int_start_time = timeit.default_timer()
xy_step = frameint_get_xy_step(frame_int.shape, step, pad, start_offset=None, end_offset=None)
frame_conv, response, center_xy = conv_int(frame_int, hsf, step, pad, xy_step)
response, hsf_min_loc = conv_int(frame_int, hsf, inner_sum, in_p00, in_p11, in_p01, in_p10, y_ro_m, x_ro_m, y_ro_p, x_ro_p,
outer_sum, out_p_temp, out_p00, out_p11, out_p01, out_p10, response_list,
frame_conv_stride)
center_xy = get_hsf_center(pad, step[0], step[1], hsf_min_loc)
# Pseudo-visualization of HSF
# cv2.normalize(cv2.filter2D(cv2.filter2D(frame_pad, cv2.CV_64F, hsf.get_kernel()[hsf.get_kernel().shape[0]//2,:].reshape(1,-1), borderType=cv2.BORDER_CONSTANT), cv2.CV_64F, hsf.get_kernel()[:,hsf.get_kernel().shape[1]//2].reshape(-1,1), borderType=cv2.BORDER_CONSTANT),None,0,255,cv2.NORM_MINMAX,dtype=cv2.CV_8U))
self.timedict["conv_int"].append(timeit.default_timer() - conv_int_start_time)
crop_start_time = timeit.default_timer()


@@ -1,26 +1,27 @@
import math
import timeit
from functools import lru_cache
import cv2
import numpy as np
from numpy.linalg import _umath_linalg
from haar_surround_feature import (
AutoRadiusCalc,
BlinkDetector,
CvParameters, conv_int,
frameint_get_xy_step,
CvParameters, conv_int, get_frameint_empty_array, get_hsf_center,
)
from utils.img_utils import safe_crop
from utils.misc_utils import clamp
# from line_profiler_pycharm import profile
# RANSAC
thresh_add = 10
imshow_enable = True
calc_print_enable = True
imshow_enable = False
calc_print_enable = False
save_video = False
skip_autoradius = False
skip_blink_detect = False
@@ -28,6 +29,7 @@ skip_blink_detect = False
# cache param
lru_maxsize_vvs = 16
lru_maxsize_vs = 64
lru_maxsize_s = 128
# CV param
default_radius = 20
auto_radius_range = (default_radius - 10, default_radius + 10) # (10,30)
@@ -37,24 +39,48 @@ blink_init_frames = 60 * 3 # 60fps*3sec,Number of blink statistical frames
default_step = (5, 5) # bigger steps lower the processing time, but accuracy takes a hit as well
def ellipse_model(data, y, f):
"""
This does not need to be a function; making it one adds a small call overhead.
The results may differ slightly from the lambda version due to floating-point rounding, but they are virtually identical.
a = 1.0, b = P[0], c = P[1], d = P[2], e = P[3], f = P[4]
:param data:
:param y: np.c_[d, e, a, c, b]
:param f: f == P[4, 0]
:return: this_return == np.array([ellipse_model(x, y) for (x, y) in data ])
"""
return data.dot(y) + f
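
Concretely: with point columns ordered (x, y, x**2, y**2, x*y) as in datamod, and y = [d, e, a, c, b], the dot product plus f evaluates the conic a*x**2 + b*x*y + c*y**2 + d*x + e*y + f for every point at once; points lying on the ellipse give a residual of ~0. A small self-contained check (conic_residual is a hypothetical helper mirroring ellipse_model):

import numpy as np

def conic_residual(pts, a, b, c, d, e, f):
    # Hypothetical helper for illustration; mirrors ellipse_model.
    x, y = pts[:, 0], pts[:, 1]
    datamod = np.column_stack([x, y, x * x, y * y, x * y])
    y_vec = np.array([d, e, a, c, b])  # ordering from the docstring
    return datamod.dot(y_vec) + f     # ~0 for points on the conic

# Unit circle: x^2 + y^2 - 1 = 0, i.e. a = c = 1, b = d = e = 0, f = -1
t = np.linspace(0, 2 * np.pi, 8)
pts = np.column_stack([np.cos(t), np.sin(t)])
assert np.allclose(conic_residual(pts, 1, 0, 1, 0, 0, -1), 0)
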
@lru_cache(maxsize=lru_maxsize_s)
def get_ransac_empty_array_new(iter_num, sample_num, len_data):
# Reduces per-iteration array allocation by handing out preallocated empty arrays and recycling them via lru_cache
use_dtype = np.float64
dm_rng = np.empty((iter_num, sample_num, 7), dtype=use_dtype)
dm_rng_swap = np.empty((iter_num, sample_num, 5), dtype=use_dtype)
dm_rng_swap_trans = dm_rng_swap.transpose((0, 2, 1))
# dm_rng_swap_trans = np.empty((iter_num, 5,sample_num), dtype=use_dtype)
dm_rng_5x5 = np.empty((iter_num, 5, 5), dtype=use_dtype)
dm_rng_p5smp = np.empty((iter_num, 5, sample_num), dtype=use_dtype)
dm_rng_p = np.empty((iter_num, 5), dtype=use_dtype)
dm_rng_p_npaxis = dm_rng_p[:, :, np.newaxis]
ellipse_y_arr = np.empty((iter_num, 5), dtype=use_dtype)
ellipse_y_arr[:, 2] = 1
swap_index = np.array([4, 3, 0, 1, 5], dtype=np.uint8)
dm_brod = np.broadcast_to(dm_rng_p[:, 4, np.newaxis], (iter_num, len_data))
dm_rng_six = dm_rng[:, :, 6, np.newaxis]
dm_rng_p_24 = dm_rng_p[:, 2:4]
dm_rng_p_10 = dm_rng_p[:, 1::-1]
el_y_arr_2 = ellipse_y_arr[:, :2]
el_y_arr_3 = ellipse_y_arr[:, 3:]
datamod = np.empty((len_data, 7), dtype=use_dtype) # np.empty((len(data), 7), dtype=ret_dtype)
datamod[:, 5] = 1
datamod_b = datamod[:, :5] # .T
rdm_index_init_arr = np.empty((iter_num, len_data), dtype=np.uint16)
rdm_index_init_arr[:, :] = np.arange(len_data, dtype=np.uint16)
rdm_index = np.empty((iter_num, len_data), dtype=np.uint16)
rdm_index_smpnum = rdm_index[:, :sample_num]
ellipse_data_arr = np.empty((iter_num, len_data), dtype=use_dtype)
th_abs = np.empty((iter_num, len_data), dtype=use_dtype)
dm_data = datamod[:, :2] # = data
dm_p2 = datamod[:, 2:4] # = data * data
dm_mul = datamod[:, 4] # = data[:, 0] * data[:, 1]
dm_neg = datamod[:, 6] # = -datamod[:, 2]
inv_ext = np.linalg.linalg.get_linalg_error_extobj(np.linalg.linalg._raise_linalgerror_singular)
return dm_rng, dm_rng_swap, dm_rng_swap_trans, dm_rng_5x5, dm_rng_p5smp, dm_rng_p, dm_rng_p_npaxis, ellipse_y_arr, swap_index, dm_brod, dm_rng_six, dm_rng_p_24, dm_rng_p_10, el_y_arr_2, el_y_arr_3, datamod, datamod_b, dm_data, dm_p2, dm_mul, dm_neg, rdm_index_init_arr, rdm_index, rdm_index_smpnum, ellipse_data_arr, th_abs, inv_ext
# @profile
def fit_rotated_ellipse_ransac(data: np.ndarray, rng: np.random.Generator, iter=100, sample_num=10, offset=80 # 80.0, 10, 80
): # before changing these values, please read up on the ransac algorithm
def fit_rotated_ellipse_ransac(data: np.ndarray, sfc: np.random.Generator, iter_num=100, sample_num=10, offset=80):
# Before changing these values, please read up on the RANSAC algorithm.
# If you do change them, know that higher iteration counts make frame processing slower.
effective_sample = None
# The array contents do not change during the loop, so only one call is needed.
# They say len is faster than shape.
@@ -64,81 +90,95 @@ def fit_rotated_ellipse_ransac(data: np.ndarray, rng: np.random.Generator, iter=
if len_data < sample_num:
return None
# Type of calculation result
ret_dtype = np.float64
dm_rng, dm_rng_swap, dm_rng_swap_trans, dm_rng_5x5, dm_rng_p5smp, dm_rng_p, dm_rng_p_npaxis, ellipse_y_arr, swap_index, dm_brod, dm_rng_six, dm_rng_p_24, dm_rng_p_10, el_y_arr_2, el_y_arr_3, datamod, datamod_b, dm_data, dm_p2, dm_mul, dm_neg, rdm_index_init_arr, rdm_index, rdm_index_smpnum, ellipse_data_arr, th_abs, inv_ext = get_ransac_empty_array_new(
iter_num, sample_num, len_data)
# Sort a random array of shape (iter, len_data); the first sample_num pre-sort indices of each row form a sample without replacement.
# If the array size is less than about 100, this is faster than rng.choice.
rng_sample = rng.random((iter, len_data)).argsort()[:, :sample_num]
dm_data[:, :] = data # [:]
dm_p2[:, :] = data * data
dm_mul[:] = data[:, 0] * data[:, 1]
dm_neg[:] = -dm_p2[:, 0] # -1 * data[:, 0] ** 2#
sfc.permuted(rdm_index_init_arr, axis=1, out=rdm_index)
# np.take replaces a[ind,:] and is 3-4 times faster, https://gist.github.com/rossant/4645217
# a.take() is faster than np.take(a)
datamod.take(rdm_index_smpnum, axis=0, mode="clip", out=dm_rng)
dm_rng_swap[:, :, :] = dm_rng[:, :, swap_index]
# or
# I don't see any advantage to doing this.
# rng_sample = np.asarray(rng.random((iter, len_data)).argsort()[:, :sample_num], dtype=np.int32)
# dm_rng.take(swap_index, axis=2, mode="clip", out=dm_rng_swap)
# or
# dm_rng_swap = np.take(dm_rng,[4, 3, 0, 1, 5],axis=2)
# I don't think it looks beautiful.
# x,y,x**2,y**2,x*y,1,-1*x**2
datamod = np.concatenate(
[data, data ** 2, (data[:, 0] * data[:, 1])[:, np.newaxis], np.ones((len_data, 1), dtype=ret_dtype),
(-1 * data[:, 0] ** 2)[:, np.newaxis]], axis=1,
dtype=ret_dtype)
np.matmul(dm_rng_swap_trans, dm_rng_swap, out=dm_rng_5x5)
# np.linalg.solve(np.matmul(dm_rng_swap_trans, dm_rng_swap), dm_rng_swap_trans) # solve is slow https://github.com/bogovicj/JaneliaMLCourse/issues/1
_umath_linalg.inv(dm_rng_5x5, signature='d->d',
extobj=inv_ext, out=dm_rng_5x5)
np.matmul(dm_rng_5x5, dm_rng_swap_trans, out=dm_rng_p5smp)
datamod_slim = np.array(datamod[:, :5], dtype=ret_dtype)
np.matmul(dm_rng_p5smp, dm_rng_six, out=dm_rng_p_npaxis)
datamod_rng = datamod[rng_sample]
datamod_rng6 = datamod_rng[:, :, 6]
datamod_rng_swap = datamod_rng[:, :, [4, 3, 0, 1, 5]]
datamod_rng_swap_trans = datamod_rng_swap.transpose((0, 2, 1))
el_y_arr_2[:, :] = dm_rng_p_24
el_y_arr_3[:, :] = dm_rng_p_10
# These two lines are one of the bottlenecks
datamod_rng_5x5 = np.matmul(datamod_rng_swap_trans, datamod_rng_swap)
datamod_rng_p5smp = np.matmul(np.linalg.inv(datamod_rng_5x5), datamod_rng_swap_trans)
cv2.gemm(ellipse_y_arr, datamod_b, 1.0, dm_brod, 1.0, dst=ellipse_data_arr, flags=cv2.GEMM_2_T)
datamod_rng_p = np.matmul(datamod_rng_p5smp, datamod_rng6[:, :, np.newaxis]).reshape((-1, 5))
np.abs(ellipse_data_arr, out=th_abs)
cv2.threshold(th_abs, offset, 1.0, cv2.THRESH_BINARY_INV, dst=th_abs)
ellipse_data_index = \
cv2.minMaxLoc(cv2.reduce(th_abs, 1, cv2.REDUCE_SUM))[3][1]
# I don't think it looks beautiful.
ellipse_y_arr = np.asarray(
[datamod_rng_p[:, 2], datamod_rng_p[:, 3], np.ones(len(datamod_rng_p)), datamod_rng_p[:, 1], datamod_rng_p[:, 0]], dtype=ret_dtype)
# error_num = ellipse_data_arr[ellipse_data_index].sum()
error_num = cv2.sumElems(ellipse_data_arr[ellipse_data_index])[0]
effective_sample_p_arr = dm_rng_p[ellipse_data_index].tolist()
ellipse_data_arr = ellipse_model(datamod_slim, ellipse_y_arr, np.asarray(datamod_rng_p[:, 4])).transpose((1, 0))
ellipse_data_abs = np.abs(ellipse_data_arr)
ellipse_data_index = np.argmax(np.sum(ellipse_data_abs < offset, axis=1), axis=0)
effective_data_arr = ellipse_data_arr[ellipse_data_index]
effective_sample_p_arr = datamod_rng_p[ellipse_data_index]
return fit_rotated_ellipse(effective_data_arr, effective_sample_p_arr)
return fit_rotated_ellipse(error_num, effective_sample_p_arr)
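
Both sampling schemes above draw sample_num indices per RANSAC iteration without replacement, vectorized across all iterations at once: the old code argsorts a random matrix, while the new code shuffles a reusable index matrix in place with Generator.permuted and so avoids the full sort. A minimal sketch of the two equivalents:

import numpy as np

iter_num, len_data, sample_num = 100, 50, 10
rng = np.random.default_rng(np.random.SFC64())

# (a) argsort trick: each row is a random permutation; keep the first k.
idx_a = rng.random((iter_num, len_data)).argsort()[:, :sample_num]

# (b) permuted: shuffle a preinitialized index matrix along axis=1.
base = np.tile(np.arange(len_data, dtype=np.uint16), (iter_num, 1))
out = np.empty_like(base)
rng.permuted(base, axis=1, out=out)
idx_b = out[:, :sample_num]

# Each row of either result is sample_num distinct indices into the data.
assert all(len(set(row)) == sample_num for row in idx_b)
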
# @profile
def fit_rotated_ellipse(data, P):
a = 1.0
b = P[0]
c = P[1]
d = P[2]
e = P[3]
f = P[4]
# The cost of trigonometric functions is high.
theta = 0.5 * np.arctan(b / (a - c), dtype=np.float64)
theta_sin = np.sin(theta, dtype=np.float64)
theta_cos = np.cos(theta, dtype=np.float64)
tc2 = theta_cos ** 2
ts2 = theta_sin ** 2
# b, c, d, e, f = P[0], P[1], P[2], P[3], P[4]
b, c, d, e = P[0], P[1], P[2], P[3]
theta = 0.5 * math.atan(b / (a - c)) # math.atan2(b, a - c)
theta_sin, theta_cos = math.sin(theta), math.cos(theta)
tc2 = theta_cos * theta_cos
ts2 = theta_sin * theta_sin
b_tcs = b * theta_cos * theta_sin
# Do the calculation only once
cxy = b ** 2 - 4 * a * c
cxy = b * b - 4 * a * c
cx = (2 * c * d - b * e) / cxy
cy = (2 * a * e - b * d) / cxy
# I just want to clear things up around here.
cu = a * cx ** 2 + b * cx * cy + c * cy ** 2 - f
cu_r = np.array([(a * tc2 + b_tcs + c * ts2), (a * ts2 - b_tcs + c * tc2)])
wh = np.sqrt(cu / cu_r)
w, h = wh[0], wh[1]
error_sum = np.sum(data)
# cu = a * cx * cx + b * cx * cy + c * cy * cy - P[4]
cu = c * cy * cy + cx * (a * cx + b * cy) - P[4]
# here: https://stackoverflow.com/questions/327002/which-is-faster-in-python-x-5-or-math-sqrtx
# and : https://gist.github.com/zed/783011
w = math.sqrt(cu / (a * tc2 + b_tcs + c * ts2))
h = math.sqrt(cu / (a * ts2 - b_tcs + c * tc2))
error_sum = data # sum(data)
# print("fitting error = %.3f" % (error_sum))
return (cx, cy, w, h, theta)
return cx, cy, w, h, theta
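
The center formulas drop out of setting the conic's gradient to zero: with F(x, y) = a*x**2 + b*x*y + c*y**2 + d*x + e*y + f, the conditions dF/dx = 2*a*cx + b*cy + d = 0 and dF/dy = b*cx + 2*c*cy + e = 0 solve to cx = (2*c*d - b*e)/(b**2 - 4*a*c) and cy = (2*a*e - b*d)/(b**2 - 4*a*c), exactly as computed above. A quick numerical check of that step:

import numpy as np

a, b, c, d, e = 1.0, 0.3, 0.7, -2.0, 1.5  # arbitrary ellipse conic, a == 1.0
cxy = b * b - 4 * a * c
cx = (2 * c * d - b * e) / cxy
cy = (2 * a * e - b * d) / cxy

# The gradient of F must vanish at the center.
grad = np.array([2 * a * cx + b * cy + d, b * cx + 2 * c * cy + e])
assert np.allclose(grad, 0.0)
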
@lru_cache(lru_maxsize_vvs)
def get_ransac_frame(frame_shape):
return np.empty(frame_shape, dtype=np.uint8), np.empty(frame_shape, dtype=np.uint8) # np.float64)
@lru_cache(lru_maxsize_s)
def get_center_noclamp(center_xy, radius):
center_x, center_y = center_xy
upper_x = center_x + radius
lower_x = center_x - radius
upper_y = center_y + radius
lower_y = center_y - radius
ransac_upper_x = center_x + max(20, radius)
ransac_lower_x = center_x - max(20, radius)
ransac_upper_y = center_y + max(20, radius)
ransac_lower_y = center_y - max(20, radius)
ransac_xy_offset = (ransac_lower_x, ransac_lower_y)
return center_x, center_y, upper_x, lower_x, upper_y, lower_y, ransac_lower_x, ransac_lower_y, ransac_upper_x, ransac_upper_y, ransac_xy_offset
class HSRAC_cls(object):
@@ -165,11 +205,17 @@ class HSRAC_cls(object):
self.timedict = {"to_gray": [], "int_img": [], "conv_int": [], "crop": [], "total_cv": []}
# ransac
self.rng = np.random.default_rng()
self.sfc = np.random.default_rng(np.random.SFC64())
# self.kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
# or
self.kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3,3))
# https://stackoverflow.com/questions/31025368/erode-is-too-slow-opencv
self.kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
self.gauss_k = cv2.getGaussianKernel(5, 2)
# cv2.getGaussianKernel(kernel size, sigma)
# Increasing the kernel size improves accuracy but slows down performance.
# Increasing sigma improves accuracy a little, but has less effect than kernel size.
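
Caching a 1-D getGaussianKernel and applying it with cv2.sepFilter2D (as single_run does below) exploits separability: a 2-D Gaussian factors into a row pass and a column pass, costing O(k) per pixel instead of O(k**2), while matching GaussianBlur for the same kernel size and sigma. A quick equivalence check:

import cv2
import numpy as np

img = np.random.rand(64, 64).astype(np.float32)
k = cv2.getGaussianKernel(5, 2)  # same (ksize, sigma) as above

sep = cv2.sepFilter2D(img, -1, k, k)    # 1-D row pass, then column pass
ref = cv2.GaussianBlur(img, (5, 5), 2)  # direct 2-D blur
assert np.allclose(sep, ref, atol=1e-6)
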
def open_video(self, video_path):
# Temporary implementation to run
@@ -193,123 +239,113 @@ class HSRAC_cls(object):
def single_run(self):
# Temporary implementation to run
## default_radius = 14
# ori_frame = self.current_image.copy()# debug code
# cropbox=[] # debug code
if imshow_enable:
ori_frame = self.current_image_gray.copy() # debug code
blink_bd = False
frame = self.current_image_gray
if self.now_modeo == self.cv_modeo[1]:
# adjustment of radius
# debug print
# if calc_print_enable:
# temp_radius = self.auto_radius_calc.get_radius()
# print('Now radius:', temp_radius)
# self.cvparam.radius = temp_radius
self.cvparam.radius = self.auto_radius_calc.get_radius()
if self.auto_radius_calc.adj_comp_flag:
self.now_modeo = self.cv_modeo[2] if not skip_blink_detect else self.cv_modeo[3]
radius, pad, step, hsf = self.cvparam.get_rpsh()
# For measuring processing time of image processing
cv_start_time = timeit.default_timer()
# cv_start_time = timeit.default_timer()
frame = self.current_image_gray
gray_frame = frame
self.timedict["to_gray"].append(timeit.default_timer() - cv_start_time)
# self.timedict["to_gray"].append(timeit.default_timer() - cv_start_time)
# Calculate the integral image of the frame
int_start_time = timeit.default_timer()
# BORDER_CONSTANT is faster than BORDER_REPLICATE. There seems to be almost no negative impact when BORDER_CONSTANT is used.
frame_pad = cv2.copyMakeBorder(gray_frame, pad, pad, pad, pad, cv2.BORDER_CONSTANT)
frame_int = cv2.integral(frame_pad)
self.timedict["int_img"].append(timeit.default_timer() - int_start_time)
# int_start_time = timeit.default_timer()
frame_pad, frame_int, inner_sum, in_p00, in_p11, in_p01, in_p10, y_ro_m, x_ro_m, y_ro_p, x_ro_p, outer_sum, out_p_temp, out_p00, out_p11, out_p01, out_p10, response_list, frame_conv, frame_conv_stride = get_frameint_empty_array(
gray_frame.shape, pad, step[0], step[1], hsf.r_in, hsf.r_out)
cv2.copyMakeBorder(gray_frame, pad, pad, pad, pad, cv2.BORDER_CONSTANT, dst=frame_pad)
cv2.integral(frame_pad, sum=frame_int, sdepth=cv2.CV_32S)
# self.timedict["int_img"].append(timeit.default_timer() - int_start_time)
# Convolve the feature with the integral image
conv_int_start_time = timeit.default_timer()
xy_step = frameint_get_xy_step(frame_int.shape, step, pad, start_offset=None, end_offset=None)
frame_conv, response, center_xy = conv_int(frame_int, hsf, step, pad, xy_step)
self.timedict["conv_int"].append(timeit.default_timer() - conv_int_start_time)
crop_start_time = timeit.default_timer()
# conv_int_start_time = timeit.default_timer()
response, hsf_min_loc = conv_int(frame_int, hsf, inner_sum, in_p00, in_p11, in_p01, in_p10, y_ro_m, x_ro_m, y_ro_p, x_ro_p,
outer_sum, out_p_temp, out_p00, out_p11, out_p01, out_p10, response_list,
frame_conv_stride)
center_xy = get_hsf_center(pad, step[0], step[1], hsf_min_loc)
# visualization of HSF
# cv2.normalize(cv2.filter2D(cv2.filter2D(frame_pad, cv2.CV_64F, hsf.get_kernel()[hsf.get_kernel().shape[0]//2,:].reshape(1,-1), borderType=cv2.BORDER_CONSTANT), cv2.CV_64F, hsf.get_kernel()[:,hsf.get_kernel().shape[1]//2].reshape(-1,1), borderType=cv2.BORDER_CONSTANT),None,0,255,cv2.NORM_MINMAX,dtype=cv2.CV_8U))
# self.timedict["conv_int"].append(timeit.default_timer() - conv_int_start_time)
# crop_start_time = timeit.default_timer()
# Define the center point and radius
center_x, center_y = center_xy
upper_x = center_x + radius
lower_x = center_x - radius
upper_y = center_y + radius
lower_y = center_y - radius
# Crop the image using the calculated bounds
cropped_image = safe_crop(gray_frame, lower_x, lower_y, upper_x, upper_y)
# cropbox=[clamp(val, 0, gray_frame.shape[i]) for i,val in zip([1,0,1,0],[lower_x,lower_y,upper_x,upper_y])] # debug code
center_x, center_y, upper_x, lower_x, upper_y, lower_y, ransac_lower_x, ransac_lower_y, ransac_upper_x, ransac_upper_y, ransac_xy_offset = get_center_noclamp(
center_xy, radius)
if self.now_modeo == self.cv_modeo[0] or self.now_modeo == self.cv_modeo[1]:
# If mode is first_frame or radius_adjust, record current radius and response
self.auto_radius_calc.add_response(radius, response)
elif self.now_modeo == self.cv_modeo[2]:
# Statistics for blink detection
if self.blink_detector.response_len() < blink_init_frames:
self.blink_detector.add_response(cv2.mean(cropped_image)[0])
upper_x = center_x + max(20,radius)
lower_x = center_x - max(20,radius)
upper_y = center_y + max(20,radius)
lower_y = center_y - max(20,radius)
self.blink_detector.add_response(cv2.mean(safe_crop(gray_frame, lower_x, lower_y, upper_x, upper_y, 1))[0])
self.center_q1.add_response(
cv2.mean(safe_crop(gray_frame, lower_x, lower_y, upper_x, upper_y,keepsize=False))[
cv2.mean(safe_crop(gray_frame, center_x - max(20, radius), center_y - max(20, radius), center_x + max(20, radius),
center_y + max(20, radius), keepsize=False))[
0
]
)
else:
self.blink_detector.calc_thresh()
self.center_q1.calc_thresh()
self.now_modeo = self.cv_modeo[3]
else:
if 0 in cropped_image.shape: # This line may not be needed. The image will be cropped using safe_crop.
# If shape contains 0, it is not detected well.
print("Something's wrong.")
else:
orig_x, orig_y = center_x, center_y
if self.blink_detector.enable_detect_flg:
# If the average value of cropped_image is greater than response_max
# (i.e., if the cropped image is whitish)
if self.blink_detector.detect(cv2.mean(cropped_image)[0]):
# blink
print("BLINK BD")
blink_bd=True
# if imshow_enable or save_video:
# cv2.circle(frame, (orig_x, orig_y), 6, (0, 0, 255), -1)
# cv2.circle(ori_frame, (center_x, center_y), 7, (255, 0, 0), -1)
if self.blink_detector.enable_detect_flg and self.blink_detector.detect(
cv2.mean(safe_crop(gray_frame, lower_x, lower_y, upper_x, upper_y, 1))[0]):
# If the average value of cropped_image is greater than response_max
# (i.e., if the cropped image is whitish)
# blink
print("BLINK BD")
blink_bd = True
# if imshow_enable or save_video:
# cv2.circle(frame, (orig_x, orig_y), 6, (0, 0, 255), -1)
# cv2.circle(ori_frame, (center_x, center_y), 7, (255, 0, 0), -1)
# If you want to update response_max, it may be more cost-effective to rewrite response_list in the following way:
# https://stackoverflow.com/questions/42771110/fastest-way-to-left-cycle-a-numpy-array-like-pop-push-for-a-queue
cv_end_time = timeit.default_timer()
self.timedict["crop"].append(cv_end_time - crop_start_time)
self.timedict["total_cv"].append(cv_end_time - cv_start_time)
# if calc_print_enable:
# The lower the response, the better the likelihood of there being a pupil; you can adjust the radius and steps accordingly.
# print('Kernel response:', response)
# print('Pixel position:', center_xy)
#
# if imshow_enable:
# if self.now_modeo != self.cv_modeo[0] and self.now_modeo != self.cv_modeo[1]:
# if 0 in cropped_image.shape:
# If shape contains 0, it is not detected well.
# pass
# else:
# cv2.imshow("crop", cropped_image)
# cv2.imshow("frame", frame)
# if cv2.waitKey(1) & 0xFF == ord("q"):
# pass
# cv_end_time = timeit.default_timer()
# self.timedict["crop"].append(timeit.default_timer() - crop_start_time)
# self.timedict["total_cv"].append(cv_end_time - cv_start_time)
# if calc_print_enable:
# The lower the response, the better the likelihood of there being a pupil; you can adjust the radius and steps accordingly.
# print('Kernel response:', response)
# print('Pixel position:', center_xy)
#
# if imshow_enable:
# if self.now_modeo != self.cv_modeo[0] and self.now_modeo != self.cv_modeo[1]:
# if 0 in cropped_image.shape:
# If shape contains 0, it is not detected well.
# pass
# else:
# cv2.imshow("crop", cropped_image)
# cv2.imshow("frame", frame)
# if cv2.waitKey(1) & 0xFF == ord("q"):
# pass
if self.now_modeo == self.cv_modeo[0]:
# Moving from first_frame to the next mode
@@ -319,150 +355,137 @@ class HSRAC_cls(object):
self.now_modeo = self.cv_modeo[2]
else:
self.now_modeo = self.cv_modeo[1]
# For measuring processing time of image processing
cv_start_time = timeit.default_timer()
# Crop first to reduce the amount of data to process.
# frame = cropped_image[0:len(cropped_image) - 10, :]
# To reduce the data to process, first convert to 1 channel and then blur.
# The results were identical when the order of blurring and 1-channel conversion was swapped.
frame_gray = cv2.GaussianBlur(frame, (5, 5), 0)
hsf_center_x, hsf_center_y = center_x.copy(), center_y.copy()
# ransac_xy_offset = (hsf_center_x-20, hsf_center_y-20)
upper_x = hsf_center_x + max(20, radius)
lower_x = hsf_center_x - max(20, radius)
upper_y = hsf_center_y + max(20, radius)
lower_y = hsf_center_y - max(20, radius)
ransac_xy_offset = (lower_x, lower_y)
# Crop the image using the calculated bounds
ransac_start_time = timeit.default_timer()
frame_gray_crop = safe_crop(frame_gray, lower_x, lower_y, upper_x, upper_y)
frame = frame_gray_crop
# This will need to be adjusted every time the hardware is changed (IR brightness, camera position, etc.)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(frame_gray_crop)
threshold_value = min_val + thresh_add
_, thresh = cv2.threshold(frame_gray_crop, threshold_value, 255, cv2.THRESH_BINARY)
# print(thresh.shape, frame_gray.shape)
try:
opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, self.kernel)
closing = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, self.kernel)
th_frame = 255 - closing
except:
# I want to eliminate the try here, because try blocks tend to be slow to execute.
th_frame = 255 - frame_gray_crop
contours, _ = cv2.findContours(th_frame, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
# frame_gray = cv2.GaussianBlur(frame, (5, 5), 0)
# cv2.GaussianBlur is slow (uses 10% of the time of all this script)
# use cv2.blur()
# or
# contours, _=cv2.findContours(th_frame, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
if not blink_bd and self.blink_detector.enable_detect_flg:
threshold_value = self.center_q1.quartile_1
if threshold_value<min_val + thresh_add:
# In most of these cases, the pupil is at the edge of the eye.
thresh = cv2.threshold(frame_gray_crop, (min_val + thresh_add*4+threshold_value)/2, 255, cv2.THRESH_BINARY)[1]
else:
threshold_value = self.center_q1.quartile_1
_, thresh = cv2.threshold(frame_gray_crop, threshold_value, 255, cv2.THRESH_BINARY)
try:
opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, self.kernel)
closing = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, self.kernel)
th_frame = 255 - closing
except:
# I want to eliminate the try here, because try blocks tend to be slow to execute.
th_frame = 255 - frame_gray_crop
contours2, _ = cv2.findContours(th_frame, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
contours = (*contours, *contours2)
hull = []
# This way is faster than contours[i]
# But maybe this one is faster. hull = [cv2.convexHull(cnt, False) for cnt in contours]
for cnt in contours:
hull.append(cv2.convexHull(cnt, False))
if not hull:
# If empty, go to next loop
pass
try:
cnt = sorted(hull, key=cv2.contourArea)
maxcnt = cnt[-1]
# ellipse = cv2.fitEllipse(maxcnt)
ransac_data = fit_rotated_ellipse_ransac(maxcnt.reshape(-1, 2), self.rng)
if ransac_data is None:
# ransac_data is None means maxcnt.shape[0] < sample_num
# go to next loop
pass
crop_start_time = timeit.default_timer()
cx, cy, w, h, theta = ransac_data
# print(cx, cy)
if w >= 2.1 * h: # new blink detection algo lmao, this works pretty well actually
print("RAN BLINK")
#return center_x, center_y, frame, frame, True
# cx = center_x - (csx - cx) # we find the difference between the crop size and ransac point, and subtract from the center point from HSF
# cy = center_y - (csy - cy)
# csy = frame.shape[0]
# csx = frame.shape[1]
csy = gray_frame.shape[0]
csx = gray_frame.shape[1]
# frame_gray =cv2.boxFilter(frame, -1,(5, 5))# https://github.com/bfraboni/FastGaussianBlur
# cv2.boxFilter(frame_gray, -1,(5, 5),dst=frame_gray)
# cv2.boxFilter(frame_gray, -1,(5, 5),dst=frame_gray)
# or
frame_gray = cv2.sepFilter2D(frame, -1, self.gauss_k, self.gauss_k)
# cx = clamp((cx - 20) + center_x, 0, csx)
# cy = clamp((cy - 20) + center_y, 0, csy)
cx = int(clamp(cx + ransac_xy_offset[0], 0, csx))
cy = int(clamp(cy + ransac_xy_offset[1], 0, csy))
# Crop the image using the calculated bounds
# TODO: tune safe_crop
frame_gray_crop = safe_crop(frame_gray, ransac_lower_x, ransac_lower_y, ransac_upper_x, ransac_upper_y, 1)
th_frame, fic_frame = get_ransac_frame(frame_gray_crop.shape)
frame = frame_gray_crop # TODO: this can cause bugs
cv_end_time = timeit.default_timer()
# if imshow_enable or save_video:
#
# cv2.circle(ori_frame, (orig_x, orig_y), 3, (0, 255, 0), -1)
# cv2.drawContours(ori_frame, contours, -1, (255, 0, 0), 1)
# cv2.circle(ori_frame, (cx, cy), 2, (0, 0, 255), -1)
# # cx1, cy1, w1, h1, theta1 = fit_rotated_ellipse(maxcnt.reshape(-1, 2))
# cv2.ellipse(
# ori_frame,
# (cx, cy),
# (int(w), int(h)),
# theta * 180.0 / np.pi,
# 0.0,
# 360.0,
# (50, 250, 200),
# 1,
# )
# cv2.imshow("crop", cropped_image)
# # cv2.imshow("frame", frame)
# cv2.imshow("ori_frame",ori_frame)
# if cv2.waitKey(1) & 0xFF == ord("q"):
# pass
# This will need to be adjusted every time the hardware is changed (IR brightness, camera position, etc.)
# min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(frame_gray_crop)
min_val = cv2.minMaxLoc(frame_gray_crop)[0]
# threshold_value = min_val + thresh_add
except Exception as e:
# print(e)
pass
cv2.threshold(frame_gray_crop, min_val + thresh_add, 255, cv2.THRESH_BINARY_INV, dst=th_frame)
# print(thresh.shape, frame_gray.shape)
# debug code
# try:
# if any([isinstance(val, float) for val in [cx, cy]]):
# print()
# return int(cx), int(cy),cropbox, ori_frame,thresh, frame, gray_frame
# except:
# if any([isinstance(val, float) for val in [center_x, center_y]]):
# print()
# return center_x, center_y,cropbox, ori_frame,thresh, frame, gray_frame
# print(frame_gray.shape, thresh.shape)
# cv2.morphologyEx(th_frame, cv2.MORPH_OPEN, self.kernel, dst=fic_frame)
# cv2.morphologyEx(fic_frame, cv2.MORPH_CLOSE, self.kernel, dst=fic_frame)
# cv2.bitwise_not(fic_frame, fic_frame)
# https://stackoverflow.com/questions/23062572/why-multiple-openings-closing-with-a-same-kernel-does-not-have-effect
# try (cv2.absdiff(cv2.morphologyEx(th_frame, cv2.MORPH_OPEN, self.kernel),cv2.morphologyEx( cv2.morphologyEx(th_frame, cv2.MORPH_OPEN, self.kernel), cv2.MORPH_CLOSE, self.kernel))>1).sum()
cv2.morphologyEx(th_frame, cv2.MORPH_OPEN, self.kernel, dst=fic_frame) # or cv2.MORPH_CLOSE
contours = cv2.findContours(fic_frame, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)[0]
# or
# contours = cv2.findContours(fic_frame, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)[0]
# if not blink_bd and self.blink_detector.enable_detect_flg:
# threshold_value = self.center_q1.quartile_1
# if threshold_value < min_val + thresh_add:
# # In most of these cases, the pupil is at the edge of the eye.
# cv2.threshold(frame_gray_crop, (min_val + thresh_add * 4 + threshold_value) / 2, 255, cv2.THRESH_BINARY, dst=th_frame)
# else:
# threshold_value = self.center_q1.quartile_1
# cv2.threshold(frame_gray_crop, threshold_value, 255, cv2.THRESH_BINARY_INV, dst=th_frame)
# # cv2.morphologyEx(th_frame, cv2.MORPH_OPEN, self.kernel, dst=fic_frame)
# # cv2.morphologyEx(fic_frame, cv2.MORPH_CLOSE, self.kernel, dst=fic_frame)
# # cv2.bitwise_not(fic_frame, fic_frame)
# # https://stackoverflow.com/questions/23062572/why-multiple-openings-closing-with-a-same-kernel-does-not-have-effect
# # try (cv2.absdiff(cv2.morphologyEx(th_frame, cv2.MORPH_OPEN, self.kernel),cv2.morphologyEx( cv2.morphologyEx(th_frame, cv2.MORPH_OPEN, self.kernel), cv2.MORPH_CLOSE, self.kernel))>1).sum()
# cv2.morphologyEx(th_frame, cv2.MORPH_OPEN, self.kernel, dst=fic_frame) # or cv2.MORPH_CLOSE
# contours = (*contours, *cv2.findContours(fic_frame, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)[0])
# # or
# # contours = (*contours, *cv2.findContours(fic_frame, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)[0])
if not contours:
# If empty, go to next loop
return int(center_x), int(center_y), th_frame, frame, gray_frame
cnt_ind = None
max_area = -1
for i, cnt in enumerate(contours):
now_area = cv2.contourArea(cnt)
if max_area < now_area:
max_area = now_area
cnt_ind = i
hull = cv2.convexHull(contours[cnt_ind], False)
# if not hull:
# If empty, go to next loop
# return int(center_x), int(center_y), th_frame, frame, gray_frame
ransac_data = fit_rotated_ellipse_ransac(hull.reshape(-1, 2).astype(np.float64), self.sfc)
if ransac_data is None:
# ransac_data is None means maxcnt.shape[0] < sample_num
# go to next loop
# pass
return int(center_x), int(center_y), th_frame, frame, gray_frame
# crop_start_time = timeit.default_timer()
cx, cy, w, h, theta = ransac_data
# print(cx, cy)
if w >= 2.1 * h: # new blink detection algo lmao, this works pretty well actually
print("RAN BLINK")
# return center_x, center_y, frame, frame, True
# cx = center_x - (csx - cx) # we find the difference between the crop size and ransac point, and subtract from the center point from HSF
# cy = center_y - (csy - cy)
# csy = frame.shape[0]
# csx = frame.shape[1]
csy = gray_frame.shape[0]
csx = gray_frame.shape[1]
# cx = clamp((cx - 20) + center_x, 0, csx)
# cy = clamp((cy - 20) + center_y, 0, csy)
cx = int(clamp(cx + ransac_xy_offset[0], 0, csx))
cy = int(clamp(cy + ransac_xy_offset[1], 0, csy))
# cv_end_time = timeit.default_timer()
if imshow_enable:  # imsave_flg:
cv2.circle(ori_frame, (int(center_x), int(center_y)), 3, (128, 0, 0), -1)
cv2.drawContours(ori_frame, contours, -1, (255, 0, 0), 1)
cv2.circle(ori_frame, (int(cx), int(cy)), 2, (255, 0, 0), -1)
# cx1, cy1, w1, h1, theta1 = fit_rotated_ellipse(maxcnt.reshape(-1, 2))
# cv2.ellipse(
# ori_frame,
# (cx, cy),
# (int(w), int(h)),
# theta * 180.0 / np.pi,
# 0.0,
# 360.0,
# (50, 250, 200),
# 1,
# )
# cv2.imshow("crop", cropped_image)
# cv2.imshow("frame", frame)
if imshow_enable:
cv2.imshow("ori_frame", ori_frame)
if cv2.waitKey(1) & 0xFF == ord("q"):
pass
# cv_end_time = timeit.default_timer()
# self.timedict["ransac"].append(cv_end_time - ransac_start_time)
# self.timedict["total_cv"].append(cv_end_time - cv_start_time)
try:
return int(cx), int(cy), thresh, frame, gray_frame
return int(cx), int(cy), th_frame, frame, gray_frame
except:
return int(center_x), int(center_y), thresh, frame, gray_frame
return int(center_x), int(center_y), th_frame, frame, gray_frame