Mirror of https://github.com/EyeTrackVR/EyeTrackVR.git, synced 2025-11-04 14:39:42 +08:00
fix: AHSF dual eye jitter
commit 9798fb13a6
parent a32a08e89b
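Scope of the change, as read from the diff below: the module-level AHSF (Adaptive Haar Surround Feature) helpers become methods of a new AHSF class, and EyeProcessor keeps its own lazily created instance in self.er_ahsf, so the two eye processors no longer share module-level detector state. A minimal usage sketch, assuming only the class and method names visible in this diff; left_frame_gray and right_frame_gray are stand-in names for each eye's grayscale frame:

# Sketch, not part of the commit: one detector per eye, no shared state between them.
left_detector = AHSF(left_frame_gray)    # video_src argument, as EyeProcessor passes a frame below
right_detector = AHSF(right_frame_gray)

# External_Run_AHSF returns (annotated frame, clean resized frame, x_center, y_center, width/height difference)
frame, clean, x_center, y_center, radius = left_detector.External_Run_AHSF(left_frame_gray)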
@@ -43,176 +43,48 @@ import numpy as np
 # from line_profiler_pycharm import profile

-# memo: Old Name: CPRD
-# memo: New Name: AHSF(Adaptive Haar Surround Feature)
-
-this_file_basename = os.path.basename(__file__)
-this_file_name = this_file_basename.replace(".py", "")
-alg_ver = "PallasNekoV3"  # memo: Created by PallasNeko on 230929
-
-##############################
-save_logfile = False  # This setting is disabled when imshow_enable or save_img or save_video is true
-imshow_enable = False
-save_video = False
-
-VideoCapture_SRC = "/Users/prohurtz/Desktop/t3c.mp4"  # "demo2.mp4"
-input_is_webcam = False
-benchmark_flag = True if not input_is_webcam and not imshow_enable and not save_video else False
-loop_num = 1 if imshow_enable or save_video else 10
-output_video_path = f"./{this_file_name}.mp4"
-logfilename = f"./{this_file_name}.log"
-print_enable = False  # I don't recommend changing to True.
-##############################
-
-# cache param
-lru_maxsize_vvs = 16
-lru_maxsize_vs = 64
-lru_maxsize_s = 128
-
-logger = getLogger(__name__)
-logger.setLevel(INFO)
-formatter = Formatter("%(message)s")
-handler = StreamHandler()
-handler.setLevel(INFO)
-handler.setFormatter(formatter)
-logger.addHandler(handler)
-if save_logfile:
-    handler = FileHandler(logfilename, encoding="utf8", mode="w")
-    handler.setLevel(INFO)
-    handler.setFormatter(formatter)
-    logger.addHandler(handler)
-else:
-    save_logfile = False
-video_wr = cv2.VideoWriter if save_video else None
-
-
-# Utils
-def TimeitWrapper(*args, **kwargs):
-    """
-    This decorator @TimeitWrapper() prints the function name and execution time in seconds.
-    :param args:
-    :param kwargs:
-    :return:
-    """
-
-    def decorator(function):
-        @functools.wraps(function)
-        def wrapper(*args, **kwargs):
-            start = timeit.default_timer()
-            results = function(*args, **kwargs)
-            end = timeit.default_timer()
-            print("{} execution time: {:.10f} s".format(function.__name__, end - start))
-            return results
-
-        return wrapper
-
-    return decorator
-
-
-class TimeitResult(object):
-    """
-    from https://github.com/ipython/ipython/blob/339c0d510a1f3cb2158dd8c6e7f4ac89aa4c89d8/IPython/core/magics/execution.py#L55
-
-    Object returned by the timeit magic with info about the run.
-    Contains the following attributes :
-    loops: (int) number of loops done per measurement
-    repeat: (int) number of times the measurement has been repeated
-    best: (float) best execution time / number
-    all_runs: (list of float) execution time of each run (in s)
-    """
-
-    def __init__(self, loops, repeat, best, worst, all_runs, precision):
-        self.loops = loops
-        self.repeat = repeat
-        self.best = best
-        self.worst = worst
-        self.all_runs = all_runs
-        self._precision = precision
-        self.timings = [dt / self.loops for dt in all_runs]
-
-    @property
-    def average(self):
-        return math.fsum(self.timings) / len(self.timings)
-
-    @property
-    def stdev(self):
-        mean = self.average
-        return (math.fsum([(x - mean) ** 2 for x in self.timings]) / len(self.timings)) ** 0.5
-
-    def __str__(self):
-        pm = "+-"
-        if hasattr(sys.stdout, "encoding") and sys.stdout.encoding:
-            try:
-                "\xb1".encode(sys.stdout.encoding)
-                pm = "\xb1"
-            except:
-                pass
-        return "min:{best} max:{worst} mean:{mean} {pm} {std} per loop (mean {pm} std. dev. of {runs} run{run_plural}, {loops:,} loop{loop_plural} each)".format(
-            pm=pm,
-            runs=self.repeat,
-            loops=self.loops,
-            loop_plural="" if self.loops == 1 else "s",
-            run_plural="" if self.repeat == 1 else "s",
-            mean=format_time(self.average, self._precision),
-            std=format_time(self.stdev, self._precision),
-            best=format_time(self.best, self._precision),
-            worst=format_time(self.worst, self._precision),
-        )
-
-    def _repr_pretty_(self, p, cycle):
-        unic = self.__str__()
-        p.text("<TimeitResult : " + unic + ">")
-
-
-class FPSResult(object):
-    """
-    base https://github.com/ipython/ipython/blob/339c0d510a1f3cb2158dd8c6e7f4ac89aa4c89d8/IPython/core/magics/execution.py#L55
-    """
-
-    def __init__(self, loops, repeat, best, worst, all_runs, precision):
-        self.loops = loops
-        self.repeat = repeat
-        self.best = 1 / best
-        self.worst = 1 / worst
-        self.all_runs = all_runs
-        self._precision = precision
-        self.fps = [1 / dt for dt in all_runs]
-        self.unit = "fps"
-
-    @property
-    def average(self):
-        return math.fsum(self.fps) / len(self.fps)
-
-    @property
-    def stdev(self):
-        mean = self.average
-        return (math.fsum([(x - mean) ** 2 for x in self.fps]) / len(self.fps)) ** 0.5
-
-    def __str__(self):
-        pm = "+-"
-        if hasattr(sys.stdout, "encoding") and sys.stdout.encoding:
-            try:
-                "\xb1".encode(sys.stdout.encoding)
-                pm = "\xb1"
-            except:
-                pass
-        return "min:{best} max:{worst} mean:{mean} {pm} {std} per loop (mean {pm} std. dev. of {runs} run{run_plural}, {loops:,} loop{loop_plural} each)".format(
-            pm=pm,
-            runs=self.repeat,
-            loops=self.loops,
-            loop_plural="" if self.loops == 1 else "s",
-            run_plural="" if self.repeat == 1 else "s",
-            mean="%.*g%s" % (self._precision, self.average, self.unit),
-            std="%.*g%s" % (self._precision, self.stdev, self.unit),
-            best="%.*g%s" % (self._precision, self.best, self.unit),
-            worst="%.*g%s" % (self._precision, self.worst, self.unit),
-        )
-
-    def _repr_pretty_(self, p, cycle):
-        unic = self.__str__()
-        p.text("<FPSResult : " + unic + ">")
-
-
-def format_time(timespan, precision=3):
-    """
+class AHSF:
+    def __init__(self, video_src, save_logfile=False, imshow_enable=False, save_video=False):
+        self.this_file_basename = os.path.basename(__file__)
+        self.this_file_name = self.this_file_basename.replace(".py", "")
+        self.alg_ver = "PallasNekoV3"
+
+        self.save_logfile = save_logfile
+        self.imshow_enable = imshow_enable
+        self.save_video = save_video
+
+        self.VideoCapture_SRC = video_src
+        self.input_is_webcam = False
+        self.benchmark_flag = True if not self.input_is_webcam and not self.imshow_enable and not self.save_video else False
+        self.loop_num = 1 if self.imshow_enable or self.save_video else 10
+        self.output_video_path = f"./{self.this_file_name}.mp4"
+        self.logfilename = f"./{self.this_file_name}.log"
+        self.print_enable = False
+
+        self.lru_maxsize_vvs = 16
+        self.lru_maxsize_vs = 64
+        self.lru_maxsize_s = 128
+
+        self.logger = getLogger(__name__)
+        self.logger.setLevel(INFO)
+        formatter = Formatter("%(message)s")
+        handler = StreamHandler()
+        handler.setLevel(INFO)
+        handler.setFormatter(formatter)
+        self.logger.addHandler(handler)
+        if self.save_logfile:
+            handler = FileHandler(self.logfilename, encoding="utf8", mode="w")
+            handler.setLevel(INFO)
+            handler.setFormatter(formatter)
+            self.logger.addHandler(handler)
+        else:
+            self.save_logfile = False
+        self.video_wr = cv2.VideoWriter if self.save_video else None
+
+    def format_time(self, timespan, precision=3):
+        """
     https://github.com/ipython/ipython/blob/339c0d510a1f3cb2158dd8c6e7f4ac89aa4c89d8/IPython/core/magics/execution.py#L1473
     Formats the timespan in a human readable form
@@ -254,7 +126,7 @@ def format_time(timespan, precision=3):
     return "%.*g %s" % (precision, timespan * scaling[order], units[order])


-def filter_light(img_gray, img_blur, tau):
+def filter_light(self, img_gray, img_blur, tau):
     for i in range(img_gray.shape[1]):
         for j in range(img_gray.shape[0]):
             if img_gray[j, i] > tau:
@@ -264,7 +136,7 @@ def filter_light(img_gray, img_blur, tau):
     return img_blur


-def pupil_detector_haar(img_gray, params):
+def pupil_detector_haar(self, img_gray, params):
     frame_num = 0
     img_down = cv2.resize(
         img_gray,
@@ -277,7 +149,7 @@ def pupil_detector_haar(img_gray, params):

     if params["use_init_rect"]:
         tau = max(params["mu_outer"], params["mu_inner"] + 30)
-        filter_light(img_down, img_down, tau)
+        self.filter_light(img_down, img_down, tau)

     # Coarse Detection
     (
@@ -286,7 +158,7 @@ def pupil_detector_haar(img_gray, params):
         max_response_coarse,
         mu_inner,
         mu_outer,
-    ) = coarse_detection(img_down, params)
+    ) = self.coarse_detection(img_down, params)
     print(
         "Coarse Detection: ",
         pupil_rect_coarse,
@@ -306,14 +178,14 @@ def pupil_detector_haar(img_gray, params):

     # Fine Detection
     if mu_outer - mu_inner >= 5:
-        pupil_rect_fine = fine_detection(img_down, pupil_rect_coarse)
+        pupil_rect_fine = self.fine_detection(img_down, pupil_rect_coarse)
     else:
         pupil_rect_fine = pupil_rect_coarse

     # Postprocessing
-    pupil_rect_coarse = rect_scale(pupil_rect_coarse, params["ratio_downsample"], False)
-    outer_rect_coarse = rect_scale(outer_rect_coarse, params["ratio_downsample"], False)
-    pupil_rect_fine = rect_scale(pupil_rect_fine, params["ratio_downsample"], False)
+    pupil_rect_coarse = self.rect_scale(pupil_rect_coarse, params["ratio_downsample"], False)
+    outer_rect_coarse = self.rect_scale(outer_rect_coarse, params["ratio_downsample"], False)
+    pupil_rect_fine = self.rect_scale(pupil_rect_fine, params["ratio_downsample"], False)

     center_coarse = (
         pupil_rect_coarse[0] + pupil_rect_coarse[2] // 2,
@@ -333,8 +205,8 @@ def pupil_detector_haar(img_gray, params):
     )


-@lru_cache(maxsize=lru_maxsize_vvs)
-def get_empty_array(frame_shape, width_min, width_max, wh_step, xy_step, roi, ratio_outer):
+# @lru_cache(maxsize=self.lru_maxsize_vvs)
+def get_empty_array(self, frame_shape, width_min, width_max, wh_step, xy_step, roi, ratio_outer):
     frame_int_dtype = np.intc
     np_index_dtype = (
         np.intc
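A note on the hunk above: the lru_cache decorator on get_empty_array is left commented out; as rewritten (maxsize=self.lru_maxsize_vvs) it could not evaluate at class-definition time, since no instance exists there. If per-instance caching were still wanted, one possible pattern, a sketch only and not part of this commit, is to wrap the method inside __init__:

# Sketch only, assuming an AHSF class shaped like the one introduced in this commit.
import functools

class AHSF:
    def __init__(self, video_src):
        self.lru_maxsize_vvs = 16
        # Per-instance cache, so cached arrays are never shared between the two eye detectors.
        self.get_empty_array = functools.lru_cache(maxsize=self.lru_maxsize_vvs)(self._get_empty_array)

    def _get_empty_array(self, frame_shape, width_min, width_max, wh_step, xy_step, roi, ratio_outer):
        ...  # allocate the work buffers exactly as the real method does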
@@ -450,8 +322,8 @@ def get_empty_array(frame_shape, width_min, width_max, wh_step, xy_step, roi, ratio_outer):
     )


 # @profile
-def coarse_detection(img_gray, params):
+def coarse_detection(self, img_gray, params):
     ratio_outer = params["ratio_outer"]
     kf = params["kf"]
     width_min = params["width_min"]
@@ -475,8 +347,8 @@ def coarse_detection(img_gray, params):
     outer_rect_coarse = (5, 5, 5, 5)

     if init_rect_flag:
-        init_rect_down = rect_scale(init_rect, params["ratio_downsample"], False)
-        init_rect_down = intersect_rect(init_rect_down, imgboundary)
+        init_rect_down = self.rect_scale(init_rect, params["ratio_downsample"], False)
+        init_rect_down = self.intersect_rect(init_rect_down, imgboundary)
         img_blur = img_gray[
             init_rect_down[1] : init_rect_down[1] + init_rect_down[3],
             init_rect_down[0] : init_rect_down[0] + init_rect_down[2],
@@ -500,7 +372,7 @@ def coarse_detection(img_gray, params):
         wh_out_arr,
         mu_outer_rect,
         mu_outer_rect2,
-    ) = get_empty_array(img_blur.shape, width_min, width_max, wh_step, xy_step, roi, ratio_outer)
+    ) = self.get_empty_array(img_blur.shape, width_min, width_max, wh_step, xy_step, roi, ratio_outer)
     cv2.integral(
         img_blur, sum=frame_int, sdepth=cv2.CV_32S
     )  # memo: It becomes slower when using float64, probably because the increase in bits from 32 to 64 causes the arrays to be larger.
@@ -587,16 +459,16 @@ def coarse_detection(img_gray, params):
     return pupil_rect_coarse, outer_rect_coarse, max_response_coarse, mu_inner, mu_outer


-def fine_detection(img_gray, pupil_rect_coarse):
+def fine_detection(self, img_gray, pupil_rect_coarse):
     boundary = (0, 0, img_gray.shape[1], img_gray.shape[0])
     valid_ratio = 1.2
-    valid_rect = intersect_rect(rect_scale(pupil_rect_coarse, valid_ratio), boundary)
+    valid_rect = self.intersect_rect(self.rect_scale(pupil_rect_coarse, valid_ratio), boundary)
     img_pupil = img_gray[
         valid_rect[1] : valid_rect[1] + valid_rect[3],
         valid_rect[0] : valid_rect[0] + valid_rect[2],
     ]
     img_pupil_blur = cv2.GaussianBlur(img_pupil, (5, 5), 0, 0)
-    edges_filter = detect_edges(img_pupil_blur)
+    edges_filter = self.detect_edges(img_pupil_blur)
     # fit ellipse to edges
     contours, hierarchy = cv2.findContours(edges_filter, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
     # sort contours by area
@@ -622,8 +494,8 @@ def fine_detection(img_gray, pupil_rect_coarse):
             pupil_rect_fine[2],
             pupil_rect_fine[3],
         )
-        pupil_rect_fine = intersect_rect(pupil_rect_fine, boundary)
-        pupil_rect_fine = rect_scale(pupil_rect_fine, 1 / valid_ratio)
+        pupil_rect_fine = self.intersect_rect(pupil_rect_fine, boundary)
+        pupil_rect_fine = self.rect_scale(pupil_rect_fine, 1 / valid_ratio)
     else:
         pupil_rect_fine = pupil_rect_coarse
     center_fitting = (
@@ -638,7 +510,7 @@ def fine_detection(img_gray, pupil_rect_coarse):
         pass


-def detect_edges(img_pupil_blur):
+def detect_edges(self, img_pupil_blur):
     tau1 = 1 - 20.0 / img_pupil_blur.shape[1]
     edges = cv2.Canny(img_pupil_blur, 64, 128)

@@ -654,7 +526,7 @@ def detect_edges(img_pupil_blur):
     return edges_filter


-def fit_pupil_ellipse_swirski(img_pupil, edges_filter):
+def fit_pupil_ellipse_swirski(self, img_pupil, edges_filter):
     contours, hierarchy = cv2.findContours(edges_filter, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
     max_contour_area = 0
     max_contour = None
@@ -672,7 +544,7 @@ def fit_pupil_ellipse_swirski(img_pupil, edges_filter):
     return ellipse


-def rect_scale(rect, scale, round_up=True):
+def rect_scale(self, rect, scale, round_up=True):
     x, y, width, height = rect
     new_width = int(width * scale)
     new_height = int(height * scale)
@@ -684,7 +556,7 @@ def rect_scale(rect, scale, round_up=True):
     return new_x, new_y, new_width, new_height


-def intersect_rect(rect1, rect2):
+def intersect_rect(self, rect1, rect2):
     x1, y1, w1, h1 = rect1
     x2, y2, w2, h2 = rect2
     x = max(x1, x2)
@@ -694,11 +566,11 @@ def intersect_rect(rect1, rect2):
     return x, y, w, h


-def rect_suppression(rectlist, response, rectlist_out, response_out):
+def rect_suppression(self, rectlist, response, rectlist_out, response_out):
     for i in range(len(rectlist)):
         flag_intersect = False
         for j in range(len(rectlist_out)):
-            tmp = intersect_rect(rectlist[i], rectlist_out[j])
+            tmp = self.intersect_rect(rectlist[i], rectlist_out[j])
             if tmp[2] > 0 and tmp[3] > 0:
                 flag_intersect = True
                 if response[i] > response_out[j]:
@@ -712,7 +584,7 @@ def rect_suppression(rectlist, response, rectlist_out, response_out):
     return rectlist_out, response_out


-def put_number(img_bgr, number, position, color):
+def put_number(self, img_bgr, number, position, color):
     cv2.putText(
         img_bgr,
         str(number),
@@ -725,87 +597,112 @@ def put_number(img_bgr, number, position, color):
     )


-def External_Run_AHSF(frame_gray):
+def External_Run_AHSF(self, frame_gray):
     average_color = np.mean(frame_gray)

-    # Get the dimensions of the rotated image
     height, width = frame_gray.shape

-    # Determine the size of the square background (choose the larger dimension)
     max_dimension = max(height, width)

-    # Create a square background with the average color
     square_background = np.full((max_dimension, max_dimension), average_color, dtype=np.uint8)

-    # Calculate the position to paste the rotated image onto the square background
     x_offset = (max_dimension - width) // 2
     y_offset = (max_dimension - height) // 2

-    # Paste the rotated image onto the square background
     square_background[y_offset : y_offset + height, x_offset : x_offset + width] = frame_gray

     frame_gray = cv2.resize(square_background, (100, 100))

     frame_clear_resize = frame_gray.copy()

     params = {
         "ratio_downsample": 0.5,
         "use_init_rect": False,
-        "mu_outer": 200,  # aprroximatly how much pupil should be in the outer rect
-        "mu_inner": 50,  # aprroximatly how much pupil should be in the inner rect
-        "ratio_outer": 0.9,  # rectangular ratio. 1 means square (LIKE REGULAR HSF)
-        "kf": 1,  # noise filter. May lose tracking if too high (or even never start)
-        "width_min": 16,  # Minimum width of the pupil
-        "width_max": 50,  # Maximum width of the pupil
-        "wh_step": 5,  # Pupil width and height step search size
-        "xy_step": 10,  # Kernel movement step search size
+        "mu_outer": 200,
+        "mu_inner": 50,
+        "ratio_outer": 0.9,
+        "kf": 1,
+        "width_min": 16,
+        "width_max": 50,
+        "wh_step": 5,
+        "xy_step": 10,
         "roi": (0, 0, frame_gray.shape[1], frame_gray.shape[0]),
         "init_rect_flag": False,
         "init_rect": (0, 0, frame_gray.shape[1], frame_gray.shape[0]),
     }
     try:
-        (
-            pupil_rect_coarse,
-            outer_rect_coarse,
-            max_response_coarse,
-            mu_inner,
-            mu_outer,
-        ) = coarse_detection(frame_gray, params)
-        ellipse_rect, center_fitting = fine_detection(frame_gray, pupil_rect_coarse)
+        pupil_rect_coarse, outer_rect_coarse, max_response_coarse, mu_inner, mu_outer = self.coarse_detection(frame_gray, params)
+        ellipse_rect, center_fitting = self.fine_detection(frame_gray, pupil_rect_coarse)
     except TypeError:
-        # print("[WARN] AHSF NoneType Error")
         return frame_gray, frame_gray, 0, 0, 0

-    # show
     x_center = outer_rect_coarse[0] + outer_rect_coarse[2] / 2
     y_center = outer_rect_coarse[1] + outer_rect_coarse[3] / 2
     x, y, width, height = outer_rect_coarse

     cv2.circle(frame_gray, (int(x_center), int(y_center)), 2, (255, 255, 255), -1)
     thickness = 1
-    cv2.rectangle(
-        frame_gray,
-        (pupil_rect_coarse[0], pupil_rect_coarse[1]),
-        (pupil_rect_coarse[0] + pupil_rect_coarse[2], pupil_rect_coarse[1] + pupil_rect_coarse[3]),
-        (255, 255, 255),
-        thickness,
-    )
-    cv2.rectangle(
-        frame_gray,
-        (outer_rect_coarse[0], outer_rect_coarse[1]),
-        (outer_rect_coarse[0] + outer_rect_coarse[2], outer_rect_coarse[1] + outer_rect_coarse[3]),
-        (255, 255, 255),
-        thickness,
-    )
-
-    # center = (pupil_rect_coarse[0] + pupil_rect_coarse[2] // 2, pupil_rect_coarse[1] + pupil_rect_coarse[3] // 2)
-    # cv2.drawMarker(frame_gray, center, (255, 255, 255), cv2.MARKER_CROSS, 20, thickness)
-
-    # Calculate the major and minor diameters
+    cv2.rectangle(frame_gray, (pupil_rect_coarse[0], pupil_rect_coarse[1]), (pupil_rect_coarse[0] + pupil_rect_coarse[2], pupil_rect_coarse[1] + pupil_rect_coarse[3]), (255, 255, 255), thickness)
+    cv2.rectangle(frame_gray, (outer_rect_coarse[0], outer_rect_coarse[1]), (outer_rect_coarse[0] + outer_rect_coarse[2], outer_rect_coarse[1] + outer_rect_coarse[3]), (255, 255, 255), thickness)
+
     major_diameter = math.sqrt(width**2 + height**2)
     minor_diameter = min(width, height)
     average_diameter = (major_diameter + minor_diameter) / 2

     return frame_gray, frame_clear_resize, x_center, y_center, abs(width - height)
+
+
+class FPSResult(object):
+    """
+    base https://github.com/ipython/ipython/blob/339c0d510a1f3cb2158dd8c6e7f4ac89aa4c89d8/IPython/core/magics/execution.py#L55
+    """
+
+    def __init__(self, loops, repeat, best, worst, all_runs, precision):
+        self.loops = loops
+        self.repeat = repeat
+        self.best = 1 / best
+        self.worst = 1 / worst
+        self.all_runs = all_runs
+        self._precision = precision
+        self.fps = [1 / dt for dt in all_runs]
+        self.unit = "fps"
+
+    @property
+    def average(self):
+        return math.fsum(self.fps) / len(self.fps)
+
+    @property
+    def stdev(self):
+        mean = self.average
+        return (math.fsum([(x - mean) ** 2 for x in self.fps]) / len(self.fps)) ** 0.5
+
+    def __str__(self):
+        pm = "+-"
+        if hasattr(sys.stdout, "encoding") and sys.stdout.encoding:
+            try:
+                "\xb1".encode(sys.stdout.encoding)
+                pm = "\xb1"
+            except:
+                pass
+        return "min:{best} max:{worst} mean:{mean} {pm} {std} per loop (mean {pm} std. dev. of {runs} run{run_plural}, {loops:,} loop{loop_plural} each)".format(
+            pm=pm,
+            runs=self.repeat,
+            loops=self.loops,
+            loop_plural="" if self.loops == 1 else "s",
+            run_plural="" if self.repeat == 1 else "s",
+            mean="%.*g%s" % (self._precision, self.average, self.unit),
+            std="%.*g%s" % (self._precision, self.stdev, self.unit),
+            best="%.*g%s" % (self._precision, self.best, self.unit),
+            worst="%.*g%s" % (self._precision, self.worst, self.unit),
+        )
+
+    def _repr_pretty_(self, p, cycle):
+        unic = self.__str__()
+        p.text("<FPSResult : " + unic + ">")
@@ -168,6 +168,8 @@ class EyeProcessor:
         self.pupil_height = 0.0
         self.avg_velocity = 0.0
         self.angle = 621
+        self.er_ahsf = None
+

         try:
             min_cutoff = float(self.settings.gui_min_cutoff)  # 0.0004
@@ -428,7 +430,7 @@ class EyeProcessor:
                 self.rawx,
                 self.rawy,
                 self.radius,
-            ) = External_Run_AHSF(self.current_image_gray)
+            ) = self.er_ahsf.External_Run_AHSF(self.current_image_gray)
             self.current_image_gray_clean = resize_img.copy()

             self.thresh = resize_img
@@ -554,7 +556,7 @@ class EyeProcessor:
                 self.rawx,
                 self.rawy,
                 self.radius,
-            ) = External_Run_AHSF(self.current_image_gray)
+            ) = self.er_ahsf.External_Run_AHSF(self, self.current_image_gray)
             self.thresh = self.current_image_gray
             self.out_x, self.out_y, self.avg_velocity = cal.cal_osc(self, self.rawx, self.rawy, self.angle)
             self.current_algorithm = EyeInfoOrigin.HSF
@@ -633,9 +635,13 @@ class EyeProcessor:

         # set algo priorities
         if self.settings.gui_AHSFRAC:
+            if self.er_ahsf is None:
+                self.er_ahsf = AHSF(self.current_image_gray)
             algolist[self.settings.gui_AHSFRACP] = self.AHSFRACM

         if self.settings.gui_AHSF:
+            if self.er_ahsf is None:
+                self.er_ahsf = AHSF(self.current_image_gray)
             algolist[self.settings.gui_AHSFP] = self.AHSFM

         if self.settings.gui_HSF:
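The net effect of the EyeProcessor hunks above: each processor builds its own AHSF instance the first time an AHSF-based algorithm is selected, instead of calling shared module-level functions. A small sketch of the resulting state, with left_eye and right_eye as illustrative stand-ins for the two EyeProcessor instances:

# Sketch only (illustrative names): two processors end up with two independent detectors.
left_eye.er_ahsf = AHSF(left_eye.current_image_gray)     # created on first use, as in the hunk above
right_eye.er_ahsf = AHSF(right_eye.current_image_gray)
assert left_eye.er_ahsf is not right_eye.er_ahsf          # no shared detector state between the eyes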