Delete RANSAC directory

Prohurtz 2022-07-03 12:20:36 -07:00 committed by GitHub
parent 2f7f67f1d4
commit 2ab51f4600
4 changed files with 0 additions and 985 deletions


@@ -1,188 +0,0 @@
import kivy
from multiprocessing import Process,Queue,Pipe
kivy.require("1.9.1")
from kivy.app import App
from kivy.uix.gridlayout import GridLayout
from kivy.uix.slider import Slider
from kivy.uix.label import Label
from kivy.uix.floatlayout import FloatLayout
from kivy.properties import NumericProperty
from kivy.uix.scatter import Scatter
from kivy.uix.textinput import TextInput
from kivy.uix.boxlayout import BoxLayout
from kivy.core.window import Window
import time
###############################################################################
Window.size = (700, 200)
class WidgetContainer(GridLayout):
    def __init__(self, **kwargs):
        super(WidgetContainer, self).__init__(**kwargs)
        ############################################################################### right
        self.cols = 3
        self.xcc = Slider(min = 1, max = 240,
                          value_track = True,
                          value_track_color =[1, 1, 1, 1])
        self.add_widget(Label(text ='Search Size X R'))
        self.add_widget(self.xcc)
        self.xValue = Label(text ='1')
        self.add_widget(self.xValue)
        self.xcc.bind(value = self.on_value)
        ############################################################################### bottom
        self.Y = Slider(min = 1, max = 240,
                        value_track = True,
                        value_track_color =[1, 1, 1, 1])
        self.add_widget(Label(text ='Search Size Y R'))
        self.add_widget(self.Y)
        self.YV = Label(text ='1')
        self.add_widget(self.YV)
        self.Y.bind(value = self.on_value1)
        ############################################################################### left
        self.xlc = Slider(min = 1, max = 240,
                          value_track = True,
                          value_track_color =[1, 1, 1, 1])
        self.add_widget(Label(text ='Search Size X L'))
        self.add_widget(self.xlc)
        self.xlValue = Label(text ='1')
        self.add_widget(self.xlValue)
        self.xlc.bind(value = self.on_value2)
        ############################################################################### top
        self.ylc = Slider(min = 1, max = 240,
                          value_track = True,
                          value_track_color =[1, 1, 1, 1])
        self.add_widget(Label(text ='Search Size Y L'))
        self.add_widget(self.ylc)
        self.ylValue = Label(text ='1')
        self.add_widget(self.ylValue)
        self.ylc.bind(value = self.on_value3)
        ############################################################################### detection
        # self.deth = Slider(min = 1, max = 40,
        #                    value_track = True,
        #                    value_track_color =[1, 1, 1, 1])
        # self.add_widget(Label(text ='Detection thresh DEFAULT:18'))
        # self.add_widget(self.deth)
        # self.dethv= Label(text ='1')
        # self.add_widget(self.dethv)
        # self.deth.bind(value = self.on_value4)
        ############################################################################### camera input
        self.rota = Slider(min = 0, max = 360,
                           value_track = True,
                           value_track_color =[1, 1, 1, 1])
        self.add_widget(Label(text ='Rotation'))
        self.add_widget(self.rota)
        self.rotav= Label(text ='Select')
        self.add_widget(self.rotav)
        self.rota.bind(value = self.on_value5)
        ###############################################################################
        # self.sav = Slider(min = 0, max = 360,
        #                   value_track = True,
        #                   value_track_color =[1, 1, 1, 1])
        # self.add_widget(Label(text ='Rotation'))
        # self.add_widget(self.sav)
        # self.sav= Label(text ='Select')
        # self.add_widget(self.sav)
        # self.rotav.bind(value = self.on_value5)

    def on_value(self, instance, brightness):
        self.xValue.text = "% d"% brightness
        confg.fx = self.xValue.text
        configsave()
        time.sleep(0.1)

    def on_value1(self, instance, brightness,):
        self.YV.text = "% d"% brightness
        confg.fy = self.YV.text
        configsave()
        time.sleep(0.1)

    def on_value2(self, instance, brightness):
        self.xlValue.text = "% d"% brightness
        confg.fxl = self.xlValue.text
        configsave()
        time.sleep(0.1)

    def on_value3(self, instance, brightness,):
        self.ylValue.text = "% d"% brightness
        confg.fyl = self.ylValue.text
        configsave()
        time.sleep(0.1)

    # def on_value4(self, instance, brightness,):
    #     self.dethv.text = "% d"% brightness
    #     confg.fxl = self.YV.text

    def on_value5(self, instance, brightness,):
        self.rotav.text = "% d"% brightness
        confg.rv = self.rotav.text
        configsave()
        time.sleep(0.1)


class EyetrackGUI(App):
    def build(self):
        widgetContainer = WidgetContainer()
        print()
        return widgetContainer


def confg():
    confg.fx = 128
    confg.fy = 128
    confg.fxl = 1
    confg.fyl = 1
    confg.rv = 0


def configsave():
    with open('config.txt', 'w+') as cw:
        cw.write(str(confg.fx))
        cw.write('\n')
        cw.write(str(confg.fy))
        cw.write('\n')
        cw.write(str(confg.fxl))
        cw.write('\n')
        cw.write(str(confg.fyl))
        cw.write('\n')
        cw.write(str(confg.rv))
        cw.write('\n')
        cw.close()


confg()
rootGUI = EyetrackGUI()
rootGUI.run()
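
The sliders above write five values to config.txt, one per line, in the order fx, fy, fxl, fyl, rv; the calibration and tracking scripts below read them back by line position. A minimal sketch of reading that file into named values, assuming the GUI has been run at least once (the helper name load_config is illustrative, not part of the original code):

def load_config(path="config.txt"):
    # config.txt holds one value per line: fx, fy, fxl, fyl, rv (see configsave above)
    with open(path) as f:
        fx, fy, fxl, fyl, rv = (float(line.strip()) for line in f.readlines()[:5])
    return {"fx": fx, "fy": fy, "fxl": fxl, "fyl": fyl, "rv": rv}

print(load_config())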


@@ -1,489 +0,0 @@
from tkinter import E
import cv2
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from time import time
import pyttsx3
engine = pyttsx3.init()
def vc():
vc.xmax = 1
vc.xmin = 6969
vc.ymax = 1
vc.ymin = 6969
vc.cfc = 50
vc.cc = 1
vc.cu = 0
vc.cd = 0
vc.cl = 0
vc.cr = 0
vc.fc = 0
vc()
def savecalibvalues(calibcenterx, calibcentery, calibrightx, calibleftx, calibupy, calibdowny):
with open('eyeconfig.cfg', 'w+') as cw:
cw.write(str(calibcenterx))
cw.write('\n')
cw.write(str(calibcentery))
cw.write('\n')
cw.write(str(calibrightx))
cw.write('\n')
cw.write(str(calibleftx))
cw.write('\n')
cw.write(str(calibupy))
cw.write('\n')
cw.write(str(calibdowny))
cw.close()
with open("config.txt") as calibratefl:
lines = calibratefl.readlines()
vx = float(lines[0].strip())
vy = float(lines[1].strip())
vxl = float(lines[2].strip())
vyl = float(lines[3].strip())
rv = float(lines[4].strip())
calibratefl.close()
def fit_rotated_ellipse_ransac(
    data, iter=90, sample_num=10, offset=80.0
):  # before changing these values, please read up on the RANSAC algorithm
    # However, if you do change them, know that higher iteration counts will make processing each frame slower
    count_max = 0
    effective_sample = None
    for i in range(iter):
        sample = np.random.choice(len(data), sample_num, replace=False)
        xs = data[sample][:, 0].reshape(-1, 1)
        ys = data[sample][:, 1].reshape(-1, 1)
        J = np.mat(
            np.hstack((xs * ys, ys**2, xs, ys, np.ones_like(xs, dtype=float)))
        )
        Y = np.mat(-1 * xs**2)
        P = (J.T * J).I * J.T * Y
        # fit a*x**2 + b*x*y + c*y**2 + d*x + e*y + f = 0
        a = 1.0
        b = P[0, 0]
        c = P[1, 0]
        d = P[2, 0]
        e = P[3, 0]
        f = P[4, 0]
        ellipse_model = (
            lambda x, y: a * x**2 + b * x * y + c * y**2 + d * x + e * y + f
        )
        # threshold
        ran_sample = np.array(
            [[x, y] for (x, y) in data if np.abs(ellipse_model(x, y)) < offset]
        )
        if len(ran_sample) > count_max:
            count_max = len(ran_sample)
            effective_sample = ran_sample
    return fit_rotated_ellipse(effective_sample)
def fit_rotated_ellipse(data):
    xs = data[:, 0].reshape(-1, 1)
    ys = data[:, 1].reshape(-1, 1)
    J = np.mat(np.hstack((xs * ys, ys**2, xs, ys, np.ones_like(xs, dtype=float))))
    Y = np.mat(-1 * xs**2)
    P = (J.T * J).I * J.T * Y
    a = 1.0
    b = P[0, 0]
    c = P[1, 0]
    d = P[2, 0]
    e = P[3, 0]
    f = P[4, 0]
    theta = 0.5 * np.arctan(b / (a - c))
    cx = (2 * c * d - b * e) / (b**2 - 4 * a * c)
    cy = (2 * a * e - b * d) / (b**2 - 4 * a * c)
    cu = a * cx**2 + b * cx * cy + c * cy**2 - f
    w = np.sqrt(
        cu
        / (
            a * np.cos(theta) ** 2
            + b * np.cos(theta) * np.sin(theta)
            + c * np.sin(theta) ** 2
        )
    )
    h = np.sqrt(
        cu
        / (
            a * np.sin(theta) ** 2
            - b * np.cos(theta) * np.sin(theta)
            + c * np.cos(theta) ** 2
        )
    )
    ellipse_model = lambda x, y: a * x**2 + b * x * y + c * y**2 + d * x + e * y + f
    error_sum = np.sum([ellipse_model(x, y) for x, y in data])
    print("fitting error = %.3f" % (error_sum))
    return (cx, cy, w, h, theta)
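
A quick way to sanity-check the two fitters above is to run them on synthetic points sampled from a known ellipse. The sketch below assumes numpy is imported and both functions are defined as above; the ground-truth values are arbitrary examples, and the recovered width, height, and angle may come back in an equivalent but differently oriented parameterisation:

# Sample noisy points from a known rotated ellipse and fit them with the
# RANSAC routine above; the recovered centre should land near (120, 90).
rng = np.random.default_rng(0)
t = np.linspace(0, 2 * np.pi, 200)
cx0, cy0, w0, h0, th0 = 120.0, 90.0, 40.0, 25.0, 0.3
x = cx0 + w0 * np.cos(t) * np.cos(th0) - h0 * np.sin(t) * np.sin(th0)
y = cy0 + w0 * np.cos(t) * np.sin(th0) + h0 * np.sin(t) * np.cos(th0)
pts = np.column_stack((x, y)) + rng.normal(scale=0.5, size=(t.size, 2))
print(fit_rotated_ellipse_ransac(pts))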
def increase_brightness(img, value):
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    h, s, v = cv2.split(hsv)
    lim = 255 - value
    v[v > lim] = 255
    v[v <= lim] += value
    final_hsv = cv2.merge((h, s, v))
    img = cv2.cvtColor(final_hsv, cv2.COLOR_HSV2BGR)
    return img
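
increase_brightness is defined here but not called elsewhere in this file; a minimal usage sketch on one captured frame, with the value 30 chosen purely as an example:

# Illustrative only: brighten a single frame from the stream by a fixed amount.
cap_demo = cv2.VideoCapture("http://192.168.0.202:81/stream")
ok, frame = cap_demo.read()
cap_demo.release()
if ok:
    cv2.imshow("brightened", increase_brightness(frame, 30))
    cv2.waitKey(0)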
#cap = cv2.VideoCapture("http://192.168.0.202:81/stream") # change this to the video you want to test
#if cap.isOpened() == False:
# print("Error opening video stream or file")
while True:
if vc.cc == 1:
engine.say("a saved calibration file was not found.")
engine.say("Calibration starting, 3. 2. 1. please look straight forward")
engine.runAndWait()
vc.cc = 2
if vc.cc == 2:
cap = cv2.VideoCapture("http://192.168.0.202:81/stream")
ret, img = cap.read()
img = img[int(vxl): int(float(vy)), int(vyl): int(float(vx))]
if ret == True:
newImage2 = img.copy()
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
image_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(
image_gray, 120, 255, cv2.THRESH_BINARY
) # this will need to be adjusted every time the hardware is changed (brightness of IR, camera position, etc.)
opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)
closing = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, kernel)
image = 255 - closing
contours, hierarchy = cv2.findContours(
image, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE
)
hull = []
for i in range(len(contours)):
hull.append(cv2.convexHull(contours[i], False))
try:
cv2.drawContours(img, contours, -1, (255, 0, 0), 1)
cnt = sorted(hull, key=cv2.contourArea)
maxcnt = cnt[-1]
ellipse = cv2.fitEllipse(maxcnt)
cx, cy, w, h, theta = fit_rotated_ellipse_ransac(maxcnt.reshape(-1, 2))
print(cx, cy)
cv2.circle(newImage2, (int(cx), int(cy)), 2, (0, 0, 255), -1)
cx1, cy1, w1, h1, theta1 = fit_rotated_ellipse(maxcnt.reshape(-1, 2))
cv2.ellipse(
newImage2,
(int(cx), int(cy)),
(int(w), int(h)),
theta * 180.0 / np.pi,
0.0,
360.0,
(50, 250, 200),
1,
)
except:
pass
cv2.imshow("Ransac", newImage2)
cv2.imshow("gray", image_gray)
cv2.imshow("thresh", thresh)
cap.release()
cv2.destroyAllWindows()
calibcenterx = cx
calibcentery = cy
print(cx, cy)
engine.say("center calibration complete, please look right")
engine.runAndWait()
vc.cr = 1
vc.cc = 3
if vc.cr == 1:
engine.say("Right calibration starting")
engine.runAndWait()
vc.cr = 2
if vc.cr == 2:
cap = cv2.VideoCapture("http://192.168.0.202:81/stream")
ret, img = cap.read()
img = img[int(vxl): int(float(vy)), int(vyl): int(float(vx))]
if ret == True:
newImage2 = img.copy()
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
image_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(
image_gray, 120, 255, cv2.THRESH_BINARY
) # this will need to be adjusted every time the hardware is changed (brightness of IR, camera position, etc.)
opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)
closing = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, kernel)
image = 255 - closing
contours, hierarchy = cv2.findContours(
image, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE
)
hull = []
for i in range(len(contours)):
hull.append(cv2.convexHull(contours[i], False))
try:
cv2.drawContours(img, contours, -1, (255, 0, 0), 1)
cnt = sorted(hull, key=cv2.contourArea)
maxcnt = cnt[-1]
ellipse = cv2.fitEllipse(maxcnt)
cx, cy, w, h, theta = fit_rotated_ellipse_ransac(maxcnt.reshape(-1, 2))
print(cx, cy)
cv2.circle(newImage2, (int(cx), int(cy)), 2, (0, 0, 255), -1)
cx1, cy1, w1, h1, theta1 = fit_rotated_ellipse(maxcnt.reshape(-1, 2))
cv2.ellipse(
newImage2,
(int(cx), int(cy)),
(int(w), int(h)),
theta * 180.0 / np.pi,
0.0,
360.0,
(50, 250, 200),
1,
)
except:
pass
cv2.imshow("Ransac", newImage2)
cv2.imshow("gray", image_gray)
cv2.imshow("thresh", thresh)
calibrightx = cx
calibrighty = cy
print(cx, cy)
cap.release()
cv2.destroyAllWindows()
engine.say("Right calibration complete, please look left")
engine.runAndWait()
vc.cl = 1
vc.cr = 3
if vc.cl == 1:
engine.say("left calibration starting")
engine.runAndWait()
vc.cl = 2
if vc.cl == 2:
cap = cv2.VideoCapture("http://192.168.0.202:81/stream")
ret, img = cap.read()
img = img[int(vxl): int(float(vy)), int(vyl): int(float(vx))]
if ret == True:
newImage2 = img.copy()
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
image_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(
image_gray, 120, 255, cv2.THRESH_BINARY
) # this will need to be adjusted every time the hardware is changed (brightness of IR, camera position, etc.)
opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)
closing = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, kernel)
image = 255 - closing
contours, hierarchy = cv2.findContours(
image, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE
)
hull = []
for i in range(len(contours)):
hull.append(cv2.convexHull(contours[i], False))
try:
cv2.drawContours(img, contours, -1, (255, 0, 0), 1)
cnt = sorted(hull, key=cv2.contourArea)
maxcnt = cnt[-1]
ellipse = cv2.fitEllipse(maxcnt)
cx, cy, w, h, theta = fit_rotated_ellipse_ransac(maxcnt.reshape(-1, 2))
print(cx, cy)
cv2.circle(newImage2, (int(cx), int(cy)), 2, (0, 0, 255), -1)
cx1, cy1, w1, h1, theta1 = fit_rotated_ellipse(maxcnt.reshape(-1, 2))
cv2.ellipse(
newImage2,
(int(cx), int(cy)),
(int(w), int(h)),
theta * 180.0 / np.pi,
0.0,
360.0,
(50, 250, 200),
1,
)
except:
pass
cv2.imshow("Ransac", newImage2)
cv2.imshow("gray", image_gray)
cv2.imshow("thresh", thresh)
calibleftx = cx
calibclefty = cy
print(cx, cy)
cap.release()
cv2.destroyAllWindows()
engine.say("left calibration complete, please look up")
engine.runAndWait()
vc.cl = 3
vc.cu = 1
if vc.cu == 1:
engine.say("up calibration starting")
engine.runAndWait()
vc.cu = 2
if vc.cu == 2:
cap = cv2.VideoCapture("http://192.168.0.202:81/stream")
ret, img = cap.read()
img = img[int(vxl): int(float(vy)), int(vyl): int(float(vx))]
if ret == True:
newImage2 = img.copy()
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
image_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(
image_gray, 120, 255, cv2.THRESH_BINARY
) # this will need to be adjusted every time the hardware is changed (brightness of IR, camera position, etc.)
opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)
closing = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, kernel)
image = 255 - closing
contours, hierarchy = cv2.findContours(
image, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE
)
hull = []
for i in range(len(contours)):
hull.append(cv2.convexHull(contours[i], False))
try:
cv2.drawContours(img, contours, -1, (255, 0, 0), 1)
cnt = sorted(hull, key=cv2.contourArea)
maxcnt = cnt[-1]
ellipse = cv2.fitEllipse(maxcnt)
cx, cy, w, h, theta = fit_rotated_ellipse_ransac(maxcnt.reshape(-1, 2))
print(cx, cy)
cv2.circle(newImage2, (int(cx), int(cy)), 2, (0, 0, 255), -1)
cx1, cy1, w1, h1, theta1 = fit_rotated_ellipse(maxcnt.reshape(-1, 2))
cv2.ellipse(
newImage2,
(int(cx), int(cy)),
(int(w), int(h)),
theta * 180.0 / np.pi,
0.0,
360.0,
(50, 250, 200),
1,
)
except:
pass
cv2.imshow("Ransac", newImage2)
cv2.imshow("gray", image_gray)
cv2.imshow("thresh", thresh)
calibupx = cx
calibupy = cy
print(cx, cy)
cap.release()
cv2.destroyAllWindows()
engine.say("up calibration complete, please look down")
engine.runAndWait()
vc.cd = 1
vc.cu = 3
if vc.cd == 1:
engine.say("down calibration starting")
engine.runAndWait()
vc.cd = 2
if vc.cd == 2:
cap = cv2.VideoCapture("http://192.168.0.202:81/stream")
ret, img = cap.read()
img = img[int(vxl): int(float(vy)), int(vyl): int(float(vx))]
if ret == True:
newImage2 = img.copy()
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
image_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(
image_gray, 120, 255, cv2.THRESH_BINARY
) # this will need to be adjusted every time the hardware is changed (brightness of IR, camera position, etc.)
opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)
closing = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, kernel)
image = 255 - closing
contours, hierarchy = cv2.findContours(
image, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE
)
hull = []
for i in range(len(contours)):
hull.append(cv2.convexHull(contours[i], False))
try:
cv2.drawContours(img, contours, -1, (255, 0, 0), 1)
cnt = sorted(hull, key=cv2.contourArea)
maxcnt = cnt[-1]
ellipse = cv2.fitEllipse(maxcnt)
cx, cy, w, h, theta = fit_rotated_ellipse_ransac(maxcnt.reshape(-1, 2))
print(cx, cy)
cv2.circle(newImage2, (int(cx), int(cy)), 2, (0, 0, 255), -1)
cx1, cy1, w1, h1, theta1 = fit_rotated_ellipse(maxcnt.reshape(-1, 2))
cv2.ellipse(
newImage2,
(int(cx), int(cy)),
(int(w), int(h)),
theta * 180.0 / np.pi,
0.0,
360.0,
(50, 250, 200),
1,
)
except:
pass
cv2.imshow("Ransac", newImage2)
cv2.imshow("gray", image_gray)
cv2.imshow("thresh", thresh)
calibdownx = cx
calibdowny = cy
print(cx, cy)
cap.release()
cv2.destroyAllWindows()
engine.say("calibration complete")
engine.runAndWait()
vc.cd = 3
else:
print('CALIBCOMPLETE')
savecalibvalues(calibcenterx, calibcentery, calibrightx, calibleftx, calibupy, calibdowny)
vc.cfc = 2
vc.fc = 1
print('CALIBCOMPLETE22q2')
break
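
The five calibration stages above (centre, right, left, up, down) repeat the same capture, threshold, contour, and ellipse-fit steps. Below is a hedged refactor sketch of a single helper that each stage could call instead; the function name capture_pupil_position is illustrative and not part of the original file, and the crop and threshold values simply mirror the ones used above.

def capture_pupil_position(stream_url="http://192.168.0.202:81/stream"):
    # One capture-and-fit pass, equivalent to the body of each calibration stage above.
    cap = cv2.VideoCapture(stream_url)
    ret, img = cap.read()
    cap.release()
    if not ret:
        return None
    img = img[int(vxl):int(vy), int(vyl):int(vx)]          # same crop as above
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    _, thresh = cv2.threshold(gray, 120, 255, cv2.THRESH_BINARY)
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    closing = cv2.morphologyEx(cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel),
                               cv2.MORPH_CLOSE, kernel)
    contours, _ = cv2.findContours(255 - closing, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    if not contours:
        return None
    hull = max((cv2.convexHull(c) for c in contours), key=cv2.contourArea)
    cx, cy, w, h, theta = fit_rotated_ellipse_ransac(hull.reshape(-1, 2))
    return cx, cy

Each "if vc.* == 2:" branch above would then reduce to one call to this helper plus its voice prompt and stage bookkeeping.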


@@ -1 +0,0 @@
This method is from https://github.com/SummerSigh/TheVrMLEyeToolbox/


@@ -1,307 +0,0 @@
from tkinter import E
import cv2
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from time import time
import sys
from pythonosc import udp_client
import torch
#model = torch.hub.load('ultralytics/yolov5', 'custom', path='best.pt',force_reload=True)
#model.conf = 0.25 # NMS confidence threshold
#model.iou = 0.45 # NMS IoU threshold
#model.agnostic = False # NMS class-agnostic
#model.multi_label = False # NMS multiple labels per box
#model.max_det = 1 # maximum number of detections per image
#model.amp = False # Automatic Mixed Precision (AMP) inference
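
The commented lines above (and the commented inference block further down in the loop) describe an optional YOLOv5 eye/eyelid detector. A hedged sketch of how that path could be enabled, assuming a local best.pt checkpoint and using only the calls already named in the comments; detect_eye_box is an illustrative helper name:

# Optional YOLOv5 path, reconstructed from the commented-out lines; torch is
# already imported above.  Assumes best.pt exists next to the script.
model = torch.hub.load('ultralytics/yolov5', 'custom', path='best.pt')
model.conf = 0.25      # NMS confidence threshold
model.max_det = 1      # keep only the single best detection per frame

def detect_eye_box(frame):
    results = model(frame)           # run inference on one frame
    boxes = results.xyxy[0]          # rows of x1, y1, x2, y2, confidence, class
    if len(boxes) == 0:
        return None
    x1, y1, x2, y2 = (int(v) for v in boxes[0][:4])
    return (x1, y1), (x2, y2)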
cx = 0.5
cy = 0.5
def vc():
vc.lidmax = 1
vc.lidmin = 6969 #( ͡° ͜ʖ ͡°) yes i know im stupid
vc.cfc = 1
vc.cc = 1
vc.cu = 0
vc.cd = 0
vc.cl = 0
vc.cr = 0
vc.fc = 0
vc.el = 2
vc.eyelidv = 1
vc.src = '1'
vc()
OSCip="127.0.0.1"
OSCport=9000 #VR Chat OSC port
client = udp_client.SimpleUDPClient(OSCip, OSCport)
def fit_rotated_ellipse_ransac(
    data, iter=90, sample_num=10, offset=80.0
):  # before changing these values, please read up on the RANSAC algorithm
    # However, if you do change them, know that higher iteration counts will make processing each frame slower
    count_max = 0
    effective_sample = None
    for i in range(iter):
        sample = np.random.choice(len(data), sample_num, replace=False)
        xs = data[sample][:, 0].reshape(-1, 1)
        ys = data[sample][:, 1].reshape(-1, 1)
        J = np.mat(
            np.hstack((xs * ys, ys**2, xs, ys, np.ones_like(xs, dtype=float)))
        )
        Y = np.mat(-1 * xs**2)
        P = (J.T * J).I * J.T * Y
        # fit a*x**2 + b*x*y + c*y**2 + d*x + e*y + f = 0
        a = 1.0
        b = P[0, 0]
        c = P[1, 0]
        d = P[2, 0]
        e = P[3, 0]
        f = P[4, 0]
        ellipse_model = (
            lambda x, y: a * x**2 + b * x * y + c * y**2 + d * x + e * y + f
        )
        # threshold
        ran_sample = np.array(
            [[x, y] for (x, y) in data if np.abs(ellipse_model(x, y)) < offset]
        )
        if len(ran_sample) > count_max:
            count_max = len(ran_sample)
            effective_sample = ran_sample
    return fit_rotated_ellipse(effective_sample)


def fit_rotated_ellipse(data):
    xs = data[:, 0].reshape(-1, 1)
    ys = data[:, 1].reshape(-1, 1)
    J = np.mat(np.hstack((xs * ys, ys**2, xs, ys, np.ones_like(xs, dtype=float))))
    Y = np.mat(-1 * xs**2)
    P = (J.T * J).I * J.T * Y
    a = 1.0
    b = P[0, 0]
    c = P[1, 0]
    d = P[2, 0]
    e = P[3, 0]
    f = P[4, 0]
    theta = 0.5 * np.arctan(b / (a - c))
    cx = (2 * c * d - b * e) / (b**2 - 4 * a * c)
    cy = (2 * a * e - b * d) / (b**2 - 4 * a * c)
    cu = a * cx**2 + b * cx * cy + c * cy**2 - f
    w = np.sqrt(
        cu
        / (
            a * np.cos(theta) ** 2
            + b * np.cos(theta) * np.sin(theta)
            + c * np.sin(theta) ** 2
        )
    )
    h = np.sqrt(
        cu
        / (
            a * np.sin(theta) ** 2
            - b * np.cos(theta) * np.sin(theta)
            + c * np.cos(theta) ** 2
        )
    )
    ellipse_model = lambda x, y: a * x**2 + b * x * y + c * y**2 + d * x + e * y + f
    error_sum = np.sum([ellipse_model(x, y) for x, y in data])
    print("fitting error = %.3f" % (error_sum))
    return (cx, cy, w, h, theta)


def increase_brightness(img, value):
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    h, s, v = cv2.split(hsv)
    lim = 255 - value
    v[v > lim] = 255
    v[v <= lim] += value
    final_hsv = cv2.merge((h, s, v))
    img = cv2.cvtColor(final_hsv, cv2.COLOR_HSV2BGR)
    return img
cap = cv2.VideoCapture("http://192.168.0.202:81/stream")
#cap = cv2.VideoCapture("http://192.168.1.177:4747/video")
# change this to the video you want to test
if cap.isOpened() == False:
print("Error opening video stream or file")
while cap.isOpened():
with open("config.txt") as calibratefl:
lines = calibratefl.readlines()
vx = float(lines[0].strip())
vy = float(lines[1].strip())
vxl = float(lines[2].strip())
vyl = float(lines[3].strip())
rv = float(lines[4].strip())
calibratefl.close()
# try:
ret, img = cap.read()
img = img[int(vxl): int(float(vy)), int(vyl): int(float(vx))]
if ret == True:
newImage2 = img.copy()
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
image_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(
image_gray, 125, 255, cv2.THRESH_BINARY
) # this will need to be adjusted every time the hardware is changed (brightness of IR, camera position, etc.)
opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)
closing = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, kernel)
image = 255 - closing
contours, hierarchy = cv2.findContours(
image, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE
)
hull = []
for i in range(len(contours)):
hull.append(cv2.convexHull(contours[i], False))
try:
cv2.drawContours(img, contours, -1, (255, 0, 0), 1)
cnt = sorted(hull, key=cv2.contourArea)
maxcnt = cnt[-1]
ellipse = cv2.fitEllipse(maxcnt)
cx, cy, w, h, theta = fit_rotated_ellipse_ransac(maxcnt.reshape(-1, 2))
print(cx, cy)
cv2.circle(newImage2, (int(cx), int(cy)), 2, (0, 0, 255), -1)
cx1, cy1, w1, h1, theta1 = fit_rotated_ellipse(maxcnt.reshape(-1, 2))
cv2.ellipse(
newImage2,
(int(cx), int(cy)),
(int(w), int(h)),
theta * 180.0 / np.pi,
0.0,
360.0,
(50, 250, 200),
1,
)
except:
pass
if vc.el == 2:
print('here')
vc.el = 5
#results = model(img) # inference
#for box in results.xyxy[0]: # box is a list of 4 numbers
# if box[5]==0: # if the confidence is 0, then skip
# xB = int(box[2]) # xB is the x coordinate of the bottom right corner
# xA = int(box[0]) # xA is the x coordinate of the top left corner
# yB = int(box[3]) # yB is the y coordinate of the bottom right corner
# yA = int(box[1]) # yA is the y coordinate of the top left corner
# cv2.rectangle(img, (xA, yA), (xB, yB), (0, 255, 0), 2) # draw a rectangle around the detected object
#cv2.circle(img, (int((xA+xB)/2), int((yA+yB)/2)), 2, (0, 0, 255), -1)
#cv2.imshow('EYEMODEL',img)
print('shown')
vc.el = vc.el - 1
print(vc.el)
if vc.cfc == 1:
try:
with open("eyeconfig.cfg") as eyecalib:
lines = eyecalib.readlines()
calibcenterx = float(lines[0].strip())
calibcentery = float(lines[1].strip())
calibrightx = float(lines[2].strip())
calibleftx = float(lines[3].strip())
calibupy = float(lines[4].strip())
calibdowny = float(lines[5].strip())
eyecalib.close()
vc.cfc = 2
except:
print('error')
sys.exit()
#percentage = (((input - min) * 100) / (max - min)) / 100 only for reference because im dum and forget stuff
xr = float((((cx - calibcenterx) * 100) / (calibrightx - calibcenterx)) / 100)
xl = float((((cx - calibcenterx) * 100) / (calibleftx - calibcenterx)) / 100)
yu = float((((cy - calibcentery) * 100) / (calibupy - calibcentery)) / 100)
yd = float((((cy - calibcentery) * 100) / (calibdowny - calibcentery)) / 100)
if xr > 0:
if xr > 1:
xr = 1.0
client.send_message("/avatar/parameters/RightEyeX", xr)
client.send_message("/avatar/parameters/LeftEyeX", xr)
print('XR', xr)
if xl > 0:
if xl > 1:
xl = 1.0
client.send_message("/avatar/parameters/RightEyeX", -abs(xl))
client.send_message("/avatar/parameters/LeftEyeX", -abs(xl))
print('XL', xl)
if yd > 0:
if yd > 1:
yd = 1.0
client.send_message("/avatar/parameters/EyesY", -abs(yd))
# print('YD', yd)
if yu > 0:
if yu > 1:
yu = 1.0
client.send_message("/avatar/parameters/EyesY", yu)
#print('YU', yu)
cv2.imshow("Ransac", newImage2)
cv2.imshow("gray", image_gray)
cv2.imshow("thresh", thresh)
if cv2.waitKey(1) & 0xFF == ord("q"):
break
#except:
# print('error')
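
For reference, the xr/xl/yu/yd lines above implement the commented percentage formula: each axis is normalised against the calibrated extreme, clamped to 1.0, and sent over OSC with a negative sign for left/down. A hedged sketch of the same mapping collected into one function (the name normalized_gaze and the calib dict keys are illustrative, not part of the original file):

# Same mapping as the loop above.  Returns x in [-1, 1] (positive = right)
# and y in [-1, 1] (positive = up), or 0.0 on an axis whose ratios are both
# non-positive, mirroring the if-chains above.
def normalized_gaze(cx, cy, calib):
    xr = (cx - calib["centerx"]) / (calib["rightx"] - calib["centerx"])
    xl = (cx - calib["centerx"]) / (calib["leftx"] - calib["centerx"])
    yu = (cy - calib["centery"]) / (calib["upy"] - calib["centery"])
    yd = (cy - calib["centery"]) / (calib["downy"] - calib["centery"])
    x = min(xr, 1.0) if xr > 0 else (-min(xl, 1.0) if xl > 0 else 0.0)
    y = min(yu, 1.0) if yu > 0 else (-min(yd, 1.0) if yd > 0 else 0.0)
    return x, y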