Delete InceptionNet directory

Prohurtz 2022-07-03 12:20:29 -07:00 committed by GitHub
parent eed5288b5e
commit 2f7f67f1d4
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
16 changed files with 0 additions and 2274 deletions


@@ -1,188 +0,0 @@
import kivy
from multiprocessing import Process, Queue, Pipe
kivy.require("1.9.1")
from kivy.app import App
from kivy.uix.gridlayout import GridLayout
from kivy.uix.slider import Slider
from kivy.uix.label import Label
from kivy.uix.floatlayout import FloatLayout
from kivy.properties import NumericProperty
from kivy.uix.scatter import Scatter
from kivy.uix.textinput import TextInput
from kivy.uix.boxlayout import BoxLayout
from kivy.core.window import Window
import time
###############################################################################
Window.size = (700, 200)
class WidgetContainer(GridLayout):
    def __init__(self, **kwargs):
        super(WidgetContainer, self).__init__(**kwargs)
        ############################################################################### right
        self.cols = 3
        self.xcc = Slider(min=1, max=240,
                          value_track=True,
                          value_track_color=[1, 1, 1, 1])
        self.add_widget(Label(text='Search Size X R'))
        self.add_widget(self.xcc)
        self.xValue = Label(text='1')
        self.add_widget(self.xValue)
        self.xcc.bind(value=self.on_value)
        ############################################################################### bottom
        self.Y = Slider(min=1, max=240,
                        value_track=True,
                        value_track_color=[1, 1, 1, 1])
        self.add_widget(Label(text='Search Size Y R'))
        self.add_widget(self.Y)
        self.YV = Label(text='1')
        self.add_widget(self.YV)
        self.Y.bind(value=self.on_value1)
        ############################################################################### left
        self.xlc = Slider(min=1, max=240,
                          value_track=True,
                          value_track_color=[1, 1, 1, 1])
        self.add_widget(Label(text='Search Size X L'))
        self.add_widget(self.xlc)
        self.xlValue = Label(text='1')
        self.add_widget(self.xlValue)
        self.xlc.bind(value=self.on_value2)
        ############################################################################### top
        self.ylc = Slider(min=1, max=240,
                          value_track=True,
                          value_track_color=[1, 1, 1, 1])
        self.add_widget(Label(text='Search Size Y L'))
        self.add_widget(self.ylc)
        self.ylValue = Label(text='1')
        self.add_widget(self.ylValue)
        self.ylc.bind(value=self.on_value3)
        ############################################################################### detection
        # self.deth = Slider(min=1, max=40,
        #                    value_track=True,
        #                    value_track_color=[1, 1, 1, 1])
        # self.add_widget(Label(text='Detection thresh DEFAULT:18'))
        # self.add_widget(self.deth)
        # self.dethv = Label(text='1')
        # self.add_widget(self.dethv)
        # self.deth.bind(value=self.on_value4)
        ############################################################################### rotation
        self.rota = Slider(min=0, max=360,
                           value_track=True,
                           value_track_color=[1, 1, 1, 1])
        self.add_widget(Label(text='Rotation'))
        self.add_widget(self.rota)
        self.rotav = Label(text='Select')
        self.add_widget(self.rotav)
        self.rota.bind(value=self.on_value5)
        ###############################################################################
        # self.sav = Slider(min=0, max=360,
        #                   value_track=True,
        #                   value_track_color=[1, 1, 1, 1])
        # self.add_widget(Label(text='Rotation'))
        # self.add_widget(self.sav)
        # self.sav = Label(text='Select')
        # self.add_widget(self.sav)
        # self.rotav.bind(value=self.on_value5)

    def on_value(self, instance, brightness):
        self.xValue.text = "% d" % brightness
        confg.fx = self.xValue.text
        configsave()
        time.sleep(0.1)

    def on_value1(self, instance, brightness):
        self.YV.text = "% d" % brightness
        confg.fy = self.YV.text
        configsave()
        time.sleep(0.1)

    def on_value2(self, instance, brightness):
        self.xlValue.text = "% d" % brightness
        confg.fxl = self.xlValue.text
        configsave()
        time.sleep(0.1)

    def on_value3(self, instance, brightness):
        self.ylValue.text = "% d" % brightness
        confg.fyl = self.ylValue.text
        configsave()
        time.sleep(0.1)

    # def on_value4(self, instance, brightness):
    #     self.dethv.text = "% d" % brightness
    #     confg.fxl = self.YV.text

    def on_value5(self, instance, brightness):
        self.rotav.text = "% d" % brightness
        confg.rv = self.rotav.text
        configsave()
        time.sleep(0.1)

class EyetrackGUI(App):
    def build(self):
        widgetContainer = WidgetContainer()
        return widgetContainer

def confg():
    # default values, overwritten as the sliders move
    confg.fx = 128
    confg.fy = 128
    confg.fxl = 1
    confg.fyl = 1
    confg.rv = 0

def configsave():
    with open('config.txt', 'w+') as cw:
        cw.write(str(confg.fx))
        cw.write('\n')
        cw.write(str(confg.fy))
        cw.write('\n')
        cw.write(str(confg.fxl))
        cw.write('\n')
        cw.write(str(confg.fyl))
        cw.write('\n')
        cw.write(str(confg.rv))
        cw.write('\n')

confg()
rootGUI = EyetrackGUI()
rootGUI.run()
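# config.txt layout, one value per line, in write order:
#   fx, fy, fxl, fyl, rv
# inferno.py and the calib script read these back in the same order as the
# crop bounds and the rotation value.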


@@ -1,36 +0,0 @@
***This is the first working method for VRChat***
It is not perfect, and a lot more work needs to be done on all aspects of this project.
**How to run**
You will first need Python 3.9.x or earlier installed.
Then install the dependencies in requirements.txt.
Run the GUI and set the first two sliders to their maximum, then adjust the next two up and back down to the minimum.
Now open both the inferno calib and inferno scripts and set your device stream address in the cv2.VideoCapture('[set your address here]') line, as shown below.
Now run the inferno calib program and follow the audio prompts to calibrate.
After you have calibrated, you can start inferno.py and watch your eyes move in VRChat.
*Please note this process will change and become more streamlined in the near future.*
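For reference, the capture line in both scripts looks like the following; the URL shown is the example stream address used in the calib script, so substitute your own device's address:

cap = cv2.VideoCapture('http://192.168.0.202:81/stream')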
***This is a model implementation from https://github.com/isohrab/Pupil-locator***
**What is this model?**
This is a hybrid model inspired by YOLO and Network in Network (NiN) CNNs, using YINInception as the core CNN to predict the pupil location inside the image of the eye.
**Why did you name the technique InceptionNet if that's not what it's called?**
The authors of the original paper did not specify a name, and since it is very close to InceptionNet I just named it that.
**How good is this method?**
In terms of ease of implementation and accuracy, it is currently one of the best eye tracking methods in this repo.
This model setup comes from https://github.com/SummerSigh/TheVrMLEyeToolbox


@@ -1,162 +0,0 @@
import tensorflow.compat.v1 as tf
from tensorflow.python.ops import control_flow_ops
tf.disable_v2_behavior()
# YOLO implementation
# https://github.com/WojciechMormul/yolo2/blob/master/train.py
class BaseModel(object):
    """
    This class serves basic methods for the other models
    """
    def __init__(self, model_name, cfg):
        self.cfg = cfg
        self.model_name = model_name
        self.l2beta = cfg["l2_beta"]
        self.model_dir = "models/" + model_name + "/"
        self.mode = 'train'
        self.max_gradient_norm = cfg["MAX_GRADIANT_NORM"]
        self.global_step = tf.Variable(0, trainable=False, name='global_step')
        self.global_epoch_step = tf.Variable(0, trainable=False, name='global_epoch_step')
        self.global_epoch_step_op = tf.assign(self.global_epoch_step, self.global_epoch_step + 1)
        self.update = None
        self.loss = None
        self.logits = None

    def init_placeholders(self):
        # shape: [batch_size, height, width, channels]
        self.X = tf.placeholder(dtype=tf.float32,
                                shape=(None,
                                       self.cfg["input_height"],
                                       self.cfg["input_width"],
                                       self.cfg["input_channel"]),
                                name="images_input")
        # shape: [batch_size, output_dim], at most (x, y, w, h, a)
        self.Y = tf.placeholder(dtype=tf.float32,
                                shape=(None, self.cfg["output_dim"]),
                                name="ground_truth")
        self.keep_prob = tf.placeholder(dtype=tf.float32,
                                        shape=(),
                                        name="keep_prob")
        self.train_flag = tf.placeholder(dtype=tf.bool, name='flag_placeholder')
        self.learning_rate = tf.placeholder(dtype=tf.float32, shape=(), name="learning_rate")

    def init_optimizer(self):
        print("setting optimizer..")
        # add the L2 loss to the main loss, then do backpropagation
        self.l2_loss = tf.losses.get_regularization_loss()
        tf.summary.scalar("l2_loss", self.l2_loss)
        self.total_loss = tf.add(self.loss, self.l2_loss)
        tf.summary.scalar('final_loss', self.total_loss)

        # we need to define a dependency on the update ops (e.g. batch norm)
        # before calculating the total loss
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        if update_ops:
            updates = tf.group(*update_ops)
            self.final_loss = control_flow_ops.with_dependencies([updates], self.total_loss)
        else:
            # fall back to the plain total loss when there are no update ops
            self.final_loss = self.total_loss
        with tf.control_dependencies(update_ops):
            trainable_params = tf.trainable_variables()
            opt = tf.train.AdamOptimizer(learning_rate=self.learning_rate)

            # compute gradients of the loss w.r.t. all trainable variables
            gradients = tf.gradients(self.final_loss, trainable_params)

            # clip gradients by the given maximum gradient norm
            clip_gradients, _ = tf.clip_by_global_norm(gradients, self.max_gradient_norm)

            # update the model
            self.update = opt.apply_gradients(zip(clip_gradients, trainable_params),
                                              global_step=self.global_step)
    def train(self, sess, images, labels, keep_prob, lr):
        """Run one training step of the model, feeding the given inputs.
        Args:
            sess: TensorFlow session to use.
            images: numpy array of shape [batch_size, height, width, channels]
            labels: numpy array of shape [batch_size, output_dim]
            keep_prob: dropout keep probability for this step
            lr: learning rate for this step
        Returns:
            The batch loss and the merged summary.
        """
        # the model is in 'train' mode
        self.mode = 'train'
        input_feed = {self.X.name: images,
                      self.Y.name: labels,
                      self.keep_prob.name: keep_prob,
                      self.train_flag.name: True,
                      self.learning_rate.name: lr}
        output_feed = [self.update,      # update op that does the optimization
                       self.loss,        # loss for the current batch
                       self.summary_op]
        outputs = sess.run(output_feed, input_feed)
        return outputs[1], outputs[2]
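    # usage sketch (variable names assumed, not part of this file): a training
    # loop would call
    #   batch_loss, summary = model.train(sess, batch_images, batch_labels,
    #                                     keep_prob=config["keep_prob"], lr=current_lr)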
    def eval(self, sess, images, labels):
        """Run one evaluation step of the model, feeding the given inputs.
        Args:
            sess: TensorFlow session to use.
            images: numpy array of shape [batch_size, height, width, channels]
            labels: numpy array of shape [batch_size, output_dim]
        Returns:
            The batch loss, the merged summary and the logits.
        """
        self.mode = "eval"
        input_feed = {self.X.name: images,
                      self.Y.name: labels,
                      self.keep_prob.name: 1.0,
                      self.train_flag.name: False}
        output_feed = [self.loss,        # loss for the current batch
                       self.summary_op,
                       self.logits]
        outputs = sess.run(output_feed, input_feed)
        return outputs[0], outputs[1], outputs[2]

    def predict(self, sess, images):
        """
        predict the labels for the given images
        :param sess: current tf.Session
        :param images: input test images
        :return: predicted labels
        """
        self.mode = 'test'
        # disable dropout for inference
        input_feed = {self.X.name: images,
                      self.keep_prob.name: 1.0,
                      self.train_flag.name: False}
        output_feed = [self.logits]
        outputs = sess.run(output_feed, input_feed)
        return outputs[0]

    def restore(self, sess, path, var_list=None):
        """
        restore a model from file
        :param sess: active (current) tf.Session
        :param path: path to the saved checkpoint
        :param var_list: variables to load; if None, all saveable variables are loaded
        :return: loads the model into the graph
        """
        saver = tf.train.Saver(var_list)
        saver.restore(sess, save_path=path)


@@ -1,91 +0,0 @@
config = dict()
# There are 342176 images in train set
# There are 38019 images in validation set
config["total_train"] = 342176
config["total_test"] = 38019
config["batch_size"] = 64
# calculate the validate every based on the number of available data
config["validate_every"] = int(
config["total_train"] / config["batch_size"]
) # Usually equal to one epoch
config["validate_for"] = int(config["total_test"] / config["batch_size"])
config["save_every"] = 3 * config["validate_every"]
# number of epochs
config["total_steps"] = config["validate_every"] * 60
# BASIC MODEL hyperparameters
config["n_filters"] = [16, 32, 128, 128, 256, 256]
config["filter_sizes"] = [3, 3, 3, 3, 3, 3]
config["max_pool"] = [1, 1, 1, 1, 1, 1]
config["fc_layers"] = [256, 128]
# OPTIMIZATION hyperparameters
config["learning_rate"] = [
0.001,
0.0009,
0.0006,
0.0003,
0.0001,
0.00005,
0.00001,
0.000005,
0.000001,
]
config["decay_rate"] = 0.96
# decay the learning rate every five epochs
config["decay_step"] = 5 * config["validate_every"]
config["optimizer"] = "ADAM"
config["keep_prob"] = 0.85
config["MAX_GRADIANT_NORM"] = 5.0
# L2 regularization
config["l2_beta"] = 0.0005
# input info
config["input_width"] = 192
config["input_height"] = 192
config["input_channel"] = 1
# Output shape
config["output_dim"] = 3
config["output_weights"] = [1.0, 1.0, 1.0, 1.0, 0.5]
# Augmentation parameters
config["prob_downscale"] = 0.75
config["max_downscale"] = 0.95
config["min_downscale"] = 0.5
config["prob_reflection"] = 0.25
config["min_reflection"] = 0.25
config["max_reflection"] = 0.75
config["prob_blur"] = 0.25
config["min_blurSize"] = 3
config["max_blurSize"] = 9
config["min_sigmaRatio"] = 0.25
config["max_sigmaRatio"] = 0.75
# config["prob_occlusion"] = 0.5
config["min_occlusion"] = 0.05
config["max_occlusion"] = 0.25
config["occlusion_max_obj"] = 6
# exposure on noisy frames
config["prob_exposure"] = 0.25
config["min_exposure"] = 0.7
config["max_exposure"] = 1.2
# crop input image
config["crop_probability"] = 0.5
config["crop_min_ratio"] = 0.5
config["crop_max_ratio"] = 0.95
# flip image
config["flip_probability"] = 0.5
# add Pupil
config["prob_pupil"] = 0.25


@@ -1,303 +0,0 @@
import os
import cv2
import numpy as np
import tensorflow.compat.v1 as tf
from config import config
from models import Inception
from utils import change_channel, gray_normalizer
import time
from pythonosc import udp_client
from scipy import ndimage
import sys
import pyttsx3
engine = pyttsx3.init()
tf.disable_v2_behavior()
def load_model(session, m_type, m_name):
    # load the weights based on the best loss
    best_dir = "best_loss"

    # check the model dir
    model_path = "models/" + m_name
    path = os.path.join(model_path, best_dir)
    if not os.path.exists(path):
        raise FileNotFoundError

    model = Inception(m_name, config)

    # load the best saved weights
    ckpt = tf.train.get_checkpoint_state(path)
    if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):
        model.restore(session, ckpt.model_checkpoint_path)
    else:
        raise ValueError("There is no best model with the given model name")
    return model
def rescale(image):
    """
    If the input video is other than the network size, resize the frame
    :param image: a frame from the input video
    :return: the frame scaled and padded to the network input size
    """
    scale_side = max(image.shape)
    # network input width and height are both 192
    scale_value = config["input_width"] / scale_side

    # scale the input image down or up
    scaled_image = cv2.resize(image, dsize=None, fx=scale_value, fy=scale_value)

    # convert to numpy array
    scaled_image = np.asarray(scaled_image, dtype=np.uint8)

    # one of the pads should be zero
    w_pad = int((config["input_width"] - scaled_image.shape[1]) / 2)
    h_pad = int((config["input_width"] - scaled_image.shape[0]) / 2)

    # create a new image of size (config["input_width"], config["input_height"])
    new_image = (
        np.ones((config["input_width"], config["input_height"]), dtype=np.uint8) * 250
    )

    # put the scaled image in the middle of the new image
    new_image[
        h_pad: h_pad + scaled_image.shape[0], w_pad: w_pad + scaled_image.shape[1]
    ] = scaled_image
    return new_image
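# worked example for rescale (assumed input size): a 400x300 frame has
# scale_side = 400, so scale_value = 192 / 400 = 0.48; the frame is resized to
# 192x144, then padded with h_pad = (192 - 144) // 2 = 24 rows on top and
# bottom to reach the 192x192 network input.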
def writet(addressipn):
    addressips = addressipn.strip().lower()
    camadd = open("cam.txt", "w+")
    camadd.write(str(addressips))
    print(addressips)
    camadd.close()

# def eyelid(frame1):
#     results = model1(frame1)  # inference
#     for box in results.xyxy[0]:  # box is a list of 4 numbers
#         if box[5] == 0:  # if the confidence is 0, then skip
#             xB = int(box[2])  # x coordinate of the bottom right corner
#             xA = int(box[0])  # x coordinate of the top left corner
#             yB = int(box[3])  # y coordinate of the bottom right corner
#             yA = int(box[1])  # y coordinate of the top left corner
#             vc.eyelidv = yA - yB
#             cv2.rectangle(frame1, (xA, yA), (xB, yB), (0, 255, 0), 2)  # draw a rectangle around the detected object
#             if vc.eyelidv > vc.lidmax:
#                 if vc.lidmax != 0:
#                     vc.lidmax = vc.eyelidv
#             if vc.eyelidv < vc.lidmin:
#                 if vc.xmin != 0:
#                     vc.xmin = vc.eyelidv
#     # cv2.circle(img, (int((xA+xB)/2), int((yA+yB)/2)), 2, (0, 0, 255), -1)
#     # cv2.imshow('EYEMODEL', frame1)
def main(m_type, m_name):
    with tf.Session() as sess:  # start a session
        # load the best model
        model = load_model(sess, m_type, m_name)
        cap = cv2.VideoCapture(vc.src)  # open the camera stream
        while cap.isOpened():
            # reload the crop / rotation settings written by the GUI
            with open("config.txt") as calibratefl:
                lines = calibratefl.readlines()
                vx = float(lines[0].strip())
                vy = float(lines[1].strip())
                vxl = float(lines[2].strip())
                vyl = float(lines[3].strip())
                rv = float(lines[4].strip())
            ret, frame = cap.read()
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            try:
                frame1 = ndimage.rotate(frame, int(rv), reshape=True)
                frame1 = frame1[int(vxl): int(vy), int(vyl): int(vx)]
                if frame1.shape[0] != 192:
                    frame1 = rescale(frame1)
                image = gray_normalizer(frame1)
                image = change_channel(image, config["input_channel"])
                # vc.el - 1
                # if vc.el == 1:
                #     eyelid(frame1)
                #     vc.el = 3
                [p] = model.predict(sess, [image])
                cv2.circle(frame1, (int(p[0]), int(p[1])), int(p[2]), (0, 0, 255), 2)
                cv2.circle(frame1, (int(p[0]), int(p[1])), 1, (0, 0, 255), -1)
                # print(int(p[0]), int(p[1]), int(p[2]))  # p[2] is the pupil pixel size (circle diameter)
                xt = int(p[0])
                yt = int(p[1])
                if vc.cfc == 1:
                    try:
                        with open("eyeconfig.cfg") as eyecalib:
                            lines = eyecalib.readlines()
                            calibcenterx = float(lines[0].strip())
                            calibcentery = float(lines[1].strip())
                            calibrightx = float(lines[2].strip())
                            calibleftx = float(lines[3].strip())
                            calibupy = float(lines[4].strip())
                            calibdowny = float(lines[5].strip())
                        vc.cfc = 2
                    except:
                        print('error')
                        engine.say("A saved calibration file was not found. Please run the calibration program first.")
                        # will start the calibration program exe on release and close this one
                        engine.runAndWait()
                        sys.exit()
                # percentage = (((input - min) * 100) / (max - min)) / 100  -- kept for reference
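                # worked example (assumed values): with calibcenterx = 100 and
                # calibrightx = 140, a pupil at xt = 120 gives
                # xr = (((120 - 100) * 100) / (140 - 100)) / 100 = 0.5,
                # i.e. the eye is halfway toward its full-right position.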
                xr = float((((xt - calibcenterx) * 100) / (calibrightx - calibcenterx)) / 100)
                xl = float((((xt - calibcenterx) * 100) / (calibleftx - calibcenterx)) / 100)
                yu = float((((yt - calibcentery) * 100) / (calibupy - calibcentery)) / 100)
                yd = float((((yt - calibcentery) * 100) / (calibdowny - calibcentery)) / 100)
                if xr > 0:
                    if xr > 1:
                        xr = 1.0
                    client.send_message("/avatar/parameters/RightEyeX", xr)
                    client.send_message("/avatar/parameters/LeftEyeX", xr)
                    # print('XR', xr)
                if xl > 0:
                    if xl > 1:
                        xl = 1.0
                    client.send_message("/avatar/parameters/RightEyeX", -abs(xl))
                    client.send_message("/avatar/parameters/LeftEyeX", -abs(xl))
                if yd > 0:
                    if yd > 1:
                        yd = 1.0
                    client.send_message("/avatar/parameters/EyesY", -abs(yd))
                if yu > 0:
                    if yu > 1:
                        yu = 1.0
                    client.send_message("/avatar/parameters/EyesY", yu)
                cv2.imshow("frame", frame1)
                cv2.imshow("img", image)
            except:
                print('[ERROR] Main Loop Error')
            if cv2.waitKey(1) & 0xFF == ord("q"):
                break
        cap.release()
        cv2.destroyAllWindows()
if __name__ == "__main__":
model_name = "3A4Bh-Ref25"
model_type = "INC"
video_path = 0
# with open("config.txt") as calibratefl:
# lines = calibratefl.readlines()
# rv = float(lines[4].strip())
# calibratefl.close()
def vc():
vc.lidmax = 1
vc.lidmin = 6969 #( ͡° ͜ʖ ͡°) yes i know im stupid
vc.cfc = 1
vc.cc = 1
vc.cu = 0
vc.cd = 0
vc.cl = 0
vc.cr = 0
vc.fc = 0
vc.el = 2
vc.eyelidv = 1
vc.src = '1'
vc()
try:
OSCip="127.0.0.1"
OSCport=9000 #VR Chat OSC port
client = udp_client.SimpleUDPClient(OSCip, OSCport)
except:
print('[ERROR] Connection to VR Chat via OSC Failed')
try:
camadd= open("cam.txt","r+")
vc.src = camadd.read().strip()
camadd.close
except:
addressipn = input('Enter IP Stream Address of Camera :>: ')
writet(addressipn)
vc.src = addressipn.strip().lower()
# initial a logger
main(model_type, model_name)
# 【=◈︿◈=】


@@ -1,440 +0,0 @@
import os
import cv2
import numpy as np
import tensorflow.compat.v1 as tf
from config import config
from models import Inception
from utils import change_channel, gray_normalizer
import time
from pythonosc import udp_client
from scipy import ndimage
import pyttsx3
engine = pyttsx3.init()
tf.disable_v2_behavior()
def load_model(session, m_type, m_name):
# load the weights based on best loss
best_dir = "best_loss"
# check model dir
model_path = "models/" + m_name
path = os.path.join(model_path, best_dir)
if not os.path.exists(path):
raise FileNotFoundError
model = Inception(m_name, config)
# load the best saved weights
ckpt = tf.train.get_checkpoint_state(path)
if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):
model.restore(session, ckpt.model_checkpoint_path)
else:
raise ValueError("There is no best model with given model")
return model
def rescale(image):
"""
If the input video is other than network size, it will resize the input video
:param image: a frame form input video
:return: scaled down frame
"""
scale_side = max(image.shape)
# image width and height are equal to 192
scale_value = config["input_width"] / scale_side
# scale down or up the input image
scaled_image = cv2.resize(image, dsize=None, fx=scale_value, fy=scale_value)
# convert to numpy array
scaled_image = np.asarray(scaled_image, dtype=np.uint8)
# one of pad should be zero
w_pad = int((config["input_width"] - scaled_image.shape[1]) / 2)
h_pad = int((config["input_width"] - scaled_image.shape[0]) / 2)
# create a new image with size of: (config["image_width"], config["image_height"])
new_image = (
np.ones((config["input_width"], config["input_height"]), dtype=np.uint8) * 250
)
# put the scaled image in the middle of new image
new_image[
h_pad : h_pad + scaled_image.shape[0], w_pad : w_pad + scaled_image.shape[1]
] = scaled_image
return new_image
def writet(addressipn):
addressips = addressipn.strip().lower()
camadd = open("cam.txt","w+")
camadd.write(str(addressips))
print(addressips)
camadd.close()
def main(m_type, m_name):
with tf.Session() as sess: # start a session
# load best model
model = load_model(sess, m_type, m_name) # load the best model
#cap = cv2.VideoCapture('http://192.168.0.202:81/stream') # load the camera
#cap = rotated = ndimage.rotate(capu, 45)
#while cap.isOpened():
with open("config.txt") as calibratefl:
lines = calibratefl.readlines()
vx = float(lines[0].strip())
vy = float(lines[1].strip())
vxl = float(lines[2].strip())
vyl = float(lines[3].strip())
rv = float(lines[4].strip())
calibratefl.close()
#cap = cv2.VideoCapture('http://192.168.0.202:81/stream')
#ret, frame = cap.read()
#frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
#try:
# frame1 = ndimage.rotate(frame, int(rv), reshape=True)
# frame1 = frame1[int(vxl): int(float(vy)), int(vyl): int(float(vx))]
# if frame1.shape[0] != 192:
# frame1 = rescale(frame1)
#image = gray_normalizer(frame1)
#image = change_channel(image, config["input_channel"])
# [p] = model.predict(sess, [image])
#@ cv2.circle(frame1, (int(p[0]), int(p[1])), int(p[2]), (0, 0, 255), 2)
#cv2.circle(frame1, (int(p[0]), int(p[1])), 1, (0, 0, 255), -1)
#print(int(p[0]), int(p[1]), int(p[2])) #int(p[2]) pupil pixel size (circ diamiter)
#xt = int(p[0])
#yt = int(p[1])
#cap.release()
#except:
# print('preoc error')
#try:
# xpercentage = (((xt - vc.xmin) * 100) / (vc.xmax - vc.xmin)) / 100 #TESTING NEEDED AM UNSURE IF VALUES NEED TO BE FLIPPED
# ypercentage = (((yt - vc.ymin) * 100) / (vc.ymax - vc.ymin)) / 100
#if vc.cfc == 1 and vc.fc != 1:
#if vc.cfc == 1:
#00 try:
# with open("eyeconfig.cfg") as eyecalib:
# lines = eyecalib.readlines()
# calibcenterx = float(lines[0].strip())
# calibcentery = float(lines[1].strip())
# calibrightx = float(lines[2].strip())
# calibleftx = float(lines[3].strip())
# calibrighty = float(lines[4].strip())
# caliblefty = float(lines[5].strip())
#calibupx = float(lines[6].strip())
#calibupy = float(lines[7].strip())
#vc.cfc = 1
#eyecalib.close()
#except:
while True:
if vc.cc == 1:
engine.say("a saved calibration file was not found.")
engine.say("Calibration starting, 3. 2. 1. please look straight forward")
engine.runAndWait()
vc.cc = 2
if vc.cc == 2:
cap = cv2.VideoCapture('http://192.168.0.202:81/stream')
ret, frame = cap.read()
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
try:
frame1 = ndimage.rotate(frame, int(rv), reshape=True)
frame1 = frame1[int(vxl): int(float(vy)), int(vyl): int(float(vx))]
if frame1.shape[0] != 192:
frame1 = rescale(frame1)
image = gray_normalizer(frame1)
image = change_channel(image, config["input_channel"])
[p] = model.predict(sess, [image])
cv2.circle(frame1, (int(p[0]), int(p[1])), int(p[2]), (0, 0, 255), 2)
cv2.circle(frame1, (int(p[0]), int(p[1])), 1, (0, 0, 255), -1)
#print(int(p[0]), int(p[1]), int(p[2])) #int(p[2]) pupil pixel size (circ diamiter)
xt = int(p[0])
yt = int(p[1])
cap.release()
cv2.destroyAllWindows()
except:
print('preprocessing error')
calibcenterx = xt
calibcentery = yt
print(xt, yt)
engine.say("center calibration complete, please look right")
engine.runAndWait()
vc.cr = 1
vc.cc = 3
if vc.cr == 1:
engine.say("Right calibration starting")
engine.runAndWait()
vc.cr = 2
if vc.cr == 2:
cap = cv2.VideoCapture('http://192.168.0.202:81/stream')
ret, frame = cap.read()
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
try:
frame1 = ndimage.rotate(frame, int(rv), reshape=True)
frame1 = frame1[int(vxl): int(float(vy)), int(vyl): int(float(vx))]
if frame1.shape[0] != 192:
frame1 = rescale(frame1)
image = gray_normalizer(frame1)
image = change_channel(image, config["input_channel"])
[p] = model.predict(sess, [image])
cv2.circle(frame1, (int(p[0]), int(p[1])), int(p[2]), (0, 0, 255), 2)
cv2.circle(frame1, (int(p[0]), int(p[1])), 1, (0, 0, 255), -1)
#print(int(p[0]), int(p[1]), int(p[2])) #int(p[2]) pupil pixel size (circ diamiter)
xt = int(p[0])
yt = int(p[1])
cap.release()
cv2.destroyAllWindows()
except:
print('preprocessing error')
calibrightx = xt
calibrighty = yt
print(xt, yt)
engine.say("Right calibration complete, please look left")
engine.runAndWait()
vc.cl = 1
vc.cr = 3
if vc.cl == 1:
engine.say("left calibration starting")
engine.runAndWait()
vc.cl = 2
if vc.cl == 2:
cap = cv2.VideoCapture('http://192.168.0.202:81/stream')
ret, frame = cap.read()
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
try:
frame1 = ndimage.rotate(frame, int(rv), reshape=True)
frame1 = frame1[int(vxl): int(float(vy)), int(vyl): int(float(vx))]
if frame1.shape[0] != 192:
frame1 = rescale(frame1)
image = gray_normalizer(frame1)
image = change_channel(image, config["input_channel"])
[p] = model.predict(sess, [image])
cv2.circle(frame1, (int(p[0]), int(p[1])), int(p[2]), (0, 0, 255), 2)
cv2.circle(frame1, (int(p[0]), int(p[1])), 1, (0, 0, 255), -1)
#print(int(p[0]), int(p[1]), int(p[2])) #int(p[2]) pupil pixel size (circ diamiter)
xt = int(p[0])
yt = int(p[1])
cap.release()
cv2.destroyAllWindows()
except:
print('preprocessing error')
calibleftx = xt
caliblefty = yt
print(xt, yt)
engine.say("left calibration complete, please look up")
engine.runAndWait()
vc.cl = 3
vc.cu = 1
if vc.cu == 1:
engine.say("up calibration starting")
engine.runAndWait()
vc.cu = 2
if vc.cu == 2:
cap = cv2.VideoCapture('http://192.168.0.202:81/stream')
ret, frame = cap.read()
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
try:
frame1 = ndimage.rotate(frame, int(rv), reshape=True)
frame1 = frame1[int(vxl): int(float(vy)), int(vyl): int(float(vx))]
if frame1.shape[0] != 192:
frame1 = rescale(frame1)
image = gray_normalizer(frame1)
image = change_channel(image, config["input_channel"])
[p] = model.predict(sess, [image])
cv2.circle(frame1, (int(p[0]), int(p[1])), int(p[2]), (0, 0, 255), 2)
cv2.circle(frame1, (int(p[0]), int(p[1])), 1, (0, 0, 255), -1)
#print(int(p[0]), int(p[1]), int(p[2])) #int(p[2]) pupil pixel size (circ diamiter)
xt = int(p[0])
yt = int(p[1])
cap.release()
cv2.destroyAllWindows()
except:
print('preprocessing error')
calibupx = xt
calibupy = yt
print(xt, yt)
engine.say("up calibration complete, please look down")
engine.runAndWait()
vc.cd = 1
vc.cu = 3
if vc.cd == 1:
engine.say("down calibration starting")
engine.runAndWait()
vc.cd = 2
if vc.cd == 2:
cap = cv2.VideoCapture('http://192.168.0.202:81/stream')
ret, frame = cap.read()
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
try:
frame1 = ndimage.rotate(frame, int(rv), reshape=True)
frame1 = frame1[int(vxl): int(float(vy)), int(vyl): int(float(vx))]
if frame1.shape[0] != 192:
frame1 = rescale(frame1)
image = gray_normalizer(frame1)
image = change_channel(image, config["input_channel"])
[p] = model.predict(sess, [image])
cv2.circle(frame1, (int(p[0]), int(p[1])), int(p[2]), (0, 0, 255), 2)
cv2.circle(frame1, (int(p[0]), int(p[1])), 1, (0, 0, 255), -1)
#print(int(p[0]), int(p[1]), int(p[2])) #int(p[2]) pupil pixel size (circ diamiter)
xt = int(p[0])
yt = int(p[1])
cap.release()
cv2.destroyAllWindows()
except:
print('preprocessing error')
calibdownx = xt
calibdowny = yt
print(xt, yt)
engine.say("calibration complete")
engine.runAndWait()
vc.cd = 3
else:
print('CALIBCOMPLETE')
savecalibvalues(calibcenterx, calibcentery, calibrightx, calibleftx, calibupy, calibdowny)
vc.cfc = 2
vc.fc = 1
print('CALIBCOMPLETE22q2')
break
# client.send_message("/avatar/parameters/RightEyeX", xper) #sends to vr chat needs to use calibration function
#client.send_message("/avatar/parameters/LeftEyeX", xper)
# client.send_message("/Avatar/LeftEyeY", ypercentage)
#client.send_message("/Avatar/RightEyeY", ypercentage)
#client.send_message("/avatar/parameters/EyesY", yper)
#client.send_message("/avatar/parameters/RightEyeLid", 0)
#client.send_message("/avatar/parameters/LeftEyeLid", 0)
#print('X: ', xper, ' Y: ', yper)
#except:
# print('[WARN] Calculation Error: Move Eye Around or Adjust Detection Threshold.')
#xt = int(p[0])
#yt = int(p[1])
# cv2.imshow("frame", frame1)
# cv2.imshow("img", image)
# except:
# print('sussyy e rawr')
# if cv2.waitKey(1) & 0xFF == ord("q"):
#break
# cv2.destroyAllWindows()
if __name__ == "__main__":
model_name = "3A4Bh-Ref25"
model_type = "INC"
video_path = 0
# with open("config.txt") as calibratefl:
# lines = calibratefl.readlines()
# rv = float(lines[4].strip())
# calibratefl.close()
def vc():
vc.xmax = 1
vc.xmin = 6969
vc.ymax = 1
vc.ymin = 6969
vc.cfc = 50
vc.cc = 1
vc.cu = 0
vc.cd = 0
vc.cl = 0
vc.cr = 0
vc.fc = 0
vc()
def savecalibvalues(calibcenterx, calibcentery, calibrightx, calibleftx, calibupy, calibdowny):
with open('eyeconfig.cfg', 'w+') as cw:
cw.write(str(calibcenterx))
cw.write('\n')
cw.write(str(calibcentery))
cw.write('\n')
cw.write(str(calibrightx))
cw.write('\n')
cw.write(str(calibleftx))
cw.write('\n')
cw.write(str(calibupy))
cw.write('\n')
cw.write(str(calibdowny))
cw.close()
# run the calibration routine
main(model_type, model_name)


@@ -1,11 +0,0 @@
import random
import time
from machine import Pin, Timer
led = Pin(15, Pin.OUT)
while True:
    t = random.randint(5, 45)
    print(t)
    print('PING')
    led.toggle()
    time.sleep(t * 60)


@@ -1,814 +0,0 @@
import tensorflow
import tensorflow.compat.v1 as tf
import tensorflow_hub as hub
from base_model import BaseModel
tf.disable_v2_behavior()
class Inception(BaseModel):
"""
Google inception model
"""
def __init__(self, model_name, cfg):
super(Inception, self).__init__(model_name, cfg)
self.m = 0.5
self.l2_reg = tf.keras.regularizers.l2(cfg["l2_beta"])
self.init_placeholders()
self.init_forward()
self.init_optimizer()
self.summary_op = tf.summary.merge_all()
def bn_lrelu(self, x, train_logical):
x = tf.layers.batch_normalization(
x, training=train_logical, momentum=0.9997, scale=True, center=True
)
x = tf.nn.leaky_relu(x, alpha=0.17)
return x
# Inception Block A
def block_a(self, net, name_scope, is_training):
with tf.variable_scope(
name_or_scope=name_scope, default_name="Inception_block_A"
):
# Branch 0, 1x1
with tf.variable_scope("branch_0"):
branch_0 = tf.layers.conv2d(
inputs=net,
filters=96 * self.m,
kernel_size=(1, 1),
strides=(1, 1),
padding="SAME",
kernel_regularizer=self.l2_reg,
kernel_initializer=tensorflow.initializers.GlorotUniform(),
name="conv2d_0a_1x1",
)
branch_0 = self.bn_lrelu(branch_0, is_training)
# Branch 1: 1x1 + 3x3
with tf.variable_scope("branch_1"):
branch_1 = tf.layers.conv2d(
inputs=net,
filters=64 * self.m,
kernel_size=(1, 1),
strides=(1, 1),
padding="SAME",
kernel_regularizer=self.l2_reg,
kernel_initializer=tensorflow.initializers.GlorotUniform(),
name="conv2d_1a_1x1",
)
branch_1 = self.bn_lrelu(branch_1, is_training)
branch_1 = tf.layers.conv2d(
inputs=branch_1,
filters=96 * self.m,
kernel_size=(3, 3),
strides=(1, 1),
padding="SAME",
kernel_regularizer=self.l2_reg,
kernel_initializer=tensorflow.initializers.GlorotUniform(),
name="conv2d_1b_3x3",
)
branch_1 = self.bn_lrelu(branch_1, is_training)
# Branch 2: 1x1 + 3x3 + 3x3
with tf.variable_scope("branch_2"):
branch_2 = tf.layers.conv2d(
inputs=net,
filters=64 * self.m,
kernel_size=(1, 1),
strides=(1, 1),
padding="SAME",
kernel_regularizer=self.l2_reg,
kernel_initializer=tensorflow.initializers.GlorotUniform(),
name="conv2d_2a_1x1",
)
branch_2 = self.bn_lrelu(branch_2, is_training)
branch_2 = tf.layers.conv2d(
inputs=branch_2,
filters=96 * self.m,
kernel_size=(3, 3),
strides=(1, 1),
padding="SAME",
kernel_regularizer=self.l2_reg,
kernel_initializer=tensorflow.initializers.GlorotUniform(),
name="conv2d_2b_3x3",
)
branch_2 = self.bn_lrelu(branch_2, is_training)
branch_2 = tf.layers.conv2d(
inputs=branch_2,
filters=96 * self.m,
kernel_size=(3, 3),
strides=(1, 1),
padding="SAME",
kernel_regularizer=self.l2_reg,
kernel_initializer=tensorflow.initializers.GlorotUniform(),
name="conv2d_2c_3x3",
)
branch_2 = self.bn_lrelu(branch_2, is_training)
# Branch 3: AvgPool + 1x1
with tf.variable_scope("branch_3"):
branch_3 = tf.layers.average_pooling2d(
inputs=net,
pool_size=(3, 3),
strides=(1, 1),
padding="SAME",
name="AvgPool_3a_3x3",
)
branch_3 = tf.layers.conv2d(
inputs=branch_3,
filters=96 * self.m,
kernel_size=(1, 1),
strides=(1, 1),
padding="SAME",
kernel_regularizer=self.l2_reg,
kernel_initializer=tensorflow.initializers.GlorotUniform(),
name="conv2d_3b_1x1",
)
branch_3 = self.bn_lrelu(branch_3, is_training)
return tf.concat([branch_0, branch_1, branch_2, branch_3], axis=3)
# Reduction block A
def block_a_reduction(self, net, name_scope, is_training):
with tf.variable_scope(
name_or_scope=name_scope, default_name="Reduction_block_A"
):
# Branch 0, 3x3(V2)
with tf.variable_scope("branch_0"):
branch_0 = tf.layers.conv2d(
inputs=net,
filters=384 * self.m,
kernel_size=(3, 3),
strides=(2, 2),
padding="VALID",
kernel_regularizer=self.l2_reg,
kernel_initializer=tensorflow.initializers.GlorotUniform(),
name="conv2d_0a_3x3V2",
)
branch_0 = self.bn_lrelu(branch_0, is_training)
# Branch 1: 1x1 + 3x3 + 3x3V2
with tf.variable_scope("branch_1"):
branch_1 = tf.layers.conv2d(
inputs=net,
filters=192 * self.m,
kernel_size=(1, 1),
strides=(1, 1),
padding="SAME",
kernel_regularizer=self.l2_reg,
kernel_initializer=tensorflow.initializers.GlorotUniform(),
name="conv2d_1a_1x1",
)
branch_1 = self.bn_lrelu(branch_1, is_training)
branch_1 = tf.layers.conv2d(
inputs=branch_1,
filters=224 * self.m,
kernel_size=(3, 3),
strides=(1, 1),
padding="SAME",
kernel_regularizer=self.l2_reg,
kernel_initializer=tensorflow.initializers.GlorotUniform(),
name="conv2_1b_3x3",
)
branch_1 = self.bn_lrelu(branch_1, is_training)
branch_1 = tf.layers.conv2d(
inputs=branch_1,
filters=256 * self.m,
kernel_size=(3, 3),
strides=(2, 2),
padding="VALID",
kernel_regularizer=self.l2_reg,
kernel_initializer=tensorflow.initializers.GlorotUniform(),
name="conv2_1c_3x3V2",
)
branch_1 = self.bn_lrelu(branch_1, is_training)
# Branch 2: MaxPool(3x3)
with tf.variable_scope("branch_3"):
branch_2 = tf.layers.max_pooling2d(
inputs=net,
pool_size=(3, 3),
strides=(2, 2),
padding="VALID",
name="MaxPool_2a_3x3V2",
)
return tf.concat([branch_0, branch_1, branch_2], axis=3)
# Inception Block B
def block_b(self, net, name_scope, is_training):
with tf.variable_scope(
name_or_scope=name_scope, default_name="Inception_block_B"
):
# Branch 0: 1x1
with tf.variable_scope("branch_0"):
branch_0 = tf.layers.conv2d(
inputs=net,
filters=384 * self.m,
kernel_size=(1, 1),
strides=(1, 1),
padding="SAME",
kernel_regularizer=self.l2_reg,
kernel_initializer=tensorflow.initializers.GlorotUniform(),
name="Conv2d_0a_1x1",
)
branch_0 = self.bn_lrelu(branch_0, is_training)
# branch 1: 1x1 + 1x7 + 7x1
with tf.variable_scope("branch_1"):
branch_1 = tf.layers.conv2d(
inputs=net,
filters=192 * self.m,
kernel_size=(1, 1),
strides=(1, 1),
padding="SAME",
kernel_regularizer=self.l2_reg,
kernel_initializer=tensorflow.initializers.GlorotUniform(),
name="Conv2d_1a_1x1",
)
branch_1 = self.bn_lrelu(branch_1, is_training)
branch_1 = tf.layers.conv2d(
inputs=branch_1,
filters=224 * self.m,
kernel_size=(1, 7),
strides=(1, 1),
padding="SAME",
kernel_regularizer=self.l2_reg,
kernel_initializer=tensorflow.initializers.GlorotUniform(),
name="Conv2d_1b_1x7",
)
branch_1 = self.bn_lrelu(branch_1, is_training)
branch_1 = tf.layers.conv2d(
inputs=branch_1,
filters=256 * self.m,
kernel_size=(7, 1),
strides=(1, 1),
padding="SAME",
kernel_regularizer=self.l2_reg,
kernel_initializer=tensorflow.initializers.GlorotUniform(),
name="Conv2d_1c_7x1",
)
branch_1 = self.bn_lrelu(branch_1, is_training)
# branch 2: 1x1 + 1x7 + 7x1 + 1x7 + 7x1
with tf.variable_scope("branch_2"):
branch_2 = tf.layers.conv2d(
inputs=net,
filters=192 * self.m,
kernel_size=(1, 1),
strides=(1, 1),
padding="SAME",
kernel_regularizer=self.l2_reg,
kernel_initializer=tensorflow.initializers.GlorotUniform(),
name="Conv2_2a_1x1",
)
branch_2 = self.bn_lrelu(branch_2, is_training)
branch_2 = tf.layers.conv2d(
inputs=branch_2,
filters=192 * self.m,
kernel_size=(1, 7),
strides=(1, 1),
padding="SAME",
kernel_regularizer=self.l2_reg,
kernel_initializer=tensorflow.initializers.GlorotUniform(),
name="Conv2d_2b_1x7",
)
branch_2 = self.bn_lrelu(branch_2, is_training)
branch_2 = tf.layers.conv2d(
inputs=branch_2,
filters=224 * self.m,
kernel_size=(7, 1),
strides=(1, 1),
padding="SAME",
kernel_regularizer=self.l2_reg,
kernel_initializer=tensorflow.initializers.GlorotUniform(),
name="Conv2d_2c_7x1",
)
branch_2 = self.bn_lrelu(branch_2, is_training)
branch_2 = tf.layers.conv2d(
inputs=branch_2,
filters=224 * self.m,
kernel_size=(1, 7),
strides=(1, 1),
padding="SAME",
kernel_regularizer=self.l2_reg,
kernel_initializer=tensorflow.initializers.GlorotUniform(),
name="Conv2d_2d_1x7",
)
branch_2 = self.bn_lrelu(branch_2, is_training)
branch_2 = tf.layers.conv2d(
inputs=branch_2,
filters=256 * self.m,
kernel_size=(7, 1),
strides=(1, 1),
padding="SAME",
kernel_regularizer=self.l2_reg,
kernel_initializer=tensorflow.initializers.GlorotUniform(),
name="Conv2d_2e_7x1",
)
branch_2 = self.bn_lrelu(branch_2, is_training)
# Branch 3: AvgPool + 1x1
with tf.variable_scope("branch_3"):
branch_3 = tf.layers.average_pooling2d(
inputs=net,
pool_size=(3, 3),
strides=(1, 1),
padding="SAME",
name="AvgPool_3a_3x3",
)
branch_3 = tf.layers.conv2d(
inputs=branch_3,
filters=128 * self.m,
kernel_size=(1, 1),
strides=(1, 1),
padding="SAME",
kernel_regularizer=self.l2_reg,
kernel_initializer=tensorflow.initializers.GlorotUniform(),
name="conv2d_3b_1x1",
)
branch_3 = self.bn_lrelu(branch_3, is_training)
return tf.concat([branch_0, branch_1, branch_2, branch_3], axis=3)
# Reduction block B
def block_b_reduction(self, net, name_scope, is_training):
with tf.variable_scope(
name_or_scope=name_scope, default_name="Reduction_block_B"
):
# Branch 0: 1x1 + 3x3(V,2)
with tf.variable_scope("branch_0"):
branch_0 = tf.layers.conv2d(
inputs=net,
filters=192 * self.m,
kernel_size=(1, 1),
strides=(1, 1),
padding="SAME",
kernel_regularizer=self.l2_reg,
kernel_initializer=tensorflow.initializers.GlorotUniform(),
name="conv2d_0a_1x1",
)
branch_0 = self.bn_lrelu(branch_0, is_training)
branch_0 = tf.layers.conv2d(
inputs=branch_0,
filters=192 * self.m,
kernel_size=(3, 3),
strides=(2, 2),
padding="VALID",
kernel_regularizer=self.l2_reg,
kernel_initializer=tensorflow.initializers.GlorotUniform(),
name="conv2d_0b_3x3V2",
)
branch_0 = self.bn_lrelu(branch_0, is_training)
# Branch 1: 1x1 + 1x7 + 7x1 + 3x3(V,2)
with tf.variable_scope("branch_1"):
branch_1 = tf.layers.conv2d(
inputs=net,
filters=256 * self.m,
kernel_size=(1, 1),
strides=(1, 1),
padding="SAME",
kernel_regularizer=self.l2_reg,
kernel_initializer=tensorflow.initializers.GlorotUniform(),
name="Conv2d_1a_1x1",
)
branch_1 = self.bn_lrelu(branch_1, is_training)
branch_1 = tf.layers.conv2d(
inputs=branch_1,
filters=256 * self.m,
kernel_size=(1, 7),
strides=(1, 1),
padding="SAME",
kernel_regularizer=self.l2_reg,
kernel_initializer=tensorflow.initializers.GlorotUniform(),
name="conv2d_1b_1x7",
)
branch_1 = self.bn_lrelu(branch_1, is_training)
branch_1 = tf.layers.conv2d(
inputs=branch_1,
filters=320 * self.m,
kernel_size=(7, 1),
strides=(1, 1),
padding="SAME",
kernel_regularizer=self.l2_reg,
kernel_initializer=tensorflow.initializers.GlorotUniform(),
name="conv2d_1c_7x1",
)
branch_1 = self.bn_lrelu(branch_1, is_training)
branch_1 = tf.layers.conv2d(
inputs=branch_1,
filters=320 * self.m,
kernel_size=(3, 3),
strides=(2, 2),
padding="VALID",
kernel_regularizer=self.l2_reg,
kernel_initializer=tensorflow.initializers.GlorotUniform(),
name="Conv2d_1d_3x3V2",
)
branch_1 = self.bn_lrelu(branch_1, is_training)
# Branch 2: MaxPool 3x3 (V,2)
with tf.variable_scope("branch_2"):
branch_2 = tf.layers.max_pooling2d(
inputs=net,
pool_size=(3, 3),
strides=(2, 2),
padding="VALID",
name="MaxPool_2a_3x3V2",
)
return tf.concat([branch_0, branch_1, branch_2], axis=3)
# Inception Block C
def block_c(self, net, name_scope, is_training):
with tf.variable_scope(
name_or_scope=name_scope, default_name="Inception_Block_C"
):
# Branch 0: 1x1
with tf.variable_scope("branch_0"):
branch_0 = tf.layers.conv2d(
inputs=net,
filters=256 * self.m,
kernel_size=(1, 1),
strides=(1, 1),
padding="SAME",
kernel_regularizer=self.l2_reg,
kernel_initializer=tensorflow.initializers.GlorotUniform(),
name="Conv2d_0a_1x1",
)
branch_0 = self.bn_lrelu(branch_0, is_training)
# Branch 1: 1x1 {1x3, 3x1}
with tf.variable_scope("branch_1"):
branch_1 = tf.layers.conv2d(
inputs=net,
filters=384 * self.m,
kernel_size=(1, 1),
strides=(1, 1),
padding="SAME",
kernel_regularizer=self.l2_reg,
kernel_initializer=tensorflow.initializers.GlorotUniform(),
name="conv2d_1a_1x1",
)
branch_1 = self.bn_lrelu(branch_1, is_training)
branch_1a = tf.layers.conv2d(
inputs=branch_1,
filters=256 * self.m,
kernel_size=(1, 3),
strides=(1, 1),
padding="SAME",
kernel_regularizer=self.l2_reg,
kernel_initializer=tensorflow.initializers.GlorotUniform(),
name="conv2d_1b0_1x3",
)
branch_1a = self.bn_lrelu(branch_1a, is_training)
branch_1b = tf.layers.conv2d(
inputs=branch_1,
filters=256 * self.m,
kernel_size=(3, 1),
strides=(1, 1),
padding="SAME",
kernel_regularizer=self.l2_reg,
kernel_initializer=tensorflow.initializers.GlorotUniform(),
name="conv2d_1b1_3x1",
)
branch_1b = self.bn_lrelu(branch_1b, is_training)
branch_1 = tf.concat([branch_1a, branch_1b], axis=3)
# Branch 2: 1x1, 3x1, 1x3 {3x1, 1x3}
with tf.variable_scope("branch_2"):
branch_2 = tf.layers.conv2d(
inputs=net,
filters=384 * self.m,
kernel_size=(1, 1),
strides=(1, 1),
padding="SAME",
kernel_regularizer=self.l2_reg,
kernel_initializer=tensorflow.initializers.GlorotUniform(),
name="conv2d_2a_1x1",
)
branch_2 = self.bn_lrelu(branch_2, is_training)
branch_2 = tf.layers.conv2d(
inputs=branch_2,
filters=448 * self.m,
kernel_size=(1, 3),
strides=(1, 1),
padding="SAME",
kernel_regularizer=self.l2_reg,
kernel_initializer=tensorflow.initializers.GlorotUniform(),
name="conv2d_2b_1x3",
)
branch_2 = self.bn_lrelu(branch_2, is_training)
branch_2 = tf.layers.conv2d(
inputs=branch_2,
filters=512 * self.m,
kernel_size=(3, 1),
strides=(1, 1),
padding="SAME",
kernel_regularizer=self.l2_reg,
kernel_initializer=tensorflow.initializers.GlorotUniform(),
name="conv2d_2c_3x1",
)
branch_2 = self.bn_lrelu(branch_2, is_training)
branch_2a = tf.layers.conv2d(
inputs=branch_2,
filters=256 * self.m,
kernel_size=(1, 3),
strides=(1, 1),
padding="SAME",
kernel_regularizer=self.l2_reg,
kernel_initializer=tensorflow.initializers.GlorotUniform(),
name="conv2d_2d0_1x3",
)
branch_2a = self.bn_lrelu(branch_2a, is_training)
branch_2b = tf.layers.conv2d(
inputs=branch_2,
filters=256 * self.m,
kernel_size=(3, 1),
strides=(1, 1),
padding="SAME",
kernel_regularizer=self.l2_reg,
kernel_initializer=tensorflow.initializers.GlorotUniform(),
name="conv2d_2d1_3x1",
)
branch_2b = self.bn_lrelu(branch_2b, is_training)
branch_2 = tf.concat([branch_2a, branch_2b], axis=3)
# Branch 3: AvgPool, 1x1
with tf.variable_scope("branch_3"):
branch_3 = tf.layers.average_pooling2d(
inputs=net,
pool_size=(3, 3),
strides=(1, 1),
padding="SAME",
name="AvgPool_3a_3x3",
)
branch_3 = tf.layers.conv2d(
inputs=branch_3,
filters=256 * self.m,
kernel_size=(1, 1),
padding="SAME",
kernel_regularizer=self.l2_reg,
kernel_initializer=tensorflow.initializers.GlorotUniform(),
name="Conv2d_3b_1x1",
)
branch_3 = self.bn_lrelu(branch_3, is_training)
return tf.concat([branch_0, branch_1, branch_2, branch_3], axis=3)
def init_forward(self):
# make the stem
net = self.X
# Begin Inception Model
with tf.variable_scope(name_or_scope="InceptionV4"):
net = tf.layers.conv2d(
inputs=net,
filters=32 * self.m,
kernel_size=(3, 3),
strides=(2, 2),
padding="VALID",
kernel_regularizer=self.l2_reg,
kernel_initializer=tensorflow.initializers.GlorotUniform(),
name="conv2d_stem0_3x3V2",
)
net = self.bn_lrelu(net, self.train_flag)
net = tf.layers.conv2d(
inputs=net,
filters=32 * self.m,
kernel_size=(3, 3),
strides=(1, 1),
padding="VALID",
kernel_regularizer=self.l2_reg,
kernel_initializer=tensorflow.initializers.GlorotUniform(),
name="conv2d_stem1_3x3V1",
)
net = self.bn_lrelu(net, self.train_flag)
net = tf.layers.conv2d(
inputs=net,
filters=64 * self.m,
kernel_size=(3, 3),
strides=(1, 1),
padding="SAME",
kernel_regularizer=self.l2_reg,
kernel_initializer=tensorflow.initializers.GlorotUniform(),
name="Conv2d_stem2_3x3",
)
net = self.bn_lrelu(net, self.train_flag)
with tf.variable_scope("Mixed_3a"):
with tf.variable_scope("branch_0"):
net_a = tf.layers.conv2d(
inputs=net,
filters=96 * self.m,
kernel_size=(3, 3),
strides=(2, 2),
padding="VALID",
kernel_regularizer=self.l2_reg,
kernel_initializer=tensorflow.initializers.GlorotUniform(),
name="Conv2d_0a_3x3s2",
)
net_a = self.bn_lrelu(net_a, self.train_flag)
with tf.variable_scope("branch_1"):
net_b = tf.layers.max_pooling2d(
inputs=net,
pool_size=(3, 3),
strides=(2, 2),
padding="VALID",
name="MaxPool_1a_3x3s2",
)
net = tf.concat([net_a, net_b], axis=3)
with tf.variable_scope("mixed_4a"):
# Branch 0: 1x1, 7x1, 1x7, 3x3v
with tf.variable_scope("branch_0"):
branch_0 = tf.layers.conv2d(
inputs=net,
filters=64 * self.m,
kernel_size=(1, 1),
strides=(1, 1),
padding="SAME",
kernel_regularizer=self.l2_reg,
kernel_initializer=tensorflow.initializers.GlorotUniform(),
name="Conv2d_0a_3x3",
)
branch_0 = self.bn_lrelu(branch_0, self.train_flag)
branch_0 = tf.layers.conv2d(
inputs=branch_0,
filters=64 * self.m,
kernel_size=(7, 1),
strides=(1, 1),
padding="SAME",
kernel_regularizer=self.l2_reg,
kernel_initializer=tensorflow.initializers.GlorotUniform(),
name="Conv2d_0b_7x1",
)
branch_0 = self.bn_lrelu(branch_0, self.train_flag)
branch_0 = tf.layers.conv2d(
inputs=branch_0,
filters=64 * self.m,
kernel_size=(1, 7),
strides=(1, 1),
padding="SAME",
kernel_regularizer=self.l2_reg,
kernel_initializer=tensorflow.initializers.GlorotUniform(),
name="Conv2d_0c_1x7",
)
branch_0 = self.bn_lrelu(branch_0, self.train_flag)
branch_0 = tf.layers.conv2d(
inputs=branch_0,
filters=96 * self.m,
kernel_size=(3, 3),
strides=(1, 1),
padding="VALID",
kernel_regularizer=self.l2_reg,
kernel_initializer=tensorflow.initializers.GlorotUniform(),
name="Conv2d_0d_3x3V",
)
branch_0 = self.bn_lrelu(branch_0, self.train_flag)
# Branch 1: 1x1, 3x3v
with tf.variable_scope("branch_1"):
branch_1 = tf.layers.conv2d(
inputs=net,
filters=64 * self.m,
kernel_size=(1, 1),
strides=(1, 1),
padding="SAME",
kernel_regularizer=self.l2_reg,
kernel_initializer=tensorflow.initializers.GlorotUniform(),
name="Conv2d_0a_3x3",
)
branch_1 = self.bn_lrelu(branch_1, self.train_flag)
branch_1 = tf.layers.conv2d(
inputs=branch_1,
filters=96 * self.m,
kernel_size=(3, 3),
strides=(1, 1),
padding="VALID",
kernel_regularizer=self.l2_reg,
kernel_initializer=tensorflow.initializers.GlorotUniform(),
name="Conv2d_0b_3x3V",
)
branch_1 = self.bn_lrelu(branch_1, self.train_flag)
net = tf.concat([branch_0, branch_1], axis=3)
with tf.variable_scope("Mixed_5a"):
# Branch 0: 3x3
with tf.variable_scope("branch_0"):
branch_0 = tf.layers.conv2d(
inputs=net,
filters=192 * self.m,
kernel_size=(3, 3),
strides=(2, 2),
padding="VALID",
kernel_regularizer=self.l2_reg,
kernel_initializer=tensorflow.initializers.GlorotUniform(),
name="Conv2d_0a_3x3v",
)
branch_0 = self.bn_lrelu(branch_0, self.train_flag)
# Branch 1: MaxPool 3x3s2
with tf.variable_scope("branch_1"):
branch_1 = tf.layers.max_pooling2d(
inputs=net,
pool_size=(3, 3),
strides=(2, 2),
padding="VALID",
name="MaxPool_0a_3x3s2",
)
net = tf.concat([branch_0, branch_1], axis=3)
# Block A: 3x
net = self.block_a(net, "Block_A0", self.train_flag)
net = self.block_a(net, "Block_A1", self.train_flag)
net = self.block_a(net, "Block_A2", self.train_flag)
# Block A: Reduction
net = self.block_a_reduction(net, "Reduction_A", self.train_flag)
# Block B: 4x
net = self.block_b(net, "Block_B0", self.train_flag)
net = self.block_b(net, "Block_B1", self.train_flag)
net = self.block_b(net, "Block_B2", self.train_flag)
net = self.block_b(net, "Block_B3", self.train_flag)
net = tf.nn.dropout(net, self.keep_prob, name="net_dropout")
self.GAP = tf.reduce_mean(net, axis=[1, 2], name="GAP")
# Final layer
units = self.GAP.get_shape().as_list()[1]
net = tf.reshape(self.GAP, (-1, 1, 1, units), name="reshaping")
net = tf.layers.conv2d(
net,
self.cfg["output_dim"],
(1, 1),
padding="VALID",
kernel_initializer=tensorflow.initializers.GlorotUniform(),
kernel_regularizer=self.l2_reg,
use_bias=False,
name="final_conv",
)
net = tf.nn.relu(net, name="logits_relu")
# Logits
self.logits = tf.reshape(net, shape=(-1, self.cfg["output_dim"]), name="y")
self.loss = tf.losses.huber_loss(
labels=self.Y,
predictions=self.logits,
weights=[self.cfg["output_weights"][0 : self.cfg["output_dim"]]],
delta=1.0,
)
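# note: output_weights holds 5 entries (x, y, w, h, a), but only the first
# output_dim = 3 of them are used above, matching the (x, y, w) logits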
# Training summary for the current batch_loss
tf.summary.scalar("loss", self.loss)


@@ -1,2 +0,0 @@
model_checkpoint_path: "-224532"
all_model_checkpoint_paths: "-224532"


@@ -1,5 +0,0 @@
run 93
model_type INC
model_name 3A4Bh-Ref25
model_comment
best_loss inf


@@ -1,14 +0,0 @@
import cv2
# Opens the Video file
cap = cv2.VideoCapture('raw.mp4')
i = 0
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    cv2.imwrite('kang' + str(i) + '.jpg', frame)
    i += 1
cap.release()
cv2.destroyAllWindows()


@@ -1,5 +0,0 @@
tensorflow==2.5.3
tensorflow_hub==0.8.0
pyttsx3
python-osc
opencv-python


@@ -1,203 +0,0 @@
import os
import cv2
import numpy as np
from config import config
def check_dir(path):
    if not os.path.exists(path):
        os.makedirs(path)

def rf(low, high):
    """
    return a random float number between [low, high)
    :param low: lower bound
    :param high: higher bound (excluded)
    :return: a float number between [low, high)
    """
    if low >= high:
        return low
    return np.random.uniform(low, high)

def ri(low, high):
    """
    return a random int number between [low, high)
    :param low: lower bound
    :param high: higher bound (excluded)
    :return: an int number between [low, high)
    """
    if low >= high:
        return low
    return np.random.randint(low, high)
def annotator(color, img, x, y, w=10, h=None, a=0):
    """
    draw a crosshair and an ellipse around the predicted pupil
    :param color: BGR color of the annotation; None uses the default yellow
    :param img: input frame
    :param x: x-position
    :param y: y-position
    :param w: width of pupil
    :param h: height of pupil (defaults to w)
    :param a: ellipse angle
    :return: an image with the annotation drawn around the pupil
    """
    if color is None:
        color = (0, 250, 250)
    c = 1
    if np.ndim(img) == 2:
        img = np.expand_dims(img, -1)
    elif np.ndim(img) == 3:
        c = img.shape[2]
    if c == 1:
        img = np.concatenate((img, img, img), axis=2)

    # crosshair endpoints
    l1xs = int(x - 3)
    l1ys = int(y)
    l1xe = int(x + 3)
    l1ye = int(y)
    l2xs = int(x)
    l2ys = int(y - 3)
    l2xe = int(x)
    l2ye = int(y + 3)
    img = cv2.line(img, (l1xs, l1ys), (l1xe, l1ye), color, 1)
    img = cv2.line(img, (l2xs, l2ys), (l2xe, l2ye), color, 1)

    # we predict only the width
    if h is None:
        h = w

    # draw the ellipse
    img = cv2.ellipse(img, ((x, y), (w, h), a), color, 1)
    return img
def create_noisy_video(data_path='data/valid_data.csv', length=60, fps=5, with_label=False, augmentor=None):
    """
    create a sample video based on random images.
    Of course it is not a valid way to test the model with already-seen images;
    it is just to check the speed of the model at different FPS.
    :param data_path: CSV file for the input data
    :param length: length of the video in seconds
    :param fps: number of frames per second
    :param with_label: if true, draw the true label on the video
    :param augmentor: optional augmentor used to add noise
    :return: a noisy video (file name) for test purposes
    """
    # read the CSV
    data_list = []
    with open(data_path, "r") as f:
        for line in f:
            # values: [img_path, x, y, w, h, a]
            values = line.strip().split(",")
            data_list.append([values[0],   # image path
                              values[1],   # x
                              values[2]])  # y

    # number of images needed to make the video
    images_len = fps * length
    np.random.shuffle(data_list)
    start_idx = np.random.randint(0, len(data_list) - images_len)
    selected_images = data_list[start_idx:start_idx + images_len]
    output_fn = 'video_{}s_{}fps.avi'.format(length, fps)
    video = cv2.VideoWriter(output_fn, cv2.VideoWriter_fourcc(*"XVID"), fps,
                            (config["input_height"], config["input_width"]))
    for i in selected_images:
        img = cv2.imread(i[0], cv2.IMREAD_GRAYSCALE)
        x = float(i[1])
        y = float(i[2])
        # w = float(i[3])
        # h = float(i[4])
        # a = float(i[5])
        label = [x, y]
        if augmentor is not None:
            img, label = augmentor.addNoise(img, label)
        img = np.asarray(img, dtype=np.uint8)
        if with_label:
            img = annotator((0, 250, 0), img, *label)
            font = cv2.FONT_HERSHEY_PLAIN
            texts = i[0].split("/")
            text = texts[2] + "/" + texts[3] + "/" + texts[4]
            img = cv2.putText(img, text, (5, 10), font, 0.8, (0, 250, 0), 1, cv2.LINE_8)
        else:
            img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
        video.write(img)
    cv2.destroyAllWindows()
    video.release()
    return output_fn
def change_channel(img, num_channel=1):
    """
    reshape a frame to the desired number of channels
    :param img: the input image, a numpy array
    :param num_channel: desired number of channels
    :return: the frame with num_channel channels
    """
    img = np.expand_dims(img, -1)
    if num_channel == 3:
        img = np.concatenate((img, img, img), axis=2)
    return img
def gray_normalizer(gray):
    """
    get a grayscale image with pixel values 0-255
    and return normalized pixels with values between -0.5 and 0.5
    :param gray: input grayscale image
    :return: normalized grayscale image
    """
    # 0.5 approximates the average mean over all training images (without noise)
    gray = gray * 1 / 255
    out_gray = np.asarray(gray - 0.5, dtype=np.float32)
    return out_gray
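# worked example: gray_normalizer maps pixel 0 -> -0.5, 128 -> ~0.002, 255 -> 0.5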
def gray_denormalizer(gray):
    """
    Get a normalized gray image and convert it back to values 0-255
    :param gray: normalized grayscale image
    :return: denormalized grayscale image
    """
    out_gray = gray + 0.5
    out_gray = np.asarray(out_gray * 255, dtype=np.uint8)
    return out_gray
def save_dict(dict, save_path):
    with open(save_path, mode="w") as f:
        for key, val in dict.items():
            f.write(key + ";" + str(val) + "\n")
    print("Class dict saved successfully at: {}".format(save_path))

def load_dict(load_path):
    dict = {}
    with open(load_path, mode="r") as f:
        for line in f:
            key, val = line.split(";")
            dict[key] = int(val)
    print("Class dict loaded successfully from: {}".format(load_path))
    return dict
if __name__ == "__main__":
ag = Augmentor('data/noisy_videos', config)
create_noisy_video(with_label=True, augmentor=ag)