remove torch for smaller binary. kill bug for mommy

Prohurtz 2023-06-24 14:15:46 -05:00
parent 338591617e
commit 45a7b25a0a
2 changed files with 16 additions and 8 deletions

View File

@@ -121,4 +121,4 @@ class EyeTrackConfig(BaseModel):
             pass
         with open(CONFIG_FILE_NAME, "w") as settings_file:
             json.dump(obj=self.dict(), fp=settings_file)
-        print("[INFO] Config Saved Successfully")
+        print(f"\033[92m[INFO] Config Saved Successfully\033[0m")

View File

@@ -30,8 +30,6 @@ import os
 os.environ["OMP_NUM_THREADS"] = "1"
 import onnxruntime
 import numpy as np
-from PIL import Image
-import torchvision.transforms as transforms
 import cv2
 import time
 import math
@@ -52,10 +50,20 @@ def run_model(input_queue, output_queue, session):
         if frame is None:
             break
-        to_tensor = transforms.ToTensor()
-        img_tensor = to_tensor(frame)
-        img_tensor.unsqueeze_(0)
-        img_np = img_tensor.numpy()
+        # to_tensor = transforms.ToTensor()
+        # img_tensor = to_tensor(frame)
+        # img_tensor.unsqueeze_(0)
+        # img_np = img_tensor.numpy()
+        img_np = np.array(frame)
+        # Normalize the pixel values to [0, 1] and convert the data type to float32
+        img_np = img_np.astype(np.float32) / 255.0
+        # Transpose the dimensions from (height, width, channels) to (channels, height, width)
+        img_np = np.transpose(img_np, (2, 0, 1))
+        # Add a batch dimension
+        img_np = np.expand_dims(img_np, axis=0)
         ort_inputs = {session.get_inputs()[0].name: img_np}
         pre_landmark = session.run(None, ort_inputs)
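
This hunk replaces torchvision's transforms.ToTensor() with plain NumPy: scale the uint8 pixels to [0, 1] as float32, reorder the axes from HWC to CHW, and add a batch dimension, which reproduces what ToTensor() followed by unsqueeze_(0) produced and lets torch/torchvision be dropped from the packaged build. A minimal standalone sketch of that preprocessing, assuming an RGB uint8 frame (the helper name and shapes below are illustrative, not from the repo):

import numpy as np

def to_onnx_input(frame):
    # frame: uint8 array of shape (H, W, C)
    img = frame.astype(np.float32) / 255.0   # scale to [0, 1], like ToTensor()
    img = np.transpose(img, (2, 0, 1))       # (H, W, C) -> (C, H, W)
    return np.expand_dims(img, axis=0)       # add batch dim -> (1, C, H, W)

# a dummy 256x256 RGB frame yields a (1, 3, 256, 256) float32 input
assert to_onnx_input(np.zeros((256, 256, 3), dtype=np.uint8)).shape == (1, 3, 256, 256)

onnxruntime accepts NumPy arrays directly, so the session.run() call itself does not need to change.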
@@ -180,7 +188,7 @@ class MOMMY_C(object):
             x = pre_landmark[17][0]
             y = pre_landmark[17][1]
             frame = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
-            return frame, x, y, per
+            return frame, float(x), float(y), per
         frame = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
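
The only change in this last hunk wraps the returned landmark coordinates in float(). Assuming the landmark values come back from onnxruntime as NumPy scalar types such as numpy.float32 (an assumption based on the hunk above, not stated in the commit), float() converts them to native Python floats, which downstream consumers and serializers handle more predictably. A small illustration of the difference, under that assumption:

import json
import numpy as np

x = np.float32(0.5)
print(type(x))               # <class 'numpy.float32'>
print(type(float(x)))        # <class 'float'>
# json.dumps(x)              # would raise TypeError: float32 is not JSON serializable
print(json.dumps(float(x)))  # works: prints 0.5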