Mirror of https://github.com/openmv/openmv.git
Merge 9c55b26a69 into daf26f9fc8
Commit b335b43724
@@ -44,6 +44,12 @@
     "alignment": 16,
     "optimize": "Performance"
 },
+{
+    "type": "tflite",
+    "path": "{TOP}/lib/models/face_landmarks_192.tflite",
+    "alignment": 16,
+    "optimize": "Performance"
+},
 {
     "type": "tflite",
     "path": "{TOP}/lib/models/palm_detection_full_192.tflite",

@@ -26,6 +26,12 @@
     "alignment": 32,
     "profile": "default"
 },
+{
+    "type": "tflite",
+    "path": "{TOP}/lib/models/face_landmarks_192.tflite",
+    "alignment": 32,
+    "profile": "default"
+},
 {
     "type": "tflite",
     "path": "{TOP}/lib/models/palm_detection_full_192.tflite",

BIN  lib/models/face_landmarks_192.tflite (new file; binary file not shown)
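These manifest entries embed face_landmarks_192.tflite into the firmware image alongside the existing palm-detection model. The new examples below then open it from the ROM filesystem; a minimal sketch, with the path and threshold taken from the examples in this PR:

    import ml
    from ml.postprocessing.mediapipe import FaceLandmarks

    # The manifest maps {TOP}/lib/models/face_landmarks_192.tflite into
    # the firmware, where the examples access it under /rom/.
    face_landmarks = ml.Model("/rom/face_landmarks_192.tflite",
                              postprocess=FaceLandmarks(threshold=0.4))
    print(face_landmarks)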
@@ -26,20 +26,16 @@ while True:
     img = csi0.snapshot()

     # faces is a list of ((x, y, w, h), score, keypoints) tuples
-    faces = model.predict([img])
-
-    # Draw bounding boxes around the detected faces and keypoints.
-    if faces:
-        for r, score, keypoints in faces[0]:
-            ml.utils.draw_predictions(img, [r], ("face",), ((0, 0, 255),), format=None)
-
-            # keypoints is a ndarray of shape (6, 2)
-            # 0 - right eye (x, y)
-            # 1 - left eye (x, y)
-            # 2 - nose (x, y)
-            # 3 - mouth (x, y)
-            # 4 - right ear (x, y)
-            # 5 - left ear (x, y)
-            ml.utils.draw_keypoints(img, keypoints, color=(255, 0, 0))
+    for r, score, keypoints in model.predict([img]):
+        ml.utils.draw_predictions(img, [r], ("face",), ((0, 0, 255),), format=None)
+
+        # keypoints is a ndarray of shape (6, 2)
+        # 0 - right eye (x, y)
+        # 1 - left eye (x, y)
+        # 2 - nose (x, y)
+        # 3 - mouth (x, y)
+        # 4 - right ear (x, y)
+        # 5 - left ear (x, y)
+        ml.utils.draw_keypoints(img, keypoints, color=(255, 0, 0))

     print(clock.fps(), "fps")

@@ -31,24 +31,20 @@ while True:
     img = csi0.snapshot()

     # palms is a list of ((x, y, w, h), score, keypoints) tuples
-    palms = model.predict([img])
-
-    # Draw bounding boxes around the detected palms and keypoints.
-    if palms:
-        for r, score, keypoints in palms[0]:
-            ml.utils.draw_predictions(img, [r], ("palm",), ((0, 0, 255),), format=None)
-
-            # keypoints is a ndarray of shape (7, 2)
-            # 0 - wrist (x, y)
-            # 1 - index finger mcp (x, y)
-            # 2 - middle finger mcp (x, y)
-            # 3 - ring finger mcp (x, y)
-            # 4 - pinky mcp (x, y)
-            # 5 - thumb cmc (x, y)
-            # 6 - thumb mcp (x, y)
-            #
-            # mcp = Metacarpophalangeal Joint - the knuckle
-            # cmc = Carpometacarpal Joint - the base of the thumb
-            ml.utils.draw_skeleton(img, keypoints, palm_lines, kp_color=(255, 0, 0), line_color=(0, 255, 0))
+    for r, score, keypoints in model.predict([img]):
+        ml.utils.draw_predictions(img, [r], ("palm",), ((0, 0, 255),), format=None)
+
+        # keypoints is a ndarray of shape (7, 2)
+        # 0 - wrist (x, y)
+        # 1 - index finger mcp (x, y)
+        # 2 - middle finger mcp (x, y)
+        # 3 - ring finger mcp (x, y)
+        # 4 - pinky mcp (x, y)
+        # 5 - thumb cmc (x, y)
+        # 6 - thumb mcp (x, y)
+        #
+        # mcp = Metacarpophalangeal Joint - the knuckle
+        # cmc = Carpometacarpal Joint - the base of the thumb
+        ml.utils.draw_skeleton(img, keypoints, palm_lines, kp_color=(255, 0, 0), line_color=(0, 255, 0))

     print(clock.fps(), "fps")

@@ -0,0 +1,50 @@
+# This work is licensed under the MIT license.
+# Copyright (c) 2013-2025 OpenMV LLC. All rights reserved.
+# https://github.com/openmv/openmv/blob/master/LICENSE
+#
+# This example shows off Google's MediaPipe Face Landmark Detection model for multiple faces.
+#
+# NOTE: This example requires an OpenMV Cam with an NPU like the AE3 or N6 to run real-time.
+
+import csi
+import time
+import ml
+from ml.preprocessing import Normalization
+from ml.postprocessing.mediapipe import BlazeFace
+from ml.postprocessing.mediapipe import FaceLandmarks
+
+# Initialize the sensor.
+csi0 = csi.CSI()
+csi0.reset()
+csi0.pixformat(csi.RGB565)
+csi0.framesize(csi.VGA)
+csi0.window((400, 400))
+
+# Load built-in face detection model
+face_detection = ml.Model("/rom/blazeface_front_128.tflite", postprocess=BlazeFace(threshold=0.4))
+print(face_detection)
+
+# Load built-in face landmark model
+face_landmarks = ml.Model("/rom/face_landmarks_192.tflite", postprocess=FaceLandmarks(threshold=0.4))
+print(face_landmarks)
+
+clock = time.clock()
+while True:
+    clock.tick()
+    img = csi0.snapshot()
+
+    # faces is a list of ((x, y, w, h), score, keypoints) tuples
+    for r, score, keypoints in face_detection.predict([img]):
+        # rect is (x, y, w, h) - enlarge by 2x for face landmarks model
+        wider_rect = (r[0] - r[2] // 2, r[1] - r[3] // 2, r[2] * 2, r[3] * 2)
+        # Operate on just the ROI of the detected face
+        n = Normalization(roi=wider_rect)
+
+        # marks is a list of ((x, y, w, h), score, keypoints) tuples
+        for r, score, keypoints in face_landmarks.predict([n(img)]):
+            ml.utils.draw_predictions(img, [r], ("face",), ((0, 0, 255),), format=None)
+
+            # keypoints is a ndarray of shape (468, 3) where each keypoint is (x, y, z)
+            ml.utils.draw_keypoints(img, keypoints, radius=0, color=(255, 0, 0))
+
+    print(clock.fps(), "fps")

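The wider_rect arithmetic above doubles the detector's box about its center before handing the ROI to the landmark model (the hand examples use the same trick with a 3x factor). A quick check with a hypothetical detection rectangle:

    r = (100, 80, 50, 40)  # (x, y, w, h) from the face detector
    wider_rect = (r[0] - r[2] // 2, r[1] - r[3] // 2, r[2] * 2, r[3] * 2)
    # wider_rect == (75, 60, 100, 80): twice the width and height.
    # Old center: (100 + 50 // 2, 80 + 40 // 2) == (125, 100)
    # New center: (75 + 100 // 2, 60 + 80 // 2) == (125, 100) -> unchanged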
@@ -0,0 +1,71 @@
+# This work is licensed under the MIT license.
+# Copyright (c) 2013-2025 OpenMV LLC. All rights reserved.
+# https://github.com/openmv/openmv/blob/master/LICENSE
+#
+# This example shows off Google's MediaPipe Face Landmark Detection model for a single face.
+#
+# NOTE: This example requires an OpenMV Cam with an NPU like the AE3 or N6 to run real-time.
+
+import csi
+import time
+import ml
+from ml.preprocessing import Normalization
+from ml.postprocessing.mediapipe import BlazeFace
+from ml.postprocessing.mediapipe import FaceLandmarks
+
+# Initialize the sensor.
+csi0 = csi.CSI()
+csi0.reset()
+csi0.pixformat(csi.RGB565)
+csi0.framesize(csi.VGA)
+csi0.window((400, 400))
+
+# Load built-in face detection model
+face_detection = ml.Model("/rom/blazeface_front_128.tflite", postprocess=BlazeFace(threshold=0.4))
+print(face_detection)
+
+# Load built-in face landmark model
+face_landmarks = ml.Model("/rom/face_landmarks_192.tflite", postprocess=FaceLandmarks(threshold=0.4))
+print(face_landmarks)
+
+# Tracking vars.
+n = None
+
+clock = time.clock()
+while True:
+    clock.tick()
+    img = csi0.snapshot()
+
+    if n is None:
+        # faces is a list of ((x, y, w, h), score, keypoints) tuples
+        for r, score, keypoints in face_detection.predict([img]):
+            # rect is (x, y, w, h) - enlarge by 2x for face landmarks model
+            wider_rect = (r[0] - r[2] // 2, r[1] - r[3] // 2, r[2] * 2, r[3] * 2)
+            # Operate on just the ROI of the detected face
+            n = Normalization(roi=wider_rect)
+
+    else:
+        # marks is a list of ((x, y, w, h), score, keypoints) tuples
+        marks = face_landmarks.predict([n(img)])
+
+        # No faces detected, reset the tracker.
+        if not marks:
+            n = None
+            continue
+
+        # Draw bounding boxes around the detected faces and keypoints.
+        for r, score, keypoints in marks:
+            ml.utils.draw_predictions(img, [r], ("face",), ((0, 0, 255),), format=None)
+
+            # keypoints is a ndarray of shape (468, 3) where each keypoint is (x, y, z)
+            ml.utils.draw_keypoints(img, keypoints, radius=0, color=(255, 0, 0))
+
+            # Center new_wider_rect on face for tracking
+            new_wider_rect = (r[0] + (r[2] // 2) - (wider_rect[2] // 2),
+                              r[1] + (r[3] // 2) - (wider_rect[3] // 2),
+                              wider_rect[2],
+                              wider_rect[3])
+            # Operate on just the ROI of the detected face
+            n = Normalization(roi=new_wider_rect)
+
+    print(clock.fps(), "fps")

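Unlike the multi-face example, this one runs the face detector only while n is None; after that the landmark model tracks the face by re-centering a fixed-size ROI on each frame's result, and resets to detection when the landmarks drop out. The re-centering keeps wider_rect's size and moves it onto the new landmark box's center; a check with hypothetical values:

    wider_rect = (75, 60, 100, 80)  # ROI from the initial detection
    r = (90, 70, 60, 50)            # latest landmark bounding box
    new_wider_rect = (r[0] + (r[2] // 2) - (wider_rect[2] // 2),
                      r[1] + (r[3] // 2) - (wider_rect[3] // 2),
                      wider_rect[2],
                      wider_rect[3])
    # new_wider_rect == (70, 55, 100, 80): same 100x80 size, centered on
    # r's center (120, 95) -> (70 + 50, 55 + 40) == (120, 95)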
@@ -39,34 +39,31 @@ while True:
     img = csi0.snapshot()

     # palms is a list of ((x, y, w, h), score, keypoints) tuples
-    palms = palm_detection.predict([img])
-
-    if palms:
-        for r, score, keypoints in palms[0]:
-            # rect is (x, y, w, h) - enlarge by 3x for hand landmarks model
-            wider_rect = (r[0] - r[2], r[1] - r[3], r[2] * 3, r[3] * 3)
-            # Operate on just the ROI of the detected palm
-            n = Normalization(roi=wider_rect)
-
-            # hands is a list of ((x, y, w, h), score, keypoints) tuples
-            # index 0 (if present) is left hand
-            # index 1 (if present) is right hand
-            hands = hand_landmarks.predict([n(img)])
-
-            # Draw bounding boxes around the detected hands and keypoints.
-            for i, detections in enumerate(hands):
-                for r, score, keypoints in detections:
-                    ml.utils.draw_predictions(img, [r], ("right",) if i else ("left",), ((0, 0, 255),), format=None)
-
-                    # keypoints: ndarray (21, 3) of hand joints (x, y, z)
-                    # Indices follow MediaPipe convention:
-                    # 0: wrist
-                    # Thumb: 1 cmc, 2 mcp, 3 ip, 4 tip
-                    # Index: 5 mcp, 6 pip, 7 dip, 8 tip
-                    # Middle: 9 mcp, 10 pip, 11 dip, 12 tip
-                    # Ring: 13 mcp, 14 pip, 15 dip, 16 tip
-                    # Pinky: 17 mcp, 18 pip, 19 dip, 20 tip
-                    # (cmc=base, mcp=knuckle, pip=mid, dip=distal, ip=thumb joint, tip=fingertip)
-                    ml.utils.draw_skeleton(img, keypoints, hand_lines, kp_color=(255, 0, 0), line_color=(0, 255, 0))
+    for r, score, keypoints in palm_detection.predict([img]):
+        # rect is (x, y, w, h) - enlarge by 3x for hand landmarks model
+        wider_rect = (r[0] - r[2], r[1] - r[3], r[2] * 3, r[3] * 3)
+        # Operate on just the ROI of the detected palm
+        n = Normalization(roi=wider_rect)
+
+        # hands is a list of ((x, y, w, h), score, keypoints) tuples
+        # index 0 (if present) is left hand
+        # index 1 (if present) is right hand
+        hands = hand_landmarks.predict([n(img)])
+
+        # Draw bounding boxes around the detected hands and keypoints.
+        for i, detections in enumerate(hands):
+            for r, score, keypoints in detections:
+                ml.utils.draw_predictions(img, [r], ("right",) if i else ("left",), ((0, 0, 255),), format=None)
+
+                # keypoints: ndarray (21, 3) of hand joints (x, y, z)
+                # Indices follow MediaPipe convention:
+                # 0: wrist
+                # Thumb: 1 cmc, 2 mcp, 3 ip, 4 tip
+                # Index: 5 mcp, 6 pip, 7 dip, 8 tip
+                # Middle: 9 mcp, 10 pip, 11 dip, 12 tip
+                # Ring: 13 mcp, 14 pip, 15 dip, 16 tip
+                # Pinky: 17 mcp, 18 pip, 19 dip, 20 tip
+                # (cmc=base, mcp=knuckle, pip=mid, dip=distal, ip=thumb joint, tip=fingertip)
+                ml.utils.draw_skeleton(img, keypoints, hand_lines, kp_color=(255, 0, 0), line_color=(0, 255, 0))

     print(clock.fps(), "fps")

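hand_lines (passed to ml.utils.draw_skeleton) is defined earlier in this example, outside the hunk shown here. Going by the index convention in the comments above, the connection list presumably pairs joint indices per finger; an illustrative, hypothetical subset:

    # Hypothetical sketch only - not the actual hand_lines from the example.
    thumb_lines = ((0, 1), (1, 2), (2, 3), (3, 4))  # wrist -> cmc -> mcp -> ip -> tip
    index_lines = ((0, 5), (5, 6), (6, 7), (7, 8))  # wrist -> mcp -> pip -> dip -> tip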
@@ -43,14 +43,11 @@ while True:

     if n is None:
         # palms is a list of ((x, y, w, h), score, keypoints) tuples
-        palms = palm_detection.predict([img])
-
-        if palms:
-            for r, score, keypoints in palms[0]:
-                # rect is (x, y, w, h) - enlarge by 3x for hand landmarks model
-                wider_rect = (r[0] - r[2], r[1] - r[3], r[2] * 3, r[3] * 3)
-                # Operate on just the ROI of the detected palm
-                n = Normalization(roi=wider_rect)
+        for r, score, keypoints in palm_detection.predict([img]):
+            # rect is (x, y, w, h) - enlarge by 3x for hand landmarks model
+            wider_rect = (r[0] - r[2], r[1] - r[3], r[2] * 3, r[3] * 3)
+            # Operate on just the ROI of the detected palm
+            n = Normalization(roi=wider_rect)

     else:
         # hands is a list of ((x, y, w, h), score, keypoints) tuples

@@ -78,7 +78,7 @@ class mediapipe_detection_postprocess:
         self.detection_post_process(ih, iw, nms, model, inputs, outputs, self.scores[1], self.cords[1],
                                     self.threshold, self.anchors[output_len:])

-        return nms.get_bounding_boxes(threshold=self.nms_threshold, sigma=self.nms_sigma)
+        return nms.get_bounding_boxes(threshold=self.nms_threshold, sigma=self.nms_sigma)[0]

     def detection_post_process(self, ih, iw, nms, model, inputs, outputs, score_idx, cords_idx, t, anchors):
         s_oh, s_ow, s_oc = model.output_shape[score_idx]

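This trailing [0] is the API change behind the example rewrites above: get_bounding_boxes() evidently returns per-class detection lists, and indexing the first class lets single-class post-processors hand predict() callers one flat list of (rect, score, keypoints) tuples. Old versus new call sites, as seen in the palm examples:

    # Before: callers unwrapped the outer per-class list themselves.
    palms = palm_detection.predict([img])
    if palms:
        for r, score, keypoints in palms[0]:
            pass  # use the detection

    # After: the detection list comes back directly.
    for r, score, keypoints in palm_detection.predict([img]):
        pass  # use the detection

HandLandmarks (below) keeps the unindexed return, presumably because it reports two classes (left and right hand) that the hand examples iterate with enumerate().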
@@ -173,3 +173,35 @@ class HandLandmarks:

         nms.add_bounding_box(xmin, ymin, xmax, ymax, score, left_right, keypoints=keypoints)
         return nms.get_bounding_boxes(threshold=self.nms_threshold, sigma=self.nms_sigma)
+
+
+class FaceLandmarks:
+    def __init__(self, threshold=0.6, nms_threshold=0.1, nms_sigma=0.1):
+        self.threshold = threshold
+        self.nms_threshold = nms_threshold
+        self.nms_sigma = nms_sigma
+
+    def __call__(self, model, inputs, outputs):
+        ib, ih, iw, ic = model.input_shape[0]
+        nms = NMS(iw, ih, inputs[0].roi)
+
+        score = sigmoid(outputs[1][0, 0, 0, 0])
+        if score < self.threshold:
+            return _NO_DETECTION
+
+        cords = outputs[0][0, 0, 0, :]
+
+        # Get the keypoint information
+        keypoints = np.empty((len(cords) // 3, 3))
+        keypoints[:, 0] = cords[0::3]
+        keypoints[:, 1] = cords[1::3]
+        keypoints[:, 2] = cords[2::3]
+
+        # Get bounding box information
+        xmin = np.min(keypoints[:, 0])
+        ymin = np.min(keypoints[:, 1])
+        xmax = np.max(keypoints[:, 0])
+        ymax = np.max(keypoints[:, 1])
+
+        nms.add_bounding_box(xmin, ymin, xmax, ymax, score, 0, keypoints=keypoints)
+        return nms.get_bounding_boxes(threshold=self.nms_threshold, sigma=self.nms_sigma)[0]

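The new FaceLandmarks post-processor deinterleaves the model's flat (x0, y0, z0, x1, y1, z1, ...) output into an (N, 3) array with strided slices, then derives a bounding box from the keypoint extrema. The same pattern with toy values (desktop numpy here; on the camera, np is the firmware's numpy-compatible module):

    import numpy as np

    cords = np.array([10.0, 20.0, 0.1, 30.0, 5.0, 0.2])  # two (x, y, z) keypoints
    keypoints = np.empty((len(cords) // 3, 3))
    keypoints[:, 0] = cords[0::3]  # x -> [10., 30.]
    keypoints[:, 1] = cords[1::3]  # y -> [20., 5.]
    keypoints[:, 2] = cords[2::3]  # z -> [0.1, 0.2]

    # Bounding box from the extrema, as in the class above:
    xmin, ymin = np.min(keypoints[:, 0]), np.min(keypoints[:, 1])
    xmax, ymax = np.max(keypoints[:, 0]), np.max(keypoints[:, 1])
    # (xmin, ymin, xmax, ymax) == (10.0, 5.0, 30.0, 20.0)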