Merge pull request #2871 from kwagyeman/kwabena/add_hand_detection

scripts/libraries: Add palm detector post-processing support.
Ibrahim Abdelkader 2025-10-18 20:40:54 +03:00 committed by GitHub
commit 15070dd722
12 changed files with 377 additions and 28 deletions


@@ -38,6 +38,18 @@
"alignment": 16,
"optimize": "Performance"
},
{
"type": "tflite",
"path": "{TOP}/lib/models/palm_detection_full_192.tflite",
"alignment": 16,
"optimize": "Performance"
},
{
"type": "tflite",
"path": "{TOP}/lib/models/hand_landmarks_full_224.tflite",
"alignment": 16,
"optimize": "Performance"
},
{
"type": "haar",
"path": "{TOP}/lib/haar/haarcascade_eye.xml",


@@ -25,6 +25,18 @@
"path": "{TOP}/lib/models/blazeface_front_128.tflite",
"alignment": 32,
"profile": "default"
},
{
"type": "tflite",
"path": "{TOP}/lib/models/palm_detection_full_192.tflite",
"alignment": 32,
"profile": "default"
},
{
"type": "tflite",
"path": "{TOP}/lib/models/hand_landmarks_full_224.tflite",
"alignment": 32,
"profile": "default"
}
]
}

Binary file not shown.

Binary file not shown.


@@ -262,16 +262,17 @@ static mp_obj_t py_ml_model_predict(size_t n_args, const mp_obj_t *pos_args, mp_
}
bool callback = args[ARG_callback].u_obj != mp_const_none;
bool postprocess = model->postprocess != mp_const_none || callback;
py_ml_process_input(model, pos_args[1]);
ml_backend_run_inference(model);
mp_obj_t output = py_ml_process_output(model, !callback);
mp_obj_t output = py_ml_process_output(model, !postprocess);
if (callback) {
if (postprocess) {
// Pass model, inputs, outputs to the post-processing callback.
mp_obj_t fargs[3] = { MP_OBJ_FROM_PTR(model), pos_args[1], output };
output = mp_call_function_n_kw(args[ARG_callback].u_obj, 3, 0, fargs);
output = mp_call_function_n_kw(callback ? args[ARG_callback].u_obj : model->postprocess, 3, 0, fargs);
}
return output;
@@ -313,6 +314,9 @@ static void py_ml_model_attr(mp_obj_t self_in, qstr attr, mp_obj_t *dest) {
case MP_QSTR_output_zero_point:
dest[0] = MP_OBJ_FROM_PTR(self->output_zero_point);
break;
case MP_QSTR_postprocess:
dest[0] = self->postprocess;
break;
default:
// Continue lookup in locals_dict.
dest[1] = MP_OBJ_SENTINEL;
@@ -322,10 +326,11 @@ static void py_ml_model_attr(mp_obj_t self_in, qstr attr, mp_obj_t *dest) {
}
mp_obj_t py_ml_model_make_new(const mp_obj_type_t *type, size_t n_args, size_t n_kw, const mp_obj_t *all_args) {
enum { ARG_path, ARG_load_to_fb };
enum { ARG_path, ARG_load_to_fb, ARG_postprocess };
static const mp_arg_t allowed_args[] = {
{ MP_QSTR_path, MP_ARG_REQUIRED | MP_ARG_OBJ },
{ MP_QSTR_load_to_fb, MP_ARG_REQUIRED | MP_ARG_BOOL },
{ MP_QSTR_postprocess, MP_ARG_REQUIRED | MP_ARG_OBJ, {.u_rom_obj = MP_ROM_NONE} },
};
// Parse args.
@@ -334,6 +339,7 @@ mp_obj_t py_ml_model_make_new(const mp_obj_type_t *type, size_t n_args, size_t n
//const char *path = mp_obj_str_get_str(args[ARG_path].u_obj);
py_ml_model_obj_t *model = mp_obj_malloc_with_finaliser(py_ml_model_obj_t, &py_ml_model_type);
model->postprocess = args[ARG_postprocess].u_obj;
#if MICROPY_VFS
mp_obj_t file_args[2] = {


@@ -51,6 +51,7 @@ typedef struct py_ml_model_obj {
mp_obj_tuple_t *output_scale;
mp_obj_tuple_t *output_zero_point;
mp_obj_tuple_t *output_dtype;
mp_obj_t postprocess; // Post-processing object.
void *state; // Private context for the backend.
} py_ml_model_obj_t;
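Taken together, the C changes above let a post-processing object be attached to a model when it is constructed; predict() then calls that object automatically whenever no explicit callback is supplied, passing it the model, the inputs, and the raw outputs. Below is a minimal sketch of a custom post-processor written against only what the diff shows: the fn(model, inputs, outputs) call signature and the ml.Model(path, postprocess=...) keyword used in the examples further down. The class name and model path are hypothetical.

import ml

class RawOutputs:  # hypothetical helper, not part of the PR
    def __call__(self, model, inputs, outputs):
        # inputs is the list passed to predict(); outputs are the raw output tensors.
        # Whatever is returned here becomes the return value of model.predict().
        return outputs

# model = ml.Model("/rom/my_model.tflite", postprocess=RawOutputs())  # hypothetical path
# result = model.predict([img])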


@@ -0,0 +1,60 @@
# This work is licensed under the MIT license.
# Copyright (c) 2013-2025 OpenMV LLC. All rights reserved.
# https://github.com/openmv/openmv/blob/master/LICENSE
#
# This example shows off Google's MediaPipe Palm Detection model.
#
# NOTE: This example requires an OpenMV Cam with an NPU, like the AE3 or N6, to run in real time.
import csi
import time
import ml
from ml.postprocessing.mediapipe import BlazePalm
# Initialize the sensor.
csi0 = csi.CSI()
csi0.reset()
csi0.pixformat(csi.RGB565)
csi0.framesize(csi.VGA)
csi0.window((400, 400))
# Load built-in palm detection model
model = ml.Model("/rom/palm_detection_full_192.tflite", postprocess=BlazePalm(threshold=0.4))
print(model)
# Line connections between palm keypoints for drawing the palm skeleton.
palm_lines = ((0, 1), (1, 2), (2, 3), (3, 4), (4, 0), (0, 5), (5, 6))
# Visualization parameters.
palm_labels = ["palm"]
palm_colors = [(0, 0, 255)]
kp_color = (255, 0, 0)
line_color = (0, 255, 0)
clock = time.clock()
while True:
clock.tick()
img = csi0.snapshot()
# palms is a list of ((x, y, w, h), score, keypoints) tuples
palms = model.predict([img])
# Draw bounding boxes around the detected palms and keypoints.
if palms:
for r, score, keypoints in palms[0]:
ml.utils.draw_predictions(img, [r], palm_labels, palm_colors, format=None)
# keypoints is a ndarray of shape (7, 2)
# 0 - wrist (x, y)
# 1 - index finger mcp (x, y)
# 2 - middle finger mcp (x, y)
# 3 - ring finger mcp (x, y)
# 4 - pinky mcp (x, y)
# 5 - thumb cmc (x, y)
# 6 - thumb mcp (x, y)
#
# mcp = Metacarpophalangeal Joint - the knuckle
# cmc = Carpometacarpal Joint - the base of the thumb
ml.utils.draw_skeleton(img, keypoints, palm_lines, kp_color=kp_color, line_color=line_color)
print(clock.fps(), "fps")


@@ -0,0 +1,77 @@
# This work is licensed under the MIT license.
# Copyright (c) 2013-2025 OpenMV LLC. All rights reserved.
# https://github.com/openmv/openmv/blob/master/LICENSE
#
# This example shows off Google's MediaPipe Hand Landmarks Detection model for multiple hands.
#
# NOTE: This example requires an OpenMV Cam with an NPU, like the AE3 or N6, to run in real time.
import csi
import time
import ml
from ml.preprocessing import Normalization
from ml.postprocessing.mediapipe import BlazePalm
from ml.postprocessing.mediapipe import HandLandmarks
# Initialize the sensor.
csi0 = csi.CSI()
csi0.reset()
csi0.pixformat(csi.RGB565)
csi0.framesize(csi.VGA)
csi0.window((400, 400))
# Load built-in palm detection model
palm_detection = ml.Model("/rom/palm_detection_full_192.tflite", postprocess=BlazePalm(threshold=0.4))
print(palm_detection)
# Load built-in hand landmark model
hand_landmarks = ml.Model("/rom/hand_landmarks_full_224.tflite", postprocess=HandLandmarks(threshold=0.4))
print(hand_landmarks)
# Line connections between hand joints for drawing the hand skeleton.
hand_lines = ((0, 1), (1, 2), (2, 3), (3, 4), (0, 5), (5, 6), (6, 7), (7, 8),
(5, 9), (9, 10), (10, 11), (11, 12), (9, 13), (13, 14), (14, 15), (15, 16),
(13, 17), (17, 18), (18, 19), (19, 20), (0, 17))
# Visualization parameters.
palm_colors = [(0, 0, 255)]
kp_color = (255, 0, 0)
line_color = (0, 255, 0)
clock = time.clock()
while True:
clock.tick()
img = csi0.snapshot()
# palms is a list of ((x, y, w, h), score, keypoints) tuples
palms = palm_detection.predict([img])
if palms:
for r, score, keypoints in palms[0]:
# r is (x, y, w, h) - enlarge the box 3x for the hand landmarks model
wider_rect = (r[0] - r[2], r[1] - r[3], r[2] * 3, r[3] * 3)
# Operate on just the ROI of the detected palm
n = Normalization(roi=wider_rect)
# hands is a list of ((x, y, w, h), score, keypoints) tuples
# index 0 (if present) is left hand
# index 1 (if present) is right hand
hands = hand_landmarks.predict([n(img)])
# Draw bounding boxes around the detected hands and keypoints.
for i, detections in enumerate(hands):
for r, score, keypoints in detections:
ml.utils.draw_predictions(img, [r], ["right" if i else "left"], palm_colors, format=None)
# keypoints: ndarray (21, 3) of hand joints (x, y, z)
# Indices follow MediaPipe convention:
# 0: wrist
# Thumb: 1 cmc, 2 mcp, 3 ip, 4 tip
# Index: 5 mcp, 6 pip, 7 dip, 8 tip
# Middle: 9 mcp, 10 pip, 11 dip, 12 tip
# Ring: 13 mcp, 14 pip, 15 dip, 16 tip
# Pinky: 17 mcp, 18 pip, 19 dip, 20 tip
# (cmc=base, mcp=knuckle, pip=mid, dip=distal, ip=thumb joint, tip=fingertip)
ml.utils.draw_skeleton(img, keypoints, hand_lines, kp_color=kp_color, line_color=line_color)
print(clock.fps(), "fps")


@@ -0,0 +1,95 @@
# This work is licensed under the MIT license.
# Copyright (c) 2013-2025 OpenMV LLC. All rights reserved.
# https://github.com/openmv/openmv/blob/master/LICENSE
#
# This example shows off Google's MediaPipe Hand Landmarks Detection model for a single hand.
#
# NOTE: This example requires an OpenMV Cam with an NPU, like the AE3 or N6, to run in real time.
import csi
import time
import ml
from ml.preprocessing import Normalization
from ml.postprocessing.mediapipe import BlazePalm
from ml.postprocessing.mediapipe import HandLandmarks
# Initialize the sensor.
csi0 = csi.CSI()
csi0.reset()
csi0.pixformat(csi.RGB565)
csi0.framesize(csi.VGA)
csi0.window((400, 400))
# Load built-in palm detection model
palm_detection = ml.Model("/rom/palm_detection_full_192.tflite", postprocess=BlazePalm(threshold=0.4))
print(palm_detection)
# Load built-in hand landmark model
hand_landmarks = ml.Model("/rom/hand_landmarks_full_224.tflite", postprocess=HandLandmarks(threshold=0.4))
print(hand_landmarks)
# Line connections between hand joints for drawing the hand skeleton.
hand_lines = ((0, 1), (1, 2), (2, 3), (3, 4), (0, 5), (5, 6), (6, 7), (7, 8),
(5, 9), (9, 10), (10, 11), (11, 12), (9, 13), (13, 14), (14, 15), (15, 16),
(13, 17), (17, 18), (18, 19), (19, 20), (0, 17))
# Visualization parameters.
palm_colors = [(0, 0, 255)]
kp_color = (255, 0, 0)
line_color = (0, 255, 0)
# Tracking vars.
n = None
clock = time.clock()
while True:
clock.tick()
img = csi0.snapshot()
if n is None:
# palms is a list of ((x, y, w, h), score, keypoints) tuples
palms = palm_detection.predict([img])
if palms:
for r, score, keypoints in palms[0]:
# r is (x, y, w, h) - enlarge the box 3x for the hand landmarks model
wider_rect = (r[0] - r[2], r[1] - r[3], r[2] * 3, r[3] * 3)
# Operate on just the ROI of the detected palm
n = Normalization(roi=wider_rect)
else:
# hands is a list of ((x, y, w, h), score, keypoints) tuples
# index 0 (if present) is left hand
# index 1 (if present) is right hand
hands = hand_landmarks.predict([n(img)])
# No hands detected, reset the tracker.
if not hands:
n = None
continue
# Draw bounding boxes around the detected hands and keypoints.
for i, detections in enumerate(hands):
for r, score, keypoints in detections:
ml.utils.draw_predictions(img, [r], ["right" if i else "left"], [(0, 0, 255)], format=None)
# keypoints: ndarray (21, 3) of hand joints (x, y, z)
# Indices follow MediaPipe convention:
# 0: wrist
# Thumb: 1 cmc, 2 mcp, 3 ip, 4 tip
# Index: 5 mcp, 6 pip, 7 dip, 8 tip
# Middle: 9 mcp, 10 pip, 11 dip, 12 tip
# Ring: 13 mcp, 14 pip, 15 dip, 16 tip
# Pinky: 17 mcp, 18 pip, 19 dip, 20 tip
# (cmc=base, mcp=knuckle, pip=mid, dip=distal, ip=thumb joint, tip=fingertip)
ml.utils.draw_skeleton(img, keypoints, hand_lines, kp_color=kp_color, line_color=line_color)
# Center new_wider_rect on hand for tracking
new_wider_rect = (r[0] + (r[2] // 2) - (wider_rect[2] // 2),
r[1] + (r[3] // 2) - (wider_rect[3] // 2),
wider_rect[2],
wider_rect[3])
# Operate on just the ROI of the detected hand
n = Normalization(roi=new_wider_rect)
print(clock.fps(), "fps")


@@ -33,7 +33,7 @@ from ml.preprocessing import Normalization
class Model(uml.Model):
def __init__(self, *args, **kwargs):
super().__init__(*args, kwargs.get("load_to_fb", False))
super().__init__(*args, kwargs.get("load_to_fb", False), kwargs.get("postprocess", False))
try:
path = args[0].split(".")[0] + ".txt"
self.labels = [line.rstrip('\n') for line in open(path, "r")]


@@ -195,3 +195,39 @@ def draw_predictions(
color=box_color,
)
image.draw_string(x, y - font_height, label.upper(), text_color)
def draw_keypoints(
image,
keypoints,
radius=4,
color=(255, 0, 0),
thickness=1,
fill=False,
):
if radius > 0:
for kp in keypoints:
image.draw_circle(int(kp[0]), int(kp[1]), radius, color=color, thickness=thickness, fill=fill)
elif radius == 0:
for kp in keypoints:
image.set_pixel(int(kp[0]), int(kp[1]), color)
def draw_skeleton(
image,
keypoints,
lines,
kp_radius=4,
kp_color=(255, 0, 0),
kp_thickness=1,
kp_fill=False,
line_color=(0, 255, 0),
line_thickness=1,
):
draw_keypoints(image, keypoints, radius=kp_radius, color=kp_color, thickness=kp_thickness, fill=kp_fill)
for line in lines:
image.draw_line(int(keypoints[line[0]][0]), int(keypoints[line[0]][1]),
int(keypoints[line[1]][0]), int(keypoints[line[1]][1]),
color=line_color, thickness=line_thickness)
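A quick usage sketch of the two helpers added above (the image and keypoint values are placeholders): with radius > 0, draw_keypoints() draws a circle per point, with radius == 0 it plots single pixels, and draw_skeleton() draws the keypoints and then connects the given index pairs with lines.

# Placeholder data for illustration only; any sequence of (x, y) pairs works.
# img is assumed to be an image, e.g. from csi0.snapshot().
keypoints = [(40, 50), (80, 60), (120, 90)]
lines = ((0, 1), (1, 2))
# Circles at each keypoint.
ml.utils.draw_keypoints(img, keypoints, radius=3, color=(255, 0, 0), fill=True)
# radius=0 plots single pixels instead of circles.
ml.utils.draw_keypoints(img, keypoints, radius=0, color=(255, 0, 0))
# Keypoints plus connecting lines between indices 0-1 and 1-2.
ml.utils.draw_skeleton(img, keypoints, lines, kp_radius=3, line_color=(0, 255, 0))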


@@ -34,24 +34,26 @@ from ulab import numpy as np
_NO_DETECTION = const(())
class BlazeFace:
_BLAZEFACE_CX = const(0)
_BLAZEFACE_CY = const(1)
_BLAZEFACE_CW = const(2)
_BLAZEFACE_CH = const(3)
_BLAZEFACE_KP = const(4)
class mediapipe_detection_postprocess:
_MEDIAPIPE_CX = const(0)
_MEDIAPIPE_CY = const(1)
_MEDIAPIPE_CW = const(2)
_MEDIAPIPE_CH = const(3)
_MEDIAPIPE_KP = const(4)
def __init__(self, threshold=0.6, anchors=None, nms_threshold=0.1, nms_sigma=0.1):
def __init__(self, threshold=0.6, anchors=None, anchor_grid=None, scores=[], cords=[],
nms_threshold=0.1, nms_sigma=0.1):
self.threshold = threshold
self.anchors = anchors
self.scores = scores
self.cords = cords
if self.anchors is None:
self.anchors = np.empty((896, 2))
anchor_count = sum((g * g) * d for g, d in anchor_grid)
self.anchors = np.empty((anchor_count, 2))
idx = 0
# Generate anchors for 16x16 grid with 2 duplicates and
# 8x8 grid with 6 duplicates to match the model output size.
for grid_size, scales in [(16, 2), (8, 6)]:
for grid_size, scales in anchor_grid:
for gy in range(grid_size):
cy = (gy + 0.5) / grid_size
for gx in range(grid_size):
@@ -69,16 +71,16 @@
nms = NMS(iw, ih, inputs[0].roi)
output_len = outputs[0].shape[1]
self.blazeface_post_process(ih, iw, nms, model, inputs, outputs, 1, 0,
self.detection_post_process(ih, iw, nms, model, inputs, outputs, self.scores[0], self.cords[0],
self.threshold, self.anchors[:output_len])
if output_len < len(self.anchors):
self.blazeface_post_process(ih, iw, nms, model, inputs, outputs, 2, 3,
self.detection_post_process(ih, iw, nms, model, inputs, outputs, self.scores[1], self.cords[1],
self.threshold, self.anchors[output_len:])
return nms.get_bounding_boxes(threshold=self.nms_threshold, sigma=self.nms_sigma)
def blazeface_post_process(self, ih, iw, nms, model, inputs, outputs, score_idx, cords_idx, t, anchors):
def detection_post_process(self, ih, iw, nms, model, inputs, outputs, score_idx, cords_idx, t, anchors):
s_oh, s_ow, s_oc = model.output_shape[score_idx]
scale = model.output_scale[score_idx]
t = quantize(model, logit(t), index=score_idx)
@@ -102,18 +104,18 @@
bb_a_array = np.take(anchors, score_indices, axis=0)
# Compute the bounding box information
ax = bb_a_array[:, _BLAZEFACE_CX]
ay = bb_a_array[:, _BLAZEFACE_CY]
x_center = bb[:, _BLAZEFACE_CX] / iw + ax
y_center = bb[:, _BLAZEFACE_CY] / ih + ay
w_rel = bb[:, _BLAZEFACE_CW] / iw * 0.5
h_rel = bb[:, _BLAZEFACE_CH] / ih * 0.5
ax = bb_a_array[:, _MEDIAPIPE_CX]
ay = bb_a_array[:, _MEDIAPIPE_CY]
x_center = bb[:, _MEDIAPIPE_CX] / iw + ax
y_center = bb[:, _MEDIAPIPE_CY] / ih + ay
w_rel = bb[:, _MEDIAPIPE_CW] / iw * 0.5
h_rel = bb[:, _MEDIAPIPE_CH] / ih * 0.5
# Get the keypoint information
row_count = bb.shape[0]
keypoints = np.empty((row_count, (c_oc - _BLAZEFACE_KP) // 2, 2))
keypoints[:, :, 0] = (bb[:, _BLAZEFACE_KP::2] / iw + ax.reshape((row_count, 1))) * iw
keypoints[:, :, 1] = (bb[:, _BLAZEFACE_KP + 1::2] / ih + ay.reshape((row_count, 1))) * ih
keypoints = np.empty((row_count, (c_oc - _MEDIAPIPE_KP) // 2, 2))
keypoints[:, :, 0] = (bb[:, _MEDIAPIPE_KP::2] / iw + ax.reshape((row_count, 1))) * iw
keypoints[:, :, 1] = (bb[:, _MEDIAPIPE_KP + 1::2] / ih + ay.reshape((row_count, 1))) * ih
# Scale the bounding boxes to have enough integer precision for NMS
xmin = (x_center - w_rel) * iw
@@ -123,3 +125,51 @@
for i in range(bb.shape[0]):
nms.add_bounding_box(xmin[i], ymin[i], xmax[i], ymax[i], bb_scores[i], 0, keypoints=keypoints[i])
class BlazeFace(mediapipe_detection_postprocess):
def __init__(self, threshold=0.6, anchors=None, nms_threshold=0.1, nms_sigma=0.1):
super().__init__(threshold=threshold, anchors=anchors,
anchor_grid=[(16, 2), (8, 6)], scores=[1, 2], cords=[0, 3],
nms_threshold=nms_threshold, nms_sigma=nms_sigma)
class BlazePalm(mediapipe_detection_postprocess):
def __init__(self, threshold=0.6, anchors=None, nms_threshold=0.1, nms_sigma=0.1):
super().__init__(threshold=threshold, anchors=anchors,
anchor_grid=[(24, 2), (12, 6)], scores=[0], cords=[1],
nms_threshold=nms_threshold, nms_sigma=nms_sigma)
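As a quick sanity check (not part of the diff), the new anchor_grid parameter reproduces the anchor counts the two models expect; the formula below is the same one used in __init__ above.

def anchor_count(anchor_grid):
    return sum((g * g) * d for g, d in anchor_grid)

print(anchor_count([(16, 2), (8, 6)]))   # 896  -> BlazeFace, the previously hard-coded size
print(anchor_count([(24, 2), (12, 6)]))  # 2016 -> BlazePalm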
class HandLandmarks:
def __init__(self, threshold=0.6, nms_threshold=0.1, nms_sigma=0.1):
self.threshold = threshold
self.nms_threshold = nms_threshold
self.nms_sigma = nms_sigma
def __call__(self, model, inputs, outputs):
ib, ih, iw, ic = model.input_shape[0]
nms = NMS(iw, ih, inputs[0].roi)
score = outputs[2][0, 0]
if score < self.threshold:
return _NO_DETECTION
cords = outputs[3][0, :]
# Get the keypoint information
keypoints = np.empty((len(cords) // 3, 3))
keypoints[:, 0] = cords[0::3]
keypoints[:, 1] = cords[1::3]
keypoints[:, 2] = cords[2::3]
# Get bounding box information
xmin = np.min(keypoints[:, 0])
ymin = np.min(keypoints[:, 1])
xmax = np.max(keypoints[:, 0])
ymax = np.max(keypoints[:, 1])
left_right = outputs[0][0, 0] > 0.5
nms.add_bounding_box(xmin, ymin, xmax, ymax, score, left_right, keypoints=keypoints)
return nms.get_bounding_boxes(threshold=self.nms_threshold, sigma=self.nms_sigma)
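The refactor above also makes the detection post-processor reusable beyond faces and palms. Here is a purely illustrative sketch of how another MediaPipe-style detector whose scores and coordinates come back in a single tensor pair (the BlazePalm pattern) could subclass mediapipe_detection_postprocess; the grid sizes and output-tensor indices are made-up placeholders, not taken from any real model.

class MyDetector(mediapipe_detection_postprocess):  # hypothetical example, not part of the PR
    def __init__(self, threshold=0.6, anchors=None, nms_threshold=0.1, nms_sigma=0.1):
        super().__init__(threshold=threshold, anchors=anchors,
                         anchor_grid=[(32, 2), (16, 6)],  # placeholder anchor grid spec
                         scores=[0], cords=[1],           # indices of the score/coordinate output tensors
                         nms_threshold=nms_threshold, nms_sigma=nms_sigma)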