Compare commits


9 Commits

Author SHA1 Message Date
Kwabena W. Agyeman
0be5c5b27b
Merge 47d35b9ed2 into 1dab51addf 2025-09-18 04:57:05 +00:00
Kwabena W. Agyeman
47d35b9ed2 scripts/examples: Add BlazeFace detection example. 2025-09-17 21:56:57 -07:00
Kwabena W. Agyeman
b18342f2ed lib/models: Add blazeface model. 2025-09-17 21:56:55 -07:00
Kwabena W. Agyeman
04f889d75d scripts/libraries: Add face detection post-processing. 2025-09-17 21:54:47 -07:00
Ibrahim Abdelkader
1dab51addf
Merge pull request #2828 from openmv/dependabot/github_actions/actions/setup-python-6
Some checks failed: all 18 🔥 Firmware Build checks (build-firmware for every target, code-size-report, stable-release, development-release) were cancelled on push.
build(deps): bump actions/setup-python from 5 to 6
2025-09-17 19:15:32 +03:00
Kwabena W. Agyeman
ca61476299 scripts/libraries: Add keypoint support to NMS.
NMS will now pass-through keypoints along with scaling/offsetting
them to be drawn correctly on the image.
2025-09-05 17:49:58 -07:00
Kwabena W. Agyeman
1dbbbae533 scripts/libraries: Fix pre-processing float input array normalization.
ULAB only does simple assignment operator operations in-place.
2025-09-05 17:49:58 -07:00
Kwabena W. Agyeman
013b62a186 lib/stai: Add multi-tensor input/output support.
The name of the buffer being NULL defines the end of the buffer list.
2025-09-05 17:49:27 -07:00
dependabot[bot]
960ae86545
build(deps): bump actions/setup-python from 5 to 6
Bumps [actions/setup-python](https://github.com/actions/setup-python) from 5 to 6.
- [Release notes](https://github.com/actions/setup-python/releases)
- [Commits](https://github.com/actions/setup-python/compare/v5...v6)

---
updated-dependencies:
- dependency-name: actions/setup-python
  dependency-version: '6'
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-09-04 15:22:50 +00:00
16 changed files with 222 additions and 23 deletions

View File

@@ -75,7 +75,7 @@ jobs:
           key: 'gcc-14.3.rel1_llvm-18.1.3_make-4.4.1_stedgeai-2.1'
       - name: '🐍 Install Python'
-        uses: actions/setup-python@v5
+        uses: actions/setup-python@v6
         with:
           cache: 'pip'
           python-version: "3.12.4"
@@ -136,7 +136,7 @@ jobs:
           key: 'gcc-14.3.rel1_llvm-18.1.3_make-4.4.1_stedgeai-2.1'
       - name: '🐍 Install Python'
-        uses: actions/setup-python@v5
+        uses: actions/setup-python@v6
         with:
           cache: 'pip'
           python-version: "3.12.4"

View File

@@ -28,7 +28,7 @@ jobs:
         uses: actions/checkout@v5
       - name: '🐍 Set up Python ${{ matrix.python-version }}'
-        uses: actions/setup-python@v5
+        uses: actions/setup-python@v6
         with:
           cache: 'pip'
           python-version: ${{ matrix.python-version }}

View File

@@ -38,6 +38,12 @@
         "alignment": 16,
         "optimize": "Performance"
     },
+    {
+        "type": "tflite",
+        "path": "{TOP}/lib/models/blazeface_front_128.tflite",
+        "alignment": 16,
+        "optimize": "Performance"
+    },
     {
         "type": "haar",
         "path": "{TOP}/lib/haar/haarcascade_eye.xml",

View File

@@ -32,6 +32,12 @@
         "alignment": 16,
         "optimize": "Performance"
     },
+    {
+        "type": "tflite",
+        "path": "{TOP}/lib/models/blazeface_front_128.tflite",
+        "alignment": 16,
+        "optimize": "Performance"
+    },
     {
         "type": "haar",
         "path": "{TOP}/lib/haar/haarcascade_eye.xml",

View File

@@ -38,6 +38,12 @@
         "alignment": 16,
         "optimize": "Performance"
     },
+    {
+        "type": "tflite",
+        "path": "{TOP}/lib/models/blazeface_front_128.tflite",
+        "alignment": 16,
+        "optimize": "Performance"
+    },
     {
         "type": "haar",
         "path": "{TOP}/lib/haar/haarcascade_eye.xml",

View File

@@ -38,6 +38,12 @@
         "alignment": 16,
         "optimize": "Performance"
     },
+    {
+        "type": "tflite",
+        "path": "{TOP}/lib/models/blazeface_front_128.tflite",
+        "alignment": 16,
+        "optimize": "Performance"
+    },
     {
         "type": "haar",
         "path": "{TOP}/lib/haar/haarcascade_eye.xml",

View File

@@ -38,6 +38,12 @@
         "alignment": 16,
         "optimize": "Performance"
     },
+    {
+        "type": "tflite",
+        "path": "{TOP}/lib/models/blazeface_front_128.tflite",
+        "alignment": 16,
+        "optimize": "Performance"
+    },
     {
         "type": "haar",
         "path": "{TOP}/lib/haar/haarcascade_eye.xml",

View File

@@ -32,6 +32,12 @@
         "alignment": 16,
         "optimize": "Performance"
     },
+    {
+        "type": "tflite",
+        "path": "{TOP}/lib/models/blazeface_front_128.tflite",
+        "alignment": 16,
+        "optimize": "Performance"
+    },
     {
         "type": "haar",
         "path": "{TOP}/lib/haar/haarcascade_eye.xml",

View File

@@ -19,6 +19,12 @@
         "path": "{TOP}/lib/models/yolo_v5_224_nano.tflite",
         "alignment": 32,
         "profile": "default"
-    }
+    },
+    {
+        "type": "tflite",
+        "path": "{TOP}/lib/models/blazeface_front_128.tflite",
+        "alignment": 32,
+        "profile": "default"
+    }
     ]
 }

View File

@@ -38,6 +38,12 @@
         "alignment": 16,
         "optimize": "Performance"
     },
+    {
+        "type": "tflite",
+        "path": "{TOP}/lib/models/blazeface_front_128.tflite",
+        "alignment": 16,
+        "optimize": "Performance"
+    },
     {
         "type": "haar",
         "path": "{TOP}/lib/haar/haarcascade_eye.xml",

Binary file not shown.

View File

@@ -178,7 +178,7 @@ int ml_backend_init_model(py_ml_model_obj_t *model) {
     const LL_Buffer_InfoTypeDef *model_outputs = ll_aton_reloc_get_output_buffers_info(&state->nn_inst, -1);

     // Initialize the model's inputs.
-    model->inputs_size = 1;
+    for (model->inputs_size = 0; model_inputs[model->inputs_size].name != NULL; model->inputs_size++);
     model->input_shape = (mp_obj_tuple_t *) MP_OBJ_TO_PTR(mp_obj_new_tuple(model->inputs_size, NULL));
     model->input_scale = (mp_obj_tuple_t *) MP_OBJ_TO_PTR(mp_obj_new_tuple(model->inputs_size, NULL));
     model->input_zero_point = (mp_obj_tuple_t *) MP_OBJ_TO_PTR(mp_obj_new_tuple(model->inputs_size, NULL));
@@ -205,7 +205,7 @@ int ml_backend_init_model(py_ml_model_obj_t *model) {
     }

     // Initialize the model's outputs.
-    model->outputs_size = 1;
+    for (model->outputs_size = 0; model_outputs[model->outputs_size].name != NULL; model->outputs_size++);
     model->output_shape = (mp_obj_tuple_t *) MP_OBJ_TO_PTR(mp_obj_new_tuple(model->outputs_size, NULL));
     model->output_scale = (mp_obj_tuple_t *) MP_OBJ_TO_PTR(mp_obj_new_tuple(model->outputs_size, NULL));
     model->output_zero_point = (mp_obj_tuple_t *) MP_OBJ_TO_PTR(mp_obj_new_tuple(model->outputs_size, NULL));
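With the buffer lists terminated by a NULL name, the backend now sizes the shape/scale/zero-point tuples to the real tensor count, so Python-side code can index them per tensor. A minimal sketch of walking every output of a multi-output model from a predict() callback, assuming only the per-index accessors used elsewhere in this PR (dump_outputs is a hypothetical helper, not library API):

from ulab import numpy as np

def dump_outputs(model, outputs):
    # After multi-tensor support there is one entry per output tensor.
    for i in range(len(model.output_shape)):
        if model.output_dtype[i] == 'f':
            values = outputs[i]  # Float outputs need no dequantization.
        else:
            # Same math as dequantize() in the post-processing library below.
            values = (outputs[i] - float(model.output_zero_point[i])) * model.output_scale[i]
        print("output", i, "shape:", model.output_shape[i],
              "range:", np.min(values), np.max(values))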

View File

@@ -0,0 +1,49 @@
+# This work is licensed under the MIT license.
+# Copyright (c) 2013-2025 OpenMV LLC. All rights reserved.
+# https://github.com/openmv/openmv/blob/master/LICENSE
+#
+# This example shows off Google's MediaPipe BlazeFace face detection model.
+
+import csi
+import time
+import ml
+from ml.postprocessing import mediapipe_face_detection_postprocess
+
+# Initialize the sensor.
+csi0 = csi.CSI()
+csi0.reset()
+csi0.pixformat(csi.RGB565)
+csi0.framesize(csi.VGA)
+csi0.window((400, 400))
+
+# Load built-in face detection model
+model = ml.Model("/rom/blazeface_front_128.tflite")
+print(model)
+
+# Create the face detection post-processor. This post-processor dynamically
+# generates anchors for the model input size which should only be done once.
+face_detection_postprocess = mediapipe_face_detection_postprocess(threshold=0.6)
+
+clock = time.clock()
+while True:
+    clock.tick()
+    img = csi0.snapshot()
+
+    # faces is a list of ((x, y, w, h), score, keypoints) tuples
+    faces = model.predict([img], callback=face_detection_postprocess)
+
+    # Draw bounding boxes around the detected faces and keypoints.
+    if faces:
+        for r, score, keypoints in faces[0]:
+            ml.utils.draw_predictions(img, [r], ["face"], [(0, 0, 255)], format=None)
+            # keypoints is a ndarray of shape (6, 2)
+            # 0 - right eye (x, y)
+            # 1 - left eye (x, y)
+            # 2 - nose (x, y)
+            # 3 - mouth (x, y)
+            # 4 - right ear (x, y)
+            # 5 - left ear (x, y)
+            for kp in keypoints.tolist():
+                img.draw_circle(int(kp[0]), int(kp[1]), 4, color=(255, 0, 0))
+
+    print(clock.fps(), "fps")

View File

@@ -34,6 +34,14 @@ from ulab import numpy as np

 _NO_DETECTION = const(())


+def logit(x):
+    return np.log(x / (1.0 - x))
+
+
+def sigmoid(x):
+    return 1.0 / (1.0 + np.exp(-x))
+
+
 def mod(a, b):
     return a - (b * (a // b))
@@ -49,16 +57,16 @@ def threshold(scores, threshold, scale, find_max=False, find_max_axis=1):
     return np.nonzero(scores < threshold)[0]


-def quantize(model, value):
-    if model.output_dtype[0] == 'f':
+def quantize(model, value, index=0):
+    if model.output_dtype[index] == 'f':
         return value
-    return (value / model.output_scale[0]) + model.output_zero_point[0]
+    return (value / model.output_scale[index]) + model.output_zero_point[index]


-def dequantize(model, value):
-    if model.output_dtype[0] == 'f':
+def dequantize(model, value, index=0):
+    if model.output_dtype[index] == 'f':
         return value
-    return (value - model.output_zero_point[0]) * model.output_scale[0]
+    return (value - float(model.output_zero_point[index])) * model.output_scale[index]


 class fomo_postprocess:
@@ -141,12 +149,6 @@ class yolo_v2_postprocess:

     def __call__(self, model, inputs, outputs):
-        def logit(x):
-            return np.log(x / (1.0 - x))
-
-        def sigmoid(x):
-            return 1.0 / (1.0 + np.exp(-x))
-
         def softmax(x):
             e_x = np.exp(x - np.max(x, axis=1, keepdims=True))
             return e_x / np.sum(e_x, axis=1, keepdims=True)
@@ -330,3 +332,94 @@ class yolo_v8_postprocess:
             nms.add_bounding_box(xmin[i], ymin[i], xmax[i], ymax[i],
                                  bb_scores[i], bb_classes[i])
         return nms.get_bounding_boxes(threshold=self.nms_threshold, sigma=self.nms_sigma)
+
+
+class mediapipe_face_detection_postprocess:
+    _BLAZEFACE_CX = const(0)
+    _BLAZEFACE_CY = const(1)
+    _BLAZEFACE_CW = const(2)
+    _BLAZEFACE_CH = const(3)
+    _BLAZEFACE_KP = const(4)
+
+    def __init__(self, threshold=0.6, anchors=None, nms_threshold=0.1, nms_sigma=0.1):
+        self.threshold = threshold
+        self.anchors = anchors
+        if self.anchors is None:
+            self.anchors = np.empty((896, 2))
+            idx = 0
+            # Generate anchors for 16x16 grid with 2 duplicates and
+            # 8x8 grid with 6 duplicates to match the model output size.
+            for grid_size, scales in [(16, 2), (8, 6)]:
+                for gy in range(grid_size):
+                    cy = (gy + 0.5) / grid_size
+                    for gx in range(grid_size):
+                        cx = (gx + 0.5) / grid_size
+                        for _ in range(scales):
+                            self.anchors[idx, 0] = cx
+                            self.anchors[idx, 1] = cy
+                            idx += 1
+        self.nms_threshold = nms_threshold
+        self.nms_sigma = nms_sigma
+
+    def blazeface_post_process(self, ih, iw, nms, model, inputs, outputs, score_idx, cords_idx, t, anchors):
+        s_oh, s_ow, s_oc = model.output_shape[score_idx]
+        scale = model.output_scale[score_idx]
+        t = quantize(model, logit(t), index=score_idx)
+
+        # Threshold all the scores
+        score_row_outputs = outputs[score_idx].reshape((s_oh * s_ow * s_oc))
+        score_indices = threshold(score_row_outputs, t, scale)
+        if not len(score_indices):
+            return _NO_DETECTION
+
+        # Get the score information
+        bb_scores = np.take(score_row_outputs, score_indices, axis=0)
+        bb_scores = sigmoid(dequantize(model, bb_scores, index=score_idx))
+
+        # Get the bounding boxes that have a valid score
+        c_oh, c_ow, c_oc = model.output_shape[cords_idx]
+        cords_row_outputs = outputs[cords_idx].reshape((c_oh * c_ow, c_oc))
+        bb = dequantize(model, np.take(cords_row_outputs, score_indices, axis=0), index=cords_idx)
+
+        # Get the anchor box information
+        bb_a_array = np.take(anchors, score_indices, axis=0)
+
+        # Compute the bounding box information
+        ax = bb_a_array[:, _BLAZEFACE_CX]
+        ay = bb_a_array[:, _BLAZEFACE_CY]
+        x_center = bb[:, _BLAZEFACE_CX] / iw + ax
+        y_center = bb[:, _BLAZEFACE_CY] / ih + ay
+        w_rel = bb[:, _BLAZEFACE_CW] / iw * 0.5
+        h_rel = bb[:, _BLAZEFACE_CH] / ih * 0.5
+
+        # Get the keypoint information
+        row_count = bb.shape[0]
+        keypoints = np.empty((row_count, (c_oc - _BLAZEFACE_KP) // 2, 2))
+        keypoints[:, :, 0] = (bb[:, _BLAZEFACE_KP::2] / iw + ax.reshape((row_count, 1))) * iw
+        keypoints[:, :, 1] = (bb[:, _BLAZEFACE_KP + 1::2] / ih + ay.reshape((row_count, 1))) * ih
+
+        # Scale the bounding boxes to have enough integer precision for NMS
+        xmin = (x_center - w_rel) * iw
+        ymin = (y_center - h_rel) * ih
+        xmax = (x_center + w_rel) * iw
+        ymax = (y_center + h_rel) * ih
+        for i in range(bb.shape[0]):
+            nms.add_bounding_box(xmin[i], ymin[i], xmax[i], ymax[i], bb_scores[i], 0, keypoints=keypoints[i])
+
+    def __call__(self, model, inputs, outputs):
+        ib, ih, iw, ic = model.input_shape[0]
+        nms = NMS(iw, ih, inputs[0].roi)
+        output_len = outputs[0].shape[1]
+        self.blazeface_post_process(ih, iw, nms, model, inputs, outputs, 1, 0,
+                                    self.threshold, self.anchors[:output_len])
+        if output_len < len(self.anchors):
+            self.blazeface_post_process(ih, iw, nms, model, inputs, outputs, 2, 3,
+                                        self.threshold, self.anchors[output_len:])
+        return nms.get_bounding_boxes(threshold=self.nms_threshold, sigma=self.nms_sigma)
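To make the decode path concrete, here is a worked example with hypothetical numbers, assuming the raw regressor outputs have already been dequantized (the real code thresholds scores in quantized logit space first, so sigmoid() only runs on the survivors):

# Hypothetical decode of one detection for a 128x128 input, using the
# formulas from blazeface_post_process() above.
iw = ih = 128
ax = ay = 0.5                      # anchor center (middle of the 16x16 grid)
bb_cx, bb_cy, bb_w, bb_h = 4.0, -2.0, 48.0, 48.0  # dequantized outputs, in pixels
x_center = bb_cx / iw + ax         # 0.53125 (relative to input width)
y_center = bb_cy / ih + ay         # 0.484375
w_rel = bb_w / iw * 0.5            # half-width: 0.1875
h_rel = bb_h / ih * 0.5            # half-height: 0.1875
xmin = (x_center - w_rel) * iw     # 44.0 px
ymin = (y_center - h_rel) * ih     # 38.0 px
xmax = (x_center + w_rel) * iw     # 92.0 px
ymax = (y_center + h_rel) * ih     # 86.0 px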

View File

@@ -93,4 +93,6 @@ class Normalization:
         fadd = (fadd - np.array(self.mean)) / np.array(self.stdev)
         fscale = fscale / np.array(self.stdev)
-        array = (array * fscale) + fadd
+        # Apply normalization in-place (must be done in two steps for ulab).
+        array *= fscale
+        array += fadd

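The one-line comment is the whole story: ulab only mutates an ndarray in place through augmented assignment, so array = (array * fscale) + fadd builds a new array and rebinds the local name, leaving the caller's buffer with the raw values. A minimal sketch of the difference (behaves the same on ulab and CPython numpy):

from ulab import numpy as np   # on CPython: import numpy as np

array = np.array([1.0, 2.0, 3.0])
view = array                   # second reference to the same buffer

array = (array * 2.0) + 1.0    # allocates a new array, rebinds the name
print(view)                    # still [1.0, 2.0, 3.0] - caller sees nothing

array = view
array *= 2.0                   # in-place: scales the shared buffer
array += 1.0                   # in-place: offsets the shared buffer
print(view)                    # [3.0, 5.0, 7.0] - caller sees the result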

View File

@@ -43,7 +43,7 @@ class NMS:
             raise ValueError("Invalid ROI dimensions!")
         self.boxes = []

-    def add_bounding_box(self, xmin, ymin, xmax, ymax, score, label_index):
+    def add_bounding_box(self, xmin, ymin, xmax, ymax, score, label_index, keypoints=None):
         if score >= 0.0 and score <= 1.0:
             xmin = max(0.0, min(xmin, self.window_w))
             ymin = max(0.0, min(ymin, self.window_h))
@@ -52,7 +52,7 @@ class NMS:
             w = int(xmax - xmin)
             h = int(ymax - ymin)
             if w > 0 and h > 0:
-                self.boxes.append([int(xmin), int(ymin), w, h, score, label_index])
+                self.boxes.append([int(xmin), int(ymin), w, h, score, label_index, keypoints])

     def get_bounding_boxes(self, threshold=0.1, sigma=0.1):
         sorted_boxes = sorted(self.boxes, key=lambda x: x[4], reverse=True)
@@ -107,15 +107,22 @@ class NMS:
             output_boxes[i][1] = int((output_boxes[i][1] * scale) + y_offset)
             output_boxes[i][2] = int(output_boxes[i][2] * scale)
             output_boxes[i][3] = int(output_boxes[i][3] * scale)
+            keypoints = output_boxes[i][6]
+            if keypoints is not None:
+                keypoints *= scale
+                keypoints[:, 0] += x_offset
+                keypoints[:, 1] += y_offset

         # Create a list per class with (rect, score) tuples.
         output_list = [[] for i in range(max_label_index + 1)]
         for i in range(len(output_boxes)):
-            output_list[output_boxes[i][5]].append(
-                (output_boxes[i][0:4], output_boxes[i][4])
-            )
+            rect_score = [output_boxes[i][:4], output_boxes[i][4]]
+            keypoints = output_boxes[i][6]
+            if keypoints is not None:
+                rect_score.append(keypoints)
+            output_list[output_boxes[i][5]].append(tuple(rect_score))
         return output_list
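With the scaling above, keypoints come back in the same image coordinates as the rectangles. A small sketch of consuming the result, assuming nms is an NMS instance that was fed detections (for example by mediapipe_face_detection_postprocess); entries are (rect, score) tuples that grow a third keypoints element when one was attached:

detections = nms.get_bounding_boxes(threshold=0.1, sigma=0.1)
for label_index, class_list in enumerate(detections):
    for det in class_list:
        (x, y, w, h), score = det[0], det[1]
        print(label_index, x, y, w, h, score)
        if len(det) == 3:  # keypoints rode along through NMS
            for kx, ky in det[2].tolist():
                print("  keypoint:", kx, ky)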