diff --git a/boards/ARDUINO_GIGA/romfs.json b/boards/ARDUINO_GIGA/romfs.json
index 825230658..0ff11776c 100644
--- a/boards/ARDUINO_GIGA/romfs.json
+++ b/boards/ARDUINO_GIGA/romfs.json
@@ -38,6 +38,12 @@
         "alignment": 16,
         "optimize": "Performance"
     },
+    {
+        "type": "tflite",
+        "path": "{TOP}/lib/models/blazeface_front_128.tflite",
+        "alignment": 16,
+        "optimize": "Performance"
+    },
     {
         "type": "haar",
         "path": "{TOP}/lib/haar/haarcascade_eye.xml",
diff --git a/boards/ARDUINO_NICLA_VISION/romfs.json b/boards/ARDUINO_NICLA_VISION/romfs.json
index 16e40bb2e..8a1c4876d 100644
--- a/boards/ARDUINO_NICLA_VISION/romfs.json
+++ b/boards/ARDUINO_NICLA_VISION/romfs.json
@@ -32,6 +32,12 @@
         "alignment": 16,
         "optimize": "Performance"
     },
+    {
+        "type": "tflite",
+        "path": "{TOP}/lib/models/blazeface_front_128.tflite",
+        "alignment": 16,
+        "optimize": "Performance"
+    },
     {
         "type": "haar",
         "path": "{TOP}/lib/haar/haarcascade_eye.xml",
diff --git a/boards/ARDUINO_PORTENTA_H7/romfs.json b/boards/ARDUINO_PORTENTA_H7/romfs.json
index 825230658..0ff11776c 100644
--- a/boards/ARDUINO_PORTENTA_H7/romfs.json
+++ b/boards/ARDUINO_PORTENTA_H7/romfs.json
@@ -38,6 +38,12 @@
         "alignment": 16,
         "optimize": "Performance"
     },
+    {
+        "type": "tflite",
+        "path": "{TOP}/lib/models/blazeface_front_128.tflite",
+        "alignment": 16,
+        "optimize": "Performance"
+    },
     {
         "type": "haar",
         "path": "{TOP}/lib/haar/haarcascade_eye.xml",
diff --git a/boards/OPENMV4P/romfs.json b/boards/OPENMV4P/romfs.json
index 26ef3eae3..63cc583fa 100644
--- a/boards/OPENMV4P/romfs.json
+++ b/boards/OPENMV4P/romfs.json
@@ -38,6 +38,12 @@
         "alignment": 16,
         "optimize": "Performance"
     },
+    {
+        "type": "tflite",
+        "path": "{TOP}/lib/models/blazeface_front_128.tflite",
+        "alignment": 16,
+        "optimize": "Performance"
+    },
     {
         "type": "haar",
         "path": "{TOP}/lib/haar/haarcascade_eye.xml",
diff --git a/boards/OPENMVPT/romfs.json b/boards/OPENMVPT/romfs.json
index 26ef3eae3..63cc583fa 100644
--- a/boards/OPENMVPT/romfs.json
+++ b/boards/OPENMVPT/romfs.json
@@ -38,6 +38,12 @@
         "alignment": 16,
         "optimize": "Performance"
     },
+    {
+        "type": "tflite",
+        "path": "{TOP}/lib/models/blazeface_front_128.tflite",
+        "alignment": 16,
+        "optimize": "Performance"
+    },
     {
         "type": "haar",
         "path": "{TOP}/lib/haar/haarcascade_eye.xml",
diff --git a/boards/OPENMV_AE3/romfs.json b/boards/OPENMV_AE3/romfs.json
index baf70a0df..84d621d8a 100644
--- a/boards/OPENMV_AE3/romfs.json
+++ b/boards/OPENMV_AE3/romfs.json
@@ -32,6 +32,12 @@
         "alignment": 16,
         "optimize": "Performance"
     },
+    {
+        "type": "tflite",
+        "path": "{TOP}/lib/models/blazeface_front_128.tflite",
+        "alignment": 16,
+        "optimize": "Performance"
+    },
     {
         "type": "haar",
         "path": "{TOP}/lib/haar/haarcascade_eye.xml",
diff --git a/boards/OPENMV_N6/romfs.json b/boards/OPENMV_N6/romfs.json
index 7ab29a3a8..77cc22f75 100644
--- a/boards/OPENMV_N6/romfs.json
+++ b/boards/OPENMV_N6/romfs.json
@@ -19,6 +19,12 @@
             "path": "{TOP}/lib/models/yolo_v5_224_nano.tflite",
             "alignment": 32,
             "profile": "default"
+        },
+        {
+            "type": "tflite",
+            "path": "{TOP}/lib/models/blazeface_front_128.tflite",
+            "alignment": 32,
+            "profile": "default"
         }
     ]
 }
diff --git a/boards/OPENMV_RT1060/romfs.json b/boards/OPENMV_RT1060/romfs.json
index 26ef3eae3..63cc583fa 100644
--- a/boards/OPENMV_RT1060/romfs.json
+++ b/boards/OPENMV_RT1060/romfs.json
@@ -38,6 +38,12 @@
         "alignment": 16,
         "optimize": "Performance"
     },
+    {
+        "type": "tflite",
+        "path": "{TOP}/lib/models/blazeface_front_128.tflite",
+        "alignment": 16,
+        "optimize": "Performance"
+    },
     {
         "type": "haar",
         "path": "{TOP}/lib/haar/haarcascade_eye.xml",
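Note: these romfs.json entries bake the BlazeFace model into each board's ROM filesystem image, so scripts can open it by path without it occupying the user filesystem. A quick sketch of how the entries surface at runtime (the os.listdir call is just to confirm the mount; the /rom mount point is taken from the example script added later in this patch):

    import os
    import ml

    # Entries listed in romfs.json appear under /rom at runtime.
    print(os.listdir("/rom"))
    model = ml.Model("/rom/blazeface_front_128.tflite")
    print(model)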
"{TOP}/lib/haar/haarcascade_eye.xml", diff --git a/lib/models/blazeface_front_128.tflite b/lib/models/blazeface_front_128.tflite new file mode 100644 index 000000000..a511dbc83 Binary files /dev/null and b/lib/models/blazeface_front_128.tflite differ diff --git a/lib/stai/stai_backend.c b/lib/stai/stai_backend.c index aefe61a53..aed4cadc7 100644 --- a/lib/stai/stai_backend.c +++ b/lib/stai/stai_backend.c @@ -178,7 +178,7 @@ int ml_backend_init_model(py_ml_model_obj_t *model) { const LL_Buffer_InfoTypeDef *model_outputs = ll_aton_reloc_get_output_buffers_info(&state->nn_inst, -1); // Initialize the model's inputs. - model->inputs_size = 1; + for (model->inputs_size = 0; model_inputs[model->inputs_size].name != NULL; model->inputs_size++); model->input_shape = (mp_obj_tuple_t *) MP_OBJ_TO_PTR(mp_obj_new_tuple(model->inputs_size, NULL)); model->input_scale = (mp_obj_tuple_t *) MP_OBJ_TO_PTR(mp_obj_new_tuple(model->inputs_size, NULL)); model->input_zero_point = (mp_obj_tuple_t *) MP_OBJ_TO_PTR(mp_obj_new_tuple(model->inputs_size, NULL)); @@ -205,7 +205,7 @@ int ml_backend_init_model(py_ml_model_obj_t *model) { } // Initialize the model's outputs. - model->outputs_size = 1; + for (model->outputs_size = 0; model_outputs[model->outputs_size].name != NULL; model->outputs_size++); model->output_shape = (mp_obj_tuple_t *) MP_OBJ_TO_PTR(mp_obj_new_tuple(model->outputs_size, NULL)); model->output_scale = (mp_obj_tuple_t *) MP_OBJ_TO_PTR(mp_obj_new_tuple(model->outputs_size, NULL)); model->output_zero_point = (mp_obj_tuple_t *) MP_OBJ_TO_PTR(mp_obj_new_tuple(model->outputs_size, NULL)); diff --git a/scripts/examples/03-Machine-Learning/00-TensorFlow/blazeface_face_detector.py b/scripts/examples/03-Machine-Learning/00-TensorFlow/blazeface_face_detector.py new file mode 100644 index 000000000..f44bb01b5 --- /dev/null +++ b/scripts/examples/03-Machine-Learning/00-TensorFlow/blazeface_face_detector.py @@ -0,0 +1,49 @@ +# This work is licensed under the MIT license. +# Copyright (c) 2013-2025 OpenMV LLC. All rights reserved. +# https://github.com/openmv/openmv/blob/master/LICENSE +# +# This example shows off Google's MediaPipe BlazeFace face detection model. + +import csi +import time +import ml +from ml.postprocessing import mediapipe_face_detection_postprocess + +# Initialize the sensor. +csi0 = csi.CSI() +csi0.reset() +csi0.pixformat(csi.RGB565) +csi0.framesize(csi.VGA) +csi0.window((400, 400)) + +# Load built-in face detection model +model = ml.Model("/rom/blazeface_front_128.tflite") +print(model) + +# Create the face detection post-processor. This post-processor dynamically +# generates anchors for the model input size which should only be done once. +face_detection_postprocess = mediapipe_face_detection_postprocess(threshold=0.6) + +clock = time.clock() +while True: + clock.tick() + img = csi0.snapshot() + + # faces is a list of ((x, y, w, h), score, keypoints) tuples + faces = model.predict([img], callback=face_detection_postprocess) + + # Draw bounding boxes around the detected faces and keypoints. 
diff --git a/scripts/examples/03-Machine-Learning/00-TensorFlow/blazeface_face_detector.py b/scripts/examples/03-Machine-Learning/00-TensorFlow/blazeface_face_detector.py
new file mode 100644
index 000000000..f44bb01b5
--- /dev/null
+++ b/scripts/examples/03-Machine-Learning/00-TensorFlow/blazeface_face_detector.py
@@ -0,0 +1,49 @@
+# This work is licensed under the MIT license.
+# Copyright (c) 2013-2025 OpenMV LLC. All rights reserved.
+# https://github.com/openmv/openmv/blob/master/LICENSE
+#
+# This example shows off Google's MediaPipe BlazeFace face detection model.
+
+import csi
+import time
+import ml
+from ml.postprocessing import mediapipe_face_detection_postprocess
+
+# Initialize the sensor.
+csi0 = csi.CSI()
+csi0.reset()
+csi0.pixformat(csi.RGB565)
+csi0.framesize(csi.VGA)
+csi0.window((400, 400))
+
+# Load the built-in face detection model.
+model = ml.Model("/rom/blazeface_front_128.tflite")
+print(model)
+
+# Create the face detection post-processor. This post-processor dynamically
+# generates anchors for the model input size, which should only be done once.
+face_detection_postprocess = mediapipe_face_detection_postprocess(threshold=0.6)
+
+clock = time.clock()
+while True:
+    clock.tick()
+    img = csi0.snapshot()
+
+    # faces is a list, per class, of ((x, y, w, h), score, keypoints) tuples.
+    faces = model.predict([img], callback=face_detection_postprocess)
+
+    # Draw bounding boxes around the detected faces and keypoints.
+    if faces:
+        for r, score, keypoints in faces[0]:
+            ml.utils.draw_predictions(img, [r], ["face"], [(0, 0, 255)], format=None)
+            # keypoints is an ndarray of shape (6, 2):
+            # 0 - right eye (x, y)
+            # 1 - left eye (x, y)
+            # 2 - nose (x, y)
+            # 3 - mouth (x, y)
+            # 4 - right ear (x, y)
+            # 5 - left ear (x, y)
+            for kp in keypoints.tolist():
+                img.draw_circle(int(kp[0]), int(kp[1]), 4, color=(255, 0, 0))
+
+    print(clock.fps(), "fps")
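Note: if the defaults need tuning, the post-processor constructor used above exposes the detection and NMS parameters directly, so the score cutoff and box-merging behavior can be adjusted without touching the library. A sketch with illustrative (not recommended) values:

    from ml.postprocessing import mediapipe_face_detection_postprocess

    # Stricter score cutoff and custom soft-NMS settings. anchors defaults
    # to None, which triggers the one-time anchor generation shown below.
    postprocess = mediapipe_face_detection_postprocess(
        threshold=0.75,      # minimum sigmoid score to keep a detection
        nms_threshold=0.1,   # score threshold passed to get_bounding_boxes()
        nms_sigma=0.1,       # soft-NMS sigma passed to get_bounding_boxes()
    )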
diff --git a/scripts/libraries/ml/ml/postprocessing.py b/scripts/libraries/ml/ml/postprocessing.py
index 8254dcdd1..1ce4667a0 100644
--- a/scripts/libraries/ml/ml/postprocessing.py
+++ b/scripts/libraries/ml/ml/postprocessing.py
@@ -34,6 +34,14 @@ from ulab import numpy as np
 _NO_DETECTION = const(())
 
 
+def logit(x):
+    return np.log(x / (1.0 - x))
+
+
+def sigmoid(x):
+    return 1.0 / (1.0 + np.exp(-x))
+
+
 def mod(a, b):
     return a - (b * (a // b))
@@ -49,16 +57,16 @@
         return np.nonzero(scores < threshold)[0]
 
 
-def quantize(model, value):
-    if model.output_dtype[0] == 'f':
+def quantize(model, value, index=0):
+    if model.output_dtype[index] == 'f':
         return value
-    return (value / model.output_scale[0]) + model.output_zero_point[0]
+    return (value / model.output_scale[index]) + model.output_zero_point[index]
 
 
-def dequantize(model, value):
-    if model.output_dtype[0] == 'f':
+def dequantize(model, value, index=0):
+    if model.output_dtype[index] == 'f':
         return value
-    return (value - model.output_zero_point[0]) * model.output_scale[0]
+    return (value - float(model.output_zero_point[index])) * model.output_scale[index]
 
 
 class fomo_postprocess:
@@ -141,12 +149,6 @@ class yolo_v2_postprocess:
 
     def __call__(self, model, inputs, outputs):
-        def logit(x):
-            return np.log(x / (1.0 - x))
-
-        def sigmoid(x):
-            return 1.0 / (1.0 + np.exp(-x))
-
         def softmax(x):
             e_x = np.exp(x - np.max(x, axis=1, keepdims=True))
             return e_x / np.sum(e_x, axis=1, keepdims=True)
@@ -330,3 +332,94 @@ class yolo_v8_postprocess:
             nms.add_bounding_box(xmin[i], ymin[i], xmax[i], ymax[i], bb_scores[i], bb_classes[i])
 
         return nms.get_bounding_boxes(threshold=self.nms_threshold, sigma=self.nms_sigma)
+
+
+class mediapipe_face_detection_postprocess:
+    _BLAZEFACE_CX = const(0)
+    _BLAZEFACE_CY = const(1)
+    _BLAZEFACE_CW = const(2)
+    _BLAZEFACE_CH = const(3)
+    _BLAZEFACE_KP = const(4)
+
+    def __init__(self, threshold=0.6, anchors=None, nms_threshold=0.1, nms_sigma=0.1):
+        self.threshold = threshold
+        self.anchors = anchors
+
+        if self.anchors is None:
+            self.anchors = np.empty((896, 2))
+            idx = 0
+
+            # Generate anchors for a 16x16 grid with 2 duplicates and
+            # an 8x8 grid with 6 duplicates to match the model output size.
+            for grid_size, scales in [(16, 2), (8, 6)]:
+                for gy in range(grid_size):
+                    cy = (gy + 0.5) / grid_size
+                    for gx in range(grid_size):
+                        cx = (gx + 0.5) / grid_size
+                        for _ in range(scales):
+                            self.anchors[idx, 0] = cx
+                            self.anchors[idx, 1] = cy
+                            idx += 1
+
+        self.nms_threshold = nms_threshold
+        self.nms_sigma = nms_sigma
+
+    def blazeface_post_process(self, ih, iw, nms, model, inputs, outputs, score_idx, cords_idx, t, anchors):
+        s_oh, s_ow, s_oc = model.output_shape[score_idx]
+        scale = model.output_scale[score_idx]
+        t = quantize(model, logit(t), index=score_idx)
+
+        # Threshold all the scores
+        score_row_outputs = outputs[score_idx].reshape((s_oh * s_ow * s_oc))
+        score_indices = threshold(score_row_outputs, t, scale)
+        if not len(score_indices):
+            return _NO_DETECTION
+
+        # Get the score information
+        bb_scores = np.take(score_row_outputs, score_indices, axis=0)
+        bb_scores = sigmoid(dequantize(model, bb_scores, index=score_idx))
+
+        # Get the bounding boxes that have a valid score
+        c_oh, c_ow, c_oc = model.output_shape[cords_idx]
+        cords_row_outputs = outputs[cords_idx].reshape((c_oh * c_ow, c_oc))
+        bb = dequantize(model, np.take(cords_row_outputs, score_indices, axis=0), index=cords_idx)
+
+        # Get the anchor box information
+        bb_a_array = np.take(anchors, score_indices, axis=0)
+
+        # Compute the bounding box information
+        ax = bb_a_array[:, _BLAZEFACE_CX]
+        ay = bb_a_array[:, _BLAZEFACE_CY]
+        x_center = bb[:, _BLAZEFACE_CX] / iw + ax
+        y_center = bb[:, _BLAZEFACE_CY] / ih + ay
+        w_rel = bb[:, _BLAZEFACE_CW] / iw * 0.5
+        h_rel = bb[:, _BLAZEFACE_CH] / ih * 0.5
+
+        # Get the keypoint information
+        row_count = bb.shape[0]
+        keypoints = np.empty((row_count, (c_oc - _BLAZEFACE_KP) // 2, 2))
+        keypoints[:, :, 0] = (bb[:, _BLAZEFACE_KP::2] / iw + ax.reshape((row_count, 1))) * iw
+        keypoints[:, :, 1] = (bb[:, _BLAZEFACE_KP + 1::2] / ih + ay.reshape((row_count, 1))) * ih
+
+        # Scale the bounding boxes to have enough integer precision for NMS
+        xmin = (x_center - w_rel) * iw
+        ymin = (y_center - h_rel) * ih
+        xmax = (x_center + w_rel) * iw
+        ymax = (y_center + h_rel) * ih
+
+        for i in range(bb.shape[0]):
+            nms.add_bounding_box(xmin[i], ymin[i], xmax[i], ymax[i], bb_scores[i], 0, keypoints=keypoints[i])
+
+    def __call__(self, model, inputs, outputs):
+        ib, ih, iw, ic = model.input_shape[0]
+        nms = NMS(iw, ih, inputs[0].roi)
+        output_len = outputs[0].shape[1]
+
+        self.blazeface_post_process(ih, iw, nms, model, inputs, outputs, 1, 0,
+                                    self.threshold, self.anchors[:output_len])
+
+        if output_len < len(self.anchors):
+            self.blazeface_post_process(ih, iw, nms, model, inputs, outputs, 2, 3,
+                                        self.threshold, self.anchors[output_len:])
+
+        return nms.get_bounding_boxes(threshold=self.nms_threshold, sigma=self.nms_sigma)
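Note: the anchor layout is worth spelling out. BlazeFace-front at 128x128 evaluates 16*16*2 + 8*8*6 = 896 anchor boxes, which is exactly what the generation loop above fills in, and __call__ splits that list to match the paired outputs (scores in outputs[1] and outputs[2], raw box/keypoint coordinates in outputs[0] and outputs[3]). A host-side sketch (plain Python, no ulab needed) that mirrors the loop and checks the count and ordering:

    anchors = []
    for grid_size, scales in [(16, 2), (8, 6)]:
        for gy in range(grid_size):
            cy = (gy + 0.5) / grid_size
            for gx in range(grid_size):
                cx = (gx + 0.5) / grid_size
                anchors.extend([(cx, cy)] * scales)

    # The 16x16 grid contributes 512 anchors, the 8x8 grid 384 more.
    assert len(anchors) == 16 * 16 * 2 + 8 * 8 * 6 == 896
    print(anchors[0])    # (0.03125, 0.03125): center of the first 16x16 cell
    print(anchors[512])  # (0.0625, 0.0625): center of the first 8x8 cell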
diff --git a/scripts/libraries/ml/ml/preprocessing.py b/scripts/libraries/ml/ml/preprocessing.py
index 012349e7a..5ecafd03a 100644
--- a/scripts/libraries/ml/ml/preprocessing.py
+++ b/scripts/libraries/ml/ml/preprocessing.py
@@ -93,4 +93,6 @@ class Normalization:
         fadd = (fadd - np.array(self.mean)) / np.array(self.stdev)
         fscale = fscale / np.array(self.stdev)
 
-        array = (array * fscale) + fadd
+        # Apply normalization in-place (must be done in two steps for ulab).
+        array *= fscale
+        array += fadd
diff --git a/scripts/libraries/ml/ml/utils.py b/scripts/libraries/ml/ml/utils.py
index 47b568498..461b8d6bf 100644
--- a/scripts/libraries/ml/ml/utils.py
+++ b/scripts/libraries/ml/ml/utils.py
@@ -43,7 +43,7 @@ class NMS:
             raise ValueError("Invalid ROI dimensions!")
         self.boxes = []
 
-    def add_bounding_box(self, xmin, ymin, xmax, ymax, score, label_index):
+    def add_bounding_box(self, xmin, ymin, xmax, ymax, score, label_index, keypoints=None):
         if score >= 0.0 and score <= 1.0:
             xmin = max(0.0, min(xmin, self.window_w))
             ymin = max(0.0, min(ymin, self.window_h))
@@ -52,7 +52,7 @@ class NMS:
             w = int(xmax - xmin)
             h = int(ymax - ymin)
             if w > 0 and h > 0:
-                self.boxes.append([int(xmin), int(ymin), w, h, score, label_index])
+                self.boxes.append([int(xmin), int(ymin), w, h, score, label_index, keypoints])
 
     def get_bounding_boxes(self, threshold=0.1, sigma=0.1):
         sorted_boxes = sorted(self.boxes, key=lambda x: x[4], reverse=True)
@@ -107,15 +107,22 @@ class NMS:
             output_boxes[i][1] = int((output_boxes[i][1] * scale) + y_offset)
             output_boxes[i][2] = int(output_boxes[i][2] * scale)
             output_boxes[i][3] = int(output_boxes[i][3] * scale)
+            keypoints = output_boxes[i][6]
+            if keypoints is not None:
+                keypoints *= scale
+                keypoints[:, 0] += x_offset
+                keypoints[:, 1] += y_offset
 
         # Create a list per class with (rect, score) tuples.
         output_list = [[] for i in range(max_label_index + 1)]
         for i in range(len(output_boxes)):
-            output_list[output_boxes[i][5]].append(
-                (output_boxes[i][0:4], output_boxes[i][4])
-            )
+            rect_score = [output_boxes[i][:4], output_boxes[i][4]]
+            keypoints = output_boxes[i][6]
+            if keypoints is not None:
+                rect_score.append(keypoints)
+            output_list[output_boxes[i][5]].append(tuple(rect_score))
 
         return output_list
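Note: with keypoints threaded through NMS, each candidate's landmarks ride along in the box record, get rescaled with the same scale and x/y offsets as the rectangle, and are appended to the returned tuple when present. A minimal on-device sketch of that low-level flow (the window/ROI values mirror the example script's 128x128 model input over a 400x400 ROI; normally the post-processor classes drive this for you):

    from ml.utils import NMS
    from ulab import numpy as np

    # A 128x128 model window mapped onto a (x, y, w, h) = (0, 0, 400, 400) ROI.
    nms = NMS(128, 128, (0, 0, 400, 400))

    # Two overlapping candidates for label 0, each with its own (6, 2)
    # keypoint array in model-input coordinates. Keypoint arrays are
    # mutated in place during rescaling, so don't share one between boxes.
    nms.add_bounding_box(30, 40, 90, 100, 0.9, 0,
                         keypoints=np.array([[40.0, 50.0]] * 6))
    nms.add_bounding_box(32, 42, 92, 102, 0.7, 0,
                         keypoints=np.array([[42.0, 52.0]] * 6))

    # Soft-NMS suppresses the overlap; surviving boxes and keypoints come
    # back rescaled into ROI coordinates.
    for rect, score, keypoints in nms.get_bounding_boxes(threshold=0.1, sigma=0.1)[0]:
        print(rect, score, keypoints)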