Merge pull request #2886 from kwagyeman/kwabena/fix_blazeface

scripts/examples: Fix blazeface and fomo examples.
Ibrahim Abdelkader 2025-10-22 16:34:18 +03:00 committed by GitHub
commit 7a46d0c82e
GPG Key ID: B5690EEEBB952194
5 changed files with 16 additions and 37 deletions
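
The change is the same across all five examples: the standalone post-processing callbacks from ml.postprocessing are replaced by post-processor objects passed to ml.Model() via the postprocess argument, so predict() no longer takes a callback. A minimal before/after sketch of the BlazeFace case, using only the module paths and values that appear in the diffs below (img stands for a frame returned by snapshot()):

# Before this commit: post-processing wired up as a predict() callback.
from ml.postprocessing import mediapipe_face_detection_postprocess
model = ml.Model("/rom/blazeface_front_128.tflite")
face_detection_postprocess = mediapipe_face_detection_postprocess(threshold=0.6)
faces = model.predict([img], callback=face_detection_postprocess)

# After this commit: the post-processor is attached to the model itself.
from ml.postprocessing.mediapipe import BlazeFace
model = ml.Model("/rom/blazeface_front_128.tflite", postprocess=BlazeFace(threshold=0.4))
faces = model.predict([img])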

View File

@@ -2,12 +2,12 @@
# Copyright (c) 2013-2025 OpenMV LLC. All rights reserved.
# https://github.com/openmv/openmv/blob/master/LICENSE
#
-# This example shows off Google's MediaPipe BlazeFace face detection model.
+# This example shows off Google's MediaPipe Face Detection model.
import csi
import time
import ml
-from ml.postprocessing import mediapipe_face_detection_postprocess
+from ml.postprocessing.mediapipe import BlazeFace
# Initialize the sensor.
csi0 = csi.CSI()
@@ -17,25 +17,22 @@ csi0.framesize(csi.VGA)
csi0.window((400, 400))
# Load built-in face detection model
-model = ml.Model("/rom/blazeface_front_128.tflite")
+model = ml.Model("/rom/blazeface_front_128.tflite", postprocess=BlazeFace(threshold=0.4))
print(model)
-# Create the face detection post-processor. This post-processor dynamically
-# generates anchors for the model input size which should only be done once.
-face_detection_postprocess = mediapipe_face_detection_postprocess(threshold=0.6)
clock = time.clock()
while True:
    clock.tick()
    img = csi0.snapshot()
    # faces is a list of ((x, y, w, h), score, keypoints) tuples
-    faces = model.predict([img], callback=face_detection_postprocess)
+    faces = model.predict([img])
    # Draw bounding boxes around the detected faces and keypoints.
    if faces:
        for r, score, keypoints in faces[0]:
-            ml.utils.draw_predictions(img, [r], ["face"], [(0, 0, 255)], format=None)
+            ml.utils.draw_predictions(img, [r], ("face",), ((0, 0, 255),), format=None)
            # keypoints is a ndarray of shape (6, 2)
            # 0 - right eye (x, y)
            # 1 - left eye (x, y)
@@ -43,7 +40,6 @@ while True:
            # 3 - mouth (x, y)
            # 4 - right ear (x, y)
            # 5 - left ear (x, y)
-            for kp in keypoints.tolist():
-                img.draw_circle(int(kp[0]), int(kp[1]), 4, color=(255, 0, 0))
+            ml.utils.draw_keypoints(img, keypoints, color=(255, 0, 0))
    print(clock.fps(), "fps")
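
For readability, here is a sketch of the updated face-detection example reassembled from the hunks above; blank lines and the sensor setup outside the hunks (e.g. the pixel format) are not visible in this diff and are inferred or elided:

import csi
import time
import ml
from ml.postprocessing.mediapipe import BlazeFace

# Initialize the sensor.
csi0 = csi.CSI()
# ... pixel-format setup not shown in this diff ...
csi0.framesize(csi.VGA)
csi0.window((400, 400))

# Load built-in face detection model with its post-processor attached.
model = ml.Model("/rom/blazeface_front_128.tflite", postprocess=BlazeFace(threshold=0.4))
print(model)

clock = time.clock()
while True:
    clock.tick()
    img = csi0.snapshot()
    # faces is a list of ((x, y, w, h), score, keypoints) tuples
    faces = model.predict([img])
    if faces:
        for r, score, keypoints in faces[0]:
            ml.utils.draw_predictions(img, [r], ("face",), ((0, 0, 255),), format=None)
            # keypoints is a (6, 2) ndarray:
            # right eye, left eye, nose, mouth, right ear, left ear
            ml.utils.draw_keypoints(img, keypoints, color=(255, 0, 0))
    print(clock.fps(), "fps")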

View File

@@ -25,12 +25,6 @@ print(model)
# Line connections between hand joints for drawing the hand skeleton.
palm_lines = ((0, 1), (1, 2), (2, 3), (3, 4), (4, 0), (0, 5), (5, 6))
-# Visualization parameters.
-palm_labels = ["palm"]
-palm_colors = [(0, 0, 255)]
-kp_color = (255, 0, 0)
-line_color = (0, 255, 0)
clock = time.clock()
while True:
    clock.tick()
@@ -42,7 +36,7 @@ while True:
    # Draw bounding boxes around the detected palms and keypoints.
    if palms:
        for r, score, keypoints in palms[0]:
-            ml.utils.draw_predictions(img, [r], palm_labels, palm_colors, format=None)
+            ml.utils.draw_predictions(img, [r], ("palm",), ((0, 0, 255),), format=None)
            # keypoints is a ndarray of shape (7, 2)
            # 0 - wrist (x, y)
@@ -55,6 +49,6 @@ while True:
            #
            # mcp = Metacarpophalangeal Joint - the knuckle
            # cmc = Carpometacarpal Joint - the base of the thumb
-            ml.utils.draw_skeleton(img, keypoints, palm_lines, kp_color=kp_color, line_color=line_color)
+            ml.utils.draw_skeleton(img, keypoints, palm_lines, kp_color=(255, 0, 0), line_color=(0, 255, 0))
    print(clock.fps(), "fps")

View File

@@ -33,11 +33,6 @@ hand_lines = ((0, 1), (1, 2), (2, 3), (3, 4), (0, 5), (5, 6), (6, 7), (7, 8),
              (5, 9), (9, 10), (10, 11), (11, 12), (9, 13), (13, 14), (14, 15), (15, 16),
              (13, 17), (17, 18), (18, 19), (19, 20), (0, 17))
-# Visualization parameters.
-palm_colors = [(0, 0, 255)]
-kp_color = (255, 0, 0)
-line_color = (0, 255, 0)
clock = time.clock()
while True:
    clock.tick()
@@ -61,7 +56,7 @@ while True:
    # Draw bounding boxes around the detected hands and keypoints.
    for i, detections in enumerate(hands):
        for r, score, keypoints in detections:
-            ml.utils.draw_predictions(img, [r], ["right" if i else "left"], palm_colors, format=None)
+            ml.utils.draw_predictions(img, [r], ("right",) if i else ("left",), ((0, 0, 255),), format=None)
            # keypoints: ndarray (21, 3) of hand joints (x, y, z)
            # Indices follow MediaPipe convention:
@@ -72,6 +67,6 @@ while True:
            # Ring: 13 mcp, 14 pip, 15 dip, 16 tip
            # Pinky: 17 mcp, 18 pip, 19 dip, 20 tip
            # (cmc=base, mcp=knuckle, pip=mid, dip=distal, ip=thumb joint, tip=fingertip)
-            ml.utils.draw_skeleton(img, keypoints, hand_lines, kp_color=kp_color, line_color=line_color)
+            ml.utils.draw_skeleton(img, keypoints, hand_lines, kp_color=(255, 0, 0), line_color=(0, 255, 0))
    print(clock.fps(), "fps")

View File

@@ -33,11 +33,6 @@ hand_lines = ((0, 1), (1, 2), (2, 3), (3, 4), (0, 5), (5, 6), (6, 7), (7, 8),
              (5, 9), (9, 10), (10, 11), (11, 12), (9, 13), (13, 14), (14, 15), (15, 16),
              (13, 17), (17, 18), (18, 19), (19, 20), (0, 17))
-# Visualization parameters.
-palm_colors = [(0, 0, 255)]
-kp_color = (255, 0, 0)
-line_color = (0, 255, 0)
# Tracking vars.
n = None
@@ -71,7 +66,7 @@ while True:
    # Draw bounding boxes around the detected hands and keypoints.
    for i, detections in enumerate(hands):
        for r, score, keypoints in detections:
-            ml.utils.draw_predictions(img, [r], ["right" if i else "left"], [(0, 0, 255)], format=None)
+            ml.utils.draw_predictions(img, [r], ("right",) if i else ("left",), ((0, 0, 255),), format=None)
            # keypoints: ndarray (21, 3) of hand joints (x, y, z)
            # Indices follow MediaPipe convention:
@@ -82,7 +77,7 @@ while True:
            # Ring: 13 mcp, 14 pip, 15 dip, 16 tip
            # Pinky: 17 mcp, 18 pip, 19 dip, 20 tip
            # (cmc=base, mcp=knuckle, pip=mid, dip=distal, ip=thumb joint, tip=fingertip)
-            ml.utils.draw_skeleton(img, keypoints, hand_lines, kp_color=kp_color, line_color=line_color)
+            ml.utils.draw_skeleton(img, keypoints, hand_lines, kp_color=(255, 0, 0), line_color=(0, 255, 0))
            # Center new_wider_rect on hand for tracking
            new_wider_rect = (r[0] + (r[2] // 2) - (wider_rect[2] // 2),

View File

@@ -9,7 +9,7 @@
import sensor
import time
import ml
-from ml.postprocessing import fomo_postprocess
+from ml.postprocessing.edgeimpulse import Fomo
import math
sensor.reset()  # Reset and initialize the sensor.
@@ -19,7 +19,7 @@ sensor.set_windowing((240, 240))  # Set 240x240 window.
sensor.skip_frames(time=2000)  # Let the camera adjust.
# Load built-in FOMO face detection model
-model = ml.Model("/rom/fomo_face_detection.tflite")
+model = ml.Model("/rom/fomo_face_detection.tflite", postprocess=Fomo(threshold=0.4))
print(model)
# Alternatively, models can be loaded from the filesystem storage.
@@ -39,10 +39,9 @@ colors = [ # Add more colors if you are detecting more than 7 types of classes
clock = time.clock()
while True:
    clock.tick()
    img = sensor.snapshot()
-    for i, detection_list in enumerate(model.predict([img], callback=fomo_postprocess())):
+    for i, detection_list in enumerate(model.predict([img])):
        if i == 0:
            continue  # background class
        if len(detection_list) == 0:
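
The FOMO example follows the same pattern. A condensed sketch of the updated flow; the per-detection drawing code and the colors list fall outside the hunks shown here and are elided:

import sensor
import time
import ml
from ml.postprocessing.edgeimpulse import Fomo
import math  # used by the drawing code that is elided below

sensor.reset()  # Reset and initialize the sensor.
# ... pixel format / frame size setup not shown in this diff ...
sensor.set_windowing((240, 240))  # Set 240x240 window.
sensor.skip_frames(time=2000)  # Let the camera adjust.

# Load built-in FOMO face detection model with its post-processor attached.
model = ml.Model("/rom/fomo_face_detection.tflite", postprocess=Fomo(threshold=0.4))
print(model)

clock = time.clock()
while True:
    clock.tick()
    img = sensor.snapshot()
    for i, detection_list in enumerate(model.predict([img])):
        if i == 0:
            continue  # background class
        if len(detection_list) == 0:
            continue  # no detections for this class
        # ... draw each detection's center using the colors list (elided) ...
    print(clock.fps(), "fps")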