diff --git a/boards/OPENMV_AE3/romfs.json b/boards/OPENMV_AE3/romfs.json
index a337b0458..d3d600740 100644
--- a/boards/OPENMV_AE3/romfs.json
+++ b/boards/OPENMV_AE3/romfs.json
@@ -44,6 +44,12 @@
             "alignment": 16,
             "optimize": "Performance"
         },
+        {
+            "type": "tflite",
+            "path": "{TOP}/lib/models/face_landmarks_192.tflite",
+            "alignment": 16,
+            "optimize": "Performance"
+        },
         {
             "type": "tflite",
             "path": "{TOP}/lib/models/palm_detection_full_192.tflite",
diff --git a/boards/OPENMV_N6/romfs.json b/boards/OPENMV_N6/romfs.json
index 3b65e9abe..ab4684b6e 100644
--- a/boards/OPENMV_N6/romfs.json
+++ b/boards/OPENMV_N6/romfs.json
@@ -26,6 +26,12 @@
             "alignment": 32,
             "profile": "default"
         },
+        {
+            "type": "tflite",
+            "path": "{TOP}/lib/models/face_landmarks_192.tflite",
+            "alignment": 32,
+            "profile": "default"
+        },
         {
             "type": "tflite",
             "path": "{TOP}/lib/models/palm_detection_full_192.tflite",
diff --git a/lib/models/face_landmarks_192.tflite b/lib/models/face_landmarks_192.tflite
new file mode 100644
index 000000000..0d5c19b06
Binary files /dev/null and b/lib/models/face_landmarks_192.tflite differ
diff --git a/scripts/examples/03-Machine-Learning/00-TensorFlow/blazeface_detector.py b/scripts/examples/03-Machine-Learning/00-TensorFlow/blazeface_detector.py
index 6efe47b9c..b6ebd7426 100644
--- a/scripts/examples/03-Machine-Learning/00-TensorFlow/blazeface_detector.py
+++ b/scripts/examples/03-Machine-Learning/00-TensorFlow/blazeface_detector.py
@@ -26,20 +26,16 @@ while True:
     img = csi0.snapshot()
 
     # faces is a list of ((x, y, w, h), score, keypoints) tuples
-    faces = model.predict([img])
+    for r, score, keypoints in model.predict([img]):
+        ml.utils.draw_predictions(img, [r], ("face",), ((0, 0, 255),), format=None)
 
-    # Draw bounding boxes around the detected faces and keypoints.
-    if faces:
-        for r, score, keypoints in faces[0]:
-            ml.utils.draw_predictions(img, [r], ("face",), ((0, 0, 255),), format=None)
-
-            # keypoints is a ndarray of shape (6, 2)
-            # 0 - right eye (x, y)
-            # 1 - left eye (x, y)
-            # 2 - nose (x, y)
-            # 3 - mouth (x, y)
-            # 4 - right ear (x, y)
-            # 5 - left ear (x, y)
-            ml.utils.draw_keypoints(img, keypoints, color=(255, 0, 0))
+        # keypoints is a ndarray of shape (6, 2)
+        # 0 - right eye (x, y)
+        # 1 - left eye (x, y)
+        # 2 - nose (x, y)
+        # 3 - mouth (x, y)
+        # 4 - right ear (x, y)
+        # 5 - left ear (x, y)
+        ml.utils.draw_keypoints(img, keypoints, color=(255, 0, 0))
 
     print(clock.fps(), "fps")
diff --git a/scripts/examples/03-Machine-Learning/00-TensorFlow/blazepalm_detection.py b/scripts/examples/03-Machine-Learning/00-TensorFlow/blazepalm_detection.py
index d1dabd998..6bea64b7d 100644
--- a/scripts/examples/03-Machine-Learning/00-TensorFlow/blazepalm_detection.py
+++ b/scripts/examples/03-Machine-Learning/00-TensorFlow/blazepalm_detection.py
@@ -31,24 +31,20 @@ while True:
     img = csi0.snapshot()
 
     # palms is a list of ((x, y, w, h), score, keypoints) tuples
-    palms = model.predict([img])
+    for r, score, keypoints in model.predict([img]):
+        ml.utils.draw_predictions(img, [r], ("palm",), ((0, 0, 255),), format=None)
 
-    # Draw bounding boxes around the detected palms and keypoints.
-    if palms:
-        for r, score, keypoints in palms[0]:
-            ml.utils.draw_predictions(img, [r], ("palm",), ((0, 0, 255),), format=None)
-
-            # keypoints is a ndarray of shape (7, 2)
-            # 0 - wrist (x, y)
-            # 1 - index finger mcp (x, y)
-            # 2 - middle finger mcp (x, y)
-            # 3 - ring finger mcp (x, y)
-            # 4 - pinky mcp (x, y)
-            # 5 - thumb cmc (x, y)
-            # 6 - thumb mcp (x, y)
-            #
-            # mcp = Metacarpophalangeal Joint - the knuckle
-            # cmc = Carpometacarpal Joint - the base of the thumb
-            ml.utils.draw_skeleton(img, keypoints, palm_lines, kp_color=(255, 0, 0), line_color=(0, 255, 0))
+        # keypoints is a ndarray of shape (7, 2)
+        # 0 - wrist (x, y)
+        # 1 - index finger mcp (x, y)
+        # 2 - middle finger mcp (x, y)
+        # 3 - ring finger mcp (x, y)
+        # 4 - pinky mcp (x, y)
+        # 5 - thumb cmc (x, y)
+        # 6 - thumb mcp (x, y)
+        #
+        # mcp = Metacarpophalangeal Joint - the knuckle
+        # cmc = Carpometacarpal Joint - the base of the thumb
+        ml.utils.draw_skeleton(img, keypoints, palm_lines, kp_color=(255, 0, 0), line_color=(0, 255, 0))
 
     print(clock.fps(), "fps")
diff --git a/scripts/examples/03-Machine-Learning/00-TensorFlow/face_landmarks_multi_face.py b/scripts/examples/03-Machine-Learning/00-TensorFlow/face_landmarks_multi_face.py
new file mode 100644
index 000000000..f598b7d78
--- /dev/null
+++ b/scripts/examples/03-Machine-Learning/00-TensorFlow/face_landmarks_multi_face.py
@@ -0,0 +1,50 @@
+# This work is licensed under the MIT license.
+# Copyright (c) 2013-2025 OpenMV LLC. All rights reserved.
+# https://github.com/openmv/openmv/blob/master/LICENSE
+#
+# This example shows off Google's MediaPipe Face Landmark Detection model for multiple faces.
+#
+# NOTE: This example requires an OpenMV Cam with an NPU like the AE3 or N6 to run real-time.
+
+import csi
+import time
+import ml
+from ml.preprocessing import Normalization
+from ml.postprocessing.mediapipe import BlazeFace
+from ml.postprocessing.mediapipe import FaceLandmarks
+
+# Initialize the sensor.
+csi0 = csi.CSI()
+csi0.reset()
+csi0.pixformat(csi.RGB565)
+csi0.framesize(csi.VGA)
+csi0.window((400, 400))
+
+# Load built-in face detection model
+face_detection = ml.Model("/rom/blazeface_front_128.tflite", postprocess=BlazeFace(threshold=0.4))
+print(face_detection)
+
+# Load built-in face landmark model
+face_landmarks = ml.Model("/rom/face_landmarks_192.tflite", postprocess=FaceLandmarks(threshold=0.4))
+print(face_landmarks)
+
+clock = time.clock()
+while True:
+    clock.tick()
+    img = csi0.snapshot()
+
+    # faces is a list of ((x, y, w, h), score, keypoints) tuples
+    for r, score, keypoints in face_detection.predict([img]):
+        # rect is (x, y, w, h) - enlarge by 2x for face landmarks model
+        wider_rect = (r[0] - r[2] // 2, r[1] - r[3] // 2, r[2] * 2, r[3] * 2)
+        # Operate on just the ROI of the detected face
+        n = Normalization(roi=wider_rect)
+
+        # marks is a list of ((x, y, w, h), score, keypoints) tuples
+        for r, score, keypoints in face_landmarks.predict([n(img)]):
+            ml.utils.draw_predictions(img, [r], ("face",), ((0, 0, 255),), format=None)
+
+            # keypoints is a ndarray of shape (468, 3) where each keypoint is (x, y, z)
+            ml.utils.draw_keypoints(img, keypoints, radius=0, color=(255, 0, 0))
+
+    print(clock.fps(), "fps")
diff --git a/scripts/examples/03-Machine-Learning/00-TensorFlow/face_landmarks_single_face.py b/scripts/examples/03-Machine-Learning/00-TensorFlow/face_landmarks_single_face.py
new file mode 100644
index 000000000..0246086bf
--- /dev/null
+++ b/scripts/examples/03-Machine-Learning/00-TensorFlow/face_landmarks_single_face.py
@@ -0,0 +1,71 @@
+# This work is licensed under the MIT license.
+# Copyright (c) 2013-2025 OpenMV LLC. All rights reserved.
+# https://github.com/openmv/openmv/blob/master/LICENSE
+#
+# This example shows off Google's MediaPipe Face Landmark Detection model for a single face.
+#
+# NOTE: This example requires an OpenMV Cam with an NPU like the AE3 or N6 to run real-time.
+
+import csi
+import time
+import ml
+from ml.preprocessing import Normalization
+from ml.postprocessing.mediapipe import BlazeFace
+from ml.postprocessing.mediapipe import FaceLandmarks
+
+# Initialize the sensor.
+csi0 = csi.CSI()
+csi0.reset()
+csi0.pixformat(csi.RGB565)
+csi0.framesize(csi.VGA)
+csi0.window((400, 400))
+
+# Load built-in face detection model
+face_detection = ml.Model("/rom/blazeface_front_128.tflite", postprocess=BlazeFace(threshold=0.4))
+print(face_detection)
+
+# Load built-in face landmark model
+face_landmarks = ml.Model("/rom/face_landmarks_192.tflite", postprocess=FaceLandmarks(threshold=0.4))
+print(face_landmarks)
+
+# Tracking vars.
+n = None
+
+clock = time.clock()
+while True:
+    clock.tick()
+    img = csi0.snapshot()
+
+    if n is None:
+        # faces is a list of ((x, y, w, h), score, keypoints) tuples
+        for r, score, keypoints in face_detection.predict([img]):
+            # rect is (x, y, w, h) - enlarge by 2x for face landmarks model
+            wider_rect = (r[0] - r[2] // 2, r[1] - r[3] // 2, r[2] * 2, r[3] * 2)
+            # Operate on just the ROI of the detected face
+            n = Normalization(roi=wider_rect)
+
+    else:
+        # marks is a list of ((x, y, w, h), score, keypoints) tuples
+        marks = face_landmarks.predict([n(img)])
+
+        # No faces detected, reset the tracker.
+        if not marks:
+            n = None
+            continue
+
+        # Draw bounding boxes around the detected faces and keypoints.
+        for r, score, keypoints in marks:
+            ml.utils.draw_predictions(img, [r], ("face",), ((0, 0, 255),), format=None)
+
+            # keypoints is a ndarray of shape (468, 3) where each keypoint is (x, y, z)
+            ml.utils.draw_keypoints(img, keypoints, radius=0, color=(255, 0, 0))
+
+            # Center new_wider_rect on face for tracking
+            new_wider_rect = (r[0] + (r[2] // 2) - (wider_rect[2] // 2),
+                              r[1] + (r[3] // 2) - (wider_rect[3] // 2),
+                              wider_rect[2],
+                              wider_rect[3])
+            # Operate on just the ROI of the detected face
+            n = Normalization(roi=new_wider_rect)
+
+    print(clock.fps(), "fps")
diff --git a/scripts/examples/03-Machine-Learning/00-TensorFlow/hand_landmarks_multi_hand.py b/scripts/examples/03-Machine-Learning/00-TensorFlow/hand_landmarks_multi_hand.py
index bfca10514..1e0379003 100644
--- a/scripts/examples/03-Machine-Learning/00-TensorFlow/hand_landmarks_multi_hand.py
+++ b/scripts/examples/03-Machine-Learning/00-TensorFlow/hand_landmarks_multi_hand.py
@@ -39,34 +39,31 @@ while True:
     img = csi0.snapshot()
 
     # palms is a list of ((x, y, w, h), score, keypoints) tuples
-    palms = palm_detection.predict([img])
+    for r, score, keypoints in palm_detection.predict([img]):
+        # rect is (x, y, w, h) - enlarge by 3x for hand landmarks model
+        wider_rect = (r[0] - r[2], r[1] - r[3], r[2] * 3, r[3] * 3)
+        # Operate on just the ROI of the detected palm
+        n = Normalization(roi=wider_rect)
 
-    if palms:
-        for r, score, keypoints in palms[0]:
-            # rect is (x, y, w, h) - enlarge by 3x for hand landmarks model
-            wider_rect = (r[0] - r[2], r[1] - r[3], r[2] * 3, r[3] * 3)
-            # Operate on just the ROI of the detected palm
-            n = Normalization(roi=wider_rect)
+        # hands is a list of ((x, y, w, h), score, keypoints) tuples
+        # index 0 (if present) is left hand
+        # index 1 (if present) is right hand
+        hands = hand_landmarks.predict([n(img)])
 
-            # hands is a list of ((x, y, w, h), score, keypoints) tuples
-            # index 0 (if present) is left hand
-            # index 1 (if present) is right hand
-            hands = hand_landmarks.predict([n(img)])
+        # Draw bounding boxes around the detected hands and keypoints.
+        for i, detections in enumerate(hands):
+            for r, score, keypoints in detections:
+                ml.utils.draw_predictions(img, [r], ("right",) if i else ("left",), ((0, 0, 255),), format=None)
 
-            # Draw bounding boxes around the detected hands and keypoints.
-            for i, detections in enumerate(hands):
-                for r, score, keypoints in detections:
-                    ml.utils.draw_predictions(img, [r], ("right",) if i else ("left",), ((0, 0, 255),), format=None)
-
-                    # keypoints: ndarray (21, 3) of hand joints (x, y, z)
-                    # Indices follow MediaPipe convention:
-                    # 0: wrist
-                    # Thumb: 1 cmc, 2 mcp, 3 ip, 4 tip
-                    # Index: 5 mcp, 6 pip, 7 dip, 8 tip
-                    # Middle: 9 mcp, 10 pip, 11 dip, 12 tip
-                    # Ring: 13 mcp, 14 pip, 15 dip, 16 tip
-                    # Pinky: 17 mcp, 18 pip, 19 dip, 20 tip
-                    # (cmc=base, mcp=knuckle, pip=mid, dip=distal, ip=thumb joint, tip=fingertip)
-                    ml.utils.draw_skeleton(img, keypoints, hand_lines, kp_color=(255, 0, 0), line_color=(0, 255, 0))
+                # keypoints: ndarray (21, 3) of hand joints (x, y, z)
+                # Indices follow MediaPipe convention:
+                # 0: wrist
+                # Thumb: 1 cmc, 2 mcp, 3 ip, 4 tip
+                # Index: 5 mcp, 6 pip, 7 dip, 8 tip
+                # Middle: 9 mcp, 10 pip, 11 dip, 12 tip
+                # Ring: 13 mcp, 14 pip, 15 dip, 16 tip
+                # Pinky: 17 mcp, 18 pip, 19 dip, 20 tip
+                # (cmc=base, mcp=knuckle, pip=mid, dip=distal, ip=thumb joint, tip=fingertip)
+                ml.utils.draw_skeleton(img, keypoints, hand_lines, kp_color=(255, 0, 0), line_color=(0, 255, 0))
 
     print(clock.fps(), "fps")
diff --git a/scripts/examples/03-Machine-Learning/00-TensorFlow/hand_landmarks_single_hand.py b/scripts/examples/03-Machine-Learning/00-TensorFlow/hand_landmarks_single_hand.py
index 261f358b0..06e3dec3c 100644
--- a/scripts/examples/03-Machine-Learning/00-TensorFlow/hand_landmarks_single_hand.py
+++ b/scripts/examples/03-Machine-Learning/00-TensorFlow/hand_landmarks_single_hand.py
@@ -43,14 +43,11 @@ while True:
 
     if n is None:
         # palms is a list of ((x, y, w, h), score, keypoints) tuples
-        palms = palm_detection.predict([img])
-
-        if palms:
-            for r, score, keypoints in palms[0]:
-                # rect is (x, y, w, h) - enlarge by 3x for hand landmarks model
-                wider_rect = (r[0] - r[2], r[1] - r[3], r[2] * 3, r[3] * 3)
-                # Operate on just the ROI of the detected palm
-                n = Normalization(roi=wider_rect)
+        for r, score, keypoints in palm_detection.predict([img]):
+            # rect is (x, y, w, h) - enlarge by 3x for hand landmarks model
+            wider_rect = (r[0] - r[2], r[1] - r[3], r[2] * 3, r[3] * 3)
+            # Operate on just the ROI of the detected palm
+            n = Normalization(roi=wider_rect)
 
     else:
         # hands is a list of ((x, y, w, h), score, keypoints) tuples
diff --git a/scripts/libraries/ml/ml-mediapipe/ml/postprocessing/mediapipe.py b/scripts/libraries/ml/ml-mediapipe/ml/postprocessing/mediapipe.py
index a547079ef..d6fc433d6 100644
--- a/scripts/libraries/ml/ml-mediapipe/ml/postprocessing/mediapipe.py
+++ b/scripts/libraries/ml/ml-mediapipe/ml/postprocessing/mediapipe.py
@@ -78,7 +78,7 @@ class mediapipe_detection_postprocess:
             self.detection_post_process(ih, iw, nms, model, inputs, outputs, self.scores[1],
                                         self.cords[1], self.threshold, self.anchors[output_len:])
 
-        return nms.get_bounding_boxes(threshold=self.nms_threshold, sigma=self.nms_sigma)
+        return nms.get_bounding_boxes(threshold=self.nms_threshold, sigma=self.nms_sigma)[0]
 
     def detection_post_process(self, ih, iw, nms, model, inputs, outputs, score_idx, cords_idx, t, anchors):
         s_oh, s_ow, s_oc = model.output_shape[score_idx]
@@ -173,3 +173,35 @@ class HandLandmarks:
         nms.add_bounding_box(xmin, ymin, xmax, ymax, score, left_right, keypoints=keypoints)
 
         return nms.get_bounding_boxes(threshold=self.nms_threshold, sigma=self.nms_sigma)
+
+
+class FaceLandmarks:
+    def __init__(self, threshold=0.6, nms_threshold=0.1, nms_sigma=0.1):
+        self.threshold = threshold
+        self.nms_threshold = nms_threshold
+        self.nms_sigma = nms_sigma
+
+    def __call__(self, model, inputs, outputs):
+        ib, ih, iw, ic = model.input_shape[0]
+        nms = NMS(iw, ih, inputs[0].roi)
+
+        score = sigmoid(outputs[1][0, 0, 0, 0])
+        if score < self.threshold:
+            return _NO_DETECTION
+
+        cords = outputs[0][0, 0, 0, :]
+
+        # Get the keypoint information
+        keypoints = np.empty((len(cords) // 3, 3))
+        keypoints[:, 0] = cords[0::3]
+        keypoints[:, 1] = cords[1::3]
+        keypoints[:, 2] = cords[2::3]
+
+        # Get bounding box information
+        xmin = np.min(keypoints[:, 0])
+        ymin = np.min(keypoints[:, 1])
+        xmax = np.max(keypoints[:, 0])
+        ymax = np.max(keypoints[:, 1])
+
+        nms.add_bounding_box(xmin, ymin, xmax, ymax, score, 0, keypoints=keypoints)
+        return nms.get_bounding_boxes(threshold=self.nms_threshold, sigma=self.nms_sigma)[0]