Mirror of https://github.com/openmv/openmv.git (synced 2025-11-04 14:49:50 +08:00)

Compare commits: 5c28d513cd ... ef3341add2

10 commits:

- ef3341add2
- d038dc7ab8
- 7a46d0c82e
- b897ec4f16
- ce18e680b2
- 74faef3a8e
- 781a7bf86f
- d2d1a9448f
- c4b0b5a3dc
- b214c54c07

@@ -14,12 +14,6 @@
     "alignment": 16,
     "optimize": "Performance"
 },
-{
-    "type": "tflite",
-    "path": "{TOP}/lib/models/yolo_v5_224_nano.tflite",
-    "alignment": 16,
-    "optimize": "Performance"
-},
 {
     "type": "tflite",
     "path": "{TOP}/lib/models/force_int_quant.tflite",

@@ -14,12 +14,6 @@
     "alignment": 16,
     "optimize": "Performance"
 },
-{
-    "type": "tflite",
-    "path": "{TOP}/lib/models/yolo_v5_224_nano.tflite",
-    "alignment": 16,
-    "optimize": "Performance"
-},
 {
     "type": "tflite",
     "path": "{TOP}/lib/models/force_int_quant.tflite",

@@ -14,12 +14,6 @@
     "alignment": 16,
     "optimize": "Performance"
 },
-{
-    "type": "tflite",
-    "path": "{TOP}/lib/models/yolo_v5_224_nano.tflite",
-    "alignment": 16,
-    "optimize": "Performance"
-},
 {
     "type": "tflite",
     "path": "{TOP}/lib/models/force_int_quant.tflite",

@@ -14,12 +14,6 @@
     "alignment": 16,
     "optimize": "Performance"
 },
-{
-    "type": "tflite",
-    "path": "{TOP}/lib/models/yolo_v5_224_nano.tflite",
-    "alignment": 16,
-    "optimize": "Performance"
-},
 {
     "type": "tflite",
     "path": "{TOP}/lib/models/force_int_quant.tflite",

@@ -26,12 +26,6 @@
     "alignment": 16,
     "optimize": "Performance"
 },
-{
-    "type": "tflite",
-    "path": "{TOP}/lib/models/yolo_v2_224_small.tflite",
-    "alignment": 16,
-    "optimize": "Performance"
-},
 {
     "type": "tflite",
     "path": "{TOP}/lib/models/force_int_quant.tflite",

@@ -14,18 +14,6 @@
     "alignment": 32,
     "profile": "default"
 },
-{
-    "type": "tflite",
-    "path": "{TOP}/lib/models/yolo_v2_224_small.tflite",
-    "alignment": 32,
-    "profile": "default"
-},
-{
-    "type": "tflite",
-    "path": "{TOP}/lib/models/yolo_v5_224_nano.tflite",
-    "alignment": 32,
-    "profile": "default"
-},
 {
     "type": "tflite",
     "path": "{TOP}/lib/models/blazeface_front_128.tflite",

@@ -14,12 +14,6 @@
     "alignment": 16,
     "optimize": "Performance"
 },
-{
-    "type": "tflite",
-    "path": "{TOP}/lib/models/yolo_v5_224_nano.tflite",
-    "alignment": 16,
-    "optimize": "Performance"
-},
 {
     "type": "tflite",
     "path": "{TOP}/lib/models/force_int_quant.tflite",

@@ -29,10 +29,8 @@ ENV MAKE_URL="https://ftp.gnu.org/gnu/make/make-4.4.1.tar.gz"
 RUN wget --no-check-certificate --user-agent="Mozilla/5.0" -O - ${MAKE_URL} | tar --strip-components=1 -xz -C /workspace/make
 RUN cd /workspace/make && ./configure && make -j$(nproc)
 
-# Set up directories
-WORKDIR /workspace
-
-COPY . .
+# Permissive git permissions inside container
+ENV HOME=/tmp
 
 RUN git config --global --add safe.directory '*'
 
 ENV PATH="/workspace/gcc/bin:/workspace/llvm/bin:/workspace/cmake/bin:/workspace/make:$PATH"

@@ -3,6 +3,27 @@ DOCKER_TAG = latest
 CONTAINER_NAME = firmware-container
 DOCKERFILE_PATH = Dockerfile
 
+# Detect repository structure
+REPO_ROOT := $(shell git rev-parse --show-toplevel)
+GIT_DIR := $(shell realpath $$(git rev-parse --git-dir))
+GIT_COMMON_DIR := $(shell realpath $$(git rev-parse --git-common-dir))
+
+# Detect worktree: git-dir != git-common-dir
+IS_WORKTREE := $(shell [ "$(GIT_DIR)" != "$(GIT_COMMON_DIR)" ] && echo "yes" || echo "no")
+
+# Working directory inside container
+WORKDIR = $(REPO_ROOT)
+
+# Volume mounts: always mount repo at actual path
+ifeq ($(IS_WORKTREE),yes)
+# For worktrees, also mount the main repo (for .git references)
+MAIN_REPO_PATH := $(shell dirname $(GIT_COMMON_DIR))
+VOLUME_MOUNTS = -v $(MAIN_REPO_PATH)/.git:$(MAIN_REPO_PATH)/.git \
+                -v $(REPO_ROOT):$(WORKDIR)
+else
+VOLUME_MOUNTS = -v $(REPO_ROOT):$(WORKDIR)
+endif
+
 # Build the Docker image
 build-image:
 	docker build \

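The worktree detection added above relies on one git behavior: in a linked worktree, `git rev-parse --git-dir` resolves to the worktree's private directory under `<main repo>/.git/worktrees/<name>`, while `git rev-parse --git-common-dir` resolves to the shared `<main repo>/.git`; in an ordinary checkout both resolve to the same path. Below is a minimal Python sketch of the same check, illustrative only and not part of this changeset (the helper names are made up):

# Sketch of the worktree check the Makefile performs with $(shell ...) (illustrative only).
import os
import subprocess

def git_path(flag):
    # Ask git for a path and normalize it, mirroring `realpath $$(git rev-parse ...)`.
    out = subprocess.check_output(["git", "rev-parse", flag], text=True).strip()
    return os.path.realpath(out)

def is_worktree():
    # Linked worktrees have a private git-dir that differs from the shared common dir.
    return git_path("--git-dir") != git_path("--git-common-dir")

print("IS_WORKTREE:", "yes" if is_worktree() else "no")

The `debug-paths` target added further down in this Makefile prints the same set of variables from inside make, which is the easier way to verify the detection on a real checkout.
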
@@ -15,7 +36,8 @@ build-firmware: build-image
 	-e TARGET=$(TARGET) \
 	-e HOST_UID=$(shell id -u) \
 	-e HOST_GID=$(shell id -g) \
-	-v $(PWD)/build:/workspace/build \
+	-w $(WORKDIR) \
+	$(VOLUME_MOUNTS) \
 	--name $(CONTAINER_NAME) \
 	$(DOCKER_IMAGE_NAME):$(DOCKER_TAG) docker/build.sh
 
@@ -23,7 +45,18 @@ build-firmware: build-image
 shell:
 	docker run --rm -it \
 	-e shell="true" \
-	-v $(PWD)/build:/workspace/build \
+	-w $(WORKDIR) \
+	$(VOLUME_MOUNTS) \
 	$(DOCKER_IMAGE_NAME):$(DOCKER_TAG) bash
 
+# Debug target to show detected paths
+debug-paths:
+	@echo "IS_WORKTREE: $(IS_WORKTREE)"
+	@echo "REPO_ROOT: $(REPO_ROOT)"
+	@echo "GIT_DIR: $(GIT_DIR)"
+	@echo "GIT_COMMON_DIR: $(GIT_COMMON_DIR)"
+	@echo "MAIN_REPO_PATH: $(MAIN_REPO_PATH)"
+	@echo "WORKDIR: $(WORKDIR)"
+	@echo "VOLUME_MOUNTS: $(VOLUME_MOUNTS)"
+
 .DEFAULT_GOAL := build-firmware

@@ -1,7 +1,8 @@
 #!/bin/bash
 set -e -x
 
-BUILD_DIR=/workspace/build/${TARGET}
+OPENMV="$(pwd)"
+BUILD_DIR=${OPENMV}/build/${TARGET}
 
 # Update submodules.
 git submodule update --init --depth=1

@@ -13,4 +14,4 @@ make -j$(nproc) -C lib/micropython/mpy-cross
 make -j$(nproc) BUILD=${BUILD_DIR} TARGET=${TARGET} LLVM_PATH=/workspace/llvm/bin
 
 # Fix permissions.
-chown -R ${HOST_UID:-1000}:${HOST_GID:-1000} /workspace/build
+chown -R ${HOST_UID:-1000}:${HOST_GID:-1000} ${OPENMV}/build

Binary file not shown.

@@ -1,2 +0,0 @@
-background
-person

Binary file not shown.

@@ -1,2 +0,0 @@
-background
-person

@@ -2,12 +2,12 @@
 # Copyright (c) 2013-2025 OpenMV LLC. All rights reserved.
 # https://github.com/openmv/openmv/blob/master/LICENSE
 #
-# This example shows off Google's MediaPipe BlazeFace face detection model.
+# This example shows off Google's MediaPipe Face Detection model.
 
 import csi
 import time
 import ml
-from ml.postprocessing import mediapipe_face_detection_postprocess
+from ml.postprocessing.mediapipe import BlazeFace
 
 # Initialize the sensor.
 csi0 = csi.CSI()

@@ -17,25 +17,22 @@ csi0.framesize(csi.VGA)
 csi0.window((400, 400))
 
 # Load built-in face detection model
-model = ml.Model("/rom/blazeface_front_128.tflite")
+model = ml.Model("/rom/blazeface_front_128.tflite", postprocess=BlazeFace(threshold=0.4))
 print(model)
 
-# Create the face detection post-processor. This post-processor dynamically
-# generates anchors for the model input size which should only be done once.
-face_detection_postprocess = mediapipe_face_detection_postprocess(threshold=0.6)
-
 clock = time.clock()
 while True:
     clock.tick()
     img = csi0.snapshot()
 
     # faces is a list of ((x, y, w, h), score, keypoints) tuples
-    faces = model.predict([img], callback=face_detection_postprocess)
+    faces = model.predict([img])
 
     # Draw bounding boxes around the detected faces and keypoints.
     if faces:
         for r, score, keypoints in faces[0]:
-            ml.utils.draw_predictions(img, [r], ["face"], [(0, 0, 255)], format=None)
+            ml.utils.draw_predictions(img, [r], ("face",), ((0, 0, 255),), format=None)
 
             # keypoints is a ndarray of shape (6, 2)
             # 0 - right eye (x, y)
             # 1 - left eye (x, y)

@@ -43,7 +40,6 @@ while True:
             # 3 - mouth (x, y)
             # 4 - right ear (x, y)
             # 5 - left ear (x, y)
-            for kp in keypoints.tolist():
-                img.draw_circle(int(kp[0]), int(kp[1]), 4, color=(255, 0, 0))
+            ml.utils.draw_keypoints(img, keypoints, color=(255, 0, 0))
 
     print(clock.fps(), "fps")

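Taken together, the hunks above migrate the example from a predict()-time callback to a post-processor object passed to the ml.Model constructor. Below is a consolidated sketch of the updated script, assembled only from lines visible in this diff (the csi setup lines mirror the new YOLO examples later in this changeset and are an assumption here):

# Consolidated sketch of the updated face-detection example (assembled from the hunks above).
import csi
import time
import ml
from ml.postprocessing.mediapipe import BlazeFace

csi0 = csi.CSI()
csi0.reset()
csi0.pixformat(csi.RGB565)
csi0.framesize(csi.VGA)
csi0.window((400, 400))

# The post-processor is now created once and attached to the model,
# instead of being passed as a callback to every predict() call.
model = ml.Model("/rom/blazeface_front_128.tflite", postprocess=BlazeFace(threshold=0.4))
print(model)

clock = time.clock()
while True:
    clock.tick()
    img = csi0.snapshot()

    # faces is a list of ((x, y, w, h), score, keypoints) tuples.
    faces = model.predict([img])

    if faces:
        for r, score, keypoints in faces[0]:
            ml.utils.draw_predictions(img, [r], ("face",), ((0, 0, 255),), format=None)
            ml.utils.draw_keypoints(img, keypoints, color=(255, 0, 0))

    print(clock.fps(), "fps")
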
@@ -25,12 +25,6 @@ print(model)
 # Line connections between hand joints for drawing the hand skeleton.
 palm_lines = ((0, 1), (1, 2), (2, 3), (3, 4), (4, 0), (0, 5), (5, 6))
 
-# Visualization parameters.
-palm_labels = ["palm"]
-palm_colors = [(0, 0, 255)]
-kp_color = (255, 0, 0)
-line_color = (0, 255, 0)
-
 clock = time.clock()
 while True:
     clock.tick()

@@ -42,7 +36,7 @@ while True:
     # Draw bounding boxes around the detected palms and keypoints.
     if palms:
         for r, score, keypoints in palms[0]:
-            ml.utils.draw_predictions(img, [r], palm_labels, palm_colors, format=None)
+            ml.utils.draw_predictions(img, [r], ("palm",), ((0, 0, 255),), format=None)
 
             # keypoints is a ndarray of shape (7, 2)
             # 0 - wrist (x, y)

@@ -55,6 +49,6 @@ while True:
             #
             # mcp = Metacarpophalangeal Joint - the knuckle
             # cmc = Carpometacarpal Joint - the base of the thumb
-            ml.utils.draw_skeleton(img, keypoints, palm_lines, kp_color=kp_color, line_color=line_color)
+            ml.utils.draw_skeleton(img, keypoints, palm_lines, kp_color=(255, 0, 0), line_color=(0, 255, 0))
 
     print(clock.fps(), "fps")

@@ -33,11 +33,6 @@ hand_lines = ((0, 1), (1, 2), (2, 3), (3, 4), (0, 5), (5, 6), (6, 7), (7, 8),
               (5, 9), (9, 10), (10, 11), (11, 12), (9, 13), (13, 14), (14, 15), (15, 16),
               (13, 17), (17, 18), (18, 19), (19, 20), (0, 17))
 
-# Visualization parameters.
-palm_colors = [(0, 0, 255)]
-kp_color = (255, 0, 0)
-line_color = (0, 255, 0)
-
 clock = time.clock()
 while True:
     clock.tick()

@@ -61,7 +56,7 @@ while True:
     # Draw bounding boxes around the detected hands and keypoints.
     for i, detections in enumerate(hands):
         for r, score, keypoints in detections:
-            ml.utils.draw_predictions(img, [r], ["right" if i else "left"], palm_colors, format=None)
+            ml.utils.draw_predictions(img, [r], ("right",) if i else ("left",), ((0, 0, 255),), format=None)
 
             # keypoints: ndarray (21, 3) of hand joints (x, y, z)
             # Indices follow MediaPipe convention:

@@ -72,6 +67,6 @@ while True:
             # Ring: 13 mcp, 14 pip, 15 dip, 16 tip
             # Pinky: 17 mcp, 18 pip, 19 dip, 20 tip
             # (cmc=base, mcp=knuckle, pip=mid, dip=distal, ip=thumb joint, tip=fingertip)
-            ml.utils.draw_skeleton(img, keypoints, hand_lines, kp_color=kp_color, line_color=line_color)
+            ml.utils.draw_skeleton(img, keypoints, hand_lines, kp_color=(255, 0, 0), line_color=(0, 255, 0))
 
     print(clock.fps(), "fps")

@@ -33,11 +33,6 @@ hand_lines = ((0, 1), (1, 2), (2, 3), (3, 4), (0, 5), (5, 6), (6, 7), (7, 8),
               (5, 9), (9, 10), (10, 11), (11, 12), (9, 13), (13, 14), (14, 15), (15, 16),
               (13, 17), (17, 18), (18, 19), (19, 20), (0, 17))
 
-# Visualization parameters.
-palm_colors = [(0, 0, 255)]
-kp_color = (255, 0, 0)
-line_color = (0, 255, 0)
-
 # Tracking vars.
 n = None
 

@@ -71,7 +66,7 @@ while True:
     # Draw bounding boxes around the detected hands and keypoints.
     for i, detections in enumerate(hands):
         for r, score, keypoints in detections:
-            ml.utils.draw_predictions(img, [r], ["right" if i else "left"], [(0, 0, 255)], format=None)
+            ml.utils.draw_predictions(img, [r], ("right",) if i else ("left",), ((0, 0, 255),), format=None)
 
             # keypoints: ndarray (21, 3) of hand joints (x, y, z)
             # Indices follow MediaPipe convention:

@@ -82,7 +77,7 @@ while True:
             # Ring: 13 mcp, 14 pip, 15 dip, 16 tip
             # Pinky: 17 mcp, 18 pip, 19 dip, 20 tip
             # (cmc=base, mcp=knuckle, pip=mid, dip=distal, ip=thumb joint, tip=fingertip)
-            ml.utils.draw_skeleton(img, keypoints, hand_lines, kp_color=kp_color, line_color=line_color)
+            ml.utils.draw_skeleton(img, keypoints, hand_lines, kp_color=(255, 0, 0), line_color=(0, 255, 0))
 
             # Center new_wider_rect on hand for tracking
             new_wider_rect = (r[0] + (r[2] // 2) - (wider_rect[2] // 2),

@@ -9,7 +9,7 @@
 import sensor
 import time
 import ml
-from ml.postprocessing import fomo_postprocess
+from ml.postprocessing.edgeimpulse import Fomo
 import math
 
 sensor.reset()  # Reset and initialize the sensor.

@@ -19,7 +19,7 @@ sensor.set_windowing((240, 240))  # Set 240x240 window.
 sensor.skip_frames(time=2000)  # Let the camera adjust.
 
 # Load built-in FOMO face detection model
-model = ml.Model("/rom/fomo_face_detection.tflite")
+model = ml.Model("/rom/fomo_face_detection.tflite", postprocess=Fomo(threshold=0.4))
 print(model)
 
 # Alternatively, models can be loaded from the filesystem storage.

@@ -39,10 +39,9 @@ colors = [  # Add more colors if you are detecting more than 7 types of classes
 clock = time.clock()
 while True:
     clock.tick()
 
     img = sensor.snapshot()
 
-    for i, detection_list in enumerate(model.predict([img], callback=fomo_postprocess())):
+    for i, detection_list in enumerate(model.predict([img])):
         if i == 0:
             continue  # background class
         if len(detection_list) == 0:

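This is the same callback-to-constructor migration as in the face-detection example above, applied to a multi-class FOMO model whose predict() output is indexed per class, with class 0 reserved for the background. Below is a minimal sketch of the updated loop, assembled from the visible hunks; the pixformat/framesize lines are not shown in this diff and are an assumption:

# Minimal sketch of the updated FOMO inference loop (assembled from the hunks above).
import sensor
import time
import ml
from ml.postprocessing.edgeimpulse import Fomo

sensor.reset()
sensor.set_pixformat(sensor.RGB565)  # assumed unchanged; not shown in the diff
sensor.set_framesize(sensor.QVGA)    # assumed unchanged; not shown in the diff
sensor.set_windowing((240, 240))
sensor.skip_frames(time=2000)

# Post-processing now lives on the model instead of being passed to predict().
model = ml.Model("/rom/fomo_face_detection.tflite", postprocess=Fomo(threshold=0.4))

clock = time.clock()
while True:
    clock.tick()
    img = sensor.snapshot()

    # predict() returns one detection list per class; index 0 is the background.
    for i, detection_list in enumerate(model.predict([img])):
        if i == 0 or len(detection_list) == 0:
            continue
        # ...draw the detections for class i exactly as the original example does...

    print(clock.fps(), "fps")
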
@@ -0,0 +1,50 @@
+# This work is licensed under the MIT license.
+# Copyright (c) 2013-2025 OpenMV LLC. All rights reserved.
+# https://github.com/openmv/openmv/blob/master/LICENSE
+#
+# TensorFlow Lite YOLO V2 Example
+#
+# This example runs a YOLO V2 object detection model.
+# Please see OpenMV IDE's model zoo for example YOLO V2 models.
+#
+# For more information on YOLO V2, please see:
+# https://github.com/STMicroelectronics/stm32ai-modelzoo/tree/main/object_detection/tiny_yolo_v2
+#
+# NOTE: This example requires an OpenMV Cam with an NPU like the AE3 or N6 to run real-time.
+
+import csi
+import time
+import ml
+from ml.postprocessing.darknet import YoloV2
+
+# Initialize the sensor.
+csi0 = csi.CSI()
+csi0.reset()
+csi0.pixformat(csi.RGB565)
+csi0.framesize(csi.VGA)
+csi0.window((400, 400))
+
+# Load YOLO V2 model from ROM FS.
+model = ml.Model("/rom/<model_file_name>", postprocess=YoloV2(threshold=0.4))
+print(model)
+
+# Visualization parameters.
+n = len(model.labels)
+model_class_colors = [(int(255 * i // n), int(255 * (n - i - 1) // n), 255) for i in range(n)]
+
+clock = time.clock()
+while True:
+    clock.tick()
+    img = csi0.snapshot()
+
+    # boxes is a list of lists, one per class, of ((x, y, w, h), score) tuples
+    boxes = model.predict([img])
+
+    # Draw bounding boxes around the detected objects
+    for i, class_detections in enumerate(boxes):
+        rects = [r for r, score in class_detections]
+        labels = [model.labels[i] for j in range(len(rects))]
+        colors = [model_class_colors[i] for j in range(len(rects))]
+        ml.utils.draw_predictions(img, rects, labels, colors, format=None)
+
+    print(clock.fps(), "fps")

@@ -0,0 +1,50 @@
+# This work is licensed under the MIT license.
+# Copyright (c) 2013-2025 OpenMV LLC. All rights reserved.
+# https://github.com/openmv/openmv/blob/master/LICENSE
+#
+# TensorFlow Lite YOLO V5 Example
+#
+# This example runs a YOLO V5 object detection model.
+# Please see OpenMV IDE's model zoo for example YOLO V5 models.
+#
+# You can train your own custom YOLOV5 models using Edge Impulse:
+# https://github.com/edgeimpulse/ml-block-yolov5
+#
+# NOTE: This example requires an OpenMV Cam with an NPU like the AE3 or N6 to run real-time.
+
+import csi
+import time
+import ml
+from ml.postprocessing.ultralytics import YoloV5
+
+# Initialize the sensor.
+csi0 = csi.CSI()
+csi0.reset()
+csi0.pixformat(csi.RGB565)
+csi0.framesize(csi.VGA)
+csi0.window((400, 400))
+
+# Load YOLO V5 model from ROM FS.
+model = ml.Model("/rom/<model_file_name>", postprocess=YoloV5(threshold=0.4))
+print(model)
+
+# Visualization parameters.
+n = len(model.labels)
+model_class_colors = [(int(255 * i // n), int(255 * (n - i - 1) // n), 255) for i in range(n)]
+
+clock = time.clock()
+while True:
+    clock.tick()
+    img = csi0.snapshot()
+
+    # boxes is a list of lists, one per class, of ((x, y, w, h), score) tuples
+    boxes = model.predict([img])
+
+    # Draw bounding boxes around the detected objects
+    for i, class_detections in enumerate(boxes):
+        rects = [r for r, score in class_detections]
+        labels = [model.labels[i] for j in range(len(rects))]
+        colors = [model_class_colors[i] for j in range(len(rects))]
+        ml.utils.draw_predictions(img, rects, labels, colors, format=None)
+
+    print(clock.fps(), "fps")

@@ -47,7 +47,6 @@ class YoloV2:
     def __init__(self, threshold=0.6, anchors=None, nms_threshold=0.1, nms_sigma=0.1):
         self.threshold = threshold
         self.anchors = anchors
-        self.anchors_len = len(self.anchors)
         self.nms_threshold = nms_threshold
         self.nms_sigma = nms_sigma
 

@@ -58,6 +57,8 @@ class YoloV2:
                                      [5.55170, 9.30660],
                                      [9.72600, 11.1422]])
 
+        self.anchors_len = len(self.anchors)
+
     def __call__(self, model, inputs, outputs):
 
         def softmax(x):

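The reason for the move: `anchors` defaults to None, and the default anchor table (whose last rows appear as context in the second hunk) is only assigned later in `__init__`, so computing `len(self.anchors)` right after `self.anchors = anchors` raised a TypeError whenever the caller did not supply anchors. A generic sketch of the ordering bug and its fix follows (illustrative classes, not the repo's code):

# Generic sketch of the ordering bug fixed above (illustrative only).
class Broken:
    def __init__(self, anchors=None):
        self.anchors = anchors
        self.anchors_len = len(self.anchors)  # TypeError when anchors is None
        if self.anchors is None:
            self.anchors = [[1.08, 1.19], [3.42, 4.41]]  # default table assigned too late


class Fixed:
    def __init__(self, anchors=None):
        self.anchors = anchors
        if self.anchors is None:
            self.anchors = [[1.08, 1.19], [3.42, 4.41]]
        self.anchors_len = len(self.anchors)  # computed after the default is in place


Fixed()   # works with or without explicit anchors
Broken()  # raises TypeError: object of type 'NoneType' has no len()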