Merge b591b39ab9 into 7a46d0c82e
commit 334b0e97d2
@@ -38,6 +38,12 @@
     "alignment": 16,
     "optimize": "Performance"
 },
+{
+    "type": "tflite",
+    "path": "{TOP}/lib/models/yolov8n_192.tflite",
+    "alignment": 16,
+    "optimize": "Performance"
+},
 {
     "type": "tflite",
     "path": "{TOP}/lib/models/blazeface_front_128.tflite",
@@ -26,6 +26,12 @@
     "alignment": 32,
     "profile": "default"
 },
+{
+    "type": "tflite",
+    "path": "{TOP}/lib/models/yolov8n_192.tflite",
+    "alignment": 32,
+    "profile": "default"
+},
 {
     "type": "tflite",
     "path": "{TOP}/lib/models/blazeface_front_128.tflite",
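Both board manifests register the new model in the same way; only the board-specific fields differ (16- vs 32-byte alignment, and an "optimize" vs "profile" hint). Once the ROM FS image is rebuilt and flashed, the file should be reachable from MicroPython. A minimal on-device sanity check, assuming the ROM FS is mounted at /rom as the example script's model path suggests:

# Hypothetical sanity check: list the ROM FS to confirm the model was baked in.
# Assumes the ROM FS is mounted at /rom (matching the example's model path).
import os

print(os.listdir("/rom"))  # expect 'yolov8n_192.tflite' among the entries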
BIN  lib/models/yolov8n_192.tflite  (new file)
Binary file not shown.
lib/models/yolov8n_192.txt  (new file, 1 line)
@@ -0,0 +1 @@
+person
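The companion yolov8n_192.txt carries the model's label list, one class name per line ("person" only for this single-class build). A sketch of how that surfaces in the scripting API, assuming ml.Model pairs a .tflite with the .txt of the same name and exposes it as model.labels:

# Sketch only: assumes model.labels is populated from the companion .txt file.
import ml

model = ml.Model("/rom/yolov8n_192.tflite")
print(model.labels)  # expected: ['person'] for this single-class model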
@@ -0,0 +1,46 @@
+# This work is licensed under the MIT license.
+# Copyright (c) 2013-2025 OpenMV LLC. All rights reserved.
+# https://github.com/openmv/openmv/blob/master/LICENSE
+#
+# TensorFlow Lite YOLO V8 Example
+#
+# This example runs a YOLO V8 person detection model.
+#
+# NOTE: This example requires an OpenMV Cam with an NPU like the AE3 or N6 to run real-time.
+
+import csi
+import time
+import ml
+from ml.postprocessing.ultralytics import YoloV8
+
+# Initialize the sensor.
+csi0 = csi.CSI()
+csi0.reset()
+csi0.pixformat(csi.RGB565)
+csi0.framesize(csi.VGA)
+csi0.window((400, 400))
+
+# Load YOLO V8 model from ROM FS.
+model = ml.Model("/rom/yolov8n_192.tflite", postprocess=YoloV8(threshold=0.4))
+print(model)
+
+# Visualization parameters.
+n = len(model.labels)
+model_class_colors = [(int(255 * i // n), int(255 * (n - i - 1) // n), 255) for i in range(n)]
+
+clock = time.clock()
+while True:
+    clock.tick()
+    img = csi0.snapshot()
+
+    # boxes is a list of lists per class of ((x, y, w, h), score) tuples
+    boxes = model.predict([img])
+
+    # Draw bounding boxes around the detected objects
+    for i, class_detections in enumerate(boxes):
+        rects = [r for r, score in class_detections]
+        labels = [model.labels[i] for j in range(len(rects))]
+        colors = [model_class_colors[i] for j in range(len(rects))]
+        ml.utils.draw_predictions(img, rects, labels, colors, format=None)
+
+    print(clock.fps(), "fps")
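The structure returned by model.predict([img]) (per the comment in the example: one list per class of ((x, y, w, h), score) tuples) can also be consumed directly instead of going through ml.utils.draw_predictions. A minimal sketch that reports the best detection per class, assuming that output shape:

# Sketch: walk the per-class detection lists and report the highest-scoring box.
# Assumes boxes[i] is a list of ((x, y, w, h), score) tuples, as the example's comment states.
for i, class_detections in enumerate(boxes):
    if not class_detections:
        continue
    (x, y, w, h), score = max(class_detections, key=lambda d: d[1])
    print("%s: %.2f at (%d, %d, %d, %d)" % (model.labels[i], score, x, y, w, h))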