Mirror of https://github.com/openmv/openmv.git (synced 2025-11-04 14:49:50 +08:00)
scripts/examples: Update examples.
commit 8ee4a28327 (parent 40710a3573)
@@ -4,23 +4,15 @@
 #
 # TensorFlow Lite Mobilenet V1 Example
 #
-# Google's Mobilenet V1 detects 1000 classes of objects
+# Google's Mobilenet is trained to detect 1000 classes of objects.
 #
-# WARNING: Mobilenet is trained on ImageNet and isn't meant to classify anything
-# in the real world. It's just designed to score well on the ImageNet dataset.
-# This example just shows off running mobilenet on the OpenMV Cam. However, the
-# default model is not really usable for anything. You have to use transfer
-# learning to apply the model to a target problem by re-training the model.
-#
-# NOTE: This example only works on the OpenMV Cam H7 Plus (that has SDRAM) and better!
-# To get the models please see the CNN Network library in OpenMV IDE under
-# Tools -> Machine Vision. The labels are there too.
-# You should insert a microSD card into your camera and copy-paste the mobilenet_labels.txt
-# file and your chosen model into the root folder for this script to work.
+# NOTE: This example only works on boards that have enough memory to load the model.
+# To get the models, please see the CNN Network library in OpenMV IDE under Tools->
+# Machine Vision. The labels file (mobilenet_labels.txt) is included there as well,
+# and it should be copied to the root of the filesystem for this script to work.
 
 import sensor
 import time
-import tf
+import ml
 
 sensor.reset()  # Reset and initialize the sensor.
 sensor.set_pixformat(sensor.RGB565)  # Set pixel format to RGB565 (or GRAYSCALE)
@@ -38,7 +30,7 @@ mobilenet = "mobilenet_v%s_%s_%s_quant.tflite" % (
     mobilenet_resolution,
 )
 
-net = tf.Model(mobilenet, load_to_fb=True)
+model = ml.Model(mobilenet, load_to_fb=True)
 labels = [line.rstrip("\n") for line in open("mobilenet_labels.txt")]
 
 clock = time.clock()
@@ -51,7 +43,7 @@ while True:
     # This combines the labels and confidence values into a list of tuples
     # and then sorts that list by the confidence values.
     sorted_list = sorted(
-        zip(labels, net.predict(img)), key=lambda x: x[1], reverse=True
+        zip(labels, model.predict(img)[0]), key=lambda x: x[1], reverse=True
     )
     for i in range(5):
         print("%s = %f" % (sorted_list[i][0], sorted_list[i][1]))
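Taken together, the three hunks above migrate the classification example from the old tf module to the new ml module. Below is a minimal sketch of the updated flow assembled only from lines shown in this diff; the concrete model filename, the QVGA frame size, and the skip_frames/fps calls are illustrative assumptions rather than lines taken from the file.

import sensor
import time
import ml

sensor.reset()                            # Reset and initialize the sensor.
sensor.set_pixformat(sensor.RGB565)       # RGB565 (or GRAYSCALE), as in the example.
sensor.set_framesize(sensor.QVGA)         # Assumed frame size; not shown in this diff.
sensor.skip_frames(time=2000)             # Let the camera settle.

# Hypothetical concrete filename; the example builds it from a version/width/resolution template.
model = ml.Model("mobilenet_v1_0.25_128_quant.tflite", load_to_fb=True)
labels = [line.rstrip("\n") for line in open("mobilenet_labels.txt")]

clock = time.clock()
while True:
    clock.tick()
    img = sensor.snapshot()
    # predict() returns a list of output tensors; index 0 holds the per-class scores.
    sorted_list = sorted(
        zip(labels, model.predict(img)[0]), key=lambda x: x[1], reverse=True
    )
    for label, score in sorted_list[:5]:
        print("%s = %f" % (label, score))
    print(clock.fps(), "fps")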
@@ -8,7 +8,7 @@
 
 import sensor
 import time
-import tf
+import ml
 import math
 import image
 
@@ -21,10 +21,10 @@ min_confidence = 0.4
 threshold_list = [(math.ceil(min_confidence * 255), 255)]
 
 # Load built-in FOMO face detection model
-labels, net = tf.Model("fomo_face_detection")
+labels, model = ml.Model("fomo_face_detection")
 
 # Alternatively, models can be loaded from the filesystem storage.
-# net = tf.Model('<object_detection_network>', load_to_fb=True)
+# model = ml.Model('<object_detection_model>.tflite', load_to_fb=True)
 # labels = [line.rstrip('\n') for line in open("labels.txt")]
 
 colors = [  # Add more colors if you are detecting more than 7 types of classes at once.
@@ -37,6 +37,7 @@ colors = [  # Add more colors if you are detecting more than 7 types of classes
     (255, 255, 255),
 ]
 
+
 # FOMO outputs an image per class where each pixel in the image is the centroid of the trained
 # object. So, we will get those output images and then run find_blobs() on them to extract the
 # centroids. We will also run get_stats() on the detected blobs to determine their score.
@@ -44,18 +45,20 @@ colors = [  # Add more colors if you are detecting more than 7 types of classes
 # position in the output image back to the original input image. The function then returns a
 # list per class which each contain a list of (rect, score) tuples representing the detected
 # objects.
 
 
 def fomo_post_process(model, output, rect):
-    oh, ow, oc = model.output_shape
-    nms = tf.NMS(ow, oh, rect)
+    n, oh, ow, oc = model.output_shape[0]
+    nms = ml.NMS(ow, oh, rect)
     for i in range(oc):
-        img = image.Image(output, shape=(oh, ow, 1), strides=(i, oc), scale=(255, 0))
-        blobs = img.find_blobs(threshold_list, x_stride=1, area_threshold=1, pixels_threshold=1)
+        img = image.Image(output[0], shape=(oh, ow, 1), strides=(i, oc), scale=(255, 0))
+        blobs = img.find_blobs(
+            threshold_list, x_stride=1, area_threshold=1, pixels_threshold=1
+        )
         for b in blobs:
             rect = b.rect()
             x, y, w, h = rect
-            score = img.get_statistics(thresholds=threshold_list, roi=rect).l_mean() / 255.0
+            score = (
+                img.get_statistics(thresholds=threshold_list, roi=rect).l_mean() / 255.0
+            )
             nms.add_bounding_box(x, y, x + w, y + h, score, i)
     return nms.get_bounding_boxes()
 
@@ -66,9 +69,7 @@ while True:
 
     img = sensor.snapshot()
 
-    for i, detection_list in enumerate(
-        fomo_post_process(net, net.predict(img), rect=(0, 0, img.width(), img.height()))
-    ):
+    for i, detection_list in enumerate(model.predict(img, callback=fomo_post_process)):
         if i == 0:
             continue  # background class
         if len(detection_list) == 0:
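With the new API the post-processing moves into predict(): passing callback=fomo_post_process has predict() hand its raw output (together with the model and, judging by the callback signature above, the image ROI) to the callback and return its result, i.e. one detection list per class. A minimal sketch of consuming those lists follows; it assumes the usual clock = time.clock() setup earlier in the script, and the draw_circle call and its radius are illustrative choices, not lines from this diff.

while True:
    clock.tick()
    img = sensor.snapshot()
    for i, detection_list in enumerate(model.predict(img, callback=fomo_post_process)):
        if i == 0:
            continue  # class index 0 is the background channel
        if len(detection_list) == 0:
            continue  # nothing detected for this class
        for (x, y, w, h), score in detection_list:
            # Each entry is a (rect, score) tuple, per the comment above.
            center_x = x + (w // 2)
            center_y = y + (h // 2)
            img.draw_circle(center_x, center_y, 12, color=colors[i])
    print(clock.fps(), "fps")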
@@ -1,40 +1,26 @@
 # This work is licensed under the MIT license.
-# Copyright (c) 2013-2023 OpenMV LLC. All rights reserved.
+# Copyright (c) 2024 OpenMV LLC. All rights reserved.
 # https://github.com/openmv/openmv/blob/master/LICENSE
 #
-# MicroSpeech demo.
-#
-# Download the pre-trained Yes/No model from here:
-# https://raw.githubusercontent.com/iabdalkader/microspeech-yesno-model/main/model.tflite
-# Save the model to storage, reset and run the example.
-import audio
+# The MicroSpeech module is designed for real-time audio processing and speech recognition
+# on microcontroller platforms. It leverages pre-trained models for audio preprocessing and
+# speech recognition, specifically optimized for detecting keywords such as "Yes" and "No".
+import ml
 import time
-import tf
-import micro_speech
-import pyb
 
-labels = ["Silence", "Unknown", "Yes", "No"]
 
-led_red = pyb.LED(1)
-led_green = pyb.LED(2)
+def callback(label, scores):
+    print(f'\nHeard: "{label}" @{time.ticks_ms()}ms Scores: {scores}')
 
-model = tf.load("/model.tflite")
-speech = micro_speech.MicroSpeech()
-audio.init(channels=1, frequency=16000, gain_db=24, highpass=0.9883)
 
-# Start audio streaming
-audio.start_streaming(speech.audio_callback)
+# By default, the MicroSpeech object uses the built-in audio preprocessor (float) and the
+# micro speech module for audio preprocessing and speech recognition, respectively. The
+# user can override both by passing two models:
+# MicroSpeech(preprocessor=ml.Model(...), micro_speech=ml.Model(...), labels=["label",...])
+speech = ml.MicroSpeech()
 
-while True:
-    # Run micro-speech without a timeout and filter detections by label index.
-    idx = speech.listen(model, timeout=0, threshold=0.70, filter=[2, 3])
-    led = led_green if idx == 2 else led_red
-    print(labels[idx])
-    for i in range(0, 4):
-        led.on()
-        time.sleep_ms(25)
-        led.off()
-        time.sleep_ms(25)
-
-# Stop streaming
-audio.stop_streaming()
+# Starts the audio streaming and processes incoming audio to recognize speech commands.
+# If a callback is passed, listen() will loop forever and call the callback when a keyword
+# is detected. Alternatively, `listen()` can be called with a timeout (in ms), and it
+# returns if the timeout expires before detecting a keyword.
+speech.listen(callback=callback, threshold=0.70)
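The new comments also describe a timeout mode for listen() as an alternative to the callback. The sketch below illustrates that mode under two stated assumptions: the timeout keyword name is carried over from the old API shown above, and a timed-out listen() is assumed to return a falsy value, since the diff does not show its return format.

import ml

speech = ml.MicroSpeech()

while True:
    # Block for at most 10 s waiting for a keyword (timeout keyword name assumed from the old API).
    result = speech.listen(timeout=10000, threshold=0.70)
    if result:
        print("Heard:", result)  # Exact result format is not documented in this diff.
    else:
        print("No keyword detected, listening again...")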