scripts/examples: Update examples.

iabdalkader 2024-06-28 12:57:32 +02:00
parent 40710a3573
commit 8ee4a28327
5 changed files with 73 additions and 122 deletions

View File

@@ -4,23 +4,15 @@
#
# TensorFlow Lite Mobilenet V1 Example
#
# Google's Mobilenet V1 detects 1000 classes of objects
# Google's Mobilenet is trained to detect 1000 classes of objects.
#
# WARNING: Mobilenet is trained on ImageNet and isn't meant to classify anything
# in the real world. It's just designed to score well on the ImageNet dataset.
# This example just shows off running mobilenet on the OpenMV Cam. However, the
# default model is not really usable for anything. You have to use transfer
# learning to apply the model to a target problem by re-training the model.
#
# NOTE: This example only works on the OpenMV Cam H7 Plus (that has SDRAM) and better!
# To get the models please see the CNN Network library in OpenMV IDE under
# Tools -> Machine Vision. The labels are there too.
# You should insert a microSD card into your camera and copy-paste the mobilenet_labels.txt
# file and your chosen model into the root folder for this script to work.
# NOTE: This example only works on boards that have enough memory to load the model.
# To get the models, please see the CNN Network library in OpenMV IDE under Tools->
# Machine Vision. The labels file (mobilenet_labels.txt) is included there as well,
# and it should be copied to the root of the filesystem for this script to work.
import sensor
import time
import tf
import ml
sensor.reset() # Reset and initialize the sensor.
sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE)
@@ -38,7 +30,7 @@ mobilenet = "mobilenet_v%s_%s_%s_quant.tflite" % (
mobilenet_resolution,
)
net = tf.Model(mobilenet, load_to_fb=True)
model = ml.Model(mobilenet, load_to_fb=True)
labels = [line.rstrip("\n") for line in open("mobilenet_labels.txt")]
clock = time.clock()
@@ -51,7 +43,7 @@ while True:
# This combines the labels and confidence values into a list of tuples
# and then sorts that list by the confidence values.
sorted_list = sorted(
zip(labels, net.predict(img)), key=lambda x: x[1], reverse=True
zip(labels, model.predict(img)[0]), key=lambda x: x[1], reverse=True
)
for i in range(5):
print("%s = %f" % (sorted_list[i][0], sorted_list[i][1]))

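For reference, a minimal sketch of the updated classification loop assembled from the hunks above, assuming the ml.Model API shown in the diff (predict() on an image returns a list of output tensors, with the class scores at index 0). The model filename is a placeholder for whichever quantized Mobilenet variant was copied to the filesystem root.

import sensor
import time
import ml

sensor.reset()                           # Reset and initialize the sensor.
sensor.set_pixformat(sensor.RGB565)      # RGB565 (or GRAYSCALE)
sensor.set_framesize(sensor.QVGA)
sensor.skip_frames(time=2000)            # Let the camera settle.

# Placeholder filename; use the model you copied from the IDE's CNN library.
model = ml.Model("mobilenet_v1_0.25_128_quant.tflite", load_to_fb=True)
labels = [line.rstrip("\n") for line in open("mobilenet_labels.txt")]

clock = time.clock()
while True:
    clock.tick()
    img = sensor.snapshot()
    # predict() returns a list of output tensors; index 0 holds the class scores.
    scored = sorted(zip(labels, model.predict(img)[0]), key=lambda x: x[1], reverse=True)
    for label, score in scored[:5]:
        print("%s = %f" % (label, score))
    print(clock.fps(), "fps")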
View File

@@ -8,7 +8,7 @@
import sensor
import time
import tf
import ml
import math
import image
@@ -21,10 +21,10 @@ min_confidence = 0.4
threshold_list = [(math.ceil(min_confidence * 255), 255)]
# Load built-in FOMO face detection model
labels, net = tf.Model("fomo_face_detection")
labels, model = ml.Model("fomo_face_detection")
# Alternatively, models can be loaded from the filesystem storage.
# net = tf.Model('<object_detection_network>', load_to_fb=True)
# model = ml.Model('<object_detection_network>.tflite', load_to_fb=True)
# labels = [line.rstrip('\n') for line in open("labels.txt")]
colors = [ # Add more colors if you are detecting more than 7 types of classes at once.
@@ -37,6 +37,7 @@ colors = [ # Add more colors if you are detecting more than 7 types of classes
(255, 255, 255),
]
# FOMO outputs an image per class where each pixel in the image is the centroid of the trained
# object. So, we will get those output images and then run find_blobs() on them to extract the
# centroids. We will also run get_stats() on the detected blobs to determine their score.
@@ -44,18 +45,20 @@ colors = [ # Add more colors if you are detecting more than 7 types of classes
# position in the output image back to the original input image. The function then returns a
# list per class which each contain a list of (rect, score) tuples representing the detected
# objects.
def fomo_post_process(model, output, rect):
oh, ow, oc = model.output_shape
nms = tf.NMS(ow, oh, rect)
n, oh, ow, oc = model.output_shape[0]
nms = ml.NMS(ow, oh, rect)
for i in range(oc):
img = image.Image(output, shape=(oh, ow, 1), strides=(i, oc), scale=(255, 0))
blobs = img.find_blobs(threshold_list, x_stride=1, area_threshold=1, pixels_threshold=1)
img = image.Image(output[0], shape=(oh, ow, 1), strides=(i, oc), scale=(255, 0))
blobs = img.find_blobs(
threshold_list, x_stride=1, area_threshold=1, pixels_threshold=1
)
for b in blobs:
rect = b.rect()
x, y, w, h = rect
score = img.get_statistics(thresholds=threshold_list, roi=rect).l_mean() / 255.0
score = (
img.get_statistics(thresholds=threshold_list, roi=rect).l_mean() / 255.0
)
nms.add_bounding_box(x, y, x + w, y + h, score, i)
return nms.get_bounding_boxes()
@@ -66,9 +69,7 @@ while True:
img = sensor.snapshot()
for i, detection_list in enumerate(
fomo_post_process(net, net.predict(img), rect=(0, 0, img.width(), img.height()))
):
for i, detection_list in enumerate(model.predict(img, callback=fomo_post_process)):
if i == 0:
continue # background class
if len(detection_list) == 0:

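For context, a sketch of how the main loop might consume these results after the change, continuing the code above: model.predict(img, callback=fomo_post_process) returns the per-class lists of ((x, y, w, h), score) tuples built by the NMS step, with class 0 being the background. The drawing and printing calls are illustrative only.

while True:
    clock.tick()
    img = sensor.snapshot()
    for i, detection_list in enumerate(model.predict(img, callback=fomo_post_process)):
        if i == 0:
            continue  # Class 0 is the background.
        if len(detection_list) == 0:
            continue  # Nothing above threshold for this class.
        print("********** %s **********" % labels[i])
        for (x, y, w, h), score in detection_list:
            center_x = math.floor(x + (w / 2))
            center_y = math.floor(y + (h / 2))
            print(f"x {center_x}\ty {center_y}\tscore {score}")
            img.draw_circle((center_x, center_y, 12), color=colors[i])
    print(clock.fps(), "fps", end="\n\n")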
View File

@@ -1,40 +1,26 @@
# This work is licensed under the MIT license.
# Copyright (c) 2013-2023 OpenMV LLC. All rights reserved.
# Copyright (c) 2024 OpenMV LLC. All rights reserved.
# https://github.com/openmv/openmv/blob/master/LICENSE
#
# MicroSpeech demo.
#
# Download the pre-trained Yes/No model from here:
# https://raw.githubusercontent.com/iabdalkader/microspeech-yesno-model/main/model.tflite
# Save the model to storage, reset and run the example.
import audio
# The MicroSpeech module is designed for real-time audio processing and speech recognition
# on microcontroller platforms. It leverages pre-trained models for audio preprocessing and
# speech recognition, specifically optimized for detecting keywords such as "Yes" and "No".
import ml
import time
import tf
import micro_speech
import pyb
labels = ["Silence", "Unknown", "Yes", "No"]
led_red = pyb.LED(1)
led_green = pyb.LED(2)
def callback(label, scores):
print(f'\nHeard: "{label}" @{time.ticks_ms()}ms Scores: {scores}')
model = tf.load("/model.tflite")
speech = micro_speech.MicroSpeech()
audio.init(channels=1, frequency=16000, gain_db=24, highpass=0.9883)
# Start audio streaming
audio.start_streaming(speech.audio_callback)
# By default, the MicroSpeech object uses the built-in audio preprocessor (float) and the
# micro speech module for audio preprocessing and speech recognition, respectively. The
# user can override both by passing two models:
# MicroSpeech(preprocessor=ml.Model(...), micro_speech=ml.Model(...), labels=["label",...])
speech = ml.MicroSpeech()
while True:
# Run micro-speech without a timeout and filter detections by label index.
idx = speech.listen(model, timeout=0, threshold=0.70, filter=[2, 3])
led = led_green if idx == 2 else led_red
print(labels[idx])
for i in range(0, 4):
led.on()
time.sleep_ms(25)
led.off()
time.sleep_ms(25)
# Stop streaming
audio.stop_streaming()
# Starts the audio streaming and processes incoming audio to recognize speech commands.
# If a callback is passed, listen() will loop forever and call the callback when a keyword
# is detected. Alternatively, `listen()` can be called with a timeout (in ms), and it
# returns if the timeout expires before detecting a keyword.
speech.listen(callback=callback, threshold=0.70)

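Assembled, the new MicroSpeech example reduces to the sketch below. The blocking listen(callback=...) call is taken directly from the diff; the commented-out timeout variant is an assumption based on the comment above about passing a timeout in milliseconds.

import ml
import time

def callback(label, scores):
    # Invoked whenever a keyword crosses the confidence threshold.
    print(f'\nHeard: "{label}" @{time.ticks_ms()}ms Scores: {scores}')

# Uses the built-in audio preprocessor and micro-speech model by default.
speech = ml.MicroSpeech()

# Blocking form: loops forever, calling the callback on each detection.
speech.listen(callback=callback, threshold=0.70)

# Assumed alternative (per the comments above): return after a timeout in ms
# if no keyword is detected.
# speech.listen(timeout=5000, threshold=0.70)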
View File

@@ -1,40 +1,26 @@
# This work is licensed under the MIT license.
# Copyright (c) 2013-2023 OpenMV LLC. All rights reserved.
# Copyright (c) 2024 OpenMV LLC. All rights reserved.
# https://github.com/openmv/openmv/blob/master/LICENSE
#
# MicroSpeech demo.
#
# Download the pre-trained Yes/No model from here:
# https://raw.githubusercontent.com/iabdalkader/microspeech-yesno-model/main/model.tflite
# Save the model to storage, reset and run the example.
import audio
# The MicroSpeech module is designed for real-time audio processing and speech recognition
# on microcontroller platforms. It leverages pre-trained models for audio preprocessing and
# speech recognition, specifically optimized for detecting keywords such as "Yes" and "No".
import ml
import time
import tf
import micro_speech
import pyb
labels = ["Silence", "Unknown", "Yes", "No"]
led_red = pyb.LED(1)
led_green = pyb.LED(2)
def callback(label, scores):
print(f'\nHeard: "{label}" @{time.ticks_ms()}ms Scores: {scores}')
model = tf.load("/model.tflite")
speech = micro_speech.MicroSpeech()
audio.init(channels=1, frequency=16000, gain_db=24, highpass=0.9883)
# Start audio streaming
audio.start_streaming(speech.audio_callback)
# By default, the MicroSpeech object uses the built-in audio preprocessor (float) and the
# micro speech module for audio preprocessing and speech recognition, respectively. The
# user can override both by passing two models:
# MicroSpeech(preprocessor=ml.Model(...), micro_speech=ml.Model(...), labels=["label",...])
speech = ml.MicroSpeech()
while True:
# Run micro-speech without a timeout and filter detections by label index.
idx = speech.listen(model, timeout=0, threshold=0.70, filter=[2, 3])
led = led_green if idx == 2 else led_red
print(labels[idx])
for i in range(0, 4):
led.on()
time.sleep_ms(25)
led.off()
time.sleep_ms(25)
# Stop streaming
audio.stop_streaming()
# Starts the audio streaming and processes incoming audio to recognize speech commands.
# If a callback is passed, listen() will loop forever and call the callback when a keyword
# is detected. Alternatively, `listen()` can be called with a timeout (in ms), and it
# returns if the timeout expires before detecting a keyword.
speech.listen(callback=callback, threshold=0.70)

View File

@@ -1,40 +1,26 @@
# This work is licensed under the MIT license.
# Copyright (c) 2013-2023 OpenMV LLC. All rights reserved.
# Copyright (c) 2024 OpenMV LLC. All rights reserved.
# https://github.com/openmv/openmv/blob/master/LICENSE
#
# MicroSpeech demo.
#
# Download the pre-trained Yes/No model from here:
# https://raw.githubusercontent.com/iabdalkader/microspeech-yesno-model/main/model.tflite
# Save the model to storage, reset and run the example.
import audio
# The MicroSpeech module is designed for real-time audio processing and speech recognition
# on microcontroller platforms. It leverages pre-trained models for audio preprocessing and
# speech recognition, specifically optimized for detecting keywords such as "Yes" and "No".
import ml
import time
import tf
import micro_speech
import pyb
labels = ["Silence", "Unknown", "Yes", "No"]
led_red = pyb.LED(1)
led_green = pyb.LED(2)
def callback(label, scores):
print(f'\nHeard: "{label}" @{time.ticks_ms()}ms Scores: {scores}')
model = tf.load("/model.tflite")
speech = micro_speech.MicroSpeech()
audio.init(channels=1, frequency=16000, gain_db=24, highpass=0.9883)
# Start audio streaming
audio.start_streaming(speech.audio_callback)
# By default, the MicroSpeech object uses the built-in audio preprocessor (float) and the
# micro speech module for audio preprocessing and speech recognition, respectively. The
# user can override both by passing two models:
# MicroSpeech(preprocessor=ml.Model(...), micro_speech=ml.Model(...), labels=["label",...])
speech = ml.MicroSpeech()
while True:
# Run micro-speech without a timeout and filter detections by label index.
idx = speech.listen(model, timeout=0, threshold=0.70, filter=[2, 3])
led = led_green if idx == 2 else led_red
print(labels[idx])
for i in range(0, 4):
led.on()
time.sleep_ms(25)
led.off()
time.sleep_ms(25)
# Stop streaming
audio.stop_streaming()
# Starts the audio streaming and processes incoming audio to recognize speech commands.
# If a callback is passed, listen() will loop forever and call the callback when a keyword
# is detected. Alternatively, `listen()` can be called with a timeout (in ms), and it
# returns if the timeout expires before detecting a keyword.
speech.listen(callback=callback, threshold=0.70)