Mirror of https://github.com/openmv/openmv.git (synced 2025-11-04 14:49:50 +08:00)

scripts/examples: Update examples.

Commit eac75a75c6, parent 93e17a3a8d
@@ -43,7 +43,7 @@ while True:
     # This combines the labels and confidence values into a list of tuples
     # and then sorts that list by the confidence values.
     sorted_list = sorted(
-        zip(labels, model.predict(img)[0]), key=lambda x: x[1], reverse=True
+        zip(labels, model.predict([img])[0]), key=lambda x: x[1], reverse=True
     )
     for i in range(5):
         print("%s = %f" % (sorted_list[i][0], sorted_list[i][1]))
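After this change, predict() takes a list of inputs rather than a single image. A minimal sketch of the updated classification loop under that API; the model name "trained" and the labels.txt file are assumptions (typical of Edge Impulse deployments), not part of this diff:

import sensor
import ml

sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.skip_frames(time=2000)

# Assumptions: model is named "trained" and ships with a labels.txt;
# adjust both names for your setup.
model = ml.Model("trained")
labels = [line.rstrip("\n") for line in open("labels.txt")]

while True:
    img = sensor.snapshot()
    # predict() now takes a *list* of inputs and returns a list of outputs;
    # [0] selects this classifier's single output tensor.
    sorted_list = sorted(
        zip(labels, model.predict([img])[0]), key=lambda x: x[1], reverse=True
    )
    for label, score in sorted_list[:5]:
        print("%s = %f" % (label, score))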
@@ -45,11 +45,11 @@ colors = [ # Add more colors if you are detecting more than 7 types of classes
 # position in the output image back to the original input image. The function then returns a
 # list per class which each contain a list of (rect, score) tuples representing the detected
 # objects.
-def fomo_post_process(model, output, rect):
+def fomo_post_process(model, inputs, outputs):
     n, oh, ow, oc = model.output_shape[0]
-    nms = ml.NMS(ow, oh, rect)
+    nms = ml.NMS(ow, oh, inputs[0].roi)
     for i in range(oc):
-        img = image.Image(output[0], shape=(oh, ow, 1), strides=(i, oc), scale=(255, 0))
+        img = image.Image(outputs[0], shape=(oh, ow, 1), strides=(i, oc), scale=(255, 0))
         blobs = img.find_blobs(
             threshold_list, x_stride=1, area_threshold=1, pixels_threshold=1
         )
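The callback no longer receives a precomputed rect: the ROI is now recovered from the input tensor wrapper via inputs[0].roi, and the raw output list arrives as outputs. A hedged skeleton of a callback matching the new signature; only the lines copied from the diff are the example's actual code, while the class_maps list and return value are illustrative assumptions:

import image

def post_process_skeleton(model, inputs, outputs):
    n, oh, ow, oc = model.output_shape[0]  # batch, height, width, channels (NHWC)
    roi = inputs[0].roi                    # the ROI the old API passed as `rect`
    class_maps = []
    for i in range(oc):
        # View channel i of the flat output tensor as an (oh x ow) grayscale
        # image by striding over the channel axis (arguments copied from the diff).
        class_maps.append(
            image.Image(outputs[0], shape=(oh, ow, 1), strides=(i, oc), scale=(255, 0))
        )
    return class_maps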
@@ -69,7 +69,7 @@ while True:

     img = sensor.snapshot()

-    for i, detection_list in enumerate(model.predict(img, callback=fomo_post_process)):
+    for i, detection_list in enumerate(model.predict([img], callback=fomo_post_process)):
         if i == 0:
             continue  # background class
         if len(detection_list) == 0:
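As in the classification example, predict() now takes its input as a list. A sketch of consuming the per-class (rect, score) lists it returns, assuming sensor, model, and a colors list are set up as in the surrounding example; the circle drawing and print formatting are illustrative, not part of this diff:

while True:
    img = sensor.snapshot()

    # Each entry of the result is the per-class list of (rect, score)
    # tuples built by fomo_post_process() above.
    for i, detection_list in enumerate(model.predict([img], callback=fomo_post_process)):
        if i == 0:
            continue  # class 0 is the background class
        if len(detection_list) == 0:
            continue  # no detections for this class
        for (x, y, w, h), score in detection_list:
            center_x = x + w // 2
            center_y = y + h // 2
            img.draw_circle(center_x, center_y, 12, color=colors[i])
            print("class %d: score %.2f at (%d, %d)" % (i, score, center_x, center_y))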
@@ -5,8 +5,8 @@
 # The MicroSpeech module is designed for real-time audio processing and speech recognition
 # on microcontroller platforms. It leverages pre-trained models for audio preprocessing and
 # speech recognition, specifically optimized for detecting keywords such as "Yes" and "No".
-import ml
 import time
+from ml.apps import MicroSpeech


 def callback(label, scores):
@@ -17,7 +17,7 @@ def callback(label, scores):
 # micro speech module for audio preprocessing and speech recognition, respectively. The
 # user can override both by passing two models:
 # MicroSpeech(preprocessor=ml.Model(...), micro_speech=ml.Model(...), labels=["label",...])
-speech = ml.MicroSpeech()
+speech = MicroSpeech()

 # Starts the audio streaming and processes incoming audio to recognize speech commands.
 # If a callback is passed, listen() will loop forever and call the callback when a keyword
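Read together, the two hunks move MicroSpeech from ml.MicroSpeech to ml.apps.MicroSpeech. A sketch of the example as it reads after the change; the callback body and the timeout=0 argument are assumptions based on the truncated comment above, not lines from this diff:

import time
from ml.apps import MicroSpeech


def callback(label, scores):
    # Assumed body: the diff elides what the example's callback prints.
    print('Heard: "%s" @%dms, scores: %s' % (label, time.ticks_ms(), scores))


# MicroSpeech now lives in ml.apps and is instantiated without the ml. prefix.
speech = MicroSpeech()

# Per the comment above, listen() loops forever and invokes the callback when
# a keyword is detected; timeout=0 is an assumed way to request that behavior.
speech.listen(callback=callback, timeout=0)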
@@ -5,8 +5,8 @@
 # The MicroSpeech module is designed for real-time audio processing and speech recognition
 # on microcontroller platforms. It leverages pre-trained models for audio preprocessing and
 # speech recognition, specifically optimized for detecting keywords such as "Yes" and "No".
-import ml
 import time
+from ml.apps import MicroSpeech


 def callback(label, scores):
@@ -17,7 +17,7 @@ def callback(label, scores):
 # micro speech module for audio preprocessing and speech recognition, respectively. The
 # user can override both by passing two models:
 # MicroSpeech(preprocessor=ml.Model(...), micro_speech=ml.Model(...), labels=["label",...])
-speech = ml.MicroSpeech()
+speech = MicroSpeech()

 # Starts the audio streaming and processes incoming audio to recognize speech commands.
 # If a callback is passed, listen() will loop forever and call the callback when a keyword
@@ -5,8 +5,8 @@
 # The MicroSpeech module is designed for real-time audio processing and speech recognition
 # on microcontroller platforms. It leverages pre-trained models for audio preprocessing and
 # speech recognition, specifically optimized for detecting keywords such as "Yes" and "No".
-import ml
 import time
+from ml.apps import MicroSpeech


 def callback(label, scores):
@@ -17,7 +17,7 @@ def callback(label, scores):
 # micro speech module for audio preprocessing and speech recognition, respectively. The
 # user can override both by passing two models:
 # MicroSpeech(preprocessor=ml.Model(...), micro_speech=ml.Model(...), labels=["label",...])
-speech = ml.MicroSpeech()
+speech = MicroSpeech()

 # Starts the audio streaming and processes incoming audio to recognize speech commands.
 # If a callback is passed, listen() will loop forever and call the callback when a keyword