From eac75a75c6f31af5ff1c9158cda19b525e79dcde Mon Sep 17 00:00:00 2001
From: iabdalkader
Date: Sun, 7 Jul 2024 16:48:57 +0300
Subject: [PATCH] scripts/examples: Update examples.

---
 .../00-TensorFlow/tf_image_classification.py           | 2 +-
 .../00-TensorFlow/tf_object_detection.py               | 8 ++++----
 .../50-Arduino-Boards/Giga-H7/52-Audio/micro_speech.py | 4 ++--
 .../Nicla-Vision/52-Audio/micro_speech.py              | 4 ++--
 .../Portenta-H7/51-Audio/micro_speech.py               | 4 ++--
 5 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/scripts/examples/03-Machine-Learning/00-TensorFlow/tf_image_classification.py b/scripts/examples/03-Machine-Learning/00-TensorFlow/tf_image_classification.py
index 502a248ad..9c6248fdd 100644
--- a/scripts/examples/03-Machine-Learning/00-TensorFlow/tf_image_classification.py
+++ b/scripts/examples/03-Machine-Learning/00-TensorFlow/tf_image_classification.py
@@ -43,7 +43,7 @@ while True:
     # This combines the labels and confidence values into a list of tuples
     # and then sorts that list by the confidence values.
     sorted_list = sorted(
-        zip(labels, model.predict(img)[0]), key=lambda x: x[1], reverse=True
+        zip(labels, model.predict([img])[0]), key=lambda x: x[1], reverse=True
     )
     for i in range(5):
         print("%s = %f" % (sorted_list[i][0], sorted_list[i][1]))
diff --git a/scripts/examples/03-Machine-Learning/00-TensorFlow/tf_object_detection.py b/scripts/examples/03-Machine-Learning/00-TensorFlow/tf_object_detection.py
index 114a61adf..6367bb59b 100644
--- a/scripts/examples/03-Machine-Learning/00-TensorFlow/tf_object_detection.py
+++ b/scripts/examples/03-Machine-Learning/00-TensorFlow/tf_object_detection.py
@@ -45,11 +45,11 @@ colors = [  # Add more colors if you are detecting more than 7 types of classes
 # position in the output image back to the original input image. The function then returns a
 # list per class which each contain a list of (rect, score) tuples representing the detected
 # objects.
-def fomo_post_process(model, output, rect):
+def fomo_post_process(model, inputs, outputs):
     n, oh, ow, oc = model.output_shape[0]
-    nms = ml.NMS(ow, oh, rect)
+    nms = ml.NMS(ow, oh, inputs[0].roi)
     for i in range(oc):
-        img = image.Image(output[0], shape=(oh, ow, 1), strides=(i, oc), scale=(255, 0))
+        img = image.Image(outputs[0], shape=(oh, ow, 1), strides=(i, oc), scale=(255, 0))
         blobs = img.find_blobs(
             threshold_list, x_stride=1, area_threshold=1, pixels_threshold=1
         )
@@ -69,7 +69,7 @@ while True:
 
     img = sensor.snapshot()
 
-    for i, detection_list in enumerate(model.predict(img, callback=fomo_post_process)):
+    for i, detection_list in enumerate(model.predict([img], callback=fomo_post_process)):
         if i == 0:
             continue  # background class
         if len(detection_list) == 0:
diff --git a/scripts/examples/50-Arduino-Boards/Giga-H7/52-Audio/micro_speech.py b/scripts/examples/50-Arduino-Boards/Giga-H7/52-Audio/micro_speech.py
index 3e23057fa..513748310 100644
--- a/scripts/examples/50-Arduino-Boards/Giga-H7/52-Audio/micro_speech.py
+++ b/scripts/examples/50-Arduino-Boards/Giga-H7/52-Audio/micro_speech.py
@@ -5,8 +5,8 @@
 # The MicroSpeech module is designed for real-time audio processing and speech recognition
 # on microcontroller platforms. It leverages pre-trained models for audio preprocessing and
 # speech recognition, specifically optimized for detecting keywords such as "Yes" and "No".
-import ml
 import time
+from ml.apps import MicroSpeech
 
 
 def callback(label, scores):
@@ -17,7 +17,7 @@ def callback(label, scores):
 # micro speech module for audio preprocessing and speech recognition, respectively. The
 # user can override both by passing two models:
 # MicroSpeech(preprocessor=ml.Model(...), micro_speech=ml.Model(...), labels=["label",...])
-speech = ml.MicroSpeech()
+speech = MicroSpeech()
 
 # Starts the audio streaming and processes incoming audio to recognize speech commands.
 # If a callback is passed, listen() will loop forever and call the callback when a keyword
diff --git a/scripts/examples/50-Arduino-Boards/Nicla-Vision/52-Audio/micro_speech.py b/scripts/examples/50-Arduino-Boards/Nicla-Vision/52-Audio/micro_speech.py
index 3e23057fa..513748310 100644
--- a/scripts/examples/50-Arduino-Boards/Nicla-Vision/52-Audio/micro_speech.py
+++ b/scripts/examples/50-Arduino-Boards/Nicla-Vision/52-Audio/micro_speech.py
@@ -5,8 +5,8 @@
 # The MicroSpeech module is designed for real-time audio processing and speech recognition
 # on microcontroller platforms. It leverages pre-trained models for audio preprocessing and
 # speech recognition, specifically optimized for detecting keywords such as "Yes" and "No".
-import ml
 import time
+from ml.apps import MicroSpeech
 
 
 def callback(label, scores):
@@ -17,7 +17,7 @@ def callback(label, scores):
 # micro speech module for audio preprocessing and speech recognition, respectively. The
 # user can override both by passing two models:
 # MicroSpeech(preprocessor=ml.Model(...), micro_speech=ml.Model(...), labels=["label",...])
-speech = ml.MicroSpeech()
+speech = MicroSpeech()
 
 # Starts the audio streaming and processes incoming audio to recognize speech commands.
 # If a callback is passed, listen() will loop forever and call the callback when a keyword
diff --git a/scripts/examples/50-Arduino-Boards/Portenta-H7/51-Audio/micro_speech.py b/scripts/examples/50-Arduino-Boards/Portenta-H7/51-Audio/micro_speech.py
index 3e23057fa..513748310 100644
--- a/scripts/examples/50-Arduino-Boards/Portenta-H7/51-Audio/micro_speech.py
+++ b/scripts/examples/50-Arduino-Boards/Portenta-H7/51-Audio/micro_speech.py
@@ -5,8 +5,8 @@
 # The MicroSpeech module is designed for real-time audio processing and speech recognition
 # on microcontroller platforms. It leverages pre-trained models for audio preprocessing and
 # speech recognition, specifically optimized for detecting keywords such as "Yes" and "No".
-import ml
 import time
+from ml.apps import MicroSpeech
 
 
 def callback(label, scores):
@@ -17,7 +17,7 @@ def callback(label, scores):
 # micro speech module for audio preprocessing and speech recognition, respectively. The
 # user can override both by passing two models:
 # MicroSpeech(preprocessor=ml.Model(...), micro_speech=ml.Model(...), labels=["label",...])
-speech = ml.MicroSpeech()
+speech = MicroSpeech()
 
 # Starts the audio streaming and processes incoming audio to recognize speech commands.
 # If a callback is passed, listen() will loop forever and call the callback when a keyword
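
Notes on the updated ml API used in this patch. The sketches below are
illustrative; anything not shown in the diff itself is an assumption.

1) ml.Model.predict() now takes a *list* of input tensors and returns a list
of output tensors, which is why the classification example wraps the image as
[img] and indexes the result with [0]. A minimal sketch of the updated flow,
assuming a model named "trained" and a "labels.txt" file on the device (both
names are illustrative, not from this patch):

    import sensor
    import ml

    sensor.reset()
    sensor.set_pixformat(sensor.RGB565)
    sensor.set_framesize(sensor.QVGA)

    model = ml.Model("trained")  # illustrative model name
    labels = [line.rstrip("\n") for line in open("labels.txt")]  # illustrative path

    while True:
        img = sensor.snapshot()
        # predict() takes a list of inputs and returns a list of outputs.
        scores = model.predict([img])[0]
        sorted_list = sorted(zip(labels, scores), key=lambda x: x[1], reverse=True)
        print("%s = %f" % sorted_list[0])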
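2) predict() callbacks changed shape: the old callback(model, output, rect)
becomes callback(model, inputs, outputs), and the input ROI that used to
arrive as the rect argument is now read from inputs[0].roi. A sketch of the
new callback signature with the blob-finding details elided; the
get_bounding_boxes() call and its threshold/sigma values are assumed from the
full example rather than shown in this diff:

    import ml
    import image


    def fomo_post_process(model, inputs, outputs):
        n, oh, ow, oc = model.output_shape[0]  # batch, height, width, channels
        nms = ml.NMS(ow, oh, inputs[0].roi)  # inputs[0].roi replaces the old rect
        for i in range(oc):
            # View channel i of the output tensor as a grayscale heatmap.
            heatmap = image.Image(outputs[0], shape=(oh, ow, 1), strides=(i, oc), scale=(255, 0))
            # ... find blobs in the heatmap and feed them to nms here ...
        # Returns one list of (rect, score) tuples per class.
        return nms.get_bounding_boxes(threshold=0.1, sigma=0.1)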
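3) MicroSpeech moved from the top-level ml module into ml.apps, so
ml.MicroSpeech() becomes MicroSpeech() after the new import. A minimal usage
sketch; the listen() arguments (a callback plus a 0.70 score threshold) are
assumptions based on the surrounding example, not part of this diff:

    import time
    from ml.apps import MicroSpeech  # was: import ml; speech = ml.MicroSpeech()


    def callback(label, scores):
        print(f'\nHeard: "{label}" @{time.ticks_ms()}ms, scores: {scores}')


    speech = MicroSpeech()
    # With a callback, listen() loops forever and invokes the callback
    # whenever a keyword such as "Yes" or "No" is detected.
    speech.listen(callback=callback, threshold=0.70)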