scripts/examples: Update examples.

Use romfs paths.
iabdalkader 2024-12-30 17:56:50 +01:00
parent 3f55d956c5
commit 416bc46136
4 changed files with 5 additions and 5 deletions
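All four changes follow the same pattern: models previously loaded by short name (or pulled into the frame buffer with load_to_fb=True) are now loaded from absolute paths in the firmware's read-only romfs image. A minimal sketch of the new pattern, assuming an OpenMV board whose firmware bundles the model in romfs:

import ml

# Models baked into the firmware image are mounted read-only under /rom,
# so they can be opened like any other file on the device.
model = ml.Model("/rom/person_detect.tflite")
print(model)  # Shows the model's input/output tensor shapes and types.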


@@ -16,7 +16,7 @@ sensor.set_pixformat(sensor.RGB565)
 sensor.set_framesize(sensor.QVGA)
 sensor.skip_frames(time=2000)
-model = ml.Model("person_detect", load_to_fb=True)
+model = ml.Model("/rom/person_detect.tflite")
 print(model)
 clock = time.clock()


@@ -23,7 +23,7 @@ min_confidence = 0.4
 threshold_list = [(math.ceil(min_confidence * 255), 255)]
 # Load built-in FOMO face detection model
-model = ml.Model("fomo_face_detection")
+model = ml.Model("/rom/fomo_face_detection.tflite")
 print(model)
 # Alternatively, models can be loaded from the filesystem storage.


@@ -18,7 +18,7 @@ from ulab import numpy as np
 # The model is built-in on the RT1062. On other OpenMV Cam's with limited flash space please grab
 # the model from here: https://github.com/openmv/openmv/tree/master/src/lib/tflm/models and
 # copy it to the OpenMV Cam's file system. E.g. model = ml.Model("force_int_quant.tflite")
-model = ml.Model("force_int_quant")
+model = ml.Model("/rom/force_int_quant.tflite")
 print(model)
 i = np.array([-3, -1, -2, 5, -2, 10, -1, 9, 0, # noqa
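On boards whose firmware does not bundle this model, the comment in the hunk above says to copy it to the filesystem instead. A hedged fallback sketch; the assumption that ml.Model raises OSError for a missing path is mine, not something this commit confirms:

import ml

try:
    # Prefer the copy baked into the firmware's romfs image.
    model = ml.Model("/rom/force_int_quant.tflite")
except OSError:  # Assumed error type for a missing file.
    # Fall back to a copy the user placed on the flash filesystem.
    model = ml.Model("force_int_quant.tflite")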


@@ -49,10 +49,10 @@ class MicroSpeech:
     def __init__(self, preprocessor=None, micro_speech=None, labels=None, **kwargs):
         self.preprocessor = preprocessor
         if preprocessor is None:
-            self.preprocessor = Model("audio_preprocessor")
+            self.preprocessor = Model("/rom/audio_preprocessor.tflite")
         self.labels, self.micro_speech = (labels, micro_speech)
         if micro_speech is None:
-            self.micro_speech = Model("micro_speech")
+            self.micro_speech = Model("/rom/micro_speech.tflite")
             self.labels = self.micro_speech.labels
         # 16 samples/1ms
         self.audio_buffer = np.zeros((1, _SAMPLES_PER_STEP * 3), dtype=np.int16)
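With these defaults, MicroSpeech() pulls both models from romfs, while callers can still inject their own. A usage sketch grounded in the __init__ signature shown above; the ml.apps import path and the label list are assumptions for illustration:

from ml import Model
from ml.apps import MicroSpeech  # Import path is an assumption.

# Default construction: preprocessor and recognizer both come from /rom.
speech = MicroSpeech()

# Injecting custom models via the keyword parameters shown in the hunk above.
speech = MicroSpeech(
    preprocessor=Model("/rom/audio_preprocessor.tflite"),
    micro_speech=Model("my_speech_model.tflite"),  # Hypothetical user model.
    labels=["Silence", "Unknown", "Yes", "No"],    # Illustrative labels.
)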