Merge pull request #2274 from openmv/ml_updates

modules/py_ml: ML updates and fix.
Ibrahim Abdelkader 2024-07-12 23:40:55 +02:00 committed by GitHub
commit abe54df3e7
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
5 changed files with 23 additions and 25 deletions

View File

@@ -22,7 +22,7 @@ min_confidence = 0.4
 threshold_list = [(math.ceil(min_confidence * 255), 255)]
 
 # Load built-in FOMO face detection model
-labels, model = ml.Model("fomo_face_detection")
+model = ml.Model("fomo_face_detection")
 
 # Alternatively, models can be loaded from the filesystem storage.
 # model = ml.Model('<object_detection_modelwork>.tflite', load_to_fb=True)
@@ -76,7 +76,7 @@ while True:
         if len(detection_list) == 0:
             continue  # no detections for this class?
 
-        print("********** %s **********" % labels[i])
+        print("********** %s **********" % model.labels[i])
         for (x, y, w, h), score in detection_list:
             center_x = math.floor(x + (w / 2))
             center_y = math.floor(y + (h / 2))

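The example change above is the user-facing half of this PR: ml.Model() no longer returns a (labels, model) tuple, and the built-in labels are read from the model object instead. A minimal before/after sketch, assuming the built-in "fomo_face_detection" model is compiled into the firmware:

import ml

# Before this commit, the constructor returned a tuple for labeled built-in models:
#   labels, model = ml.Model("fomo_face_detection")

# After this commit, it always returns a single Model object, and labels live on it:
model = ml.Model("fomo_face_detection")
print(model.labels)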
View File

@@ -28,10 +28,11 @@ class MicroSpeech:
     def __init__(self, preprocessor=None, micro_speech=None, labels=None):
         self.preprocessor = preprocessor
         if preprocessor is None:
-            self.preprocessor = Model("audio_preprocessor")[1]
+            self.preprocessor = Model("audio_preprocessor")
         self.labels, self.micro_speech = (labels, micro_speech)
         if micro_speech is None:
-            self.labels, self.micro_speech = Model("micro_speech")
+            self.micro_speech = Model("micro_speech")
+            self.labels = self.micro_speech.labels
         # 16 samples/1ms
         self.audio_buffer = np.zeros((1, _SAMPLES_PER_STEP * 3), dtype=np.int16)
         self.spectrogram = np.zeros((1, _SLICE_COUNT * _SLICE_SIZE), dtype=np.int8)

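With the tuple return gone, MicroSpeech loads its default models directly and takes its labels from the micro_speech model itself. A hedged usage sketch; the `from ml.apps import MicroSpeech` import path is an assumption not shown in this diff:

from ml.apps import MicroSpeech  # import path is an assumption

speech = MicroSpeech()   # loads the built-in "audio_preprocessor" and "micro_speech" models
print(speech.labels)     # labels now come from the micro_speech model (model.labels)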
View File

@@ -9,19 +9,10 @@ import image
 from ml.preprocessing import Normalization
 
-class Model:
-    def __new__(cls, *args, **kwargs):
-        self = super().__new__(cls)
-        retobj = uml.Model(*args, **kwargs)
-        if isinstance(retobj, tuple):
-            labels, self.model = retobj
-            return labels, self
-        self.model = retobj
-        return self
-
-    def __str__(self):
-        return str(self.model)
+class Model(uml.Model):
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
 
     def predict(self, args, **kwargs):
         args = [Normalization()(x) if isinstance(x, image.Image) else x for x in args]
-        return self.model.predict(args, **kwargs)
+        return super().predict(args, **kwargs)

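The Python wrapper no longer builds a separate uml.Model through __new__; it subclasses uml.Model and only overrides predict() to normalize image.Image inputs before delegating to the parent. A short hedged sketch of that predict() path:

import sensor
import ml

# Assumes the camera was initialized elsewhere with a format/size suitable for the model.
model = ml.Model("fomo_face_detection")
img = sensor.snapshot()

# image.Image arguments are wrapped with Normalization() before super().predict() runs;
# other argument types are passed through unchanged.
outputs = model.predict([img])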
View File

@@ -75,7 +75,8 @@ static void py_ml_process_input(py_ml_model_obj_t *model, mp_obj_t arg) {
         }
 
         for (size_t i = 0; i < input_array->ndim; i++) {
-            if (input_array->shape[i] != mp_obj_get_int(input_shape->items[i])) {
+            size_t ulab_offset = ULAB_MAX_DIMS - input_array->ndim;
+            if (input_array->shape[ulab_offset + i] != mp_obj_get_int(input_shape->items[i])) {
                 mp_raise_msg(&mp_type_ValueError,
                              MP_ERROR_TEXT("Input shape does not match the model input shape"));
             }
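The shape check now accounts for ulab storing ndarray shapes right-aligned in a fixed array of ULAB_MAX_DIMS entries, so dimension i of an n-dimensional input lives at shape[ULAB_MAX_DIMS - n + i] rather than shape[i]. A hedged illustration of the indexing in plain Python (ULAB_MAX_DIMS == 4 and the shapes are assumed values):

ULAB_MAX_DIMS = 4                 # assumed build-time constant
ndim = 2                          # e.g. a 2-D (96, 96) input array
shape = [0, 0, 96, 96]            # right-aligned; unused leading entries stay 0
model_input_shape = [96, 96]      # what the model reports for those dimensions

offset = ULAB_MAX_DIMS - ndim
assert all(shape[offset + i] == model_input_shape[i] for i in range(ndim))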
@@ -225,6 +226,9 @@ static void py_ml_model_attr(mp_obj_t self_in, qstr attr, mp_obj_t *dest) {
             case MP_QSTR_output_zero_point:
                 dest[0] = mp_obj_new_int(self->output_zero_point);
                 break;
+            case MP_QSTR_labels:
+                dest[0] = self->labels;
+                break;
             default:
                 // Continue lookup in locals_dict.
                 dest[1] = MP_OBJ_SENTINEL;
@@ -251,7 +255,7 @@ mp_obj_t py_ml_model_make_new(const mp_obj_type_t *type, size_t n_args, size_t n
     py_ml_model_obj_t *model = mp_obj_malloc_with_finaliser(py_ml_model_obj_t, &py_ml_model_type);
     model->data = NULL;
     model->fb_alloc = args[ARG_load_to_fb].u_int;
-    mp_obj_list_t *labels = NULL;
+    model->labels = mp_const_none;
 
     for (const tflm_builtin_model_t *_model = &tflm_builtin_models[0]; _model->name != NULL; _model++) {
         if (!strcmp(path, _model->name)) {
@@ -259,8 +263,13 @@ mp_obj_t py_ml_model_make_new(const mp_obj_type_t *type, size_t n_args, size_t n
             model->size = _model->size;
             model->data = (unsigned char *) _model->data;
 
+            if (_model->n_labels == 0) {
+                break;
+            }
+
             // Load model labels
-            labels = MP_OBJ_TO_PTR(mp_obj_new_list(_model->n_labels, NULL));
+            model->labels = mp_obj_new_list(_model->n_labels, NULL);
+            mp_obj_list_t *labels = MP_OBJ_TO_PTR(model->labels);
             for (int l = 0; l < _model->n_labels; l++) {
                 const char *label = _model->labels[l];
                 labels->items[l] = mp_obj_new_str(label, strlen(label));
@@ -300,11 +309,7 @@ mp_obj_t py_ml_model_make_new(const mp_obj_type_t *type, size_t n_args, size_t n
         model->output_scale = 1.0;
     }
 
-    if (labels == NULL) {
-        return MP_OBJ_FROM_PTR(model);
-    } else {
-        return mp_obj_new_tuple(2, (mp_obj_t []) {MP_OBJ_FROM_PTR(labels), MP_OBJ_FROM_PTR(model)});
-    }
+    return MP_OBJ_FROM_PTR(model);
 }
 
 static mp_obj_t py_ml_model_deinit(mp_obj_t self_in) {

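Taken together, the C-side changes above make the constructor always return a single model object: model->labels starts as None and is only filled in for built-in models that ship labels, and the new attribute case exposes it to Python. A hedged sketch of the resulting behavior (the filesystem filename is hypothetical):

import ml

builtin = ml.Model("fomo_face_detection")
print(builtin.labels)    # list of label strings for a labeled built-in model

# custom = ml.Model("my_model.tflite", load_to_fb=True)  # hypothetical filename
# print(custom.labels)   # expected to be None, since only built-in models carry labels here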
View File

@@ -27,6 +27,7 @@ typedef struct py_ml_model_obj {
     float output_scale;
     int output_zero_point;
     char output_dtype;
+    mp_obj_t labels;
     void *state; // Private context for the backend.
 } py_ml_model_obj_t;
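The new mp_obj_t labels member is the storage behind the MP_QSTR_labels attribute case added in the C module above. From MicroPython it reads as a plain attribute:

import ml

model = ml.Model("fomo_face_detection")
if model.labels is not None:
    for index, name in enumerate(model.labels):
        print(index, name)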