diff --git a/scripts/examples/03-Machine-Learning/00-TensorFlow/tf_image_classification.py b/scripts/examples/03-Machine-Learning/00-TensorFlow/tf_image_classification.py
index 502a248ad..9c6248fdd 100644
--- a/scripts/examples/03-Machine-Learning/00-TensorFlow/tf_image_classification.py
+++ b/scripts/examples/03-Machine-Learning/00-TensorFlow/tf_image_classification.py
@@ -43,7 +43,7 @@ while True:
     # This combines the labels and confidence values into a list of tuples
     # and then sorts that list by the confidence values.
     sorted_list = sorted(
-        zip(labels, model.predict(img)[0]), key=lambda x: x[1], reverse=True
+        zip(labels, model.predict([img])[0]), key=lambda x: x[1], reverse=True
    )
    for i in range(5):
        print("%s = %f" % (sorted_list[i][0], sorted_list[i][1]))
diff --git a/scripts/examples/03-Machine-Learning/00-TensorFlow/tf_object_detection.py b/scripts/examples/03-Machine-Learning/00-TensorFlow/tf_object_detection.py
index 114a61adf..6367bb59b 100644
--- a/scripts/examples/03-Machine-Learning/00-TensorFlow/tf_object_detection.py
+++ b/scripts/examples/03-Machine-Learning/00-TensorFlow/tf_object_detection.py
@@ -45,11 +45,11 @@ colors = [  # Add more colors if you are detecting more than 7 types of classes
 # position in the output image back to the original input image. The function then returns a
 # list per class which each contain a list of (rect, score) tuples representing the detected
 # objects.
-def fomo_post_process(model, output, rect):
+def fomo_post_process(model, inputs, outputs):
     n, oh, ow, oc = model.output_shape[0]
-    nms = ml.NMS(ow, oh, rect)
+    nms = ml.NMS(ow, oh, inputs[0].roi)
     for i in range(oc):
-        img = image.Image(output[0], shape=(oh, ow, 1), strides=(i, oc), scale=(255, 0))
+        img = image.Image(outputs[0], shape=(oh, ow, 1), strides=(i, oc), scale=(255, 0))
         blobs = img.find_blobs(
             threshold_list, x_stride=1, area_threshold=1, pixels_threshold=1
         )
@@ -69,7 +69,7 @@ while True:
 
     img = sensor.snapshot()
 
-    for i, detection_list in enumerate(model.predict(img, callback=fomo_post_process)):
+    for i, detection_list in enumerate(model.predict([img], callback=fomo_post_process)):
         if i == 0:
             continue  # background class
         if len(detection_list) == 0:
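Migration note: `predict()` now takes a list of inputs (one entry per input tensor), and post-processing callbacks receive `(model, inputs, outputs)` instead of `(model, output, rect)`; the input ROI now comes from `inputs[0].roi`. A minimal sketch of the new call pattern (the model file name and labels below are placeholders, not part of this diff):

```python
import sensor
import ml

sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)

model = ml.Model("trained.tflite")  # hypothetical model file
labels = ["background", "face"]     # hypothetical labels

img = sensor.snapshot()

# predict() takes a list with one entry per input tensor and returns a list
# with one tuple of de-quantized floats per output tensor.
scores = model.predict([img])[0]
print(max(zip(labels, scores), key=lambda x: x[1]))
```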
diff --git a/scripts/examples/50-Arduino-Boards/Giga-H7/52-Audio/micro_speech.py b/scripts/examples/50-Arduino-Boards/Giga-H7/52-Audio/micro_speech.py
index 3e23057fa..513748310 100644
--- a/scripts/examples/50-Arduino-Boards/Giga-H7/52-Audio/micro_speech.py
+++ b/scripts/examples/50-Arduino-Boards/Giga-H7/52-Audio/micro_speech.py
@@ -5,8 +5,8 @@
 # The MicroSpeech module is designed for real-time audio processing and speech recognition
 # on microcontroller platforms. It leverages pre-trained models for audio preprocessing and
 # speech recognition, specifically optimized for detecting keywords such as "Yes" and "No".
-import ml
 import time
+from ml.apps import MicroSpeech
 
 
 def callback(label, scores):
@@ -17,7 +17,7 @@ def callback(label, scores):
 # micro speech module for audio preprocessing and speech recognition, respectively. The
 # user can override both by passing two models:
 # MicroSpeech(preprocessor=ml.Model(...), micro_speech=ml.Model(...), labels=["label",...])
-speech = ml.MicroSpeech()
+speech = MicroSpeech()
 
 # Starts the audio streaming and processes incoming audio to recognize speech commands.
 # If a callback is passed, listen() will loop forever and call the callback when a keyword
diff --git a/scripts/examples/50-Arduino-Boards/Nicla-Vision/52-Audio/micro_speech.py b/scripts/examples/50-Arduino-Boards/Nicla-Vision/52-Audio/micro_speech.py
index 3e23057fa..513748310 100644
--- a/scripts/examples/50-Arduino-Boards/Nicla-Vision/52-Audio/micro_speech.py
+++ b/scripts/examples/50-Arduino-Boards/Nicla-Vision/52-Audio/micro_speech.py
@@ -5,8 +5,8 @@
 # The MicroSpeech module is designed for real-time audio processing and speech recognition
 # on microcontroller platforms. It leverages pre-trained models for audio preprocessing and
 # speech recognition, specifically optimized for detecting keywords such as "Yes" and "No".
-import ml
 import time
+from ml.apps import MicroSpeech
 
 
 def callback(label, scores):
@@ -17,7 +17,7 @@ def callback(label, scores):
 # micro speech module for audio preprocessing and speech recognition, respectively. The
 # user can override both by passing two models:
 # MicroSpeech(preprocessor=ml.Model(...), micro_speech=ml.Model(...), labels=["label",...])
-speech = ml.MicroSpeech()
+speech = MicroSpeech()
 
 # Starts the audio streaming and processes incoming audio to recognize speech commands.
 # If a callback is passed, listen() will loop forever and call the callback when a keyword
diff --git a/scripts/examples/50-Arduino-Boards/Portenta-H7/51-Audio/micro_speech.py b/scripts/examples/50-Arduino-Boards/Portenta-H7/51-Audio/micro_speech.py
index 3e23057fa..513748310 100644
--- a/scripts/examples/50-Arduino-Boards/Portenta-H7/51-Audio/micro_speech.py
+++ b/scripts/examples/50-Arduino-Boards/Portenta-H7/51-Audio/micro_speech.py
@@ -5,8 +5,8 @@
 # The MicroSpeech module is designed for real-time audio processing and speech recognition
 # on microcontroller platforms. It leverages pre-trained models for audio preprocessing and
 # speech recognition, specifically optimized for detecting keywords such as "Yes" and "No".
-import ml
 import time
+from ml.apps import MicroSpeech
 
 
 def callback(label, scores):
@@ -17,7 +17,7 @@ def callback(label, scores):
 # micro speech module for audio preprocessing and speech recognition, respectively. The
 # user can override both by passing two models:
 # MicroSpeech(preprocessor=ml.Model(...), micro_speech=ml.Model(...), labels=["label",...])
-speech = ml.MicroSpeech()
+speech = MicroSpeech()
 
 # Starts the audio streaming and processes incoming audio to recognize speech commands.
 # If a callback is passed, listen() will loop forever and call the callback when a keyword
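The only change these three identical examples need is the import. A minimal usage sketch of the relocated class; the `listen()` keyword arguments follow the comments above, so treat the exact signature as an assumption:

```python
import time
from ml.apps import MicroSpeech


def callback(label, scores):
    print(f'\nHeard: "{label}" @{time.ticks_ms()}ms, Scores: {scores}')


speech = MicroSpeech()
# With a callback, listen() loops forever and invokes the callback whenever a
# keyword ("Yes"/"No" for the default models) is detected.
speech.listen(callback=callback, threshold=0.70)
```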
diff --git a/scripts/libraries/ml/manifest.py b/scripts/libraries/ml/manifest.py
new file mode 100644
index 000000000..8cab052ea
--- /dev/null
+++ b/scripts/libraries/ml/manifest.py
@@ -0,0 +1,5 @@
+metadata(
+    description="Machine Learning Extension Package.",
+    version="0.0.1",
+)
+package("ml")
diff --git a/scripts/libraries/ml/ml/__init__.py b/scripts/libraries/ml/ml/__init__.py
new file mode 100644
index 000000000..e7ccae809
--- /dev/null
+++ b/scripts/libraries/ml/ml/__init__.py
@@ -0,0 +1,11 @@
+# This file is part of the OpenMV project.
+#
+# Copyright (c) 2024 Ibrahim Abdelkader
+# Copyright (c) 2024 Kwabena W. Agyeman
+#
+# This work is licensed under the MIT license, see the file LICENSE for details.
+#
+# This is an extension package to the ml C user-module.
+
+from uml import NMS  # noqa
+from .model import *  # noqa
diff --git a/scripts/libraries/ml.py b/scripts/libraries/ml/ml/apps.py
similarity index 74%
rename from scripts/libraries/ml.py
rename to scripts/libraries/ml/ml/apps.py
index 47e24723a..770f7198b 100644
--- a/scripts/libraries/ml.py
+++ b/scripts/libraries/ml/ml/apps.py
@@ -1,46 +1,21 @@
 # This file is part of the OpenMV project.
 #
-# Copyright (c) 2023 Ibrahim Abdelkader
-# Copyright (c) 2023 Kwabena W. Agyeman
+# Copyright (c) 2024 Ibrahim Abdelkader
+# Copyright (c) 2024 Kwabena W. Agyeman
 #
 # This work is licensed under the MIT license, see the file LICENSE for details.
-#
-# This is an extension to the display C user-module. Add or import any display-related
-# drivers here, and freeze this module in the board's manifest, and those drivers will
-# be importable from display.
 
 import time
-from uml import *  # noqa
+from ml import Model
 from micropython import const
 from ulab import numpy as np
+
 try:
     import audio
 except (ImportError, AttributeError):
     pass
 
 
-def draw_predictions(img, boxes, labels, colors, format="pascal_voc", text_color=(255, 255, 255)):
-    CHAR_W = 8
-    CHAR_H = 10
-    img_w = img.width()
-    img_h = img.height()
-    for i, (x, y, w, h) in enumerate(boxes):
-        label = labels[i]
-        box_color = colors[i]
-
-        if format == "pascal_voc":
-            x = int(x * img_w)
-            y = int(y * img_h)
-            w = int(w * img_w) - x
-            h = int(h * img_h) - y
-
-        img.draw_rectangle(x, y, w, h, color=box_color)
-        img.draw_rectangle(
-            x, y - CHAR_H, len(label) * CHAR_W, CHAR_H, fill=True, color=box_color
-        )
-        img.draw_string(x, y - CHAR_H, label.upper(), text_color)
-
-
 class MicroSpeech:
     _SLICE_SIZE = const(40)
     _SLICE_COUNT = const(49)
@@ -71,13 +46,11 @@ class MicroSpeech:
 
         # Roll the spectrogram to the left and add the new slice.
         self.spectrogram = np.roll(self.spectrogram, -_SLICE_SIZE, axis=1)
-        self.spectrogram[0, -_SLICE_SIZE:] = self.preprocessor.predict(
-            self.audio_buffer
-        )
+        self.spectrogram[0, -_SLICE_SIZE:] = self.preprocessor.predict([self.audio_buffer])
 
         # Roll the prediction history and add the new prediction.
         self.pred_history = np.roll(self.pred_history, -1, axis=0)
-        self.pred_history[-1] = self.micro_speech.predict(self.spectrogram)[0]
+        self.pred_history[-1] = self.micro_speech.predict([self.spectrogram])[0]
 
     def start_audio_streaming(self):
         if self.audio_started is False:
diff --git a/scripts/libraries/ml/ml/model.py b/scripts/libraries/ml/ml/model.py
new file mode 100644
index 000000000..b015d3295
--- /dev/null
+++ b/scripts/libraries/ml/ml/model.py
@@ -0,0 +1,27 @@
+# This file is part of the OpenMV project.
+#
+# Copyright (c) 2024 Ibrahim Abdelkader
+# Copyright (c) 2024 Kwabena W. Agyeman
+#
+# This work is licensed under the MIT license, see the file LICENSE for details.
+import uml
+import image
+from ml.preprocessing import Normalization
+
+
+class Model:
+    def __new__(cls, *args, **kwargs):
+        self = super().__new__(cls)
+        retobj = uml.Model(*args, **kwargs)
+        if isinstance(retobj, tuple):
+            labels, self.model = retobj
+            return labels, self
+        self.model = retobj
+        return self
+
+    def __str__(self):
+        return str(self.model)
+
+    def predict(self, args, **kwargs):
+        args = [Normalization()(x) if isinstance(x, image.Image) else x for x in args]
+        return self.model.predict(args, **kwargs)
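The wrapper above is what makes the new list API work with images: `Model.predict()` replaces any bare `image.Image` in the argument list with a default `Normalization` object before handing the list to the C module. A sketch of the equivalence (assumes `model` and `img` as in the earlier sketch):

```python
from ml.preprocessing import Normalization

# These two calls are equivalent: predict() wraps bare images itself.
outputs = model.predict([img])
outputs = model.predict([Normalization()(img)])

# An explicit Normalization overrides the defaults, e.g. a different scale
# range and a crop to a region of interest:
outputs = model.predict([Normalization(scale=(-1.0, 1.0), roi=(0, 0, 128, 128))(img)])
```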
diff --git a/scripts/libraries/ml/ml/preprocessing.py b/scripts/libraries/ml/ml/preprocessing.py
new file mode 100644
index 000000000..7cd37fb3b
--- /dev/null
+++ b/scripts/libraries/ml/ml/preprocessing.py
@@ -0,0 +1,41 @@
+# This file is part of the OpenMV project.
+#
+# Copyright (c) 2024 Ibrahim Abdelkader
+# Copyright (c) 2024 Kwabena W. Agyeman
+#
+# This work is licensed under the MIT license, see the file LICENSE for details.
+
+import image
+
+
+class Normalization:
+    def __init__(
+        self,
+        image=None,
+        scale=(0.0, 1.0),
+        mean=(0.0, 0.0, 0.0),
+        stdev=(1.0, 1.0, 1.0),
+        roi=None,
+    ):
+        self.image = image
+        self.scale = scale
+        self.mean = mean
+        self.stdev = stdev
+        self.roi = roi
+
+    def __call__(self, *args):
+        if len(args) == 1:
+            img = args[0]
+            if not isinstance(img, image.Image):
+                raise ValueError("Expected an image input")
+            if self.roi is None:
+                self.roi = (0, 0, img.width(), img.height())
+            return Normalization(img, self.scale, self.mean, self.stdev, self.roi)
+        buffer, shape, dtype = args
+        # Create an image using the input tensor as buffer.
+        img = image.Image(shape[2], shape[1], self.image.format(), buffer=buffer)
+        # Copy and scale (if needed) the input image to the input buffer.
+        hints = image.BILINEAR | image.CENTER | image.SCALE_ASPECT_EXPAND | image.BLACK_BACKGROUND
+        img.draw_image(self.image, 0, 0, roi=self.roi, hint=hints)
+        # Scale and convert the image to input tensor data.
+        img.unpack(buffer, dtype, scale=self.scale, mean=self.mean, stdev=self.stdev)
diff --git a/scripts/libraries/ml/ml/utils.py b/scripts/libraries/ml/ml/utils.py
new file mode 100644
index 000000000..370c1378a
--- /dev/null
+++ b/scripts/libraries/ml/ml/utils.py
@@ -0,0 +1,39 @@
+# This file is part of the OpenMV project.
+#
+# Copyright (c) 2024 Ibrahim Abdelkader
+# Copyright (c) 2024 Kwabena W. Agyeman
+#
+# This work is licensed under the MIT license, see the file LICENSE for details.
+
+def draw_predictions(
+    image,
+    boxes,
+    labels,
+    colors,
+    format="pascal_voc",
+    font_width=8,
+    font_height=10,
+    text_color=(255, 255, 255),
+):
+    image_w = image.width()
+    image_h = image.height()
+    for i, (x, y, w, h) in enumerate(boxes):
+        label = labels[i]
+        box_color = colors[i]
+
+        if format == "pascal_voc":
+            x = int(x * image_w)
+            y = int(y * image_h)
+            w = int(w * image_w) - x
+            h = int(h * image_h) - y
+
+        image.draw_rectangle(x, y, w, h, color=box_color)
+        image.draw_rectangle(
+            x,
+            y - font_height,
+            len(label) * font_width,
+            font_height,
+            fill=True,
+            color=box_color,
+        )
+        image.draw_string(x, y - font_height, label.upper(), text_color)
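`draw_predictions()` moved from the old `ml.py` into `ml.utils` and gained configurable font metrics. A minimal usage sketch; the box values are hypothetical, and in `pascal_voc` format they are normalized `(x0, y0, x1, y1)` corners, which the function converts to pixel rects as shown above:

```python
from ml.utils import draw_predictions

# One hypothetical detection covering roughly the image center, drawn on an
# img obtained from sensor.snapshot().
boxes = [(0.25, 0.25, 0.75, 0.75)]
draw_predictions(img, boxes, ["face"], [(255, 0, 0)], format="pascal_voc")
```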
diff --git a/src/lib/tflm/tflm_backend.cc b/src/lib/tflm/tflm_backend.cc
index 879b14e57..eee034656 100644
--- a/src/lib/tflm/tflm_backend.cc
+++ b/src/lib/tflm/tflm_backend.cc
@@ -56,15 +56,15 @@ static bool ml_backend_valid_dataype(TfLiteType type) {
            type == kTfLiteFloat32);
 }
 
-static py_ml_dtype_t ml_backend_map_dtype(TfLiteType type) {
+static char ml_backend_map_dtype(TfLiteType type) {
     if (type == kTfLiteUInt8) {
-        return PY_ML_DTYPE_UINT8;
+        return 'B';
     } else if (type == kTfLiteInt8) {
-        return PY_ML_DTYPE_INT8;
+        return 'b';
     } else if (type == kTfLiteInt16) {
-        return PY_ML_DTYPE_INT16;
+        return 'h';
     } else {
-        return PY_ML_DTYPE_FLOAT;
+        return 'f';
     }
 }
@@ -278,21 +278,14 @@ int ml_backend_init_model(py_ml_model_obj_t *model) {
     return 0;
 }
 
-int ml_backend_run_inference(py_ml_model_obj_t *model,
-                             ml_backend_input_callback_t input_callback,
-                             void *input_arg,
-                             ml_backend_output_callback_t output_callback,
-                             void *output_arg) {
+int ml_backend_run_inference(py_ml_model_obj_t *model) {
     RegisterDebugLogCallback(ml_backend_log_handler);
     ml_backend_state_t *state = (ml_backend_state_t *) model->state;
 
-    input_callback(model, input_arg);
-
     if (state->interpreter->Invoke() != kTfLiteOk) {
         mp_raise_msg(&mp_type_ValueError, MP_ERROR_TEXT("Invoke failed"));
     }
-
-    output_callback(model, output_arg);
     return 0;
 }
diff --git a/src/omv/boards/ARDUINO_GIGA/manifest.py b/src/omv/boards/ARDUINO_GIGA/manifest.py
index 83a4c1aec..528561800 100644
--- a/src/omv/boards/ARDUINO_GIGA/manifest.py
+++ b/src/omv/boards/ARDUINO_GIGA/manifest.py
@@ -10,7 +10,7 @@ freeze ("$(OMV_LIB_DIR)/", "gt911.py")
 freeze ("$(OMV_LIB_DIR)/", "st7701.py")
 freeze ("$(OMV_LIB_DIR)/", "machine.py")
 freeze ("$(OMV_LIB_DIR)/", "display.py")
-freeze ("$(OMV_LIB_DIR)/", "ml.py")
+freeze ("$(OMV_LIB_DIR)/ml")
 
 # Networking
 require("ssl")
diff --git a/src/omv/boards/ARDUINO_NICLA_VISION/manifest.py b/src/omv/boards/ARDUINO_NICLA_VISION/manifest.py
index 460ba0dd8..d7b084a0b 100644
--- a/src/omv/boards/ARDUINO_NICLA_VISION/manifest.py
+++ b/src/omv/boards/ARDUINO_NICLA_VISION/manifest.py
@@ -10,7 +10,7 @@ freeze ("$(OMV_LIB_DIR)/", "modbus.py")
 freeze ("$(OMV_LIB_DIR)/", "pid.py")
 freeze ("$(OMV_LIB_DIR)/", "vl53l1x.py")
 freeze ("$(OMV_LIB_DIR)/", "machine.py")
-freeze ("$(OMV_LIB_DIR)/", "ml.py")
+freeze ("$(OMV_LIB_DIR)/ml")
 
 # Networking
 require("ssl")
diff --git a/src/omv/boards/ARDUINO_PORTENTA_H7/manifest.py b/src/omv/boards/ARDUINO_PORTENTA_H7/manifest.py
index 3d8aba416..efb5c5162 100644
--- a/src/omv/boards/ARDUINO_PORTENTA_H7/manifest.py
+++ b/src/omv/boards/ARDUINO_PORTENTA_H7/manifest.py
@@ -14,7 +14,7 @@ freeze ("$(OMV_LIB_DIR)/", "vl53l1x.py")
 freeze ("$(OMV_LIB_DIR)/", "bno055.py")
 freeze ("$(OMV_LIB_DIR)/", "machine.py")
 freeze ("$(OMV_LIB_DIR)/", "display.py")
-freeze ("$(OMV_LIB_DIR)/", "ml.py")
+freeze ("$(OMV_LIB_DIR)/ml")
 
 # Networking
 require("ssl")
diff --git a/src/omv/boards/OPENMV4/manifest.py b/src/omv/boards/OPENMV4/manifest.py
index c6c9e4d9d..9dcf56ed8 100644
--- a/src/omv/boards/OPENMV4/manifest.py
+++ b/src/omv/boards/OPENMV4/manifest.py
@@ -13,7 +13,7 @@ freeze ("$(OMV_LIB_DIR)/", "tb6612.py")
 freeze ("$(OMV_LIB_DIR)/", "vl53l1x.py")
 freeze ("$(OMV_LIB_DIR)/", "machine.py")
 freeze ("$(OMV_LIB_DIR)/", "display.py")
-freeze ("$(OMV_LIB_DIR)/", "ml.py")
+freeze ("$(OMV_LIB_DIR)/ml")
 
 # Networking
 require("ssl")
diff --git a/src/omv/boards/OPENMV4P/manifest.py b/src/omv/boards/OPENMV4P/manifest.py
index c6c9e4d9d..9dcf56ed8 100644
--- a/src/omv/boards/OPENMV4P/manifest.py
+++ b/src/omv/boards/OPENMV4P/manifest.py
@@ -13,7 +13,7 @@ freeze ("$(OMV_LIB_DIR)/", "tb6612.py")
 freeze ("$(OMV_LIB_DIR)/", "vl53l1x.py")
 freeze ("$(OMV_LIB_DIR)/", "machine.py")
 freeze ("$(OMV_LIB_DIR)/", "display.py")
-freeze ("$(OMV_LIB_DIR)/", "ml.py")
+freeze ("$(OMV_LIB_DIR)/ml")
 
 # Networking
 require("ssl")
diff --git a/src/omv/boards/OPENMV4_PRO/manifest.py b/src/omv/boards/OPENMV4_PRO/manifest.py
index b9fdd2856..9dcf56ed8 100644
--- a/src/omv/boards/OPENMV4_PRO/manifest.py
+++ b/src/omv/boards/OPENMV4_PRO/manifest.py
@@ -13,6 +13,7 @@ freeze ("$(OMV_LIB_DIR)/", "tb6612.py")
 freeze ("$(OMV_LIB_DIR)/", "vl53l1x.py")
 freeze ("$(OMV_LIB_DIR)/", "machine.py")
 freeze ("$(OMV_LIB_DIR)/", "display.py")
+freeze ("$(OMV_LIB_DIR)/ml")
 
 # Networking
 require("ssl")
diff --git a/src/omv/boards/OPENMVPT/manifest.py b/src/omv/boards/OPENMVPT/manifest.py
index c6c9e4d9d..9dcf56ed8 100644
--- a/src/omv/boards/OPENMVPT/manifest.py
+++ b/src/omv/boards/OPENMVPT/manifest.py
@@ -13,7 +13,7 @@ freeze ("$(OMV_LIB_DIR)/", "tb6612.py")
 freeze ("$(OMV_LIB_DIR)/", "vl53l1x.py")
 freeze ("$(OMV_LIB_DIR)/", "machine.py")
 freeze ("$(OMV_LIB_DIR)/", "display.py")
-freeze ("$(OMV_LIB_DIR)/", "ml.py")
+freeze ("$(OMV_LIB_DIR)/ml")
 
 # Networking
 require("ssl")
diff --git a/src/omv/boards/OPENMV_RT1060/manifest.py b/src/omv/boards/OPENMV_RT1060/manifest.py
index 78af2d849..0cb4ca59f 100644
--- a/src/omv/boards/OPENMV_RT1060/manifest.py
+++ b/src/omv/boards/OPENMV_RT1060/manifest.py
@@ -13,7 +13,7 @@ freeze ("$(OMV_LIB_DIR)/", "tb6612.py")
 freeze ("$(OMV_LIB_DIR)/", "vl53l1x.py")
 freeze ("$(OMV_LIB_DIR)/", "machine.py")
 freeze ("$(OMV_LIB_DIR)/", "display.py")
-freeze ("$(OMV_LIB_DIR)/", "ml.py")
+freeze ("$(OMV_LIB_DIR)/ml")
 
 # Networking
 require("ssl")
diff --git a/src/omv/imlib/imlib.c b/src/omv/imlib/imlib.c
index 1234356b1..d2f81de09 100644
--- a/src/omv/imlib/imlib.c
+++ b/src/omv/imlib/imlib.c
@@ -423,6 +423,95 @@ void imlib_fill_image_from_float(image_t *img, int w, int h, float *data, float
     }
 }
 
+// Unpacks src into dst. dst must be an array of src->w*src->h*dtype*channels bytes, where channels is
+// 1 for grayscale and 3 for RGB.
+void imlib_unpack(void *dst, image_t *src, const char dtype, float *scale, float *mean, float *stdev) {
+    // src will be unpacked into dst in reverse order so that we can handle in-place unpacking.
+    int size = (src->w * src->h) - 1; // must be int per countdown loop
+    float fscale = 1.0f, fadd = 0.0f;
+
+    if (scale[0] == 0.0f && scale[1] == 1.0f) {
+        fscale = 1.0f / 255.0f;
+    } else if (scale[0] == -1.0f && scale[1] == 1.0f) {
+        fscale = 2.0f / 255.0f;
+        fadd = -1.0f;
+    } else if (scale[0] == -128.0f && scale[1] == 127.0f) {
+        fadd = -128.0f;
+    }
+
+    float fscale_r = fscale, fadd_r = fadd;
+    float fscale_g = fscale, fadd_g = fadd;
+    float fscale_b = fscale, fadd_b = fadd;
+
+    // To normalize the input image we need to subtract the mean and divide by the standard deviation.
+    // We can do this by applying the normalization to fscale and fadd outside the loop.
+
+    // Red
+    fadd_r = (fadd_r - mean[0]) / stdev[0];
+    fscale_r /= stdev[0];
+
+    // Green
+    fadd_g = (fadd_g - mean[1]) / stdev[1];
+    fscale_g /= stdev[1];
+
+    // Blue
+    fadd_b = (fadd_b - mean[2]) / stdev[2];
+    fscale_b /= stdev[2];
+
+    // Grayscale -> Y = 0.299R + 0.587G + 0.114B
+    float m = (mean[0] * 0.299f) + (mean[1] * 0.587f) + (mean[2] * 0.114f);
+    float s = (stdev[0] * 0.299f) + (stdev[1] * 0.587f) + (stdev[2] * 0.114f);
+    fadd = (fadd - m) / s;
+    fscale /= s;
+
+    if (src->pixfmt == PIXFORMAT_GRAYSCALE) {
+        uint8_t *input_u8 = (uint8_t *) src->data;
+        if (dtype == 'f') {
+            // convert u8 -> f32
+            float *output_f32 = (float *) dst;
+            for (; size >= 0; size -= 1) {
+                output_f32[size] = (input_u8[size] * fscale) + fadd;
+            }
+        } else {
+            // convert u8 -> s8
+            #if (__ARM_ARCH > 6)
+            uint32_t *input_u32 = (uint32_t *) src->data;
+            uint32_t *output_u32 = (uint32_t *) dst;
+            for (; size >= 3; size -= 4) {
+                output_u32[size / 4] = input_u32[size / 4] ^ 0x80808080;
+            }
+            #endif
+            uint8_t *input_u8 = (uint8_t *) src->data;
+            uint8_t *output_u8 = (uint8_t *) dst;
+            for (; size >= 0; size -= 1) {
+                output_u8[size] = input_u8[size] ^ 128;
+            }
+        }
+    } else if (src->pixfmt == PIXFORMAT_RGB565) {
+        int rgb_size = size * 3; // must be int per countdown loop
+        if (dtype == 'f') {
+            uint16_t *input_u16 = (uint16_t *) src->data;
+            float *output_f32 = (float *) dst;
+            for (; size >= 0; size -= 1, rgb_size -= 3) {
+                int pixel = input_u16[size];
+                output_f32[rgb_size + 0] = (COLOR_RGB565_TO_R8(pixel) * fscale_r) + fadd_r;
+                output_f32[rgb_size + 1] = (COLOR_RGB565_TO_G8(pixel) * fscale_g) + fadd_g;
+                output_f32[rgb_size + 2] = (COLOR_RGB565_TO_B8(pixel) * fscale_b) + fadd_b;
+            }
+        } else {
+            uint16_t *input_u16 = (uint16_t *) src->data;
+            uint8_t *output_u8 = (uint8_t *) dst;
+            for (; size >= 0; size -= 1, rgb_size -= 3) {
+                int pixel = input_u16[size];
+                output_u8[rgb_size + 0] = COLOR_RGB565_TO_R8(pixel) ^ 128;
+                output_u8[rgb_size + 1] = COLOR_RGB565_TO_G8(pixel) ^ 128;
+                output_u8[rgb_size + 2] = COLOR_RGB565_TO_B8(pixel) ^ 128;
+            }
+        }
+    } else {
+        mp_raise_msg(&mp_type_ValueError, MP_ERROR_TEXT("Expected input channels to be 1 or 3"));
+    }
+}
+
 int8_t imlib_rgb565_to_l(uint16_t pixel) {
     float r_lin = xyz_table[COLOR_RGB565_TO_R8(pixel)];
     float g_lin = xyz_table[COLOR_RGB565_TO_G8(pixel)];
diff --git a/src/omv/imlib/imlib.h b/src/omv/imlib/imlib.h
index 903ef322c..85754c812 100644
--- a/src/omv/imlib/imlib.h
+++ b/src/omv/imlib/imlib.h
@@ -1157,6 +1157,7 @@ void imlib_deinit_all();
 // Generic Helper Functions
 void imlib_fill_image_from_float(image_t *img, int w, int h, float *data, float min, float max,
                                  bool mirror, bool flip, bool dst_transpose, bool src_transpose);
+void imlib_unpack(void *dst, image_t *src, const char dtype, float *scale, float *mean, float *stdev);
 
 // Bayer Image Processing
 pixformat_t imlib_bayer_shift(pixformat_t pixfmt, int x, int y, bool transpose);
diff --git a/src/omv/modules/py_image.c b/src/omv/modules/py_image.c
index c33633f53..393d4718b 100644
--- a/src/omv/modules/py_image.c
+++ b/src/omv/modules/py_image.c
@@ -724,6 +724,85 @@ static mp_obj_t py_image_bytearray(mp_obj_t img_obj) {
 }
 static MP_DEFINE_CONST_FUN_OBJ_1(py_image_bytearray_obj, py_image_bytearray);
 
+static mp_obj_t py_image_unpack(uint n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
+    enum { ARG_buffer, ARG_dtype, ARG_scale, ARG_mean, ARG_stdev };
+    static const mp_arg_t allowed_args[] = {
+        { MP_QSTR_buffer, MP_ARG_OBJ | MP_ARG_REQUIRED, {.u_rom_obj = MP_ROM_NONE} },
+        { MP_QSTR_dtype, MP_ARG_OBJ | MP_ARG_REQUIRED, {.u_rom_obj = MP_ROM_NONE} },
+        { MP_QSTR_scale, MP_ARG_OBJ | MP_ARG_KW_ONLY, {.u_rom_obj = MP_ROM_NONE} },
+        { MP_QSTR_mean, MP_ARG_OBJ | MP_ARG_KW_ONLY, {.u_rom_obj = MP_ROM_NONE} },
+        { MP_QSTR_stdev, MP_ARG_OBJ | MP_ARG_KW_ONLY, {.u_rom_obj = MP_ROM_NONE} },
+    };
+
+    image_t *image = py_helper_arg_to_image(pos_args[0], ARG_IMAGE_ANY);
+    mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
+    mp_arg_parse_all(n_args - 1, pos_args + 1, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
+
+    mp_buffer_info_t bufinfo = {0};
+    mp_get_buffer_raise(args[ARG_buffer].u_obj, &bufinfo, MP_BUFFER_WRITE);
+
+    int dtype_code;
+    int dtype_size;
+
+    if (mp_obj_is_integer(args[ARG_dtype].u_obj)) {
+        dtype_code = mp_obj_get_int(args[ARG_dtype].u_obj);
+    } else {
+        // The first character is either 0 or the typecode.
+        dtype_code = mp_obj_str_get_str(args[ARG_dtype].u_obj)[0];
+    }
+
+    switch (dtype_code) {
+        case 'c':
+        case 'b':
+        case 'B': {
+            dtype_size = 1;
+            break;
+        }
+        case 'f': {
+            dtype_size = 4;
+            break;
+        }
+        default: {
+            mp_raise_ValueError(MP_ERROR_TEXT("Unsupported dtype"));
+            break;
+        }
+    }
+
+    int channels;
+    switch (image->pixfmt) {
+        case PIXFORMAT_GRAYSCALE: {
+            channels = 1;
+            break;
+        }
+        case PIXFORMAT_RGB565: {
+            channels = 3;
+            break;
+        }
+        default: {
+            mp_raise_ValueError(MP_ERROR_TEXT("Unsupported pixformat"));
+            break;
+        }
+    }
+
+    if ((image->w * image->h * dtype_size * channels) > bufinfo.len) {
+        mp_raise_ValueError(MP_ERROR_TEXT("Buffer size is too small"));
+    }
+
+    // scale, offset
+    float scale[2] = {0.0f, 1.0f};
+    py_helper_arg_to_float_array(args[ARG_scale].u_obj, scale, 2);
+
+    float mean[3] = {0.0f, 0.0f, 0.0f};
+    py_helper_arg_to_float_array(args[ARG_mean].u_obj, mean, 3);
+
+    float stdev[3] = {1.0f, 1.0f, 1.0f};
+    py_helper_arg_to_float_array(args[ARG_stdev].u_obj, stdev, 3);
+
+    imlib_unpack(bufinfo.buf, image, dtype_code, scale, mean, stdev);
+    return pos_args[0];
+}
+static MP_DEFINE_CONST_FUN_OBJ_KW(py_image_unpack_obj, 1, py_image_unpack);
+
 static mp_obj_t py_image_get_pixel(uint n_args, const mp_obj_t *args, mp_map_t *kw_args) {
     image_t *arg_img = py_helper_arg_to_image(args[0], ARG_IMAGE_UNCOMPRESSED);
 
@@ -6366,6 +6445,7 @@ static const mp_rom_map_elem_t locals_dict_table[] = {
     {MP_ROM_QSTR(MP_QSTR_format),              MP_ROM_PTR(&py_image_format_obj)},
     {MP_ROM_QSTR(MP_QSTR_size),                MP_ROM_PTR(&py_image_size_obj)},
     {MP_ROM_QSTR(MP_QSTR_bytearray),           MP_ROM_PTR(&py_image_bytearray_obj)},
+    {MP_ROM_QSTR(MP_QSTR_unpack),              MP_ROM_PTR(&py_image_unpack_obj)},
     {MP_ROM_QSTR(MP_QSTR_get_pixel),           MP_ROM_PTR(&py_image_get_pixel_obj)},
     {MP_ROM_QSTR(MP_QSTR_set_pixel),           MP_ROM_PTR(&py_image_set_pixel_obj)},
     {MP_ROM_QSTR(MP_QSTR_to_bitmap),           MP_ROM_PTR(&py_image_to_bitmap_obj)},
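`Image.unpack()` is the Python binding for `imlib_unpack()` above: it converts the image pixels into a caller-supplied buffer as the model's input dtype, folding the scale/mean/stdev normalization into the conversion. A minimal sketch (buffer sizing follows the `w * h * dtype_size * channels` check above):

```python
import sensor

sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.QQVGA)  # 160x120

img = sensor.snapshot()

# dtype "f" needs w * h * 4 * 1 bytes for a grayscale image.
buf = bytearray(160 * 120 * 4)
# The (0.0, 1.0) scale preset maps 0..255 pixels to 0.0..1.0 floats, then
# applies (x - mean) / stdev per the folded constants in imlib_unpack().
img.unpack(buf, "f", scale=(0.0, 1.0), mean=(0.0, 0.0, 0.0), stdev=(1.0, 1.0, 1.0))
```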
diff --git a/src/omv/modules/py_ml.c b/src/omv/modules/py_ml.c
index c0bf49c81..e14bc1eb5 100644
--- a/src/omv/modules/py_ml.c
+++ b/src/omv/modules/py_ml.c
@@ -25,32 +25,6 @@
 #include "tflm_builtin_models.h"
 #include "ulab/code/ndarray.h"
 
-#define PY_ML_GRAYSCALE_RANGE ((COLOR_GRAYSCALE_MAX) -(COLOR_GRAYSCALE_MIN))
-#define PY_ML_GRAYSCALE_MID   (((PY_ML_GRAYSCALE_RANGE) +1) / 2)
-
-static const char *py_ml_map_dtype(py_ml_dtype_t dtype) {
-    if (dtype == PY_ML_DTYPE_UINT8) {
-        return "uint8";
-    } else if (dtype == PY_ML_DTYPE_INT8) {
-        return "int8";
-    } else if (dtype == PY_ML_DTYPE_INT16) {
-        return "int16";
-    } else {
-        return "float";
-    }
-}
-
-// TF Input/Output callback functions.
-typedef mp_obj_t py_ml_output_data_t;
-
-typedef struct _py_ml_input_callback_data {
-    void *data;
-    rectangle_t roi;
-    py_ml_scale_t scale;
-    float mean[3];
-    float stdev[3];
-} py_ml_input_data_t;
-
 static size_t py_ml_tuple_sum(mp_obj_tuple_t *o) {
     if (o->len < 1) {
         mp_raise_msg(&mp_type_ValueError, MP_ERROR_TEXT("Unexpected tensor shape"));
@@ -63,196 +37,87 @@ static size_t py_ml_tuple_sum(mp_obj_tuple_t *o) {
     return size;
 }
 
-static void py_ml_tuple_hwc(mp_obj_tuple_t *o, size_t *h, size_t *w, size_t *c) {
-    if (o->len != 1 || ((mp_obj_tuple_t *) MP_OBJ_TO_PTR(o->items[0]))->len != 4) {
-        mp_raise_msg(&mp_type_ValueError, MP_ERROR_TEXT("Unexpected tensor shape"));
-    }
-    o = MP_OBJ_TO_PTR(o->items[0]);
-    *h = mp_obj_get_int(o->items[1]);
-    *w = mp_obj_get_int(o->items[2]);
-    *c = mp_obj_get_int(o->items[3]);
-}
+static void py_ml_process_input(py_ml_model_obj_t *model, mp_obj_t arg) {
+    mp_obj_list_t *input_list = MP_OBJ_TO_PTR(arg);
 
-static void py_ml_input_callback(py_ml_model_obj_t *model, void *arg) {
-    // TODO we assume that there's a single input.
-    void *model_input = ml_backend_get_input(model, 0);
-    py_ml_input_data_t *input_data = (py_ml_input_data_t *) arg;
+    for (size_t i = 0; i < model->inputs_size; i++) {
+        void *input_buffer = ml_backend_get_input(model, i);
+        size_t input_size = py_ml_tuple_sum(MP_OBJ_TO_PTR(model->input_shape->items[i]));
+        mp_obj_tuple_t *input_shape = MP_OBJ_TO_PTR(model->input_shape->items[i]);
+        mp_obj_t input_arg = input_list->items[i];
 
-    // TODO we assume that the input shape is (1, h, w, c)
-    size_t input_height = 0, input_width = 0, input_channels = 0;
-    py_ml_tuple_hwc(model->input_shape, &input_height, &input_width, &input_channels);
+        if (mp_obj_is_callable(input_arg)) {
+            // Input is a callable. Call the object and pass the tensor buffer, shape, and dtype.
+            mp_obj_t fargs[3] = {
+                mp_obj_new_bytearray_by_ref(input_size, input_buffer),
+                MP_OBJ_FROM_PTR(input_shape),
+                mp_obj_new_int(model->input_dtype)
+            };
+            mp_call_function_n_kw(input_arg, 3, 0, fargs);
+        } else if (MP_OBJ_IS_TYPE(input_arg, &ulab_ndarray_type)) {
+            // Input is an ndarray. The input is converted and copied to the tensor buffer.
+            ndarray_obj_t *input_array = MP_OBJ_TO_PTR(input_arg);
 
-    int shift = (model->input_dtype == PY_ML_DTYPE_INT8) ? PY_ML_GRAYSCALE_MID : 0;
-    float fscale = 1.0f, fadd = 0.0f;
-
-    switch (input_data->scale) {
-        case PY_ML_SCALE_0_1: // convert 0->255 to 0->1
-            fscale = 1.0f / 255.0f;
-            break;
-        case PY_ML_SCALE_S1_1: // convert 0->255 to -1->1
-            fscale = 2.0f / 255.0f;
-            fadd = -1.0f;
-            break;
-        case PY_ML_SCALE_S128_127: // convert 0->255 to -128->127
-            fadd = -128.0f;
-            break;
-        case PY_ML_SCALE_NONE: // convert 0->255 to 0->255
-        default:
-            break;
-    }
-
-    float fscale_r = fscale, fadd_r = fadd;
-    float fscale_g = fscale, fadd_g = fadd;
-    float fscale_b = fscale, fadd_b = fadd;
-
-    // To normalize the input image we need to subtract the mean and divide by the standard deviation.
-    // We can do this by applying the normalization to fscale and fadd outside the loop.
-
-    // Red
-    fadd_r = (fadd_r - input_data->mean[0]) / input_data->stdev[0];
-    fscale_r /= input_data->stdev[0];
-
-    // Green
-    fadd_g = (fadd_g - input_data->mean[1]) / input_data->stdev[1];
-    fscale_g /= input_data->stdev[1];
-
-    // Blue
-    fadd_b = (fadd_b - input_data->mean[2]) / input_data->stdev[2];
-    fscale_b /= input_data->stdev[2];
-
-    // Grayscale -> Y = 0.299R + 0.587G + 0.114B
-    float mean = (input_data->mean[0] * 0.299f) + (input_data->mean[1] * 0.587f) + (input_data->mean[2] * 0.114f);
-    float std = (input_data->stdev[0] * 0.299f) + (input_data->stdev[1] * 0.587f) + (input_data->stdev[2] * 0.114f);
-    fadd = (fadd - mean) / std;
-    fscale /= std;
-
-    image_t dst_img;
-    dst_img.w = input_width;
-    dst_img.h = input_height;
-    dst_img.data = (uint8_t *) model_input;
-
-    if (input_channels == 1) {
-        dst_img.pixfmt = PIXFORMAT_GRAYSCALE;
-    } else if (input_channels == 3) {
-        dst_img.pixfmt = PIXFORMAT_RGB565;
-    } else {
-        mp_raise_msg(&mp_type_ValueError, MP_ERROR_TEXT("Expected model input channels to be 1 or 3!"));
-    }
-
-    imlib_draw_image(&dst_img, input_data->data, 0, 0, 1.0f, 1.0f, &input_data->roi,
-                     -1, 256, NULL, NULL, IMAGE_HINT_BILINEAR | IMAGE_HINT_CENTER |
-                     IMAGE_HINT_SCALE_ASPECT_EXPAND | IMAGE_HINT_BLACK_BACKGROUND, NULL, NULL, NULL);
-
-    int size = (input_width * input_height) - 1; // must be int per countdown loop
-
-    if (input_channels == 1) {
-        // GRAYSCALE
-        if (model->input_dtype == PY_ML_DTYPE_FLOAT) {
-            // convert u8 -> f32
-            uint8_t *model_input_u8 = (uint8_t *) model_input;
-            float *model_input_f32 = (float *) model_input;
-            for (; size >= 0; size -= 1) {
-                model_input_f32[size] = (model_input_u8[size] * fscale) + fadd;
+            if (input_array->ndim != input_shape->len) {
+                mp_raise_msg(&mp_type_ValueError,
+                             MP_ERROR_TEXT("Input shape does not match the model input shape"));
             }
-        } else {
-            if (shift) {
-                // convert u8 -> s8
-                uint8_t *model_input_8 = (uint8_t *) model_input;
-                #if (__ARM_ARCH > 6)
-                for (; size >= 3; size -= 4) {
-                    *((uint32_t *) (model_input_8 + size - 3)) ^= 0x80808080;
-                }
-                #endif
-                for (; size >= 0; size -= 1) {
-                    model_input_8[size] ^= PY_ML_GRAYSCALE_MID;
+
+            for (size_t i = 0; i < input_array->ndim; i++) {
+                if (input_array->shape[i] != mp_obj_get_int(input_shape->items[i])) {
+                    mp_raise_msg(&mp_type_ValueError,
+                                 MP_ERROR_TEXT("Input shape does not match the model input shape"));
                 }
             }
-        }
-    } else if (input_channels == 3) {
-        // RGB888
-        int rgb_size = size * 3; // must be int per countdown loop
-        if (model->input_dtype == PY_ML_DTYPE_FLOAT) {
-            uint16_t *model_input_u16 = (uint16_t *) model_input;
-            float *model_input_f32 = (float *) model_input;
-            for (; size >= 0; size -= 1, rgb_size -= 3) {
-                int pixel = model_input_u16[size];
-                model_input_f32[rgb_size] = (COLOR_RGB565_TO_R8(pixel) * fscale_r) + fadd_r;
-                model_input_f32[rgb_size + 1] = (COLOR_RGB565_TO_G8(pixel) * fscale_g) + fadd_g;
-                model_input_f32[rgb_size + 2] = (COLOR_RGB565_TO_B8(pixel) * fscale_b) + fadd_b;
+
+            if (model->input_dtype == 'f') {
+                float *model_input_float = (float *) input_buffer;
+                for (size_t i = 0; i < input_array->len; i++) {
+                    float value = ndarray_get_float_index(input_array->array, input_array->dtype, i);
+                    model_input_float[i] = value;
+                }
+            } else if (model->input_dtype == 'b') {
+                int8_t *model_input_8 = (int8_t *) input_buffer;
+                for (size_t i = 0; i < input_array->len; i++) {
+                    float value = ndarray_get_float_index(input_array->array, input_array->dtype, i);
+                    model_input_8[i] = (int8_t) ((value / model->input_scale) + model->input_zero_point);
+                }
+            } else if (model->input_dtype == 'B') {
+                uint8_t *model_input_8 = (uint8_t *) input_buffer;
+                for (size_t i = 0; i < input_array->len; i++) {
+                    float value = ndarray_get_float_index(input_array->array, input_array->dtype, i);
+                    model_input_8[i] = (uint8_t) ((value / model->input_scale) + model->input_zero_point);
+                }
+            } else {
+                int16_t *model_input_16 = (int16_t *) input_buffer;
+                for (size_t i = 0; i < input_array->len; i++) {
+                    float value = ndarray_get_float_index(input_array->array, input_array->dtype, i);
+                    model_input_16[i] = (int16_t) ((value / model->input_scale) + model->input_zero_point);
+                }
             }
         } else {
-            uint16_t *model_input_u16 = (uint16_t *) model_input;
-            uint8_t *model_input_8 = (uint8_t *) model_input;
-            for (; size >= 0; size -= 1, rgb_size -= 3) {
-                int pixel = model_input_u16[size];
-                model_input_8[rgb_size] = COLOR_RGB565_TO_R8(pixel) ^ shift;
-                model_input_8[rgb_size + 1] = COLOR_RGB565_TO_G8(pixel) ^ shift;
-                model_input_8[rgb_size + 2] = COLOR_RGB565_TO_B8(pixel) ^ shift;
-            }
+            mp_raise_msg(&mp_type_ValueError, MP_ERROR_TEXT("Unsupported input type"));
         }
     }
 }
 
-static void py_ml_input_callback_regression(py_ml_model_obj_t *model, void *arg) {
-    // TODO we assume that there's a single input.
-    void *model_input = ml_backend_get_input(model, 0);
-    py_ml_input_data_t *input_data = (py_ml_input_data_t *) arg;
-
-    mp_obj_tuple_t *input_shape = MP_OBJ_TO_PTR(model->input_shape->items[0]);
-    ndarray_obj_t *input_array = MP_OBJ_TO_PTR(*((mp_obj_t *) input_data->data));
-
-    if (input_array->ndim != input_shape->len) {
-        mp_raise_msg(&mp_type_ValueError, MP_ERROR_TEXT("Input shape does not match the model input shape"));
-    }
-    for (size_t i = 0; i < input_array->ndim; i++) {
-        if (input_array->shape[i] != mp_obj_get_int(input_shape->items[i])) {
-            mp_raise_msg(&mp_type_ValueError, MP_ERROR_TEXT("Input shape does not match the model input shape"));
-        }
-    }
-
-    if (model->input_dtype == PY_ML_DTYPE_FLOAT) {
-        float *model_input_float = (float *) model_input;
-        for (size_t i = 0; i < input_array->len; i++) {
-            float value = ndarray_get_float_index(input_array->array, input_array->dtype, i);
-            model_input_float[i] = value;
-        }
-    } else if (model->input_dtype == PY_ML_DTYPE_INT8) {
-        int8_t *model_input_8 = (int8_t *) model_input;
-        for (size_t i = 0; i < input_array->len; i++) {
-            float value = ndarray_get_float_index(input_array->array, input_array->dtype, i);
-            model_input_8[i] = (int8_t) ((value / model->input_scale) + model->input_zero_point);
-        }
-    } else if (model->input_dtype == PY_ML_DTYPE_UINT8) {
-        uint8_t *model_input_8 = (uint8_t *) model_input;
-        for (size_t i = 0; i < input_array->len; i++) {
-            float value = ndarray_get_float_index(input_array->array, input_array->dtype, i);
-            model_input_8[i] = (uint8_t) ((value / model->input_scale) + model->input_zero_point);
-        }
-    } else {
-        int16_t *model_input_16 = (int16_t *) model_input;
-        for (size_t i = 0; i < input_array->len; i++) {
-            float value = ndarray_get_float_index(input_array->array, input_array->dtype, i);
-            model_input_16[i] = (int16_t) ((value / model->input_scale) + model->input_zero_point);
-        }
-    }
-}
-
-static void py_ml_output_callback(py_ml_model_obj_t *model, void *arg) {
+static mp_obj_t py_ml_process_output(py_ml_model_obj_t *model) {
     mp_obj_list_t *output_list = MP_OBJ_TO_PTR(mp_obj_new_list(model->outputs_size, NULL));
 
     for (size_t i = 0; i < model->outputs_size; i++) {
         void *model_output = ml_backend_get_output(model, i);
         size_t size = py_ml_tuple_sum(MP_OBJ_TO_PTR(model->output_shape->items[i]));
         mp_obj_tuple_t *output = MP_OBJ_TO_PTR(mp_obj_new_tuple(size, NULL));
 
-        if (model->output_dtype == PY_ML_DTYPE_FLOAT) {
+        if (model->output_dtype == 'f') {
             for (size_t j = 0; j < size; j++) {
                 output->items[j] = mp_obj_new_float(((float *) model_output)[j]);
             }
-        } else if (model->output_dtype == PY_ML_DTYPE_INT8) {
+        } else if (model->output_dtype == 'b') {
             for (size_t j = 0; j < size; j++) {
                 float v = (((int8_t *) model_output)[j] - model->output_zero_point);
                 output->items[j] = mp_obj_new_float(v * model->output_scale);
             }
-        } else if (model->output_dtype == PY_ML_DTYPE_UINT8) {
+        } else if (model->output_dtype == 'B') {
             for (size_t j = 0; j < size; j++) {
                 float v = (((uint8_t *) model_output)[j] - model->output_zero_point);
                 output->items[j] = mp_obj_new_float(v * model->output_scale);
@@ -265,7 +130,7 @@ static void py_ml_output_callback(py_ml_model_obj_t *model, void *arg) {
         }
         output_list->items[i] = MP_OBJ_FROM_PTR(output);
     }
-    *((py_ml_output_data_t *) arg) = MP_OBJ_FROM_PTR(output_list);
+    return MP_OBJ_FROM_PTR(output_list);
 }
 
 // TF Model Object.
@@ -274,21 +139,18 @@ static const mp_obj_type_t py_ml_model_type;
 
 static void py_ml_model_print(const mp_print_t *print, mp_obj_t self_in, mp_print_kind_t kind) {
     py_ml_model_obj_t *self = MP_OBJ_TO_PTR(self_in);
     mp_printf(print,
-              "{size: %d, ram: %d, inputs_size: %d, input_dtype: %s, input_scale: %f, input_zero_point: %d, "
-              "outputs_size: %d output_dtype: %s, output_scale: %f, output_zero_point: %d}",
-              self->size, self->memory_size, self->inputs_size, py_ml_map_dtype(self->input_dtype),
-              (double) self->input_scale, self->input_zero_point, self->outputs_size, py_ml_map_dtype(self->output_dtype),
+              "{size: \"%d\", ram: \"%d\","
+              " inputs_size: \"%d\", input_dtype: \"%c\", input_scale: \"%f\", input_zero_point: \"%d\","
+              " outputs_size: \"%d\", output_dtype: \"%c\", output_scale: \"%f\", output_zero_point: \"%d\"}",
+              self->size, self->memory_size, self->inputs_size, self->input_dtype,
+              (double) self->input_scale, self->input_zero_point, self->outputs_size, self->output_dtype,
               (double) self->output_scale, self->output_zero_point);
 }
 
 static mp_obj_t py_ml_model_predict(uint n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
-    enum { ARG_roi, ARG_callback, ARG_scale, ARG_mean, ARG_stdev };
+    enum { ARG_callback };
     static const mp_arg_t allowed_args[] = {
-        { MP_QSTR_roi, MP_ARG_OBJ | MP_ARG_KW_ONLY, {.u_rom_obj = MP_ROM_NONE} },
         { MP_QSTR_callback, MP_ARG_OBJ | MP_ARG_KW_ONLY, {.u_rom_obj = MP_ROM_NONE} },
-        { MP_QSTR_scale, MP_ARG_INT | MP_ARG_KW_ONLY, {.u_int = PY_ML_SCALE_0_1} },
-        { MP_QSTR_mean, MP_ARG_OBJ | MP_ARG_KW_ONLY, {.u_rom_obj = MP_ROM_NONE} },
-        { MP_QSTR_stdev, MP_ARG_OBJ | MP_ARG_KW_ONLY, {.u_rom_obj = MP_ROM_NONE} },
     };
 
     // Parse args.
@@ -297,50 +159,28 @@ static mp_obj_t py_ml_model_predict(uint n_args, const mp_obj_t *pos_args, mp_ma
 
     py_ml_model_obj_t *model = MP_OBJ_TO_PTR(pos_args[0]);
 
-    py_ml_input_data_t input_data = {
-        .scale = args[ARG_scale].u_int,
-        .mean = {0.0f, 0.0f, 0.0f},
-        .stdev = {1.0f, 1.0f, 1.0f}
-    };
-    ml_backend_input_callback_t input_callback = py_ml_input_callback;
-
-    py_ml_output_data_t output_data;
-    ml_backend_output_callback_t output_callback = py_ml_output_callback;
-
-    if (MP_OBJ_IS_TYPE(pos_args[1], &ulab_ndarray_type)) {
-        input_data.data = (void *) &pos_args[1];
-        input_callback = py_ml_input_callback_regression;
-    } else if (MP_OBJ_IS_TYPE(pos_args[1], &py_image_type)) {
-        input_data.data = py_helper_arg_to_image(pos_args[1], ARG_IMAGE_ANY);
-        input_data.roi = py_helper_arg_to_roi(args[ARG_roi].u_obj, input_data.data);
-        py_helper_arg_to_float_array(args[ARG_mean].u_obj, input_data.mean, 3);
-        py_helper_arg_to_float_array(args[ARG_stdev].u_obj, input_data.stdev, 3);
-    } else {
-        mp_raise_msg(&mp_type_ValueError, MP_ERROR_TEXT("Unsupported input type"));
+    if (!MP_OBJ_IS_TYPE(pos_args[1], &mp_type_list)) {
+        mp_raise_msg(&mp_type_ValueError, MP_ERROR_TEXT("Unsupported input type. Expected a list"));
     }
 
-    ml_backend_run_inference(model, input_callback, &input_data, output_callback, &output_data);
+    py_ml_process_input(model, pos_args[1]);
+    ml_backend_run_inference(model);
+
+    mp_obj_t output = py_ml_process_output(model);
 
     if (args[ARG_callback].u_obj != mp_const_none) {
-        mp_obj_t rect = mp_obj_new_tuple(4, (mp_obj_t []) { mp_obj_new_int(input_data.roi.x),
-                                                            mp_obj_new_int(input_data.roi.y),
-                                                            mp_obj_new_int(input_data.roi.w),
-                                                            mp_obj_new_int(input_data.roi.h) });
-        mp_obj_t fun_args[3] = { MP_OBJ_FROM_PTR(model), output_data, rect };
-        if (!MP_OBJ_IS_TYPE(pos_args[1], &py_image_type)) {
-            output_data = mp_call_function_n_kw(args[ARG_callback].u_obj, 2, 0, fun_args);
-        } else {
-            output_data = mp_call_function_n_kw(args[ARG_callback].u_obj, 3, 0, fun_args);
-        }
+        // Pass model, inputs, outputs to the post-processing callback.
+        mp_obj_t fargs[3] = { MP_OBJ_FROM_PTR(model), pos_args[1], output };
+        output = mp_call_function_n_kw(args[ARG_callback].u_obj, 3, 0, fargs);
     }
 
-    return output_data;
+    return output;
 }
 static MP_DEFINE_CONST_FUN_OBJ_KW(py_ml_model_predict_obj, 2, py_ml_model_predict);
 
 static void py_ml_model_attr(mp_obj_t self_in, qstr attr, mp_obj_t *dest) {
     py_ml_model_obj_t *self = MP_OBJ_TO_PTR(self_in);
-    const char *str;
+
     if (dest[0] == MP_OBJ_NULL) {
         // Load attribute.
         switch (attr) {
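With this refactor, `predict()` rejects anything but a list, and the old `roi`/`scale`/`mean`/`stdev` keywords are gone; image preprocessing now lives entirely in the `Normalization` callable. ndarray inputs still work and are quantized using the `input_scale`/`input_zero_point` branches above. A minimal regression-style sketch (the model file is a placeholder):

```python
import ml
from ulab import numpy as np

model = ml.Model("regression.tflite")  # hypothetical model file

# The ndarray shape must match the model input shape exactly, per the checks above.
x = np.zeros(model.input_shape[0], dtype=np.float)
print(model.predict([x])[0])
```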
@@ -354,8 +194,7 @@ static void py_ml_model_attr(mp_obj_t self_in, qstr attr, mp_obj_t *dest) {
                 dest[0] = MP_OBJ_FROM_PTR(self->input_shape);
                 break;
             case MP_QSTR_input_dtype:
-                str = py_ml_map_dtype(self->input_dtype);
-                dest[0] = mp_obj_new_str(str, strlen(str));
+                dest[0] = mp_obj_new_str(&self->input_dtype, 1);
                 break;
             case MP_QSTR_input_scale:
                 dest[0] = mp_obj_new_float(self->input_scale);
@@ -367,8 +206,7 @@ static void py_ml_model_attr(mp_obj_t self_in, qstr attr, mp_obj_t *dest) {
                 dest[0] = MP_OBJ_FROM_PTR(self->output_shape);
                 break;
             case MP_QSTR_output_dtype:
-                str = py_ml_map_dtype(self->output_dtype);
-                dest[0] = mp_obj_new_str(str, strlen(str));
+                dest[0] = mp_obj_new_str(&self->output_dtype, 1);
                 break;
             case MP_QSTR_output_scale:
                 dest[0] = mp_obj_new_float(self->output_scale);
@@ -490,10 +328,6 @@ static const mp_rom_map_elem_t py_ml_globals_dict_table[] = {
     { MP_ROM_QSTR(MP_QSTR___name__), MP_OBJ_NEW_QSTR(MP_QSTR_ml) },
     { MP_ROM_QSTR(MP_QSTR_Model), MP_ROM_PTR(&py_ml_model_type) },
     { MP_ROM_QSTR(MP_QSTR_NMS), MP_ROM_PTR(&py_ml_nms_type) },
-    { MP_ROM_QSTR(MP_QSTR_SCALE_NONE), MP_ROM_INT(PY_ML_SCALE_NONE) },
-    { MP_ROM_QSTR(MP_QSTR_SCALE_0_1), MP_ROM_INT(PY_ML_SCALE_0_1) },
-    { MP_ROM_QSTR(MP_QSTR_SCALE_S1_1), MP_ROM_INT(PY_ML_SCALE_S1_1) },
-    { MP_ROM_QSTR(MP_QSTR_SCALE_S128_127), MP_ROM_INT(PY_ML_SCALE_S128_127) },
 };
 
 static MP_DEFINE_CONST_DICT(py_ml_globals_dict, py_ml_globals_dict_table);
diff --git a/src/omv/modules/py_ml.h b/src/omv/modules/py_ml.h
index 941cfc2f9..959b9760c 100644
--- a/src/omv/modules/py_ml.h
+++ b/src/omv/modules/py_ml.h
@@ -10,20 +10,6 @@
  */
 #ifndef __PY_ML_H__
 #define __PY_ML_H__
-typedef enum {
-    PY_ML_SCALE_NONE,
-    PY_ML_SCALE_0_1,
-    PY_ML_SCALE_S1_1,
-    PY_ML_SCALE_S128_127
-} py_ml_scale_t;
-
-typedef enum py_ml_dtype {
-    PY_ML_DTYPE_INT8,
-    PY_ML_DTYPE_UINT8,
-    PY_ML_DTYPE_INT16,
-    PY_ML_DTYPE_FLOAT
-} py_ml_dtype_t;
-
 // TF Model Object.
 typedef struct py_ml_model_obj {
     mp_obj_base_t base;
@@ -35,34 +21,24 @@ typedef struct py_ml_model_obj {
     mp_obj_tuple_t *input_shape;
     float input_scale;
     int input_zero_point;
-    py_ml_dtype_t input_dtype;
+    char input_dtype;
     size_t outputs_size;
     mp_obj_tuple_t *output_shape;
     float output_scale;
     int output_zero_point;
-    py_ml_dtype_t output_dtype;
+    char output_dtype;
     void *state; // Private context for the backend.
 } py_ml_model_obj_t;
 
 // Initialize a model.
 int ml_backend_init_model(py_ml_model_obj_t *model);
 
-// Callback to populate the model input data.
-typedef void (*ml_backend_input_callback_t) (py_ml_model_obj_t *model, void *arg);
-
-// Callback to get the model output data.
-typedef void (*ml_backend_output_callback_t) (py_ml_model_obj_t *model, void *arg);
+// Run inference.
+int ml_backend_run_inference(py_ml_model_obj_t *model);
 
 // Return an input tensor by index.
 void *ml_backend_get_input(py_ml_model_obj_t *model, size_t index);
 
 // Return an output tensor by index.
 void *ml_backend_get_output(py_ml_model_obj_t *model, size_t index);
-
-// Run inference.
-int ml_backend_run_inference(py_ml_model_obj_t *model,
-                             ml_backend_input_callback_t input_callback, // Callback to populate the model input data.
-                             void *input_data, // User data structure passed to input callback.
-                             ml_backend_output_callback_t output_callback, // Callback to use the model output data.
-                             void *output_data); // User data structure passed to output callback.
 #endif // __PY_ML_H__
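Net effect of the header change: `input_dtype` and `output_dtype` are now single-character, array-style type codes ('b', 'B', 'h', 'f') surfaced to Python as one-character strings, replacing the old "uint8"/"int8"/"int16"/"float" names and the removed `SCALE_*` constants. A short sketch of reading the new metadata (the model file is a placeholder):

```python
import ml

model = ml.Model("trained.tflite")  # hypothetical model file
print(model)                        # {size: "...", ram: "...", input_dtype: "b", ...}

if model.input_dtype == "b":        # int8-quantized input
    print(model.input_scale, model.input_zero_point)
```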