modules/py_tf: Refactor TensorFlow module.

This patch decouples the MicroPython TF module from the TensorFlow library,
allowing support for more DL/ML libraries and engines in the future.

The ML backend has been completely redesigned; the model object can now be
passed directly to the backend, allowing it to initialize the model internally.
Additionally, the backend's state/memory is now persistent (surviving across
invocations), which improves inference speed by around 20% and supports models
that require persistent memory, such as LSTM.

Finally, the ML module has been mostly rewritten to handle model input/output
shapes and data properly, and to support models with multiple outputs.
Author: iabdalkader
Date:   2024-06-26 21:24:46 +02:00
Parent: 13c99c6332
Commit: c7228cbb48
8 changed files with 985 additions and 819 deletions


@@ -9,10 +9,41 @@
 override CFLAGS := $(CFLAGS) -Wno-unused-variable
 GENERATED := $(BUILD)/tflm_builtin_models.h $(BUILD)/tflm_builtin_models.c
-OBJS = $(BUILD)/tflm_builtin_models.o
-OBJ_DIRS = $(sort $(dir $(OBJS)))
+HDR_OBJS = $(BUILD)/tflm_builtin_models.o
+LIB_OBJS = $(BUILD)/tflm_backend.o
+OBJ_DIRS = $(sort $(dir $(HDR_OBJS)))
+
+# Extra module flags.
+CXXFLAGS += $(filter-out -std=gnu99,$(CFLAGS)) \
+            -std=c++11 \
+            -fno-rtti \
+            -fno-exceptions \
+            -fno-use-cxa-atexit \
+            -nodefaultlibs \
+            -fno-unwind-tables \
+            -fpermissive \
+            -fmessage-length=0 \
+            -fno-threadsafe-statics \
+            -Wno-double-promotion \
+            -Wno-float-conversion
+
+CXXFLAGS += -DTF_LITE_STATIC_MEMORY \
+            -DTF_LITE_DISABLE_X86_NEON \
+            -DKERNELS_OPTIMIZED_FOR_SPEED \
+            -DTF_LITE_STRIP_ERROR_STRINGS \
+            -I$(TOP_DIR)/lib/tflm/libtflm/include/ \
+            -I$(TOP_DIR)/lib/tflm/libtflm/include/third_party/ \
+            -I$(TOP_DIR)/lib/tflm/libtflm/include/third_party/gemmlowp/ \
+            -I$(TOP_DIR)/lib/tflm/libtflm/include/third_party/flatbuffers/include/
+
+# Add CubeAI module if enabled.
+ifeq ($(MICROPY_PY_CUBEAI), 1)
+SRC_USERMOD += $(OMV_MOD_DIR)/../../stm32cubeai/py_st_nn.c
+endif
+
-all: | $(OBJ_DIRS) $(OBJS)
+all: | headers $(LIB_OBJS)
+
+headers: | $(OBJ_DIRS) $(HDR_OBJS)
 $(OBJ_DIRS):
 	$(MKDIR) -p $@
@@ -21,14 +52,19 @@ $(GENERATED): $(wildcard models/*)
 	$(PYTHON) $(TOOLS)/$(TFLITE2C) --input models > $(BUILD)/tflm_builtin_models.c
 	$(PYTHON) $(TOOLS)/$(TFLITE2C) --input models --header > $(BUILD)/tflm_builtin_models.h
-$(OBJS): $(GENERATED)
+$(HDR_OBJS): $(GENERATED)
 $(BUILD)/%.o : %.c
 	$(ECHO) "CC $<"
 	$(CC) $(CFLAGS) -c -o $@ $<
+$(BUILD)/%.o : %.cc
+	$(ECHO) "CXX $<"
+	$(CC) $(CXXFLAGS) -c -o $@ $<
 $(BUILD)/%.o : %.s
 	$(ECHO) "AS $<"
 	$(AS) $(AFLAGS) $< -o $@
--include $(OBJS:%.o=%.d)
+-include $(HDR_OBJS:%.o=%.d)
+-include $(LIB_OBJS:%.o=%.d)


@@ -0,0 +1,324 @@
/*
* This file is part of the OpenMV project.
*
* Copyright (c) 2024 Ibrahim Abdelkader <iabdalkader@openmv.io>
* Copyright (c) 2024 Kwabena W. Agyeman <kwagyeman@openmv.io>
*
* This work is licensed under the MIT license, see the file LICENSE for details.
*
* TensorFlow Lite Micro ML backend.
*/
#include <string.h>
#include <stdint.h>
#include "imlib_config.h"
#ifdef IMLIB_ENABLE_TFLM
#include "tensorflow/lite/micro/micro_op_resolver.h"
#include "tensorflow/lite/micro/micro_mutable_op_resolver.h"
#include "tensorflow/lite/micro/cortex_m_generic/debug_log_callback.h"
#include "tensorflow/lite/micro/micro_interpreter.h"
#include "tensorflow/lite/schema/schema_generated.h"
extern "C" {
#include "py/runtime.h"
#include "py/obj.h"
#include "py/objlist.h"
#include "py/objtuple.h"
#include "py/binary.h"
#include "py_ml.h"
#include "fb_alloc.h"
using namespace tflite;
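// Alignment mask: the tensor arena pointer below is rounded up to a 16-byte boundary.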
#define TF_ARENA_ALIGNMENT (16 - 1)
typedef MicroMutableOpResolver<113> MicroOpsResolver;
typedef struct ml_backend_state {
void *arena;
const Model *model;
MicroOpsResolver *resolver;
MicroInterpreter *interpreter;
} ml_backend_state_t;
void abort(void) {
while (1);
}
void ml_backend_log_handler(const char *s) {
if (strcmp(s, "\r\n")) {
mp_printf(MP_PYTHON_PRINTER, "tflm_backend: %s\n", s);
}
}
static bool ml_backend_valid_dataype(TfLiteType type) {
return (type == kTfLiteUInt8 ||
type == kTfLiteInt8 ||
type == kTfLiteInt16 ||
type == kTfLiteFloat32);
}
static py_ml_dtype_t ml_backend_map_dtype(TfLiteType type) {
if (type == kTfLiteUInt8) {
return PY_ML_DTYPE_UINT8;
} else if (type == kTfLiteInt8) {
return PY_ML_DTYPE_INT8;
} else if (type == kTfLiteInt16) {
return PY_ML_DTYPE_INT16;
} else {
return PY_ML_DTYPE_FLOAT;
}
}
static void ml_backend_init_ops_resolver(MicroOpsResolver *resolver) {
resolver->AddAbs();
resolver->AddAdd();
resolver->AddAddN();
resolver->AddArgMax();
resolver->AddArgMin();
resolver->AddAssignVariable();
resolver->AddAveragePool2D();
resolver->AddBatchMatMul();
resolver->AddBatchToSpaceNd();
resolver->AddBroadcastArgs();
resolver->AddBroadcastTo();
resolver->AddCallOnce();
resolver->AddCast();
resolver->AddCeil();
resolver->AddCircularBuffer();
resolver->AddConcatenation();
resolver->AddConv2D();
resolver->AddCos();
resolver->AddCumSum();
resolver->AddDelay();
resolver->AddDepthToSpace();
resolver->AddDepthwiseConv2D();
resolver->AddDequantize();
//resolver->AddDetectionPostprocess();
resolver->AddDiv();
resolver->AddElu();
resolver->AddEmbeddingLookup();
resolver->AddEnergy();
resolver->AddEqual();
#ifdef ETHOS_U
resolver->AddEthosU();
#endif
resolver->AddExp();
resolver->AddExpandDims();
resolver->AddFftAutoScale();
resolver->AddFill();
resolver->AddFilterBank();
resolver->AddFilterBankLog();
resolver->AddFilterBankSpectralSubtraction();
resolver->AddFilterBankSquareRoot();
resolver->AddFloor();
resolver->AddFloorDiv();
resolver->AddFloorMod();
resolver->AddFramer();
resolver->AddFullyConnected();
resolver->AddGather();
resolver->AddGatherNd();
resolver->AddGreater();
resolver->AddGreaterEqual();
resolver->AddHardSwish();
resolver->AddIf();
resolver->AddIrfft();
resolver->AddL2Normalization();
resolver->AddL2Pool2D();
resolver->AddLeakyRelu();
resolver->AddLess();
resolver->AddLessEqual();
resolver->AddLog();
resolver->AddLogSoftmax();
resolver->AddLogicalAnd();
resolver->AddLogicalNot();
resolver->AddLogicalOr();
resolver->AddLogistic();
resolver->AddMaxPool2D();
resolver->AddMaximum();
resolver->AddMean();
resolver->AddMinimum();
resolver->AddMirrorPad();
resolver->AddMul();
resolver->AddNeg();
resolver->AddNotEqual();
resolver->AddOverlapAdd();
resolver->AddPCAN();
resolver->AddPack();
resolver->AddPad();
resolver->AddPadV2();
resolver->AddPrelu();
resolver->AddQuantize();
resolver->AddReadVariable();
resolver->AddReduceMax();
resolver->AddRelu();
resolver->AddRelu6();
resolver->AddReshape();
resolver->AddResizeBilinear();
resolver->AddResizeNearestNeighbor();
resolver->AddRfft();
resolver->AddRound();
resolver->AddRsqrt();
resolver->AddSelectV2();
resolver->AddShape();
resolver->AddSin();
resolver->AddSlice();
resolver->AddSoftmax();
resolver->AddSpaceToBatchNd();
resolver->AddSpaceToDepth();
resolver->AddSplit();
resolver->AddSplitV();
resolver->AddSqrt();
resolver->AddSquare();
resolver->AddSquaredDifference();
resolver->AddSqueeze();
resolver->AddStacker();
resolver->AddStridedSlice();
resolver->AddSub();
resolver->AddSum();
resolver->AddSvdf();
resolver->AddTanh();
resolver->AddTranspose();
resolver->AddTransposeConv();
resolver->AddUnidirectionalSequenceLSTM();
resolver->AddUnpack();
resolver->AddVarHandle();
resolver->AddWhile();
resolver->AddWindow();
resolver->AddZerosLike();
}
int ml_backend_init_model(py_ml_model_obj_t *model) {
RegisterDebugLogCallback(ml_backend_log_handler);
// Parse model's data.
const Model *tflite_model = GetModel(model->data);
if (tflite_model->version() != TFLITE_SCHEMA_VERSION) {
mp_raise_msg(&mp_type_ValueError, MP_ERROR_TEXT("Unsupported model schema"));
}
// Initialize the op resolver.
MicroOpsResolver resolver;
ml_backend_init_ops_resolver(&resolver);
// Allocate the interpreter and tensors once to initialize the model, check input
// and output data types and to get the optimal tensor arena size.
fb_alloc_mark();
uint32_t tensor_arena_size;
uint8_t *tensor_arena = (uint8_t *) fb_alloc_all(&tensor_arena_size, FB_ALLOC_PREFER_SIZE | FB_ALLOC_CACHE_ALIGN);
MicroInterpreter interpreter(tflite_model,
resolver,
tensor_arena,
tensor_arena_size);
if (interpreter.AllocateTensors() != kTfLiteOk) {
mp_raise_msg(&mp_type_ValueError, MP_ERROR_TEXT("Failed to allocate tensors"));
}
// Check input data type.
TfLiteTensor *input = interpreter.input(0);
if (!ml_backend_valid_dataype(input->type)) {
mp_raise_msg_varg(&mp_type_ValueError, MP_ERROR_TEXT("Unsupported input data type %d"), input->type);
}
// Check output data type.
TfLiteTensor *output = interpreter.output(0);
if (!ml_backend_valid_dataype(output->type)) {
mp_raise_msg_varg(&mp_type_ValueError, MP_ERROR_TEXT("Unsupported output data type %d"), output->type);
}
model->input_shape = (mp_obj_tuple_t *) MP_OBJ_TO_PTR(mp_obj_new_tuple(interpreter.inputs_size(), NULL));
for (size_t i=0; i<interpreter.inputs_size(); i++) {
TfLiteTensor *input = interpreter.input(i);
mp_obj_tuple_t *o = (mp_obj_tuple_t *) MP_OBJ_TO_PTR(mp_obj_new_tuple(input->dims->size, NULL));
for (int j=0; j<input->dims->size; j++) {
o->items[j] = mp_obj_new_int(input->dims->data[j]);
}
model->input_shape->items[i] = MP_OBJ_FROM_PTR(o);
}
model->inputs_size = interpreter.inputs_size();
model->input_dtype = ml_backend_map_dtype(input->type);
model->input_scale = input->params.scale;
model->input_zero_point = input->params.zero_point;
model->output_shape = (mp_obj_tuple_t *) MP_OBJ_TO_PTR(mp_obj_new_tuple(interpreter.outputs_size(), NULL));
for (size_t i=0; i<interpreter.outputs_size(); i++) {
TfLiteTensor *output = interpreter.output(i);
mp_obj_tuple_t *o = (mp_obj_tuple_t *) MP_OBJ_TO_PTR(mp_obj_new_tuple(output->dims->size, NULL));
for (int j=0; j<output->dims->size; j++) {
o->items[j] = mp_obj_new_int(output->dims->data[j]);
}
model->output_shape->items[i] = MP_OBJ_FROM_PTR(o);
}
model->outputs_size = interpreter.outputs_size();
model->output_dtype = ml_backend_map_dtype(output->type);
model->output_scale = output->params.scale;
model->output_zero_point = output->params.zero_point;
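// Persist the measured arena usage plus 1 KB of headroom for the final interpreter.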
model->memory_size = interpreter.arena_used_bytes() + 1024;
// Free the temporary arena.
fb_alloc_free_till_mark();
// Allocate the persistent state.
ml_backend_state_t *state = m_new0(ml_backend_state_t, 1);
state->model = GetModel(model->data);
state->arena = m_new(char, model->memory_size + TF_ARENA_ALIGNMENT);
state->resolver = new(m_new0(MicroOpsResolver, 1)) MicroOpsResolver();
ml_backend_init_ops_resolver(state->resolver);
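// Round the arena pointer up to the next 16-byte boundary.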
uint8_t *aligned_arena = (uint8_t *) (((uintptr_t) state->arena + TF_ARENA_ALIGNMENT) & ~(TF_ARENA_ALIGNMENT));
state->interpreter = new(m_new0(MicroInterpreter, 1)) MicroInterpreter(state->model,
*state->resolver,
aligned_arena,
model->memory_size);
if (state->interpreter->AllocateTensors() != kTfLiteOk) {
mp_raise_msg(&mp_type_ValueError, MP_ERROR_TEXT("Failed to allocate tensors"));
}
model->state = state;
return 0;
}
int ml_backend_run_inference(py_ml_model_obj_t *model,
ml_backend_input_callback_t input_callback,
void *input_arg,
ml_backend_output_callback_t output_callback,
void *output_arg) {
RegisterDebugLogCallback(ml_backend_log_handler);
ml_backend_state_t *state = (ml_backend_state_t *) model->state;
input_callback(model, input_arg);
if (state->interpreter->Invoke() != kTfLiteOk) {
mp_raise_msg(&mp_type_ValueError, MP_ERROR_TEXT("Invoke failed"));
}
output_callback(model, output_arg);
return 0;
}
void *ml_backend_get_input(py_ml_model_obj_t *model, size_t index) {
ml_backend_state_t *state = (ml_backend_state_t *) model->state;
if (index < state->interpreter->inputs_size()) {
return state->interpreter->input(index)->data.data;
}
mp_raise_msg(&mp_type_ValueError, MP_ERROR_TEXT("Invalid input tensor index"));
}
void *ml_backend_get_output(py_ml_model_obj_t *model, size_t index) {
ml_backend_state_t *state = (ml_backend_state_t *) model->state;
if (index < state->interpreter->outputs_size()) {
return state->interpreter->output(index)->data.data;
}
mp_raise_msg(&mp_type_ValueError, MP_ERROR_TEXT("Invalid output tensor index"));
}
int ml_backend_generate_micro_features(const int16_t *input,
int input_size,
int output_size,
int8_t *output,
size_t *num_samples_read) {
return 0;
}
} // extern "C"
#endif //IMLIB_ENABLE_TFLM
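
For reference, a minimal sketch of driving this backend from C through the
callbacks declared in py_ml.h. The fixed buffer sizes (a 96x96 uint8 input and
10 uint8 outputs) are hypothetical stand-ins; a real caller would derive them
from the model's input/output shapes and dtypes.

#include <string.h>
#include "py_ml.h"

static void fill_input(py_ml_model_obj_t *model, void *arg) {
    // Copy a caller-provided uint8 image into input tensor 0.
    memcpy(ml_backend_get_input(model, 0), arg, 96 * 96);
}

static void read_output(py_ml_model_obj_t *model, void *arg) {
    // Copy output tensor 0 back to the caller.
    memcpy(arg, ml_backend_get_output(model, 0), 10);
}

static void run_once(py_ml_model_obj_t *model, void *in, void *out) {
    // The interpreter and arena persist in model->state, so repeated calls
    // reuse the same tensors instead of re-running AllocateTensors().
    ml_backend_run_inference(model, fill_input, in, read_output, out);
}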


@@ -1,14 +1,31 @@
-OMV_MOD_DIR := $(USERMOD_DIR)
-OMV_PORT_MOD_DIR := $(OMV_MOD_DIR)/../ports/$(PORT)/modules
 # Add OpenMV common modules.
+OMV_MOD_DIR := $(USERMOD_DIR)
 SRC_USERMOD += $(wildcard $(OMV_MOD_DIR)/*.c)
+SRC_USERMOD_CXX += $(wildcard $(OMV_MOD_DIR)/*.cpp)
 # Add OpenMV port-specific modules.
+OMV_PORT_MOD_DIR := $(OMV_MOD_DIR)/../ports/$(PORT)/modules
 SRC_USERMOD += $(wildcard $(OMV_PORT_MOD_DIR)/*.c)
+SRC_USERMOD_CXX += $(wildcard $(OMV_PORT_MOD_DIR)/*.cpp)
 # Extra module flags.
-CFLAGS_USERMOD += -I$(OMV_MOD_DIR) -I$(OMV_PORT_MOD_DIR) -Wno-float-conversion
+CFLAGS_USERMOD += \
+    -I$(OMV_MOD_DIR) \
+    -I$(OMV_PORT_MOD_DIR) \
+    -Wno-float-conversion
+CXXFLAGS_USERMOD += \
+    $(CFLAGS_USERMOD) \
+    -std=c++11 \
+    -fno-rtti \
+    -fno-exceptions \
+    -fno-use-cxa-atexit \
+    -nodefaultlibs \
+    -fno-unwind-tables \
+    -fpermissive \
+    -fno-threadsafe-statics \
+    -fmessage-length=0 \
+    $(filter-out -std=gnu99,$(CFLAGS))
 # Add CubeAI module if enabled.
 ifeq ($(MICROPY_PY_CUBEAI), 1)

src/omv/modules/py_ml.c (new file)

@@ -0,0 +1,510 @@
/*
* This file is part of the OpenMV project.
*
* Copyright (c) 2013-2024 Ibrahim Abdelkader <iabdalkader@openmv.io>
* Copyright (c) 2013-2024 Kwabena W. Agyeman <kwagyeman@openmv.io>
*
* This work is licensed under the MIT license, see the file LICENSE for details.
*
* Python Machine Learning Module.
*/
#include <stdio.h>
#include "py/runtime.h"
#include "py/obj.h"
#include "py/objlist.h"
#include "py/objtuple.h"
#include "py/binary.h"
#include "py_helper.h"
#include "imlib_config.h"
#ifdef IMLIB_ENABLE_TFLM
#include "py_image.h"
#include "file_utils.h"
#include "py_ml.h"
#include "tflm_builtin_models.h"
#include "ulab/code/ndarray.h"
#define PY_ML_GRAYSCALE_RANGE ((COLOR_GRAYSCALE_MAX) -(COLOR_GRAYSCALE_MIN))
#define PY_ML_GRAYSCALE_MID (((PY_ML_GRAYSCALE_RANGE) +1) / 2)
STATIC const char *py_ml_map_dtype(py_ml_dtype_t dtype) {
if (dtype == PY_ML_DTYPE_UINT8) {
return "uint8";
} else if (dtype == PY_ML_DTYPE_INT8) {
return "int8";
} else if (dtype == PY_ML_DTYPE_INT16) {
return "int16";
} else {
return "float";
}
}
// TF Input/Output callback functions.
typedef mp_obj_t py_ml_output_data_t;
typedef struct _py_ml_input_callback_data {
void *data;
rectangle_t roi;
py_ml_scale_t scale;
float mean[3];
float stdev[3];
} py_ml_input_data_t;
static size_t py_ml_tuple_sum(mp_obj_tuple_t *o) {
if (o->len < 1) {
mp_raise_msg(&mp_type_ValueError, MP_ERROR_TEXT("Unexpected tensor shape"));
}
size_t size = mp_obj_get_int(o->items[0]);
for (size_t i = 1; i < o->len; i++) {
size *= mp_obj_get_int(o->items[i]);
}
return size;
}
static void py_ml_tuple_hwc(mp_obj_tuple_t *o, size_t *h, size_t *w, size_t *c) {
if (o->len != 1 || ((mp_obj_tuple_t *) MP_OBJ_TO_PTR(o->items[0]))->len != 4) {
mp_raise_msg(&mp_type_ValueError, MP_ERROR_TEXT("Unexpected tensor shape"));
}
o = MP_OBJ_TO_PTR(o->items[0]);
*h = mp_obj_get_int(o->items[1]);
*w = mp_obj_get_int(o->items[2]);
*c = mp_obj_get_int(o->items[3]);
}
STATIC void py_ml_input_callback(py_ml_model_obj_t *model, void *arg) {
// TODO we assume that there's a single input.
void *model_input = ml_backend_get_input(model, 0);
py_ml_input_data_t *input_data = (py_ml_input_data_t *) arg;
// TODO we assume that the input shape is (1, h, w, c)
size_t input_height = 0, input_width = 0, input_channels = 0;
py_ml_tuple_hwc(model->input_shape, &input_height, &input_width, &input_channels);
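// For int8 models, XOR-ing a uint8 pixel with 128 re-biases it from [0,255] to [-128,127].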
int shift = (model->input_dtype == PY_ML_DTYPE_INT8) ? PY_ML_GRAYSCALE_MID : 0;
float fscale = 1.0f, fadd = 0.0f;
switch (input_data->scale) {
case PY_ML_SCALE_0_1: // convert 0->255 to 0->1
fscale = 1.0f / 255.0f;
break;
case PY_ML_SCALE_S1_1: // convert 0->255 to -1->1
fscale = 2.0f / 255.0f;
fadd = -1.0f;
break;
case PY_ML_SCALE_S128_127: // convert 0->255 to -128->127
fadd = -128.0f;
break;
case PY_ML_SCALE_NONE: // convert 0->255 to 0->255
default:
break;
}
float fscale_r = fscale, fadd_r = fadd;
float fscale_g = fscale, fadd_g = fadd;
float fscale_b = fscale, fadd_b = fadd;
// To normalize the input image we need to subtract the mean and divide by the standard deviation.
// We can do this by applying the normalization to fscale and fadd outside the loop.
// Red
fadd_r = (fadd_r - input_data->mean[0]) / input_data->stdev[0];
fscale_r /= input_data->stdev[0];
// Green
fadd_g = (fadd_g - input_data->mean[1]) / input_data->stdev[1];
fscale_g /= input_data->stdev[1];
// Blue
fadd_b = (fadd_b - input_data->mean[2]) / input_data->stdev[2];
fscale_b /= input_data->stdev[2];
// Grayscale -> Y = 0.299R + 0.587G + 0.114B
float mean = (input_data->mean[0] * 0.299f) + (input_data->mean[1] * 0.587f) + (input_data->mean[2] * 0.114f);
float std = (input_data->stdev[0] * 0.299f) + (input_data->stdev[1] * 0.587f) + (input_data->stdev[2] * 0.114f);
fadd = (fadd - mean) / std;
fscale /= std;
image_t dst_img;
dst_img.w = input_width;
dst_img.h = input_height;
dst_img.data = (uint8_t *) model_input;
if (input_channels == 1) {
dst_img.pixfmt = PIXFORMAT_GRAYSCALE;
} else if (input_channels == 3) {
dst_img.pixfmt = PIXFORMAT_RGB565;
} else {
mp_raise_msg(&mp_type_ValueError, MP_ERROR_TEXT("Expected model input channels to be 1 or 3!"));
}
imlib_draw_image(&dst_img, input_data->data, 0, 0, 1.0f, 1.0f, &input_data->roi,
-1, 256, NULL, NULL, IMAGE_HINT_BILINEAR | IMAGE_HINT_CENTER |
IMAGE_HINT_SCALE_ASPECT_EXPAND | IMAGE_HINT_BLACK_BACKGROUND, NULL, NULL, NULL);
int size = (input_width * input_height) - 1; // must be int per countdown loop
if (input_channels == 1) {
// GRAYSCALE
if (model->input_dtype == PY_ML_DTYPE_FLOAT) {
// convert u8 -> f32
uint8_t *model_input_u8 = (uint8_t *) model_input;
float *model_input_f32 = (float *) model_input;
for (; size >= 0; size -= 1) {
model_input_f32[size] = (model_input_u8[size] * fscale) + fadd;
}
} else {
if (shift) {
// convert u8 -> s8
uint8_t *model_input_8 = (uint8_t *) model_input;
#if (__ARM_ARCH > 6)
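// ARMv7+: flip four sign bits per iteration with one word-wide XOR.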
for (; size >= 3; size -= 4) {
*((uint32_t *) (model_input_8 + size - 3)) ^= 0x80808080;
}
#endif
for (; size >= 0; size -= 1) {
model_input_8[size] ^= PY_ML_GRAYSCALE_MID;
}
}
}
} else if (input_channels == 3) {
// RGB888
int rgb_size = size * 3; // must be int per countdown loop
if (model->input_dtype == PY_ML_DTYPE_FLOAT) {
uint16_t *model_input_u16 = (uint16_t *) model_input;
float *model_input_f32 = (float *) model_input;
for (; size >= 0; size -= 1, rgb_size -= 3) {
int pixel = model_input_u16[size];
model_input_f32[rgb_size] = (COLOR_RGB565_TO_R8(pixel) * fscale_r) + fadd_r;
model_input_f32[rgb_size + 1] = (COLOR_RGB565_TO_G8(pixel) * fscale_g) + fadd_g;
model_input_f32[rgb_size + 2] = (COLOR_RGB565_TO_B8(pixel) * fscale_b) + fadd_b;
}
} else {
uint16_t *model_input_u16 = (uint16_t *) model_input;
uint8_t *model_input_8 = (uint8_t *) model_input;
for (; size >= 0; size -= 1, rgb_size -= 3) {
int pixel = model_input_u16[size];
model_input_8[rgb_size] = COLOR_RGB565_TO_R8(pixel) ^ shift;
model_input_8[rgb_size + 1] = COLOR_RGB565_TO_G8(pixel) ^ shift;
model_input_8[rgb_size + 2] = COLOR_RGB565_TO_B8(pixel) ^ shift;
}
}
}
}
STATIC void py_ml_input_callback_regression(py_ml_model_obj_t *model, void *arg) {
// TODO we assume that there's a single input.
void *model_input = ml_backend_get_input(model, 0);
py_ml_input_data_t *input_data = (py_ml_input_data_t *) arg;
mp_obj_tuple_t *input_shape = MP_OBJ_TO_PTR(model->input_shape->items[0]);
ndarray_obj_t *input_array = MP_OBJ_TO_PTR(*((mp_obj_t *) input_data->data));
if (input_array->ndim != input_shape->len) {
mp_raise_msg(&mp_type_ValueError, MP_ERROR_TEXT("Input shape does not match the model input shape"));
}
for (size_t i = 0; i < input_array->ndim; i++) {
if (input_array->shape[i] != mp_obj_get_int(input_shape->items[i])) {
mp_raise_msg(&mp_type_ValueError, MP_ERROR_TEXT("Input shape does not match the model input shape"));
}
}
if (model->input_dtype == PY_ML_DTYPE_FLOAT) {
float *model_input_float = (float *) model_input;
for (size_t i = 0; i < input_array->len; i++) {
float value = ndarray_get_float_index(input_array->array, input_array->dtype, i);
model_input_float[i] = value;
}
} else if (model->input_dtype == PY_ML_DTYPE_INT8) {
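// Quantize: q = (v / scale) + zero_point; the int8_t cast truncates toward zero.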
int8_t *model_input_8 = (int8_t *) model_input;
for (size_t i = 0; i < input_array->len; i++) {
float value = ndarray_get_float_index(input_array->array, input_array->dtype, i);
model_input_8[i] = (int8_t) ((value / model->input_scale) + model->input_zero_point);
}
} else if (model->input_dtype == PY_ML_DTYPE_UINT8) {
uint8_t *model_input_8 = (uint8_t *) model_input;
for (size_t i = 0; i < input_array->len; i++) {
float value = ndarray_get_float_index(input_array->array, input_array->dtype, i);
model_input_8[i] = (uint8_t) ((value / model->input_scale) + model->input_zero_point);
}
} else {
int16_t *model_input_16 = (int16_t *) model_input;
for (size_t i = 0; i < input_array->len; i++) {
float value = ndarray_get_float_index(input_array->array, input_array->dtype, i);
model_input_16[i] = (int16_t) ((value / model->input_scale) + model->input_zero_point);
}
}
}
STATIC void py_ml_output_callback(py_ml_model_obj_t *model, void *arg) {
mp_obj_list_t *output_list = MP_OBJ_TO_PTR(mp_obj_new_list(model->outputs_size, NULL));
for (size_t i = 0; i < model->outputs_size; i++) {
void *model_output = ml_backend_get_output(model, i);
size_t size = py_ml_tuple_sum(MP_OBJ_TO_PTR(model->output_shape->items[i]));
mp_obj_tuple_t *output = MP_OBJ_TO_PTR(mp_obj_new_tuple(size, NULL));
if (model->output_dtype == PY_ML_DTYPE_FLOAT) {
for (size_t j = 0; j < size; j++) {
output->items[j] = mp_obj_new_float(((float *) model_output)[j]);
}
} else if (model->output_dtype == PY_ML_DTYPE_INT8) {
for (size_t j = 0; j < size; j++) {
float v = (((int8_t *) model_output)[j] - model->output_zero_point);
output->items[j] = mp_obj_new_float(v * model->output_scale);
}
} else if (model->output_dtype == PY_ML_DTYPE_UINT8) {
for (size_t j = 0; j < size; j++) {
float v = (((uint8_t *) model_output)[j] - model->output_zero_point);
output->items[j] = mp_obj_new_float(v * model->output_scale);
}
} else {
for (size_t j = 0; j < size; j++) {
float v = (((int16_t *) model_output)[j] - model->output_zero_point); // PY_ML_DTYPE_INT16
output->items[j] = mp_obj_new_float(v * model->output_scale);
}
}
output_list->items[i] = MP_OBJ_FROM_PTR(output);
}
*((py_ml_output_data_t *) arg) = MP_OBJ_FROM_PTR(output_list);
}
// TF Model Object.
static const mp_obj_type_t py_ml_model_type;
STATIC void py_ml_model_print(const mp_print_t *print, mp_obj_t self_in, mp_print_kind_t kind) {
py_ml_model_obj_t *self = MP_OBJ_TO_PTR(self_in);
mp_printf(print,
"{size: %d, ram: %d, inputs_size: %d, input_dtype: %s, input_scale: %f, input_zero_point: %d, "
"outputs_size: %d output_dtype: %s, output_scale: %f, output_zero_point: %d}",
self->size, self->memory_size, self->inputs_size, py_ml_map_dtype(self->input_dtype),
(double) self->input_scale, self->input_zero_point, self->outputs_size, py_ml_map_dtype(self->output_dtype),
(double) self->output_scale, self->output_zero_point);
}
STATIC mp_obj_t py_ml_model_predict(uint n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
enum { ARG_roi, ARG_callback, ARG_scale, ARG_mean, ARG_stdev };
static const mp_arg_t allowed_args[] = {
{ MP_QSTR_roi, MP_ARG_OBJ | MP_ARG_KW_ONLY, {.u_rom_obj = MP_ROM_NONE} },
{ MP_QSTR_callback, MP_ARG_OBJ | MP_ARG_KW_ONLY, {.u_rom_obj = MP_ROM_NONE} },
{ MP_QSTR_scale, MP_ARG_INT | MP_ARG_KW_ONLY, {.u_int = PY_ML_SCALE_0_1} },
{ MP_QSTR_mean, MP_ARG_OBJ | MP_ARG_KW_ONLY, {.u_rom_obj = MP_ROM_NONE} },
{ MP_QSTR_stdev, MP_ARG_OBJ | MP_ARG_KW_ONLY, {.u_rom_obj = MP_ROM_NONE} },
};
// Parse args.
mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
mp_arg_parse_all(n_args - 2, pos_args + 2, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
py_ml_model_obj_t *model = MP_OBJ_TO_PTR(pos_args[0]);
py_ml_input_data_t input_data = {
.scale = args[ARG_scale].u_int,
.mean = {0.0f, 0.0f, 0.0f},
.stdev = {1.0f, 1.0f, 1.0f}
};
ml_backend_input_callback_t input_callback = py_ml_input_callback;
py_ml_output_data_t output_data;
ml_backend_output_callback_t output_callback = py_ml_output_callback;
if (MP_OBJ_IS_TYPE(pos_args[1], &ulab_ndarray_type)) {
input_data.data = (void *) &pos_args[1];
input_callback = py_ml_input_callback_regression;
} else if (MP_OBJ_IS_TYPE(pos_args[1], &py_image_type)) {
input_data.data = py_helper_arg_to_image(pos_args[1], ARG_IMAGE_ANY);
input_data.roi = py_helper_arg_to_roi(args[ARG_roi].u_obj, input_data.data);
py_helper_arg_to_float_array(args[ARG_mean].u_obj, input_data.mean, 3);
py_helper_arg_to_float_array(args[ARG_stdev].u_obj, input_data.stdev, 3);
} else {
mp_raise_msg(&mp_type_ValueError, MP_ERROR_TEXT("Unsupported input type"));
}
ml_backend_run_inference(model, input_callback, &input_data, output_callback, &output_data);
if (args[ARG_callback].u_obj != mp_const_none) {
mp_obj_t rect = mp_obj_new_tuple(4, (mp_obj_t []) { mp_obj_new_int(input_data.roi.x),
mp_obj_new_int(input_data.roi.y),
mp_obj_new_int(input_data.roi.w),
mp_obj_new_int(input_data.roi.h) });
mp_obj_t fun_args[3] = { MP_OBJ_FROM_PTR(model), output_data, rect };
if (!MP_OBJ_IS_TYPE(pos_args[1], &py_image_type)) {
output_data = mp_call_function_n_kw(args[ARG_callback].u_obj, 2, 0, fun_args);
} else {
output_data = mp_call_function_n_kw(args[ARG_callback].u_obj, 3, 0, fun_args);
}
}
return output_data;
}
STATIC MP_DEFINE_CONST_FUN_OBJ_KW(py_ml_model_predict_obj, 2, py_ml_model_predict);
STATIC void py_ml_model_attr(mp_obj_t self_in, qstr attr, mp_obj_t *dest) {
py_ml_model_obj_t *self = MP_OBJ_TO_PTR(self_in);
const char *str;
if (dest[0] == MP_OBJ_NULL) {
// Load attribute.
switch (attr) {
case MP_QSTR_len:
dest[0] = mp_obj_new_int(self->size);
break;
case MP_QSTR_ram:
dest[0] = mp_obj_new_int(self->memory_size);
break;
case MP_QSTR_input_shape:
dest[0] = MP_OBJ_FROM_PTR(self->input_shape);
break;
case MP_QSTR_input_dtype:
str = py_ml_map_dtype(self->input_dtype);
dest[0] = mp_obj_new_str(str, strlen(str));
break;
case MP_QSTR_input_scale:
dest[0] = mp_obj_new_float(self->input_scale);
break;
case MP_QSTR_input_zero_point:
dest[0] = mp_obj_new_int(self->input_zero_point);
break;
case MP_QSTR_output_shape:
dest[0] = MP_OBJ_FROM_PTR(self->output_shape);
break;
case MP_QSTR_output_dtype:
str = py_ml_map_dtype(self->output_dtype);
dest[0] = mp_obj_new_str(str, strlen(str));
break;
case MP_QSTR_output_scale:
dest[0] = mp_obj_new_float(self->output_scale);
break;
case MP_QSTR_output_zero_point:
dest[0] = mp_obj_new_int(self->output_zero_point);
break;
default:
// Continue lookup in locals_dict.
dest[1] = MP_OBJ_SENTINEL;
break;
}
}
}
mp_obj_t py_ml_model_make_new(const mp_obj_type_t *type, size_t n_args, size_t n_kw, const mp_obj_t *all_args) {
enum { ARG_path, ARG_load_to_fb };
static const mp_arg_t allowed_args[] = {
{ MP_QSTR_path, MP_ARG_REQUIRED | MP_ARG_OBJ },
{ MP_QSTR_load_to_fb, MP_ARG_INT | MP_ARG_KW_ONLY, {.u_bool = false } },
};
// Parse args.
mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
mp_arg_parse_all_kw_array(n_args, n_kw, all_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
fb_alloc_mark();
const char *path = mp_obj_str_get_str(args[ARG_path].u_obj);
py_ml_model_obj_t *model = m_new_obj_with_finaliser(py_ml_model_obj_t);
model->base.type = &py_ml_model_type;
model->data = NULL;
model->fb_alloc = args[ARG_load_to_fb].u_int;
mp_obj_list_t *labels = NULL;
for (const tflm_builtin_model_t *_model = &tflm_builtin_models[0]; _model->name != NULL; _model++) {
if (!strcmp(path, _model->name)) {
// Load model data.
model->size = _model->size;
model->data = (unsigned char *) _model->data;
// Load model labels
labels = MP_OBJ_TO_PTR(mp_obj_new_list(_model->n_labels, NULL));
for (int l = 0; l < _model->n_labels; l++) {
const char *label = _model->labels[l];
labels->items[l] = mp_obj_new_str(label, strlen(label));
}
break;
}
}
if (model->data == NULL) {
#if defined(IMLIB_ENABLE_IMAGE_FILE_IO)
FIL fp;
file_open(&fp, path, false, FA_READ | FA_OPEN_EXISTING);
model->size = f_size(&fp);
model->data = model->fb_alloc ? fb_alloc(model->size, FB_ALLOC_PREFER_SIZE) : xalloc(model->size);
file_read(&fp, model->data, model->size);
file_close(&fp);
#else
mp_raise_msg(&mp_type_OSError, MP_ERROR_TEXT("Image I/O is not supported"));
#endif
}
if (model->fb_alloc) {
// The model's data will not be freed on exceptions.
fb_alloc_mark_permanent();
} else {
fb_alloc_free_till_mark();
}
ml_backend_init_model(model);
if (model->input_scale == 0.0f) {
model->input_scale = 1.0;
}
if (model->output_scale == 0.0f) {
model->output_scale = 1.0;
}
if (labels == NULL) {
return MP_OBJ_FROM_PTR(model);
} else {
return mp_obj_new_tuple(2, (mp_obj_t []) {MP_OBJ_FROM_PTR(labels), MP_OBJ_FROM_PTR(model)});
}
}
STATIC mp_obj_t py_ml_model_deinit(mp_obj_t self_in) {
py_ml_model_obj_t *model = MP_OBJ_TO_PTR(self_in);
if (model->fb_alloc) {
fb_alloc_free_till_mark_past_mark_permanent();
}
return mp_const_none;
}
STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_ml_model_deinit_obj, py_ml_model_deinit);
STATIC const mp_rom_map_elem_t py_ml_model_locals_dict_table[] = {
{ MP_ROM_QSTR(MP_QSTR___del__), MP_ROM_PTR(&py_ml_model_deinit_obj) },
{ MP_ROM_QSTR(MP_QSTR_predict), MP_ROM_PTR(&py_ml_model_predict_obj) },
};
STATIC MP_DEFINE_CONST_DICT(py_ml_model_locals_dict, py_ml_model_locals_dict_table);
STATIC MP_DEFINE_CONST_OBJ_TYPE(
py_ml_model_type,
MP_QSTR_ml_model,
MP_TYPE_FLAG_NONE,
attr, py_ml_model_attr,
print, py_ml_model_print,
make_new, py_ml_model_make_new,
locals_dict, &py_ml_model_locals_dict
);
extern const mp_obj_type_t py_ml_nms_type;
STATIC const mp_rom_map_elem_t py_ml_globals_dict_table[] = {
{ MP_ROM_QSTR(MP_QSTR___name__), MP_OBJ_NEW_QSTR(MP_QSTR_ml) },
{ MP_ROM_QSTR(MP_QSTR_Model), MP_ROM_PTR(&py_ml_model_type) },
{ MP_ROM_QSTR(MP_QSTR_NMS), MP_ROM_PTR(&py_ml_nms_type) },
{ MP_ROM_QSTR(MP_QSTR_SCALE_NONE), MP_ROM_INT(PY_ML_SCALE_NONE) },
{ MP_ROM_QSTR(MP_QSTR_SCALE_0_1), MP_ROM_INT(PY_ML_SCALE_0_1) },
{ MP_ROM_QSTR(MP_QSTR_SCALE_S1_1), MP_ROM_INT(PY_ML_SCALE_S1_1) },
{ MP_ROM_QSTR(MP_QSTR_SCALE_S128_127), MP_ROM_INT(PY_ML_SCALE_S128_127) },
};
STATIC MP_DEFINE_CONST_DICT(py_ml_globals_dict, py_ml_globals_dict_table);
const mp_obj_module_t ml_module = {
.base = { &mp_type_module },
.globals = (mp_obj_t) &py_ml_globals_dict
};
// Alias for backwards compatibility
MP_REGISTER_EXTENSIBLE_MODULE(MP_QSTR_tf, ml_module);
MP_REGISTER_EXTENSIBLE_MODULE(MP_QSTR_ml, ml_module);
#endif // IMLIB_ENABLE_TFLM
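
A side note on the math in py_ml_input_callback above: the mean/std
normalization is folded into fscale/fadd before the pixel loop, since
((px * fscale + fadd) - mean) / std == px * (fscale / std) + ((fadd - mean) / std).
A standalone check of that algebra (the mean/std values are hypothetical):

#include <assert.h>
#include <math.h>

int main(void) {
    float fscale = 2.0f / 255.0f, fadd = -1.0f; // SCALE_S1_1: 0..255 -> -1..1
    float mean = 0.5f, stdev = 0.25f;           // hypothetical normalization
    // Folded constants, computed the same way as in py_ml_input_callback.
    float fadd2 = (fadd - mean) / stdev;
    float fscale2 = fscale / stdev;
    for (int px = 0; px <= 255; px++) {
        float direct = ((px * fscale + fadd) - mean) / stdev;
        float folded = px * fscale2 + fadd2;
        assert(fabsf(direct - folded) < 1e-4f);
    }
    return 0;
}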

src/omv/modules/py_ml.h (new file)

@@ -0,0 +1,68 @@
/*
* This file is part of the OpenMV project.
*
* Copyright (c) 2013-2021 Ibrahim Abdelkader <iabdalkader@openmv.io>
* Copyright (c) 2013-2021 Kwabena W. Agyeman <kwagyeman@openmv.io>
*
* This work is licensed under the MIT license, see the file LICENSE for details.
*
* Python Machine Learning Module.
*/
#ifndef __PY_ML_H__
#define __PY_ML_H__
typedef enum {
PY_ML_SCALE_NONE,
PY_ML_SCALE_0_1,
PY_ML_SCALE_S1_1,
PY_ML_SCALE_S128_127
} py_ml_scale_t;
typedef enum py_ml_dtype {
PY_ML_DTYPE_INT8,
PY_ML_DTYPE_UINT8,
PY_ML_DTYPE_INT16,
PY_ML_DTYPE_FLOAT
} py_ml_dtype_t;
// TF Model Object.
typedef struct py_ml_model_obj {
mp_obj_base_t base;
unsigned int size;
unsigned char *data;
size_t memory_size;
bool fb_alloc;
size_t inputs_size;
mp_obj_tuple_t *input_shape;
float input_scale;
int input_zero_point;
py_ml_dtype_t input_dtype;
size_t outputs_size;
mp_obj_tuple_t *output_shape;
float output_scale;
int output_zero_point;
py_ml_dtype_t output_dtype;
void *state; // Private context for the backend.
} py_ml_model_obj_t;
// Initialize a model.
int ml_backend_init_model(py_ml_model_obj_t *model);
// Callback to populate the model input data.
typedef void (*ml_backend_input_callback_t) (py_ml_model_obj_t *model, void *arg);
// Callback to get the model output data.
typedef void (*ml_backend_output_callback_t) (py_ml_model_obj_t *model, void *arg);
// Return an input tensor by index.
void *ml_backend_get_input(py_ml_model_obj_t *model, size_t index);
// Return an output tensor by index.
void *ml_backend_get_output(py_ml_model_obj_t *model, size_t index);
// Run inference.
int ml_backend_run_inference(py_ml_model_obj_t *model,
ml_backend_input_callback_t input_callback, // Callback to populate the model input data.
void *input_data, // User data structure passed to input callback.
ml_backend_output_callback_t output_callback, // Callback to use the model output data.
void *output_data); // User data structure passed to output callback.
#endif // __PY_ML_H__
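
Since py_ml.c only reaches the engine through the entry points above, porting
a new engine means implementing this header. A toy sketch of an alternative
"identity" backend follows; everything except the py_ml.h API names is
hypothetical, and the shape/dtype bookkeeping a real backend must fill in
during init is omitted:

#include <string.h>
#include "py/runtime.h"
#include "py_ml.h"

typedef struct {
    float buf[16]; // single shared buffer: the output mirrors the input
} identity_state_t;

int ml_backend_init_model(py_ml_model_obj_t *model) {
    // Persistent, backend-private context hangs off the model object.
    model->state = m_new0(identity_state_t, 1);
    return 0;
}

void *ml_backend_get_input(py_ml_model_obj_t *model, size_t index) {
    (void) index;
    return ((identity_state_t *) model->state)->buf;
}

void *ml_backend_get_output(py_ml_model_obj_t *model, size_t index) {
    (void) index;
    return ((identity_state_t *) model->state)->buf;
}

int ml_backend_run_inference(py_ml_model_obj_t *model,
                             ml_backend_input_callback_t input_callback,
                             void *input_arg,
                             ml_backend_output_callback_t output_callback,
                             void *output_arg) {
    input_callback(model, input_arg);   // module fills the input tensor
    // A real engine would execute the graph here; identity does nothing.
    output_callback(model, output_arg); // module reads the output tensor
    return 0;
}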


@@ -10,25 +10,25 @@
  */
 #include "imlib_config.h"
-#ifdef IMLIB_ENABLE_TF
+#ifdef IMLIB_ENABLE_TFLM
 #include "py/runtime.h"
 #include "py_helper.h"
 // TF NMS Object.
-typedef struct py_tf_nms_obj {
+typedef struct py_ml_nms_obj {
     mp_obj_base_t base;
     int window_w;
     int window_h;
     rectangle_t roi;
     list_t bounding_boxes;
-} py_tf_nms_obj_t;
+} py_ml_nms_obj_t;
-const mp_obj_type_t py_tf_nms_type;
+const mp_obj_type_t py_ml_nms_type;
 // The use of mp_arg_parse_all() is deliberately avoided here to ensure this method remains fast.
-STATIC mp_obj_t py_tf_nms_add_bounding_box(uint n_args, const mp_obj_t *pos_args) {
+STATIC mp_obj_t py_ml_nms_add_bounding_box(uint n_args, const mp_obj_t *pos_args) {
     enum { ARG_self, ARG_xmin, ARG_ymin, ARG_xmax, ARG_ymax, ARG_score, ARG_label_index };
-    py_tf_nms_obj_t *self_in = MP_OBJ_TO_PTR(pos_args[ARG_self]);
+    py_ml_nms_obj_t *self_in = MP_OBJ_TO_PTR(pos_args[ARG_self]);
     bounding_box_lnk_data_t lnk_data;
     lnk_data.score = mp_obj_get_float(pos_args[ARG_score]);
@@ -52,9 +52,9 @@ STATIC mp_obj_t py_tf_nms_add_bounding_box(uint n_args, const mp_obj_t *pos_args
     return mp_const_none;
 }
-STATIC MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(py_tf_nms_add_bounding_box_obj, 7, 7, py_tf_nms_add_bounding_box);
+STATIC MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(py_ml_nms_add_bounding_box_obj, 7, 7, py_ml_nms_add_bounding_box);
-STATIC mp_obj_t py_tf_nms_get_bounding_boxes(uint n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
+STATIC mp_obj_t py_ml_nms_get_bounding_boxes(uint n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
     enum { ARG_threshold, ARG_sigma };
     static const mp_arg_t allowed_args[] = {
         { MP_QSTR_threshold, MP_ARG_OBJ | MP_ARG_KW_ONLY, {.u_rom_obj = MP_ROM_NONE } },
@@ -64,7 +64,7 @@ STATIC mp_obj_t py_tf_nms_get_bounding_boxes(uint n_args, const mp_obj_t *pos_ar
     mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
     mp_arg_parse_all(n_args - 1, pos_args + 1, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
-    py_tf_nms_obj_t *self_in = MP_OBJ_TO_PTR(pos_args[0]);
+    py_ml_nms_obj_t *self_in = MP_OBJ_TO_PTR(pos_args[0]);
     float threshold = py_helper_arg_to_float(args[ARG_threshold].u_obj, 0.1f);
     float sigma = py_helper_arg_to_float(args[ARG_sigma].u_obj, 0.1f);
     int max_label = rectangle_nms_get_bounding_boxes(&self_in->bounding_boxes, threshold, sigma);
@@ -88,9 +88,9 @@ STATIC mp_obj_t py_tf_nms_get_bounding_boxes(uint n_args, const mp_obj_t *pos_ar
     return list;
 }
-STATIC MP_DEFINE_CONST_FUN_OBJ_KW(py_tf_nms_get_bounding_boxes_obj, 1, py_tf_nms_get_bounding_boxes);
+STATIC MP_DEFINE_CONST_FUN_OBJ_KW(py_ml_nms_get_bounding_boxes_obj, 1, py_ml_nms_get_bounding_boxes);
-mp_obj_t py_tf_nms_make_new(const mp_obj_type_t *type, size_t n_args, size_t n_kw, const mp_obj_t *all_args) {
+mp_obj_t py_ml_nms_make_new(const mp_obj_type_t *type, size_t n_args, size_t n_kw, const mp_obj_t *all_args) {
     enum { ARG_window_w, ARG_window_h, ARG_roi };
     static const mp_arg_t allowed_args[] = {
         { MP_QSTR_window_w, MP_ARG_INT | MP_ARG_REQUIRED, {.u_int = 0 } },
@@ -116,8 +116,8 @@ mp_obj_t py_tf_nms_make_new(const mp_obj_type_t *type, size_t n_args, size_t n_k
         mp_raise_msg(&mp_type_ValueError, MP_ERROR_TEXT("Invalid ROI dimensions!"));
     }
-    py_tf_nms_obj_t *model = m_new_obj(py_tf_nms_obj_t);
-    model->base.type = &py_tf_nms_type;
+    py_ml_nms_obj_t *model = m_new_obj(py_ml_nms_obj_t);
+    model->base.type = &py_ml_nms_type;
     model->window_w = args[ARG_window_w].u_int;
     model->window_h = args[ARG_window_h].u_int;
     model->roi = roi;
@@ -125,19 +125,18 @@ mp_obj_t py_tf_nms_make_new(const mp_obj_type_t *type, size_t n_args, size_t n_k
     return MP_OBJ_FROM_PTR(model);
 }
-STATIC const mp_rom_map_elem_t py_tf_nms_locals_table[] = {
-    { MP_ROM_QSTR(MP_QSTR_add_bounding_box), MP_ROM_PTR(&py_tf_nms_add_bounding_box_obj) },
-    { MP_ROM_QSTR(MP_QSTR_get_bounding_boxes), MP_ROM_PTR(&py_tf_nms_get_bounding_boxes_obj) },
+STATIC const mp_rom_map_elem_t py_ml_nms_locals_table[] = {
+    { MP_ROM_QSTR(MP_QSTR_add_bounding_box), MP_ROM_PTR(&py_ml_nms_add_bounding_box_obj) },
+    { MP_ROM_QSTR(MP_QSTR_get_bounding_boxes), MP_ROM_PTR(&py_ml_nms_get_bounding_boxes_obj) },
 };
-STATIC MP_DEFINE_CONST_DICT(py_tf_nms_locals_dict, py_tf_nms_locals_table);
+STATIC MP_DEFINE_CONST_DICT(py_ml_nms_locals_dict, py_ml_nms_locals_table);
 MP_DEFINE_CONST_OBJ_TYPE(
-    py_tf_nms_type,
+    py_ml_nms_type,
     MP_QSTR_tf_nms,
     MP_TYPE_FLAG_NONE,
-    make_new, py_tf_nms_make_new,
-    locals_dict, &py_tf_nms_locals_dict
+    make_new, py_ml_nms_make_new,
+    locals_dict, &py_ml_nms_locals_dict
 );
-#endif // IMLIB_ENABLE_TF
+#endif // IMLIB_ENABLE_TFLM


@@ -1,748 +0,0 @@
/*
* This file is part of the OpenMV project.
*
* Copyright (c) 2013-2024 Ibrahim Abdelkader <iabdalkader@openmv.io>
* Copyright (c) 2013-2024 Kwabena W. Agyeman <kwagyeman@openmv.io>
*
* This work is licensed under the MIT license, see the file LICENSE for details.
*
* Python Tensorflow library wrapper.
*/
#include <stdio.h>
#include "py/runtime.h"
#include "py/obj.h"
#include "py/objlist.h"
#include "py/objtuple.h"
#include "py/binary.h"
#include "py_helper.h"
#include "imlib_config.h"
#ifdef IMLIB_ENABLE_TF
#include "py_image.h"
#include "file_utils.h"
#include "py_tf.h"
#include "libtf_builtin_models.h"
#define PY_TF_LOG_BUFFER_SIZE (512)
#define PY_TF_GRAYSCALE_RANGE ((COLOR_GRAYSCALE_MAX) -(COLOR_GRAYSCALE_MIN))
#define PY_TF_GRAYSCALE_MID (((PY_TF_GRAYSCALE_RANGE) +1) / 2)
typedef enum {
PY_TF_SCALE_NONE,
PY_TF_SCALE_0_1,
PY_TF_SCALE_S1_1,
PY_TF_SCALE_S128_127
} py_tf_scale_t;
char *py_tf_log_buffer = NULL;
static size_t py_tf_log_index = 0;
void py_tf_alloc_log_buffer() {
py_tf_log_index = 0;
py_tf_log_buffer = (char *) fb_alloc0(PY_TF_LOG_BUFFER_SIZE + 1, FB_ALLOC_NO_HINT);
}
void libtf_log_handler(const char *s) {
for (size_t i = 0, j = strlen(s); i < j; i++) {
if (py_tf_log_index < PY_TF_LOG_BUFFER_SIZE) {
py_tf_log_buffer[py_tf_log_index++] = s[i];
}
}
}
STATIC const char *py_tf_map_datatype(libtf_datatype_t datatype) {
if (datatype == LIBTF_DATATYPE_UINT8) {
return "uint8";
} else if (datatype == LIBTF_DATATYPE_INT8) {
return "int8";
} else {
return "float";
}
}
// TF Model Output Object.
typedef struct py_tf_model_output_obj {
mp_obj_base_t base;
void *model_output;
libtf_parameters_t *params;
size_t output_size;
} py_tf_model_output_obj_t;
STATIC mp_obj_t py_tf_model_output_subscr(mp_obj_t self_in, mp_obj_t index, mp_obj_t value) {
if (value == MP_OBJ_SENTINEL) {
// load
py_tf_model_output_obj_t *self = MP_OBJ_TO_PTR(self_in);
void *model_output = self->model_output;
libtf_parameters_t *params = self->params;
if (MP_OBJ_IS_TYPE(index, &mp_type_slice)) {
mp_bound_slice_t slice;
if (!mp_seq_get_fast_slice_indexes(self->output_size, index, &slice)) {
mp_raise_msg(&mp_type_OSError, MP_ERROR_TEXT("only slices with step=1 (aka None) are supported"));
}
mp_obj_tuple_t *result = mp_obj_new_tuple(slice.stop - slice.start, NULL);
for (size_t i = 0; i < result->len; i++) {
size_t j = i + slice.start;
switch (params->output_datatype) {
case LIBTF_DATATYPE_FLOAT: {
result->items[i] = mp_obj_new_float(((float *) model_output)[j]);
break;
}
case LIBTF_DATATYPE_INT8: {
int8_t mo = ((int8_t *) model_output)[i];
result->items[i] = mp_obj_new_float((mo - params->output_zero_point) * params->output_scale);
break;
}
case LIBTF_DATATYPE_UINT8: {
uint8_t mo = ((uint8_t *) model_output)[i];
result->items[i] = mp_obj_new_float((mo - params->output_zero_point) * params->output_scale);
break;
}
}
}
return result;
}
size_t i = mp_get_index(self->base.type, self->output_size, index, false);
switch (params->output_datatype) {
case LIBTF_DATATYPE_FLOAT: {
return mp_obj_new_float(((float *) model_output)[i]);
}
case LIBTF_DATATYPE_INT8: {
int8_t mo = ((int8_t *) model_output)[i];
return mp_obj_new_float((mo - params->output_zero_point) * params->output_scale);
}
case LIBTF_DATATYPE_UINT8: {
uint8_t mo = ((uint8_t *) model_output)[i];
return mp_obj_new_float((mo - params->output_zero_point) * params->output_scale);
}
}
}
return MP_OBJ_NULL; // op not supported
}
STATIC mp_obj_t py_tf_model_output_get_image(uint n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
enum { ARG_channel, ARG_roi, ARG_scale };
static const mp_arg_t allowed_args[] = {
{ MP_QSTR_channel, MP_ARG_INT | MP_ARG_REQUIRED, {.u_int = 0} },
{ MP_QSTR_roi, MP_ARG_OBJ | MP_ARG_KW_ONLY, {.u_rom_obj = MP_ROM_NONE} },
{ MP_QSTR_scale, MP_ARG_INT | MP_ARG_KW_ONLY, {.u_int = PY_TF_SCALE_0_1} },
};
// Parse args.
mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
mp_arg_parse_all(n_args - 1, pos_args + 1, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
py_tf_model_output_obj_t *self = MP_OBJ_TO_PTR(pos_args[0]);
image_t temp = {.w = self->params->output_width, .h = self->params->output_height};
rectangle_t roi = py_helper_arg_to_roi(args[ARG_roi].u_obj, &temp);
image_t img = {
.w = roi.w,
.h = roi.h,
.pixfmt = PIXFORMAT_GRAYSCALE,
.pixels = xalloc(roi.w * roi.h)
};
int channel = args[ARG_channel].u_int;
int shift = (self->params->output_datatype == LIBTF_DATATYPE_INT8) ? PY_TF_GRAYSCALE_MID : 0;
float fscale = 1.0f, fadd = 0.0f;
switch (args[ARG_scale].u_int) {
case PY_TF_SCALE_0_1: // convert 0->1 to 0->255
fscale = 255.0f;
break;
case PY_TF_SCALE_S1_1: // convert -1->1 to 0->255
fscale = 127.5f;
fadd = 127.5f;
break;
case PY_TF_SCALE_S128_127: // convert -128->127 to 0->255
fadd = 128.0f;
break;
case PY_TF_SCALE_NONE: // convert 0->255 to 0->255
default:
break;
}
for (int y = 0; y < roi.h; y++) {
int row_index = (y + roi.y) * self->params->output_width * self->params->output_channels;
uint8_t *row_ptr = IMAGE_COMPUTE_GRAYSCALE_PIXEL_ROW_PTR(&img, y);
for (int x = 0; x < roi.w; x++) {
int index = row_index + ((x + roi.x) * self->params->output_channels) + channel;
if (self->params->output_datatype == LIBTF_DATATYPE_FLOAT) {
float mo = (((float *) self->model_output)[index] * fscale) + fadd;
IMAGE_PUT_GRAYSCALE_PIXEL_FAST(row_ptr, x, fast_floorf(mo));
} else {
uint8_t mo = ((uint8_t *) self->model_output)[index] ^ shift;
IMAGE_PUT_GRAYSCALE_PIXEL_FAST(row_ptr, x, mo);
}
}
}
return py_image_from_struct(&img);
}
STATIC MP_DEFINE_CONST_FUN_OBJ_KW(py_tf_model_output_get_image_obj, 1, py_tf_model_output_get_image);
STATIC const mp_rom_map_elem_t py_tf_model_output_locals_dict_table[] = {
{ MP_ROM_QSTR(MP_QSTR_get_image), MP_ROM_PTR(&py_tf_model_output_get_image_obj) },
};
STATIC MP_DEFINE_CONST_DICT(py_tf_model_output_locals_dict, py_tf_model_output_locals_dict_table);
STATIC MP_DEFINE_CONST_OBJ_TYPE(
py_tf_model_output_type,
MP_QSTR_tf_model_output,
MP_TYPE_FLAG_NONE,
subscr, py_tf_model_output_subscr,
locals_dict, &py_tf_model_output_locals_dict
);
// TF Input/Output callback functions.
typedef struct py_tf_input_callback_data {
image_t *img;
rectangle_t *roi;
py_tf_scale_t scale;
float mean[3];
float stdev[3];
} py_tf_input_callback_data_t;
STATIC void py_tf_input_callback(void *callback_data,
void *model_input,
libtf_parameters_t *params) {
py_tf_input_callback_data_t *arg = (py_tf_input_callback_data_t *) callback_data;
int shift = (params->input_datatype == LIBTF_DATATYPE_INT8) ? PY_TF_GRAYSCALE_MID : 0;
float fscale = 1.0f, fadd = 0.0f;
switch (arg->scale) {
case PY_TF_SCALE_0_1: // convert 0->255 to 0->1
fscale = 1.0f / 255.0f;
break;
case PY_TF_SCALE_S1_1: // convert 0->255 to -1->1
fscale = 2.0f / 255.0f;
fadd = -1.0f;
break;
case PY_TF_SCALE_S128_127: // convert 0->255 to -128->127
fadd = -128.0f;
break;
case PY_TF_SCALE_NONE: // convert 0->255 to 0->255
default:
break;
}
float fscale_r = fscale, fadd_r = fadd;
float fscale_g = fscale, fadd_g = fadd;
float fscale_b = fscale, fadd_b = fadd;
// To normalize the input image we need to subtract the mean and divide by the standard deviation.
// We can do this by applying the normalization to fscale and fadd outside the loop.
// Red
fadd_r = (fadd_r - arg->mean[0]) / arg->stdev[0];
fscale_r /= arg->stdev[0];
// Green
fadd_g = (fadd_g - arg->mean[1]) / arg->stdev[1];
fscale_g /= arg->stdev[1];
// Blue
fadd_b = (fadd_b - arg->mean[2]) / arg->stdev[2];
fscale_b /= arg->stdev[2];
// Grayscale -> Y = 0.299R + 0.587G + 0.114B
float mean = (arg->mean[0] * 0.299f) + (arg->mean[1] * 0.587f) + (arg->mean[2] * 0.114f);
float std = (arg->stdev[0] * 0.299f) + (arg->stdev[1] * 0.587f) + (arg->stdev[2] * 0.114f);
fadd = (fadd - mean) / std;
fscale /= std;
image_t dst_img;
dst_img.w = params->input_width;
dst_img.h = params->input_height;
dst_img.data = (uint8_t *) model_input;
if (params->input_channels == 1) {
dst_img.pixfmt = PIXFORMAT_GRAYSCALE;
} else if (params->input_channels == 3) {
dst_img.pixfmt = PIXFORMAT_RGB565;
} else {
mp_raise_msg(&mp_type_ValueError, MP_ERROR_TEXT("Expected model input channels to be 1 or 3!"));
}
imlib_draw_image(&dst_img, arg->img, 0, 0, 1.0f, 1.0f, arg->roi,
-1, 256, NULL, NULL, IMAGE_HINT_BILINEAR | IMAGE_HINT_CENTER |
IMAGE_HINT_SCALE_ASPECT_EXPAND | IMAGE_HINT_BLACK_BACKGROUND, NULL, NULL, NULL);
int size = (params->input_width * params->input_height) - 1; // must be int per countdown loop
if (params->input_channels == 1) {
// GRAYSCALE
if (params->input_datatype == LIBTF_DATATYPE_FLOAT) {
// convert u8 -> f32
uint8_t *model_input_u8 = (uint8_t *) model_input;
float *model_input_f32 = (float *) model_input;
for (; size >= 0; size -= 1) {
model_input_f32[size] = (model_input_u8[size] * fscale) + fadd;
}
} else {
if (shift) {
// convert u8 -> s8
uint8_t *model_input_8 = (uint8_t *) model_input;
#if (__ARM_ARCH > 6)
for (; size >= 3; size -= 4) {
*((uint32_t *) (model_input_8 + size - 3)) ^= 0x80808080;
}
#endif
for (; size >= 0; size -= 1) {
model_input_8[size] ^= PY_TF_GRAYSCALE_MID;
}
}
}
} else if (params->input_channels == 3) {
// RGB888
int rgb_size = size * 3; // must be int per countdown loop
if (params->input_datatype == LIBTF_DATATYPE_FLOAT) {
uint16_t *model_input_u16 = (uint16_t *) model_input;
float *model_input_f32 = (float *) model_input;
for (; size >= 0; size -= 1, rgb_size -= 3) {
int pixel = model_input_u16[size];
model_input_f32[rgb_size] = (COLOR_RGB565_TO_R8(pixel) * fscale_r) + fadd_r;
model_input_f32[rgb_size + 1] = (COLOR_RGB565_TO_G8(pixel) * fscale_g) + fadd_g;
model_input_f32[rgb_size + 2] = (COLOR_RGB565_TO_B8(pixel) * fscale_b) + fadd_b;
}
} else {
uint16_t *model_input_u16 = (uint16_t *) model_input;
uint8_t *model_input_8 = (uint8_t *) model_input;
for (; size >= 0; size -= 1, rgb_size -= 3) {
int pixel = model_input_u16[size];
model_input_8[rgb_size] = COLOR_RGB565_TO_R8(pixel) ^ shift;
model_input_8[rgb_size + 1] = COLOR_RGB565_TO_G8(pixel) ^ shift;
model_input_8[rgb_size + 2] = COLOR_RGB565_TO_B8(pixel) ^ shift;
}
}
}
}
STATIC void py_tf_output_callback(void *callback_data,
void *model_output,
libtf_parameters_t *params) {
mp_obj_t *arg = (mp_obj_t *) callback_data;
size_t len = params->output_height * params->output_width * params->output_channels;
*arg = mp_obj_new_list(len, NULL);
if (params->output_datatype == LIBTF_DATATYPE_FLOAT) {
for (size_t i = 0; i < len; i++) {
((mp_obj_list_t *) *arg)->items[i] =
mp_obj_new_float(((float *) model_output)[i]);
}
} else if (params->output_datatype == LIBTF_DATATYPE_INT8) {
for (size_t i = 0; i < len; i++) {
((mp_obj_list_t *) *arg)->items[i] =
mp_obj_new_float( ((float) (((int8_t *) model_output)[i] - params->output_zero_point)) *
params->output_scale);
}
} else {
for (size_t i = 0; i < len; i++) {
((mp_obj_list_t *) *arg)->items[i] =
mp_obj_new_float( ((float) (((uint8_t *) model_output)[i] - params->output_zero_point)) *
params->output_scale);
}
}
}
STATIC void py_tf_regression_input_callback(void *callback_data,
void *model_input,
libtf_parameters_t *params) {
size_t len;
mp_obj_t *items;
mp_obj_get_array(*((mp_obj_t *) callback_data), &len, &items);
if (len == (params->input_height * params->input_width * params->input_channels)) {
if (params->input_datatype == LIBTF_DATATYPE_FLOAT) {
float *model_input_float = (float *) model_input;
for (size_t i = 0; i < len; i++) {
model_input_float[i] = mp_obj_get_float(items[i]);
}
} else {
uint8_t *model_input_8 = (uint8_t *) model_input;
for (size_t i = 0; i < len; i++) {
model_input_8[i] = fast_roundf((mp_obj_get_float(items[i]) /
params->input_scale) + params->input_zero_point);
}
}
} else if (len == params->input_height) {
for (size_t i = 0; i < len; i++) {
size_t row_len;
mp_obj_t *row_items;
mp_obj_get_array(items[i], &row_len, &row_items);
if (row_len == (params->input_width * params->input_channels)) {
if (params->input_datatype == LIBTF_DATATYPE_FLOAT) {
float *model_input_float = (float *) model_input;
for (size_t j = 0; j < row_len; j++) {
size_t index = (i * row_len) + j;
model_input_float[index] = mp_obj_get_float(row_items[index]);
}
} else {
uint8_t *model_input_8 = (uint8_t *) model_input;
for (size_t j = 0; j < row_len; j++) {
size_t index = (i * row_len) + j;
model_input_8[index] = fast_roundf((mp_obj_get_float(row_items[index]) /
params->input_scale) + params->input_zero_point);
}
}
} else if (row_len == params->input_height) {
for (size_t j = 0; j < row_len; j++) {
size_t c_len;
mp_obj_t *c_items;
mp_obj_get_array(row_items[i], &c_len, &c_items);
if (c_len == params->input_channels) {
if (params->input_datatype == LIBTF_DATATYPE_FLOAT) {
float *model_input_float = (float *) model_input;
for (size_t k = 0; k < c_len; k++) {
size_t index = (i * row_len) + (j * c_len) + k;
model_input_float[index] = mp_obj_get_float(c_items[index]);
}
} else {
uint8_t *model_input_8 = (uint8_t *) model_input;
for (size_t k = 0; k < c_len; k++) {
size_t index = (i * row_len) + (j * c_len) + k;
model_input_8[index] = fast_roundf((mp_obj_get_float(c_items[index]) /
params->input_scale) + params->input_zero_point);
}
}
} else {
mp_raise_msg(&mp_type_ValueError, MP_ERROR_TEXT("Channel count mismatch!"));
}
}
} else {
mp_raise_msg(&mp_type_ValueError, MP_ERROR_TEXT("Column count mismatch!"));
}
}
} else {
mp_raise_msg(&mp_type_ValueError, MP_ERROR_TEXT("Row count mismatch!"));
}
}
typedef struct py_tf_predict_callback_data {
mp_obj_t model;
rectangle_t *roi;
mp_obj_t callback;
mp_obj_t *out;
} py_tf_predict_callback_data_t;
STATIC void py_tf_predict_output_callback(void *callback_data,
void *model_output,
libtf_parameters_t *params) {
py_tf_predict_callback_data_t *arg = (py_tf_predict_callback_data_t *) callback_data;
py_tf_model_obj_t *model = MP_OBJ_TO_PTR(arg->model);
mp_obj_t rect = mp_obj_new_tuple(4, (mp_obj_t []) {mp_obj_new_int(arg->roi->x),
mp_obj_new_int(arg->roi->y),
mp_obj_new_int(arg->roi->w),
mp_obj_new_int(arg->roi->h)});
// This will support multiple output tensors once the API is updated.
mp_obj_list_t *list = MP_OBJ_TO_PTR(mp_obj_new_list(0, NULL));
py_tf_model_output_obj_t *o = m_new_obj(py_tf_model_output_obj_t);
o->base.type = &py_tf_model_output_type;
o->model_output = model_output;
o->params = params;
o->output_size = params->output_height * params->output_width * params->output_channels;
mp_obj_list_append(list, o);
model->output_list = MP_OBJ_FROM_PTR(list);
*(arg->out) = mp_call_function_2(arg->callback, model, rect);
model->output_list = mp_const_none;
}
// TF Model Object.
static const mp_obj_type_t py_tf_model_type;
STATIC void py_tf_model_print(const mp_print_t *print, mp_obj_t self_in, mp_print_kind_t kind) {
py_tf_model_obj_t *self = MP_OBJ_TO_PTR(self_in);
mp_printf(print,
"{\"len\":%d, \"ram\":%d, "
"\"input_height\":%d, \"input_width\":%d, \"input_channels\":%d, \"input_datatype\":\"%s\", "
"\"input_scale\":%f, \"input_zero_point\":%d, "
"\"output_height\":%d, \"output_width\":%d, \"output_channels\":%d, \"output_datatype\":\"%s\", "
"\"output_scale\":%f, \"output_zero_point\":%d}",
self->size, self->params.tensor_arena_size,
self->params.input_height, self->params.input_width, self->params.input_channels,
py_tf_map_datatype(self->params.input_datatype),
(double) self->params.input_scale, self->params.input_zero_point,
self->params.output_height, self->params.output_width, self->params.output_channels,
py_tf_map_datatype(self->params.output_datatype),
(double) self->params.output_scale, self->params.output_zero_point);
}
STATIC mp_obj_t py_tf_model_predict(uint n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
enum { ARG_roi, ARG_callback, ARG_scale, ARG_mean, ARG_stdev };
static const mp_arg_t allowed_args[] = {
{ MP_QSTR_roi, MP_ARG_OBJ | MP_ARG_KW_ONLY, {.u_rom_obj = MP_ROM_NONE} },
{ MP_QSTR_callback, MP_ARG_OBJ | MP_ARG_KW_ONLY, {.u_rom_obj = MP_ROM_NONE} },
{ MP_QSTR_scale, MP_ARG_INT | MP_ARG_KW_ONLY, {.u_int = PY_TF_SCALE_0_1} },
{ MP_QSTR_mean, MP_ARG_OBJ | MP_ARG_KW_ONLY, {.u_rom_obj = MP_ROM_NONE} },
{ MP_QSTR_stdev, MP_ARG_OBJ | MP_ARG_KW_ONLY, {.u_rom_obj = MP_ROM_NONE} },
};
// Parse args.
mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
mp_arg_parse_all(n_args - 2, pos_args + 2, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
fb_alloc_mark();
py_tf_alloc_log_buffer();
py_tf_model_obj_t *model = MP_OBJ_TO_PTR(pos_args[0]);
uint8_t *tensor_arena = fb_alloc(model->params.tensor_arena_size, FB_ALLOC_PREFER_SPEED | FB_ALLOC_CACHE_ALIGN);
mp_obj_t output_callback_data;
int invoke_result;
if (MP_OBJ_IS_TYPE(pos_args[1], &mp_type_tuple) || MP_OBJ_IS_TYPE(pos_args[1], &mp_type_list)) {
invoke_result = libtf_invoke(model->data,
tensor_arena,
&model->params,
py_tf_regression_input_callback,
(void *) &pos_args[1],
py_tf_output_callback,
&output_callback_data);
} else {
image_t *image = py_helper_arg_to_image(pos_args[1], ARG_IMAGE_ANY);
rectangle_t roi = py_helper_arg_to_roi(args[ARG_roi].u_obj, image);
py_tf_input_callback_data_t py_tf_input_callback_data = {
.img = image,
.roi = &roi,
.scale = args[ARG_scale].u_int,
.mean = {0.0f, 0.0f, 0.0f},
.stdev = {1.0f, 1.0f, 1.0f}
};
py_helper_arg_to_float_array(args[ARG_mean].u_obj, py_tf_input_callback_data.mean, 3);
py_helper_arg_to_float_array(args[ARG_stdev].u_obj, py_tf_input_callback_data.stdev, 3);
if (args[ARG_callback].u_obj != mp_const_none) {
py_tf_predict_callback_data_t py_tf_predict_output_callback_data;
py_tf_predict_output_callback_data.model = model;
py_tf_predict_output_callback_data.roi = &roi;
py_tf_predict_output_callback_data.callback = args[ARG_callback].u_obj;
py_tf_predict_output_callback_data.out = &output_callback_data;
invoke_result = libtf_invoke(model->data,
tensor_arena,
&model->params,
py_tf_input_callback,
&py_tf_input_callback_data,
py_tf_predict_output_callback,
&py_tf_predict_output_callback_data);
} else {
invoke_result = libtf_invoke(model->data,
tensor_arena,
&model->params,
py_tf_input_callback,
&py_tf_input_callback_data,
py_tf_output_callback,
&output_callback_data);
}
}
if (invoke_result != 0) {
// Note: can't use MP_ERROR_TEXT here; the log buffer is built at runtime.
mp_raise_msg(&mp_type_OSError, (mp_rom_error_text_t) py_tf_log_buffer);
}
fb_alloc_free_till_mark();
return output_callback_data;
}
STATIC MP_DEFINE_CONST_FUN_OBJ_KW(py_tf_model_predict_obj, 2, py_tf_model_predict);
STATIC void py_tf_model_attr(mp_obj_t self_in, qstr attr, mp_obj_t *dest) {
py_tf_model_obj_t *self = MP_OBJ_TO_PTR(self_in);
const char *str;
if (dest[0] == MP_OBJ_NULL) {
// Load attribute.
switch (attr) {
case MP_QSTR_len:
dest[0] = mp_obj_new_int(self->size);
break;
case MP_QSTR_ram:
dest[0] = mp_obj_new_int(self->params.tensor_arena_size);
break;
case MP_QSTR_input_shape:
dest[0] = self->input_shape;
break;
case MP_QSTR_input_datatype:
str = py_tf_map_datatype(self->params.input_datatype);
dest[0] = mp_obj_new_str(str, strlen(str));
break;
case MP_QSTR_input_scale:
dest[0] = mp_obj_new_float(self->params.input_scale);
break;
case MP_QSTR_input_zero_point:
dest[0] = mp_obj_new_int(self->params.input_zero_point);
break;
case MP_QSTR_output_shape:
dest[0] = self->output_shape;
break;
case MP_QSTR_output_datatype:
str = py_tf_map_datatype(self->params.output_datatype);
dest[0] = mp_obj_new_str(str, strlen(str));
break;
case MP_QSTR_output_scale:
dest[0] = mp_obj_new_float(self->params.output_scale);
break;
case MP_QSTR_output_zero_point:
dest[0] = mp_obj_new_int(self->params.output_zero_point);
break;
case MP_QSTR_output:
dest[0] = self->output_list;
break;
default:
// Continue lookup in locals_dict.
dest[1] = MP_OBJ_SENTINEL;
break;
}
}
}
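// Illustrative attribute access from MicroPython (a sketch; "net" is assumed
// to be a loaded Model instance):
//
//   print(net.input_shape, net.input_datatype, net.input_scale)
//   print(net.output_shape, net.output_datatype, net.output_zero_point)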
STATIC mp_obj_t py_tf_model_make_new(const mp_obj_type_t *type, size_t n_args, size_t n_kw, const mp_obj_t *all_args) {
enum { ARG_path, ARG_load_to_fb };
static const mp_arg_t allowed_args[] = {
{ MP_QSTR_path, MP_ARG_REQUIRED | MP_ARG_OBJ },
{ MP_QSTR_load_to_fb, MP_ARG_BOOL | MP_ARG_KW_ONLY, {.u_bool = false } },
};
// Parse args.
mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
mp_arg_parse_all_kw_array(n_args, n_kw, all_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
fb_alloc_mark();
const char *path = mp_obj_str_get_str(args[ARG_path].u_obj);
py_tf_model_obj_t *model = m_new_obj_with_finaliser(py_tf_model_obj_t);
model->base.type = &py_tf_model_type;
model->data = NULL;
model->fb_alloc = args[ARG_load_to_fb].u_bool;
mp_obj_list_t *labels = NULL;
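// Resolve the path against the built-in models compiled into the firmware
// first; fall back to loading a model file from the filesystem below.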
for (int i = 0; i < MP_ARRAY_SIZE(libtf_builtin_models); i++) {
const libtf_builtin_model_t *_model = &libtf_builtin_models[i];
if (!strcmp(path, _model->name)) {
// Load model data.
model->size = _model->size;
model->data = (unsigned char *) _model->data;
// Load model labels.
labels = MP_OBJ_TO_PTR(mp_obj_new_list(_model->n_labels, NULL));
for (int l = 0; l < _model->n_labels; l++) {
const char *label = _model->labels[l];
labels->items[l] = mp_obj_new_str(label, strlen(label));
}
break;
}
}
if (model->data == NULL) {
#if defined(IMLIB_ENABLE_IMAGE_FILE_IO)
FIL fp;
file_open(&fp, path, false, FA_READ | FA_OPEN_EXISTING);
model->size = f_size(&fp);
model->data = model->fb_alloc ? fb_alloc(model->size, FB_ALLOC_PREFER_SIZE) : xalloc(model->size);
file_read(&fp, model->data, model->size);
file_close(&fp);
#else
mp_raise_msg(&mp_type_OSError, MP_ERROR_TEXT("File I/O is not supported"));
#endif
}
py_tf_alloc_log_buffer();
uint32_t tensor_arena_size;
uint8_t *tensor_arena = fb_alloc_all(&tensor_arena_size, FB_ALLOC_PREFER_SIZE);
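// Probe the model once with all remaining frame-buffer memory to discover
// its actual arena requirement and I/O parameters; the temporary arena and
// the log buffer are both freed again below.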
if (libtf_get_parameters(model->data, tensor_arena, tensor_arena_size, &model->params) != 0) {
// Note: can't use MP_ERROR_TEXT here; the log buffer is built at runtime.
mp_raise_msg(&mp_type_OSError, (mp_rom_error_text_t) py_tf_log_buffer);
}
fb_free(); // free tensor_arena
fb_free(); // free log buffer
model->input_shape = mp_obj_new_tuple(3, (mp_obj_t []) {mp_obj_new_int(model->params.input_height),
mp_obj_new_int(model->params.input_width),
mp_obj_new_int(model->params.input_channels)});
model->output_shape = mp_obj_new_tuple(3, (mp_obj_t []) {mp_obj_new_int(model->params.output_height),
mp_obj_new_int(model->params.output_width),
mp_obj_new_int(model->params.output_channels)});
model->output_list = mp_const_none;
if (model->fb_alloc) {
// The model data will not be freed on exceptions.
fb_alloc_mark_permanent();
} else {
fb_alloc_free_till_mark();
}
if (labels == NULL) {
return MP_OBJ_FROM_PTR(model);
} else {
return mp_obj_new_tuple(2, (mp_obj_t []) {MP_OBJ_FROM_PTR(labels), MP_OBJ_FROM_PTR(model)});
}
}
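// Illustrative construction from MicroPython (a sketch; the path and model
// name are assumptions):
//
//   net = tf.Model("/model.tflite", load_to_fb=True)  # model file on disk
//   labels, net = tf.Model("person_detection")        # built-in model returns (labels, model)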
STATIC mp_obj_t py_tf_model_deinit(mp_obj_t self_in) {
py_tf_model_obj_t *model = MP_OBJ_TO_PTR(self_in);
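// Only fb-allocated model data needs an explicit release here; data
// allocated with xalloc() is reclaimed by the garbage collector.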
if (model->fb_alloc) {
fb_alloc_free_till_mark_past_mark_permanent();
}
return mp_const_none;
}
STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_tf_model_deinit_obj, py_tf_model_deinit);
STATIC const mp_rom_map_elem_t py_tf_model_locals_dict_table[] = {
{ MP_ROM_QSTR(MP_QSTR___del__), MP_ROM_PTR(&py_tf_model_deinit_obj) },
{ MP_ROM_QSTR(MP_QSTR_predict), MP_ROM_PTR(&py_tf_model_predict_obj) },
};
STATIC MP_DEFINE_CONST_DICT(py_tf_model_locals_dict, py_tf_model_locals_dict_table);
STATIC MP_DEFINE_CONST_OBJ_TYPE(
py_tf_model_type,
MP_QSTR_tf_model,
MP_TYPE_FLAG_NONE,
attr, py_tf_model_attr,
print, py_tf_model_print,
make_new, py_tf_model_make_new,
locals_dict, &py_tf_model_locals_dict
);
extern const mp_obj_type_t py_tf_nms_type;
STATIC const mp_rom_map_elem_t py_tf_globals_dict_table[] = {
{ MP_ROM_QSTR(MP_QSTR___name__), MP_ROM_QSTR(MP_QSTR_tf) },
{ MP_ROM_QSTR(MP_QSTR_SCALE_NONE), MP_ROM_INT(PY_TF_SCALE_NONE) },
{ MP_ROM_QSTR(MP_QSTR_SCALE_0_1), MP_ROM_INT(PY_TF_SCALE_0_1) },
{ MP_ROM_QSTR(MP_QSTR_SCALE_S1_1), MP_ROM_INT(PY_TF_SCALE_S1_1) },
{ MP_ROM_QSTR(MP_QSTR_SCALE_S128_127), MP_ROM_INT(PY_TF_SCALE_S128_127) },
{ MP_ROM_QSTR(MP_QSTR_Model), MP_ROM_PTR(&py_tf_model_type) },
{ MP_ROM_QSTR(MP_QSTR_NMS), MP_ROM_PTR(&py_tf_nms_type) },
};
STATIC MP_DEFINE_CONST_DICT(py_tf_globals_dict, py_tf_globals_dict_table);
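// The SCALE_* constants select the input normalization applied by predict():
// SCALE_NONE feeds raw pixel values, SCALE_0_1 maps them to [0, 1],
// SCALE_S1_1 to [-1, 1], and SCALE_S128_127 to [-128, 127].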
const mp_obj_module_t tf_module = {
.base = { &mp_type_module },
.globals = (mp_obj_dict_t *) &py_tf_globals_dict
};
MP_REGISTER_MODULE(MP_QSTR_tf, tf_module);
#endif // IMLIB_ENABLE_TF


@ -1,40 +0,0 @@
/*
* This file is part of the OpenMV project.
*
* Copyright (c) 2013-2021 Ibrahim Abdelkader <iabdalkader@openmv.io>
* Copyright (c) 2013-2021 Kwabena W. Agyeman <kwagyeman@openmv.io>
*
* This work is licensed under the MIT license, see the file LICENSE for details.
*
* Python Tensorflow library wrapper.
*/
#ifndef __PY_TF_H__
#define __PY_TF_H__
#include "libtf.h"
#include "imlib_config.h"
// TF Model Object.
typedef struct py_tf_model_obj {
mp_obj_base_t base;
unsigned int size;
unsigned char *data;
bool fb_alloc;
mp_obj_t input_shape;
mp_obj_t output_shape;
mp_obj_t output_list;
libtf_parameters_t params;
} py_tf_model_obj_t;
extern char *py_tf_log_buffer;
void py_tf_alloc_log_buffer();
// Functionality select
#if IMLIB_ENABLE_TF == IMLIB_TF_FULLOPS
#define libtf_get_parameters libtf_get_parameters_fullops
#define libtf_invoke libtf_invoke_fullops
#elif IMLIB_ENABLE_TF == IMLIB_TF_DEFAULT
#define libtf_get_parameters libtf_get_parameters_default
#define libtf_invoke libtf_invoke_default
#endif
#endif // __PY_TF_H__