Merge pull request #2133 from kwagyeman/kwabena/tensor_flow_update_2_2024

modules/py_tf: Refactor regression() code to handle arrays correctly.
This commit is contained in:
Ibrahim Abdelkader 2024-02-11 21:37:05 +02:00 committed by GitHub
commit 262eb98e43
17 changed files with 159 additions and 74 deletions

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@ -1,5 +1,5 @@
/* This file is part of the OpenMV project.
* Copyright (c) 2013-2023 Ibrahim Abdelkader <iabdalkader@openmv.io> & Kwabena W. Agyeman <kwagyeman@openmv.io>
* Copyright (c) 2013-2024 Ibrahim Abdelkader <iabdalkader@openmv.io> & Kwabena W. Agyeman <kwagyeman@openmv.io>
* This work is licensed under the MIT license, see the file LICENSE for details.
*/
@ -33,10 +33,18 @@ typedef struct libtf_parameters {
// Call this first to get the model parameters.
// Returns 0 on success and 1 on failure.
// Errors are printed to stdout.
int libtf_get_parameters(const unsigned char *model_data, // TensorFlow Lite binary model (8-bit quant).
                         unsigned char *tensor_arena, // As big as you can make it scratch buffer.
                         size_t tensor_arena_size, // Size of the above scratch buffer.
                         libtf_parameters_t *params); // Struct to hold model parameters.
int libtf_get_parameters_default(const unsigned char *model_data, // TensorFlow Lite binary model (8-bit quant).
                                 unsigned char *tensor_arena, // As big as you can make it scratch buffer.
                                 size_t tensor_arena_size, // Size of the above scratch buffer.
                                 libtf_parameters_t *params); // Struct to hold model parameters.
// Call this first to get the model parameters.
// Returns 0 on success and 1 on failure.
// Errors are printed to stdout.
int libtf_get_parameters_fullops(const unsigned char *model_data, // TensorFlow Lite binary model (8-bit quant).
                                 unsigned char *tensor_arena, // As big as you can make it scratch buffer.
                                 size_t tensor_arena_size, // Size of the above scratch buffer.
                                 libtf_parameters_t *params); // Struct to hold model parameters.
// Callback to populate the model input data byte array (laid out in [height][width][channel] order).
typedef void (*libtf_input_data_callback_t)(void *callback_data,
@ -50,13 +58,23 @@ typedef void (*libtf_output_data_callback_t)(void *callback_data,
// Returns 0 on success and 1 on failure.
// Errors are printed to stdout.
int libtf_invoke(const unsigned char *model_data, // TensorFlow Lite binary model (8-bit quant).
                 unsigned char *tensor_arena, // As big as you can make it scratch buffer.
                 libtf_parameters_t *params, // Struct with model parameters.
                 libtf_input_data_callback_t input_callback, // Callback to populate the model input data byte array.
                 void *input_callback_data, // User data structure passed to input callback.
                 libtf_output_data_callback_t output_callback, // Callback to use the model output data byte array.
                 void *output_callback_data); // User data structure passed to output callback.
int libtf_invoke_default(const unsigned char *model_data, // TensorFlow Lite binary model (8-bit quant).
                         unsigned char *tensor_arena, // As big as you can make it scratch buffer.
                         libtf_parameters_t *params, // Struct with model parameters.
                         libtf_input_data_callback_t input_callback, // Callback to populate the model input data byte array.
                         void *input_callback_data, // User data structure passed to input callback.
                         libtf_output_data_callback_t output_callback, // Callback to use the model output data byte array.
                         void *output_callback_data); // User data structure passed to output callback.
// Returns 0 on success and 1 on failure.
// Errors are printed to stdout.
int libtf_invoke_fullops(const unsigned char *model_data, // TensorFlow Lite binary model (8-bit quant).
                         unsigned char *tensor_arena, // As big as you can make it scratch buffer.
                         libtf_parameters_t *params, // Struct with model parameters.
                         libtf_input_data_callback_t input_callback, // Callback to populate the model input data byte array.
                         void *input_callback_data, // User data structure passed to input callback.
                         libtf_output_data_callback_t output_callback, // Callback to use the model output data byte array.
                         void *output_callback_data); // User data structure passed to output callback.
// Returns 0 on success and 1 on failure.
// Errors are printed to stdout.
@ -72,9 +90,6 @@ int libtf_generate_micro_features(const int16_t *input, // Audio samples
                                  int8_t *output, // Slice data
                                  size_t *num_samples_read); // Number of samples used
// runs regression on 2D/ 1D input(provided as array) and return 1D output
int libtf_regression(const unsigned char *model_data, uint8_t* tensor_arena, libtf_parameters_t* params, float* input_data, float* output_data);
#ifdef __cplusplus
}
#endif
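
The split above replaces the single entry points with explicit _default (38-op) and _fullops (78-op) variants that share one signature. Below is a minimal caller-side sketch of the default variant; the model blob, arena size, and callback bodies are illustrative placeholders and are not part of this commit.

#include <string.h>
#include "libtf.h"

extern const unsigned char model_data[];       // TFLite flatbuffer, assumed linked in elsewhere.
static unsigned char tensor_arena[64 * 1024];  // Arena size is a placeholder.

// Illustrative callbacks; real callers convert data per params->input_datatype.
static void fill_input(void *cb_data, void *model_input, libtf_parameters_t *params) {
    memcpy(model_input, cb_data,
           params->input_height * params->input_width * params->input_channels);
}

static void read_output(void *cb_data, void *model_output, libtf_parameters_t *params) {
    memcpy(cb_data, model_output, params->output_channels);  // Assumes an 8-bit output tensor.
}

int run_regression_default(unsigned char *in, unsigned char *out) {
    libtf_parameters_t params;
    if (libtf_get_parameters_default(model_data, tensor_arena, sizeof(tensor_arena), &params)) {
        return 1;  // Error text was already printed to stdout.
    }
    // libtf_invoke_fullops() takes the same arguments when the 78-op build is selected.
    return libtf_invoke_default(model_data, tensor_arena, &params,
                                fill_input, in, read_output, out);
}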

View File

@ -130,7 +130,7 @@
// Enable Tensor Flow
#if !defined(CUBEAI)
#define IMLIB_ENABLE_TF
#define IMLIB_ENABLE_TF (IMLIB_TF_DEFAULT)
#endif
// Enable FAST (20+ KBs).

View File

@ -130,7 +130,7 @@
// Enable Tensor Flow
#if !defined(CUBEAI)
#define IMLIB_ENABLE_TF
#define IMLIB_ENABLE_TF (IMLIB_TF_DEFAULT)
#endif
// Enable FAST (20+ KBs).
@ -158,8 +158,8 @@
#define IMLIB_ENABLE_DMA2D
// Enable PNG encoder/decoder
#define IMLIB_ENABLE_PNG_ENCODER
#define IMLIB_ENABLE_PNG_DECODER
// #define IMLIB_ENABLE_PNG_ENCODER
// #define IMLIB_ENABLE_PNG_DECODER
// Stereo Imaging
// #define IMLIB_ENABLE_STEREO_DISPARITY

View File

@ -130,7 +130,7 @@
// Enable Tensor Flow
#if !defined(CUBEAI)
#define IMLIB_ENABLE_TF
#define IMLIB_ENABLE_TF (IMLIB_TF_DEFAULT)
#endif
// Enable FAST (20+ KBs).
@ -158,8 +158,8 @@
#define IMLIB_ENABLE_DMA2D
// Enable PNG encoder/decoder
#define IMLIB_ENABLE_PNG_ENCODER
#define IMLIB_ENABLE_PNG_DECODER
// #define IMLIB_ENABLE_PNG_ENCODER
// #define IMLIB_ENABLE_PNG_DECODER
// Stereo Imaging
// #define IMLIB_ENABLE_STEREO_DISPARITY

View File

@ -130,7 +130,7 @@
// Enable Tensor Flow
#if !defined(CUBEAI)
#define IMLIB_ENABLE_TF
#define IMLIB_ENABLE_TF (IMLIB_TF_DEFAULT)
#endif
// Enable FAST (20+ KBs).

View File

@ -130,7 +130,7 @@
// Enable Tensor Flow
#if !defined(CUBEAI)
#define IMLIB_ENABLE_TF
#define IMLIB_ENABLE_TF (IMLIB_TF_DEFAULT)
#endif
// Enable FAST (20+ KBs).

View File

@ -130,7 +130,7 @@
// Enable Tensor Flow
#if !defined(CUBEAI)
#define IMLIB_ENABLE_TF
#define IMLIB_ENABLE_TF (IMLIB_TF_DEFAULT)
#endif
// Enable FAST (20+ KBs).

View File

@ -130,7 +130,7 @@
// Enable Tensor Flow
#if !defined(CUBEAI)
#define IMLIB_ENABLE_TF
#define IMLIB_ENABLE_TF (IMLIB_TF_DEFAULT)
#endif
// Enable FAST (20+ KBs).

View File

@ -130,7 +130,7 @@
// Enable Tensor Flow
#if !defined(CUBEAI)
#define IMLIB_ENABLE_TF
#define IMLIB_ENABLE_TF (IMLIB_TF_DEFAULT)
#endif
// Enable FAST (20+ KBs).

View File

@ -129,7 +129,7 @@
// Enable Tensor Flow
#if !defined(CUBEAI)
#define IMLIB_ENABLE_TF
#define IMLIB_ENABLE_TF (IMLIB_TF_FULLOPS)
#endif
// Enable FAST (20+ KBs).

View File

@ -30,6 +30,11 @@
#include "imlib_config.h"
#include "omv_boardconfig.h"
// Enables 38 TensorFlow Lite operators.
#define IMLIB_TF_DEFAULT (1)
// Enables 78 TensorFlow Lite operators.
#define IMLIB_TF_FULLOPS (2)
#ifndef M_PI
#define M_PI 3.14159265f
#define M_PI_2 1.57079632f

View File

@ -18,9 +18,6 @@
#include "py_helper.h"
#include "imlib_config.h"
#include "ulab/code/ulab.h"
#include "ulab/code/ndarray.h"
#ifdef IMLIB_ENABLE_TF
#include "py_image.h"
#include "file_utils.h"
@ -268,49 +265,6 @@ STATIC py_tf_model_obj_t *py_tf_load_alloc(mp_obj_t path_obj) {
}
}
STATIC mp_obj_t py_tf_regression(uint n_args, const mp_obj_t *args, mp_map_t *kw_args) {
    fb_alloc_mark();
    py_tf_alloc_putchar_buffer();
    // read model
    py_tf_model_obj_t *model = py_tf_load_alloc(args[0]);
    // read input(2D or 1D) and output size(1D)
    size_t input_size_width = (&model->params)->input_width;
    size_t input_size_height = (&model->params)->input_height;
    size_t output_size = (&model->params)->output_channels;
    // read input
    ndarray_obj_t *arg_input_array = args[1];
    // check for the input size
    if ((input_size_width * input_size_height) != arg_input_array->len) {
        mp_raise_msg(&mp_type_ValueError, MP_ERROR_TEXT("Input array size is not same as model input size!"));
    }
    float *input_array = (float *) (arg_input_array->array);
    uint8_t *tensor_arena = fb_alloc(model->params.tensor_arena_size, FB_ALLOC_PREFER_SPEED | FB_ALLOC_CACHE_ALIGN);
    float output_data[output_size];
    // predict the output using tflite model
    if (libtf_regression(model->model_data,
                         tensor_arena, &model->params, input_array, output_data) != 0) {
        mp_raise_msg(&mp_type_OSError, MP_ERROR_TEXT("Coundnt execute the model to predict the output"));
    }
    // read output
    mp_obj_list_t *out = (mp_obj_list_t *) mp_obj_new_list(output_size, NULL);
    for (size_t j = 0; j < (output_size); j++) {
        out->items[j] = mp_obj_new_float(output_data[j]);
    }
    fb_alloc_free_till_mark();
    return out;
}
STATIC MP_DEFINE_CONST_FUN_OBJ_KW(py_tf_regression_obj, 2, py_tf_regression);
typedef struct py_tf_input_callback_data {
image_t *img;
rectangle_t *roi;
@ -774,6 +728,107 @@ STATIC mp_obj_t py_tf_detect(uint n_args, const mp_obj_t *pos_args, mp_map_t *kw
}
STATIC MP_DEFINE_CONST_FUN_OBJ_KW(py_tf_detect_obj, 2, py_tf_detect);
STATIC void py_tf_regression_input_callback(void *callback_data,
                                            void *model_input,
                                            libtf_parameters_t *params) {
    size_t len;
    mp_obj_t *items;
    mp_obj_get_array(*((mp_obj_t *) callback_data), &len, &items);
    if (len == (params->input_height * params->input_width * params->input_channels)) {
        if (params->input_datatype == LIBTF_DATATYPE_FLOAT) {
            float *model_input_float = (float *) model_input;
            for (size_t i = 0; i < len; i++) {
                model_input_float[i] = mp_obj_get_float(items[i]);
            }
        } else {
            uint8_t *model_input_8 = (uint8_t *) model_input;
            for (size_t i = 0; i < len; i++) {
                model_input_8[i] = fast_roundf((mp_obj_get_float(items[i]) /
                                                params->input_scale) + params->input_zero_point);
            }
        }
    } else if (len == params->input_height) {
        for (size_t i = 0; i < len; i++) {
            size_t row_len;
            mp_obj_t *row_items;
            mp_obj_get_array(items[i], &row_len, &row_items);
            if (row_len == (params->input_width * params->input_channels)) {
                if (params->input_datatype == LIBTF_DATATYPE_FLOAT) {
                    float *model_input_float = (float *) model_input;
                    for (size_t j = 0; j < row_len; j++) {
                        size_t index = (i * row_len) + j;
                        model_input_float[index] = mp_obj_get_float(row_items[index]);
                    }
                } else {
                    uint8_t *model_input_8 = (uint8_t *) model_input;
                    for (size_t j = 0; j < row_len; j++) {
                        size_t index = (i * row_len) + j;
                        model_input_8[index] = fast_roundf((mp_obj_get_float(row_items[index]) /
                                                            params->input_scale) + params->input_zero_point);
                    }
                }
            } else if (row_len == params->input_height) {
                for (size_t j = 0; j < row_len; j++) {
                    size_t c_len;
                    mp_obj_t *c_items;
                    mp_obj_get_array(row_items[i], &c_len, &c_items);
                    if (c_len == params->input_channels) {
                        if (params->input_datatype == LIBTF_DATATYPE_FLOAT) {
                            float *model_input_float = (float *) model_input;
                            for (size_t k = 0; k < c_len; k++) {
                                size_t index = (i * row_len) + (j * c_len) + k;
                                model_input_float[index] = mp_obj_get_float(c_items[index]);
                            }
                        } else {
                            uint8_t *model_input_8 = (uint8_t *) model_input;
                            for (size_t k = 0; k < c_len; k++) {
                                size_t index = (i * row_len) + (j * c_len) + k;
                                model_input_8[index] = fast_roundf((mp_obj_get_float(c_items[index]) /
                                                                    params->input_scale) + params->input_zero_point);
                            }
                        }
                    } else {
                        mp_raise_msg(&mp_type_ValueError, MP_ERROR_TEXT("Channel count mismatch!"));
                    }
                }
            } else {
                mp_raise_msg(&mp_type_ValueError, MP_ERROR_TEXT("Column count mismatch!"));
            }
        }
    } else {
        mp_raise_msg(&mp_type_ValueError, MP_ERROR_TEXT("Row count mismatch!"));
    }
}
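
The integer branches in the callback above apply standard 8-bit TFLite affine quantization: each real-valued element is divided by input_scale, offset by input_zero_point, and rounded before being written into the input tensor. Below is a stand-alone sketch of that conversion; the clamp and the example numbers are added here for illustration and are not part of the callback.

#include <stdint.h>
#include <math.h>

// q = round(real / scale + zero_point), mirroring the callback's fast_roundf() expression.
static int8_t quantize_int8(float real, float scale, int zero_point) {
    long q = lroundf((real / scale) + zero_point);
    if (q < INT8_MIN) q = INT8_MIN;  // Clamp added for safety in this sketch.
    if (q > INT8_MAX) q = INT8_MAX;
    return (int8_t) q;
}

// Example with made-up parameters: scale = 0.02, zero_point = -3
// quantize_int8(0.5f, 0.02f, -3) -> round(25 - 3) = 22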
STATIC mp_obj_t py_tf_regression(mp_obj_t model_obj, mp_obj_t array_obj) {
    fb_alloc_mark();
    py_tf_alloc_putchar_buffer();
    py_tf_model_obj_t *model = py_tf_load_alloc(model_obj);
    uint8_t *tensor_arena = fb_alloc(model->params.tensor_arena_size, FB_ALLOC_PREFER_SPEED | FB_ALLOC_CACHE_ALIGN);
    mp_obj_t py_tf_classify_output_callback_data;
    if (libtf_invoke(model->model_data,
                     tensor_arena,
                     &model->params,
                     py_tf_regression_input_callback,
                     &array_obj,
                     py_tf_classify_output_callback,
                     &py_tf_classify_output_callback_data) != 0) {
        // Note can't use MP_ERROR_TEXT here.
        mp_raise_msg(&mp_type_OSError, (mp_rom_error_text_t) py_tf_putchar_buffer);
    }
    fb_alloc_free_till_mark();
    return py_tf_classify_output_callback_data;
}
STATIC MP_DEFINE_CONST_FUN_OBJ_2(py_tf_regression_obj, py_tf_regression);
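
The new py_tf_regression() reuses py_tf_classify_output_callback (not shown in this diff) to turn the model output back into Python floats. For quantized outputs that direction is the inverse affine map, real = (q - zero_point) * scale; here is a sketch with the scale and zero point passed in explicitly, since the output-side parameter fields are not shown in this hunk.

#include <stdint.h>

// Inverse of the input-side quantization: recover a real value from a stored int8.
static float dequantize_int8(int8_t q, float scale, int zero_point) {
    return ((int) q - zero_point) * scale;
}

// With the made-up parameters from the previous sketch (scale = 0.02, zero_point = -3):
// dequantize_int8(22, 0.02f, -3) -> (22 + 3) * 0.02 = 0.5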
mp_obj_t py_tf_len(mp_obj_t self_in) {
return mp_obj_new_int(((py_tf_model_obj_t *) self_in)->model_data_len);
}

View File

@ -11,6 +11,7 @@
#ifndef __PY_TF_H__
#define __PY_TF_H__
#include "libtf.h"
#include "imlib_config.h"
typedef struct py_tf_model_obj {
mp_obj_base_t base;
@ -26,4 +27,13 @@ extern size_t py_tf_putchar_buffer_index;
extern size_t py_tf_putchar_buffer_len;
void py_tf_alloc_putchar_buffer();
// Functionality select
#if IMLIB_ENABLE_TF == IMLIB_TF_FULLOPS
#define libtf_get_parameters libtf_get_parameters_fullops
#define libtf_invoke libtf_invoke_fullops
#elif IMLIB_ENABLE_TF == IMLIB_TF_DEFAULT
#define libtf_get_parameters libtf_get_parameters_default
#define libtf_invoke libtf_invoke_default
#endif
#endif // __PY_TF_H__
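
These aliases let the rest of py_tf.c keep calling libtf_get_parameters() and libtf_invoke() while the board's imlib_config.h decides which interpreter build is linked in. A compilable stand-in for the selection mechanism follows; the function names and bodies are simplified placeholders, not the real libtf entry points.

#include <stdio.h>

#define IMLIB_TF_DEFAULT (1)  // 38-operator build.
#define IMLIB_TF_FULLOPS (2)  // 78-operator build.

// A board config would provide this; FULLOPS is chosen here for illustration.
#define IMLIB_ENABLE_TF (IMLIB_TF_FULLOPS)

static int invoke_default(void) { puts("38-op interpreter"); return 0; }
static int invoke_fullops(void) { puts("78-op interpreter"); return 0; }

// Same pattern as the "Functionality select" block above.
#if IMLIB_ENABLE_TF == IMLIB_TF_FULLOPS
#define invoke invoke_fullops
#elif IMLIB_ENABLE_TF == IMLIB_TF_DEFAULT
#define invoke invoke_default
#endif

int main(void) {
    return invoke();  // Resolves to invoke_fullops() with the config above.
}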