modules/py_ml: Fully vectorize all input and output scaling/datatypes.

commit d24d2136ea (parent 6247269e6e)
Author: Kwabena W. Agyeman
Date:   2024-06-30 19:25:19 -07:00
3 changed files with 96 additions and 70 deletions
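The user-visible effect of this change is that the model's scale, zero-point, and dtype attributes become per-tensor tuples instead of single scalars taken from tensor 0. A rough MicroPython sketch of the new shape of the API, assuming the module is exposed as "ml" with a "Model" class and using a made-up model file name (both assumptions, not taken from this diff):

    # Illustrative only: module/class/file names are assumed, not part of this commit.
    import ml

    model = ml.Model("example.tflite")

    # Previously these attributes were scalars describing a single tensor; after this
    # commit each one is a tuple with one entry per input (or output) tensor.
    for i in range(len(model.input_shape)):
        print("input", i,
              "shape:", model.input_shape[i],
              "dtype:", model.input_dtype[i],        # one-character string, e.g. 'f' or 'B'
              "scale:", model.input_scale[i],
              "zero_point:", model.input_zero_point[i])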

@@ -213,47 +213,58 @@ int ml_backend_init_model(py_ml_model_obj_t *model) {
         mp_raise_msg(&mp_type_ValueError, MP_ERROR_TEXT("Failed to allocate tensors"));
     }
-    // Check input data type.
-    TfLiteTensor *input = interpreter.input(0);
-    if (!ml_backend_valid_dataype(input->type)) {
-        mp_raise_msg_varg(&mp_type_ValueError, MP_ERROR_TEXT("Unsupported input data type %d"), input->type);
-    }
-    // Check output data type.
-    TfLiteTensor *output = interpreter.output(0);
-    if (!ml_backend_valid_dataype(output->type)) {
-        mp_raise_msg_varg(&mp_type_ValueError, MP_ERROR_TEXT("Unsupported output data type %d"), output->type);
-    }
-    model->input_shape = (mp_obj_tuple_t *) MP_OBJ_TO_PTR(mp_obj_new_tuple(interpreter.inputs_size(), NULL));
-    for (size_t i=0; i<interpreter.inputs_size(); i++) {
+    model->inputs_size = interpreter.inputs_size();
+    model->input_shape = (mp_obj_tuple_t *) MP_OBJ_TO_PTR(mp_obj_new_tuple(model->inputs_size, NULL));
+    model->input_scale = (mp_obj_tuple_t *) MP_OBJ_TO_PTR(mp_obj_new_tuple(model->inputs_size, NULL));
+    model->input_zero_point = (mp_obj_tuple_t *) MP_OBJ_TO_PTR(mp_obj_new_tuple(model->inputs_size, NULL));
+    model->input_dtype = (mp_obj_tuple_t *) MP_OBJ_TO_PTR(mp_obj_new_tuple(model->inputs_size, NULL));
+    for (size_t i=0; i<model->inputs_size; i++) {
         TfLiteTensor *input = interpreter.input(i);
+        // Check input data type.
+        if (!ml_backend_valid_dataype(input->type)) {
+            mp_raise_msg_varg(&mp_type_ValueError, MP_ERROR_TEXT("Unsupported input data type %d"), input->type);
+        }
         mp_obj_tuple_t *o = (mp_obj_tuple_t *) MP_OBJ_TO_PTR(mp_obj_new_tuple(input->dims->size, NULL));
         for (int j=0; j<input->dims->size; j++) {
             o->items[j] = mp_obj_new_int(input->dims->data[j]);
         }
+        float input_scale = input->params.scale;
         model->input_shape->items[i] = MP_OBJ_FROM_PTR(o);
+        model->input_scale->items[i] = mp_obj_new_float((input_scale == 0.0f) ? 1.0f : input_scale);
+        model->input_zero_point->items[i] = mp_obj_new_int(input->params.zero_point);
+        model->input_dtype->items[i] = mp_obj_new_int(ml_backend_map_dtype(input->type));
     }
-    model->inputs_size = interpreter.inputs_size();
-    model->input_dtype = ml_backend_map_dtype(input->type);
-    model->input_scale = input->params.scale;
-    model->input_zero_point = input->params.zero_point;
-    model->output_shape = (mp_obj_tuple_t *) MP_OBJ_TO_PTR(mp_obj_new_tuple(interpreter.outputs_size(), NULL));
-    for (size_t i=0; i<interpreter.outputs_size(); i++) {
+    model->outputs_size = interpreter.outputs_size();
+    model->output_shape = (mp_obj_tuple_t *) MP_OBJ_TO_PTR(mp_obj_new_tuple(model->outputs_size, NULL));
+    model->output_scale = (mp_obj_tuple_t *) MP_OBJ_TO_PTR(mp_obj_new_tuple(model->outputs_size, NULL));
+    model->output_zero_point = (mp_obj_tuple_t *) MP_OBJ_TO_PTR(mp_obj_new_tuple(model->outputs_size, NULL));
+    model->output_dtype = (mp_obj_tuple_t *) MP_OBJ_TO_PTR(mp_obj_new_tuple(model->outputs_size, NULL));
+    for (size_t i=0; i<model->outputs_size; i++) {
         TfLiteTensor *output = interpreter.output(i);
+        // Check output data type.
+        if (!ml_backend_valid_dataype(output->type)) {
+            mp_raise_msg_varg(&mp_type_ValueError, MP_ERROR_TEXT("Unsupported output data type %d"), output->type);
+        }
         mp_obj_tuple_t *o = (mp_obj_tuple_t *) MP_OBJ_TO_PTR(mp_obj_new_tuple(output->dims->size, NULL));
         for (int j=0; j<output->dims->size; j++) {
             o->items[j] = mp_obj_new_int(output->dims->data[j]);
         }
+        float output_scale = output->params.scale;
         model->output_shape->items[i] = MP_OBJ_FROM_PTR(o);
+        model->output_scale->items[i] = mp_obj_new_float((output_scale == 0.0f) ? 1.0f : output_scale);
+        model->output_zero_point->items[i] = mp_obj_new_int(output->params.zero_point);
+        model->output_dtype->items[i] = mp_obj_new_int(ml_backend_map_dtype(output->type));
     }
-    model->outputs_size = interpreter.outputs_size();
-    model->output_dtype = ml_backend_map_dtype(output->type);
-    model->output_scale = output->params.scale;
-    model->output_zero_point = output->params.zero_point;
     model->memory_size = interpreter.arena_used_bytes() + 1024;
     // Free the temporary arena.
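One detail from the hunk above: TFLite reports params.scale as 0.0 for tensors that are not quantized, so the loop substitutes 1.0 at load time (this replaces the fixups removed from py_ml_model_make_new later in this diff). A minimal sketch of the same guard, in Python for illustration:

    # Sketch of the per-tensor scale normalization performed in the C++ loop above.
    def normalized_scale(scale):
        # Unquantized (float) tensors report a scale of 0.0; map it to 1.0 so the
        # scaling code can always multiply or divide without special cases.
        return 1.0 if scale == 0.0 else scale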

@@ -55,14 +55,17 @@ static void py_ml_process_input(py_ml_model_obj_t *model, mp_obj_t arg) {
         void *input_buffer = ml_backend_get_input(model, i);
         size_t input_size = py_ml_tuple_sum(MP_OBJ_TO_PTR(model->input_shape->items[i]));
         mp_obj_tuple_t *input_shape = MP_OBJ_TO_PTR(model->input_shape->items[i]);
+        float input_scale = 1.0f / mp_obj_get_float(model->input_scale->items[i]);
+        int input_zero_point = mp_obj_get_int(model->input_zero_point->items[i]);
+        int input_dtype = mp_obj_get_int(model->input_dtype->items[i]);
         mp_obj_t input_arg = input_list->items[i];

         if (mp_obj_is_callable(input_arg)) {
             // Input is a callable. Call the object and pass the tensor buffer and dtype.
             mp_obj_t fargs[3] = {
-                mp_obj_new_bytearray_by_ref(input_size * pl_ml_dtype_size(model->input_dtype), input_buffer),
+                mp_obj_new_bytearray_by_ref(input_size * pl_ml_dtype_size(input_dtype), input_buffer),
                 MP_OBJ_FROM_PTR(input_shape),
-                mp_obj_new_int(model->input_dtype)
+                mp_obj_new_int(input_dtype)
             };
             mp_call_function_n_kw(input_arg, 3, 0, fargs);
         } else if (MP_OBJ_IS_TYPE(input_arg, &ulab_ndarray_type)) {
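As the fargs[] array above shows, a callable input object is invoked with three positional arguments: a bytearray aliasing the input tensor's memory, the shape tuple, and the dtype code as an integer. A hedged sketch of such a callback (the function name and fill logic are illustrative):

    # Illustrative pre-processing callback; the argument order matches fargs[] above.
    def fill_input(buf, shape, dtype):
        # dtype is an integer character code such as ord('f'), ord('b') or ord('B');
        # buf is a bytearray view over the backend's input tensor, so writes land
        # directly in the tensor memory.
        print("filling input tensor", shape, "as", chr(dtype))
        for i in range(len(buf)):
            buf[i] = 0  # replace with real pre-processed data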
@@ -82,29 +85,29 @@ static void py_ml_process_input(py_ml_model_obj_t *model, mp_obj_t arg) {
                 }
             }

-            if (model->input_dtype == 'f') {
+            if (input_dtype == 'f') {
                 float *model_input_float = (float *) input_buffer;
                 for (size_t i = 0; i < input_array->len; i++) {
                     float value = ndarray_get_float_index(input_array->array, input_array->dtype, i);
                     model_input_float[i] = value;
                 }
-            } else if (model->input_dtype == 'b') {
+            } else if (input_dtype == 'b') {
                 int8_t *model_input_8 = (int8_t *) input_buffer;
                 for (size_t i = 0; i < input_array->len; i++) {
                     float value = ndarray_get_float_index(input_array->array, input_array->dtype, i);
-                    model_input_8[i] = (int8_t) ((value / model->input_scale) + model->input_zero_point);
+                    model_input_8[i] = (int8_t) ((value * input_scale) + input_zero_point);
                 }
-            } else if (model->input_dtype == 'B') {
+            } else if (input_dtype == 'B') {
                 uint8_t *model_input_8 = (uint8_t *) input_buffer;
                 for (size_t i = 0; i < input_array->len; i++) {
                     float value = ndarray_get_float_index(input_array->array, input_array->dtype, i);
-                    model_input_8[i] = (uint8_t) ((value / model->input_scale) + model->input_zero_point);
+                    model_input_8[i] = (uint8_t) ((value * input_scale) + input_zero_point);
                 }
             } else {
                 int16_t *model_input_16 = (int16_t *) input_buffer;
                 for (size_t i = 0; i < input_array->len; i++) {
                     float value = ndarray_get_float_index(input_array->array, input_array->dtype, i);
-                    model_input_16[i] = (int16_t) ((value / model->input_scale) + model->input_zero_point);
+                    model_input_16[i] = (int16_t) ((value * input_scale) + input_zero_point);
                 }
             }
         } else {
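The per-element math above is unchanged by this commit; the division by the scale is simply replaced with a multiplication by a reciprocal computed once per tensor. Under the usual TFLite affine quantization, with illustrative numbers:

    # Quantizing one float value into an int8 input element (example numbers).
    scale = 0.00392157          # per-tensor quantization scale (example value)
    zero_point = -128           # per-tensor zero point (example value)
    inv_scale = 1.0 / scale     # hoisted out of the loop, as in the C code above
    value = 0.5
    q = int(value * inv_scale + zero_point)   # equivalent to int(value / scale + zero_point),
                                              # up to floating-point rounding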
@@ -119,25 +122,28 @@ static mp_obj_t py_ml_process_output(py_ml_model_obj_t *model) {
         void *model_output = ml_backend_get_output(model, i);
         size_t size = py_ml_tuple_sum(MP_OBJ_TO_PTR(model->output_shape->items[i]));
         mp_obj_tuple_t *output = MP_OBJ_TO_PTR(mp_obj_new_tuple(size, NULL));
+        float output_scale = mp_obj_get_float(model->output_scale->items[i]);
+        int output_zero_point = mp_obj_get_int(model->output_zero_point->items[i]);
+        int output_dtype = mp_obj_get_int(model->output_dtype->items[i]);

-        if (model->output_dtype == 'f') {
+        if (output_dtype == 'f') {
             for (size_t j = 0; j < size; j++) {
                 output->items[j] = mp_obj_new_float(((float *) model_output)[j]);
             }
-        } else if (model->output_dtype == 'b') {
+        } else if (output_dtype == 'b') {
             for (size_t j = 0; j < size; j++) {
-                float v = (((int8_t *) model_output)[j] - model->output_zero_point);
-                output->items[j] = mp_obj_new_float(v * model->output_scale);
+                float v = (((int8_t *) model_output)[j] - output_zero_point);
+                output->items[j] = mp_obj_new_float(v * output_scale);
             }
-        } else if (model->output_dtype == 'B') {
+        } else if (output_dtype == 'B') {
             for (size_t j = 0; j < size; j++) {
-                float v = (((uint8_t *) model_output)[j] - model->output_zero_point);
-                output->items[j] = mp_obj_new_float(v * model->output_scale);
+                float v = (((uint8_t *) model_output)[j] - output_zero_point);
+                output->items[j] = mp_obj_new_float(v * output_scale);
             }
         } else {
             for (size_t j = 0; j < size; j++) {
-                float v = (((int8_t *) model_output)[j] - model->output_zero_point);
-                output->items[j] = mp_obj_new_float(v * model->output_scale);
+                float v = (((int8_t *) model_output)[j] - output_zero_point);
+                output->items[j] = mp_obj_new_float(v * output_scale);
             }
         }
         output_list->items[i] = MP_OBJ_FROM_PTR(output);
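Output de-quantization is the mirror image and now reads the per-tensor tuples: real = (q - zero_point) * scale. With illustrative numbers:

    # De-quantizing one int8 output element back to a float (example numbers).
    scale = 0.00390625     # example output_scale entry for this tensor
    zero_point = -128      # example output_zero_point entry
    q = 64                 # raw value read from the output tensor
    real = (q - zero_point) * scale   # (64 - (-128)) * 0.00390625 == 0.75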
@@ -148,15 +154,35 @@ static mp_obj_t py_ml_process_output(py_ml_model_obj_t *model) {
 // TF Model Object.
 static const mp_obj_type_t py_ml_model_type;

+static mp_obj_t py_ml_dtype_char_tuple(const mp_obj_tuple_t *dtype) {
+    mp_obj_tuple_t *r = (mp_obj_tuple_t *) MP_OBJ_TO_PTR(mp_obj_new_tuple(dtype->len, NULL));
+    for (size_t i = 0; i < dtype->len; i++) {
+        char d = mp_obj_get_int(dtype->items[i]);
+        r->items[i] = mp_obj_new_str(&d, 1);
+    }
+    return r;
+}
+
 static void py_ml_model_print(const mp_print_t *print, mp_obj_t self_in, mp_print_kind_t kind) {
     py_ml_model_obj_t *self = MP_OBJ_TO_PTR(self_in);
-    mp_printf(print,
-              "{size: \"%d\", ram: \"%d\","
-              " inputs_size: \"%d\", input_dtype: \"%c\", input_scale: \"%f\", input_zero_point: \"%d\","
-              " outputs_size: \"%d\" output_dtype: \"%c\", output_scale: \"%f\", output_zero_point: \"%d\"}",
-              self->size, self->memory_size, self->inputs_size, self->input_dtype,
-              (double) self->input_scale, self->input_zero_point, self->outputs_size, self->output_dtype,
-              (double) self->output_scale, self->output_zero_point);
+    mp_printf(print, "{size: %d, ram: %d", self->size, self->memory_size);
+    mp_printf(print, ", input_shape: ");
+    mp_obj_print_helper(print, self->input_shape, kind);
+    mp_printf(print, ", input_scale: ");
+    mp_obj_print_helper(print, self->input_scale, kind);
+    mp_printf(print, ", input_zero_point: ");
+    mp_obj_print_helper(print, self->input_zero_point, kind);
+    mp_printf(print, ", input_dtype: ");
+    mp_obj_print_helper(print, py_ml_dtype_char_tuple(self->input_dtype), kind);
+    mp_printf(print, ", output_shape: ");
+    mp_obj_print_helper(print, self->output_shape, kind);
+    mp_printf(print, ", output_scale: ");
+    mp_obj_print_helper(print, self->output_scale, kind);
+    mp_printf(print, ", output_zero_point: ");
+    mp_obj_print_helper(print, self->output_zero_point, kind);
+    mp_printf(print, ", output_dtype: ");
+    mp_obj_print_helper(print, py_ml_dtype_char_tuple(self->output_dtype), kind);
+    mp_printf(print, "}");
 }

 static mp_obj_t py_ml_model_predict(uint n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
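The new py_ml_dtype_char_tuple helper only converts the integer dtype codes stored at load time back into one-character strings, for printing and for the input_dtype / output_dtype attributes. A rough Python equivalent, for illustration:

    # Rough Python equivalent of py_ml_dtype_char_tuple (illustrative).
    def dtype_char_tuple(codes):
        return tuple(chr(c) for c in codes)

    print(dtype_char_tuple((ord('B'), ord('f'))))   # ('B', 'f')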
@@ -192,7 +218,6 @@ static MP_DEFINE_CONST_FUN_OBJ_KW(py_ml_model_predict_obj, 2, py_ml_model_predic
 static void py_ml_model_attr(mp_obj_t self_in, qstr attr, mp_obj_t *dest) {
     py_ml_model_obj_t *self = MP_OBJ_TO_PTR(self_in);
     if (dest[0] == MP_OBJ_NULL) {
         // Load attribute.
         switch (attr) {
@@ -206,25 +231,25 @@ static void py_ml_model_attr(mp_obj_t self_in, qstr attr, mp_obj_t *dest) {
                 dest[0] = MP_OBJ_FROM_PTR(self->input_shape);
                 break;
             case MP_QSTR_input_dtype:
-                dest[0] = mp_obj_new_str(&self->input_dtype, 1);
+                dest[0] = py_ml_dtype_char_tuple(self->input_dtype);
                 break;
             case MP_QSTR_input_scale:
-                dest[0] = mp_obj_new_float(self->input_scale);
+                dest[0] = MP_OBJ_FROM_PTR(self->input_scale);
                 break;
             case MP_QSTR_input_zero_point:
-                dest[0] = mp_obj_new_int(self->input_zero_point);
+                dest[0] = MP_OBJ_FROM_PTR(self->input_zero_point);
                 break;
             case MP_QSTR_output_shape:
                 dest[0] = MP_OBJ_FROM_PTR(self->output_shape);
                 break;
             case MP_QSTR_output_dtype:
-                dest[0] = mp_obj_new_str(&self->output_dtype, 1);
+                dest[0] = py_ml_dtype_char_tuple(self->output_dtype);
                 break;
             case MP_QSTR_output_scale:
-                dest[0] = mp_obj_new_float(self->output_scale);
+                dest[0] = MP_OBJ_FROM_PTR(self->output_scale);
                 break;
             case MP_QSTR_output_zero_point:
-                dest[0] = mp_obj_new_int(self->output_zero_point);
+                dest[0] = MP_OBJ_FROM_PTR(self->output_zero_point);
                 break;
             case MP_QSTR_labels:
                 dest[0] = self->labels;
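Because these attributes now return tuples, scripts that treated them as scalars need to index them. A small before/after sketch, assuming a single-input, single-output model (variable names are illustrative):

    # Before this commit (scalar attributes, describing tensor 0 only):
    #   scale, zp, dtype = model.input_scale, model.input_zero_point, model.input_dtype
    # After this commit (one entry per tensor):
    scale = model.input_scale[0]
    zp = model.input_zero_point[0]
    dtype = model.input_dtype[0]    # one-character string such as 'f', 'b' or 'B'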
@@ -298,17 +323,7 @@ mp_obj_t py_ml_model_make_new(const mp_obj_type_t *type, size_t n_args, size_t n
         fb_alloc_free_till_mark();
     }

     ml_backend_init_model(model);
-
-    if (model->input_scale == 0.0f) {
-        model->input_scale = 1.0;
-    }
-
-    if (model->output_scale == 0.0f) {
-        model->output_scale = 1.0;
-    }
-
     return MP_OBJ_FROM_PTR(model);
 }

@@ -19,14 +19,14 @@ typedef struct py_ml_model_obj {
     bool fb_alloc;
     size_t inputs_size;
     mp_obj_tuple_t *input_shape;
-    float input_scale;
-    int input_zero_point;
-    char input_dtype;
+    mp_obj_tuple_t *input_scale;
+    mp_obj_tuple_t *input_zero_point;
+    mp_obj_tuple_t *input_dtype;
     size_t outputs_size;
     mp_obj_tuple_t *output_shape;
-    float output_scale;
-    int output_zero_point;
-    char output_dtype;
+    mp_obj_tuple_t *output_scale;
+    mp_obj_tuple_t *output_zero_point;
+    mp_obj_tuple_t *output_dtype;
     mp_obj_t labels;
     void *state; // Private context for the backend.
 } py_ml_model_obj_t;