Add Object Detection Support (#1462)
Support for segmentation networks has been added. You can now segment images using TensorFlow into grayscale heatmaps, one per class you are looking for. The segment() method returns a list of these grayscale heatmap images. detect() then does all of the above but internally runs find_blobs() on the heatmaps, returning a list of lists where each sub-list holds the blobs detected for one class. Edge Impulse will support running segmentation networks, enabling object detection and localization on Cortex-M processors.

* PYTF now uses the optimal amount of memory for buffers instead of all of it - buffers are placed in SRAM if they fit, producing a massive speed boost.
* Custom scaled/offset outputs now work.
* Updated to the latest TensorFlow library.
* You have access to all input/output model parameters.
* Person detection is now int8 and blazing fast - 20 FPS on the Arduino Portenta.
* Added M55 libs (M0+ libs coming soon once Edge Impulse adds support for them in the TensorFlow makefile).
* Classify/Segment/Detect work on all image types directly (JPG/BAYER/YUV/RGB565/GRAYSCALE/BINARY).
This commit is contained in: parent 6a0bc6716e, commit c86f01741f
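To make the new API concrete, here is a minimal sketch of how segment() and detect() are used from MicroPython, assuming a user-supplied segmentation model; the filename "segmentation.tflite" is a hypothetical placeholder and the thresholds value mirrors the bundled example:

    import sensor, tf

    sensor.reset()
    sensor.set_pixformat(sensor.RGB565)
    sensor.set_framesize(sensor.QVGA)
    sensor.skip_frames(time=2000)

    net = tf.load("segmentation.tflite", load_to_fb=True)  # hypothetical model file

    img = sensor.snapshot()

    # segment() returns one grayscale heatmap image per output class.
    heatmaps = net.segment(img)

    # detect() runs segment() and then find_blobs() on each heatmap, returning
    # a list of per-class lists of detection objects with bounding boxes.
    for class_index, detections in enumerate(net.detect(img, thresholds=[(128, 255)])):
        for d in detections:
            img.draw_rectangle(d.rect())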
@ -0,0 +1,51 @@

# TensorFlow Lite Object Detection Example
#
# This example shows off object detection. Object detection is much more powerful
# than object classification. It can locate multiple objects in the image.

import sensor, image, time, os, tf

sensor.reset()                         # Reset and initialize the sensor.
sensor.set_pixformat(sensor.RGB565)    # Set pixel format to RGB565 (or GRAYSCALE)
sensor.set_framesize(sensor.QVGA)      # Set frame size to QVGA (320x240)
sensor.set_windowing((240, 240))       # Set 240x240 window.
sensor.skip_frames(time=2000)          # Let the camera adjust.

net = tf.load('<object_detection_network>', load_to_fb=True)
labels = []

try:  # Load labels if they exist
    labels = [line.rstrip('\n') for line in open("labels.txt")]
except Exception:
    pass

colors = [  # Add more colors if you are detecting more than 7 types of classes at once.
    (255,   0,   0),
    (  0, 255,   0),
    (255, 255,   0),
    (  0,   0, 255),
    (255,   0, 255),
    (  0, 255, 255),
    (255, 255, 255),
]

clock = time.clock()
while(True):
    clock.tick()

    img = sensor.snapshot()

    # detect() segments an object using the provided segmentation model. This produces
    # multiple grayscale images, one per object class that we are trying to detect.
    # detect() then runs find_blobs() internally on the segmented images to find all
    # blob locations and returns the bounding boxes of all blobs found per object
    # class. So, detect() returns a list of lists of classification objects and their
    # respective confidence levels.

    for i, detection_list in enumerate(net.detect(img, thresholds=[(128, 255)])):
        if (i < len(labels)):
            print("********** %s **********" % labels[i])
        for d in detection_list:
            print(d)
            img.draw_rectangle(d.rect(), color=colors[i])

    print(clock.fps(), "fps", end="\n\n")
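For intuition, the comment in the loop above can be written out as a rough, equivalent sketch using segment() and find_blobs() directly (assuming, as described, that segment() returns one grayscale heatmap image per class):

    for i, heatmap in enumerate(net.segment(img)):
        # Threshold each per-class heatmap and group hot pixels into blobs;
        # each blob's bounding box is one detection for class i.
        for blob in heatmap.find_blobs([(128, 255)], merge=True):
            img.draw_rectangle(blob.rect(), color=colors[i])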
@ -16,7 +16,7 @@ sensor.skip_frames(time=2000) # Let the camera adjust.
 
 # Load the built-in person detection network (the network is in your OpenMV Cam's firmware).
 net = tf.load('person_detection')
-labels = ['unsure', 'person', 'no_person']
+labels = ['person', 'no_person']
 
 clock = time.clock()
 while(True):

(This hunk is applied identically to each of the person detection example scripts touched by this commit.)
(A second, identical copy of the new object detection example is also added by this commit.)
@ -1,5 +1 @@
 You must link this library to your application with arm-none-eabi-gcc and have implemented putchar().
-
-C Compile Flags: -DGEMMLOWP_ALLOW_SLOW_SCALAR_FALLBACK -DNDEBUG -DTF_LITE_MCU_DEBUG_LOG -DTF_LITE_STATIC_MEMORY -MMD -O3 -Wall -Werror -Warray-bounds -Wextra -Wvla -Wno-missing-field-initializers -Wno-strict-aliasing -Wno-type-limits -Wno-unused-but-set-variable -Wno-unused-parameter -Wno-unused-variable -Wno-unused-value -Wno-error=sign-compare -Wno-error=nonnull -Wno-error=unused-value -fdata-sections -ffunction-sections -fmessage-length=0 -fomit-frame-pointer -funsigned-char -fshort-enums -fno-delete-null-pointer-checks -fno-exceptions -fno-unwind-tables -mabi=aapcs-linux -mfloat-abi=hard -mthumb -nostartfiles -nostdlib -std=c11 -DARM_MATH_CM4 -mcpu=cortex-m4 -mfpu=fpv4-sp-d16 -mtune=cortex-m4
-
-CXX Compile Flags: -DGEMMLOWP_ALLOW_SLOW_SCALAR_FALLBACK -DNDEBUG -DTF_LITE_MCU_DEBUG_LOG -DTF_LITE_STATIC_MEMORY -MMD -O3 -Wall -Werror -Warray-bounds -Wextra -Wvla -Wno-missing-field-initializers -Wno-strict-aliasing -Wno-type-limits -Wno-unused-but-set-variable -Wno-unused-parameter -Wno-unused-variable -Wno-unused-value -Wno-error=sign-compare -Wno-error=nonnull -Wno-error=unused-value -fdata-sections -ffunction-sections -fmessage-length=0 -fomit-frame-pointer -funsigned-char -fshort-enums -fno-delete-null-pointer-checks -fno-exceptions -fno-unwind-tables -mabi=aapcs-linux -mfloat-abi=hard -mthumb -nostartfiles -nostdlib -std=c++11 -fno-rtti -fno-threadsafe-statics -fno-use-cxa-atexit -DARM_MATH_CM4 -mcpu=cortex-m4 -mfpu=fpv4-sp-d16 -mtune=cortex-m4
Binary file not shown.
@ -6,57 +6,57 @@
 #ifndef __LIBTF_H
 #define __LIBTF_H
 
+#define LIBTF_TENSOR_ARENA_ALIGNMENT 16
+
 #ifdef __cplusplus
 extern "C" {
 #endif
 
-// Call this first to get the shape of the model input.
-// Returns 0 on success and 1 on failure.
-// Errors are printed to stdout.
-int libtf_get_input_data_hwc(const unsigned char *model_data, // TensorFlow Lite binary model (8-bit quant).
-                             unsigned char *tensor_arena, // As big as you can make it scratch buffer.
-                             unsigned int tensor_arena_size, // Size of the above scratch buffer.
-                             unsigned int *input_height, // Height for the model.
-                             unsigned int *input_width, // Width for the model.
-                             unsigned int *input_channels, // Channels for the model (1 for grayscale8 and 3 for rgb888).
-                             bool *signed_or_unsigned, // True if input is int8_t ([0:255]->[-128:127]), False if input is uint8_t ([0:255]->[0:255]).
-                             bool *is_float); // Actual is float32 (not optimal - network should be fixed). Input should be ([0:255]->[0.0f:+1.0f]).
-
-// Call this second to get the shape of the model output.
-// Returns 0 on success and 1 on failure.
-// Errors are printed to stdout.
-int libtf_get_output_data_hwc(const unsigned char *model_data, // TensorFlow Lite binary model (8-bit quant).
-                              unsigned char *tensor_arena, // As big as you can make it scratch buffer.
-                              unsigned int tensor_arena_size, // Size of the above scratch buffer.
-                              unsigned int *output_height, // Height for the model.
-                              unsigned int *output_width, // Width for the model.
-                              unsigned int *output_channels, // Channels for the model (1 for grayscale8 and 3 for rgb888).
-                              bool *signed_or_unsigned, // True if output is int8_t ([-128:127]->[0:255]->[0.0f:1.0f]), False if output is uint8_t ([0:255]->[0:255]->[0.0f:1.0f]).
-                              bool *is_float); // Actual is float32 (not optimal - network should be fixed). Output is [0.0f:+1.0f].
+// Built-in person detection model.
+extern const unsigned char g_person_detect_model_data[];
+extern const int g_person_detect_model_data_len;
+
+typedef enum libtf_datatype {
+    LIBTF_DATATYPE_UINT8,
+    LIBTF_DATATYPE_INT8,
+    LIBTF_DATATYPE_FLOAT
+} libtf_datatype_t;
+
+typedef struct libtf_parameters {
+    size_t tensor_arena_size;
+    size_t input_height, input_width, input_channels;
+    libtf_datatype_t input_datatype;
+    float input_scale;
+    int input_zero_point;
+    size_t output_height, output_width, output_channels;
+    libtf_datatype_t output_datatype;
+    float output_scale;
+    int output_zero_point;
+} libtf_parameters_t;
+
+// Call this first to get the model parameters.
+// Returns 0 on success and 1 on failure.
+// Errors are printed to stdout.
+int libtf_get_parameters(const unsigned char *model_data, // TensorFlow Lite binary model (8-bit quant).
+                         unsigned char *tensor_arena, // As big as you can make it scratch buffer.
+                         size_t tensor_arena_size, // Size of the above scratch buffer.
+                         libtf_parameters_t *params); // Struct to hold model parameters.
 
 // Callback to populate the model input data byte array (laid out in [height][width][channel] order).
 typedef void (*libtf_input_data_callback_t)(void *callback_data,
                                             void *model_input,
-                                            const unsigned int input_height,
-                                            const unsigned int input_width,
-                                            const unsigned int input_channels,
-                                            const bool signed_or_unsigned, // True if input is int8_t ([0:255]->[-128:127]), False if input is uint8_t ([0:255]->[0:255]).
-                                            const bool is_float); // Actual is float32 (not optimal - network should be fixed). Input should be ([0:255]->[0.0f:+1.0f]).
+                                            libtf_parameters_t *params);
 
 // Callback to use the model output data byte array (laid out in [height][width][channel] order).
 typedef void (*libtf_output_data_callback_t)(void *callback_data,
                                              void *model_output,
-                                             const unsigned int output_height,
-                                             const unsigned int output_width,
-                                             const unsigned int output_channels,
-                                             const bool signed_or_unsigned, // True if output is int8_t ([-128:127]->[0:255]->[0.0f:1.0f]), False if output is uint8_t ([0:255]->[0:255]->[0.0f:1.0f]).
-                                             const bool is_float); // Actual is float32 (not optimal - network should be fixed). Output is [0.0f:+1.0f].
+                                             libtf_parameters_t *params);
 
 // Returns 0 on success and 1 on failure.
 // Errors are printed to stdout.
 int libtf_invoke(const unsigned char *model_data, // TensorFlow Lite binary model (8-bit quant).
                  unsigned char *tensor_arena, // As big as you can make it scratch buffer.
-                 unsigned int tensor_arena_size, // Size of the above scratch buffer.
+                 libtf_parameters_t *params, // Struct with model parameters.
                  libtf_input_data_callback_t input_callback, // Callback to populate the model input data byte array.
                  void *input_callback_data, // User data structure passed to input callback.
                  libtf_output_data_callback_t output_callback, // Callback to use the model output data byte array.

@ -68,13 +68,13 @@ int libtf_initialize_micro_features();
 
 // Returns 0 on success and 1 on failure.
 // Errors are printed to stdout.
-// Converts audio sample data into a more compact form that's
-// appropriate for feeding into a neural network.
-int libtf_generate_micro_features(const int16_t* input, // Audio samples
-                                  int input_size, // Audio samples size
-                                  int output_size, // Slice size
-                                  int8_t* output, // Slice data
-                                  size_t* num_samples_read); // Number of samples used.
+// Converts audio sample data into a more compact form
+// that's appropriate for feeding into a neural network.
+int libtf_generate_micro_features(const int16_t *input, // Audio samples
+                                  int input_size, // Audio sample size
+                                  int output_size, // Slice data size
+                                  int8_t *output, // Slice data
+                                  size_t *num_samples_read); // Number of samples used
 
 #ifdef __cplusplus
 }
Binary file not shown.
@ -1,27 +0,0 @@
-/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-// This is a standard TensorFlow Lite model file that has been converted into a
-// C data array, so it can be easily compiled into a binary for devices that
-// don't have a file system. It was created using the command:
-// xxd -i person_detect.tflite > person_detect_model_data.cc
-
-#ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_PERSON_DETECT_MODEL_DATA_H_
-#define TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_PERSON_DETECT_MODEL_DATA_H_
-
-extern const unsigned char g_person_detect_model_data[];
-extern const int g_person_detect_model_data_len;
-
-#endif // TENSORFLOW_LITE_MICRO_EXAMPLES_PERSON_DETECTION_PERSON_DETECT_MODEL_DATA_H_
src/lib/libtf/cortex-m55/LICENSE (new file, 203 lines)
@ -0,0 +1,203 @@
Copyright 2019 The TensorFlow Authors. All rights reserved.

Apache License, Version 2.0, January 2004 - http://www.apache.org/licenses/

[The remainder of the file is the standard, unmodified Apache License 2.0 text: definitions, the copyright and patent grants, the redistribution conditions, the warranty and liability disclaimers, and the appendix on applying the license.]
src/lib/libtf/cortex-m55/README (new file, 1 line)
@ -0,0 +1 @@
You must link this library to your application with arm-none-eabi-gcc and have implemented putchar().
src/lib/libtf/cortex-m55/libtf.a (new binary file, not shown)
src/lib/libtf/cortex-m55/libtf.h (new file, 83 lines)
@ -0,0 +1,83 @@
/* This file is part of the OpenMV project.
 * Copyright (c) 2013-2021 Ibrahim Abdelkader <iabdalkader@openmv.io> & Kwabena W. Agyeman <kwagyeman@openmv.io>
 * This work is licensed under the MIT license, see the file LICENSE for details.
 */

#ifndef __LIBTF_H
#define __LIBTF_H

#define LIBTF_TENSOR_ARENA_ALIGNMENT 16

#ifdef __cplusplus
extern "C" {
#endif

// Built-in person detection model.
extern const unsigned char g_person_detect_model_data[];
extern const int g_person_detect_model_data_len;

typedef enum libtf_datatype {
    LIBTF_DATATYPE_UINT8,
    LIBTF_DATATYPE_INT8,
    LIBTF_DATATYPE_FLOAT
} libtf_datatype_t;

typedef struct libtf_parameters {
    size_t tensor_arena_size;
    size_t input_height, input_width, input_channels;
    libtf_datatype_t input_datatype;
    float input_scale;
    int input_zero_point;
    size_t output_height, output_width, output_channels;
    libtf_datatype_t output_datatype;
    float output_scale;
    int output_zero_point;
} libtf_parameters_t;

// Call this first to get the model parameters.
// Returns 0 on success and 1 on failure.
// Errors are printed to stdout.
int libtf_get_parameters(const unsigned char *model_data, // TensorFlow Lite binary model (8-bit quant).
                         unsigned char *tensor_arena, // As big as you can make it scratch buffer.
                         size_t tensor_arena_size, // Size of the above scratch buffer.
                         libtf_parameters_t *params); // Struct to hold model parameters.

// Callback to populate the model input data byte array (laid out in [height][width][channel] order).
typedef void (*libtf_input_data_callback_t)(void *callback_data,
                                            void *model_input,
                                            libtf_parameters_t *params);

// Callback to use the model output data byte array (laid out in [height][width][channel] order).
typedef void (*libtf_output_data_callback_t)(void *callback_data,
                                             void *model_output,
                                             libtf_parameters_t *params);

// Returns 0 on success and 1 on failure.
// Errors are printed to stdout.
int libtf_invoke(const unsigned char *model_data, // TensorFlow Lite binary model (8-bit quant).
                 unsigned char *tensor_arena, // As big as you can make it scratch buffer.
                 libtf_parameters_t *params, // Struct with model parameters.
                 libtf_input_data_callback_t input_callback, // Callback to populate the model input data byte array.
                 void *input_callback_data, // User data structure passed to input callback.
                 libtf_output_data_callback_t output_callback, // Callback to use the model output data byte array.
                 void *output_callback_data); // User data structure passed to output callback.

// Returns 0 on success and 1 on failure.
// Errors are printed to stdout.
int libtf_initialize_micro_features();

// Returns 0 on success and 1 on failure.
// Errors are printed to stdout.
// Converts audio sample data into a more compact form
// that's appropriate for feeding into a neural network.
int libtf_generate_micro_features(const int16_t *input, // Audio samples
                                  int input_size, // Audio sample size
                                  int output_size, // Slice data size
                                  int8_t *output, // Slice data
                                  size_t *num_samples_read); // Number of samples used

#ifdef __cplusplus
}
#endif

#endif // __LIBTF_H
@ -1,5 +1 @@
 You must link this library to your application with arm-none-eabi-gcc and have implemented putchar().
-
-C Compile Flags: -DGEMMLOWP_ALLOW_SLOW_SCALAR_FALLBACK -DNDEBUG -DTF_LITE_MCU_DEBUG_LOG -DTF_LITE_STATIC_MEMORY -MMD -O3 -Wall -Werror -Warray-bounds -Wextra -Wvla -Wno-missing-field-initializers -Wno-strict-aliasing -Wno-type-limits -Wno-unused-but-set-variable -Wno-unused-parameter -Wno-unused-variable -Wno-unused-value -Wno-error=sign-compare -Wno-error=nonnull -Wno-error=unused-value -fdata-sections -ffunction-sections -fmessage-length=0 -fomit-frame-pointer -funsigned-char -fshort-enums -fno-delete-null-pointer-checks -fno-exceptions -fno-unwind-tables -mabi=aapcs-linux -mfloat-abi=hard -mthumb -nostartfiles -nostdlib -std=c11 -DARM_MATH_CM7 -mcpu=cortex-m7 -mfpu=fpv5-sp-d16 -mtune=cortex-m7
-
-CXX Compile Flags: -DGEMMLOWP_ALLOW_SLOW_SCALAR_FALLBACK -DNDEBUG -DTF_LITE_MCU_DEBUG_LOG -DTF_LITE_STATIC_MEMORY -MMD -O3 -Wall -Werror -Warray-bounds -Wextra -Wvla -Wno-missing-field-initializers -Wno-strict-aliasing -Wno-type-limits -Wno-unused-but-set-variable -Wno-unused-parameter -Wno-unused-variable -Wno-unused-value -Wno-error=sign-compare -Wno-error=nonnull -Wno-error=unused-value -fdata-sections -ffunction-sections -fmessage-length=0 -fomit-frame-pointer -funsigned-char -fshort-enums -fno-delete-null-pointer-checks -fno-exceptions -fno-unwind-tables -mabi=aapcs-linux -mfloat-abi=hard -mthumb -nostartfiles -nostdlib -std=c++11 -fno-rtti -fno-threadsafe-statics -fno-use-cxa-atexit -DARM_MATH_CM7 -mcpu=cortex-m7 -mfpu=fpv5-sp-d16 -mtune=cortex-m7
Binary file not shown.
(The same libtf.h changes shown above are applied to a second copy of the header.)
Binary file not shown.
(The removal of the person detection model data header shown above is repeated for a second copy of the file.)
@ -17,30 +17,47 @@
 #include "imlib_config.h"
 
 #ifdef IMLIB_ENABLE_TF
-#include "py_assert.h"
 #include "py_image.h"
 #include "ff_wrapper.h"
-#include "libtf.h"
-#include "libtf_person_detect_model_data.h"
 #include "py_tf.h"
 
+#define GRAYSCALE_RANGE ((COLOR_GRAYSCALE_MAX) - (COLOR_GRAYSCALE_MIN))
+#define GRAYSCALE_MID (((GRAYSCALE_RANGE) + 1) / 2)
+
 void py_tf_alloc_putchar_buffer()
 {
     py_tf_putchar_buffer = (char *) fb_alloc0(PY_TF_PUTCHAR_BUFFER_LEN + 1, FB_ALLOC_NO_HINT);
+    py_tf_putchar_buffer_index = 0;
     py_tf_putchar_buffer_len = PY_TF_PUTCHAR_BUFFER_LEN;
 }
 
+STATIC const char *py_tf_map_datatype(libtf_datatype_t datatype)
+{
+    if (datatype == LIBTF_DATATYPE_UINT8) {
+        return "uint8";
+    } else if (datatype == LIBTF_DATATYPE_INT8) {
+        return "int8";
+    } else {
+        return "float";
+    }
+}
+
 STATIC void py_tf_model_print(const mp_print_t *print, mp_obj_t self_in, mp_print_kind_t kind)
 {
     py_tf_model_obj_t *self = self_in;
     mp_printf(print,
-              "{\"len\":%d, \"height\":%d, \"width\":%d, \"channels\":%d, \"signed\":%d, \"is_float\":%d}",
-              self->model_data_len,
-              self->height,
-              self->width,
-              self->channels,
-              self->signed_or_unsigned,
-              self->is_float);
+              "{\"len\":%d, \"ram\":%d, "
+              "\"input_height\":%d, \"input_width\":%d, \"input_channels\":%d, \"input_datatype\":\"%s\", "
+              "\"input_scale\":%f, \"input_zero_point\":%d, "
+              "\"output_height\":%d, \"output_width\":%d, \"output_channels\":%d, \"output_datatype\":\"%s\", "
+              "\"output_scale\":%f, \"output_zero_point\":%d}",
+              self->model_data_len, self->params.tensor_arena_size,
+              self->params.input_height, self->params.input_width, self->params.input_channels,
+              py_tf_map_datatype(self->params.input_datatype),
+              (double) self->params.input_scale, self->params.input_zero_point,
+              self->params.output_height, self->params.output_width, self->params.output_channels,
+              py_tf_map_datatype(self->params.output_datatype),
+              (double) self->params.output_scale, self->params.output_zero_point);
 }
 
 // TF Classification Object
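With this change, printing a model object from MicroPython reports the full set of parameters, including the quantization scale and zero point for input and output. A sketch of what this looks like (the numbers below are illustrative, not taken from a real model):

    import tf
    net = tf.load('person_detection')
    print(net)
    # {"len":300568, "ram":146360, "input_height":96, "input_width":96,
    #  "input_channels":1, "input_datatype":"int8", "input_scale":0.003922,
    #  "input_zero_point":-128, "output_height":1, "output_width":1,
    #  "output_channels":2, "output_datatype":"int8", "output_scale":0.003906,
    #  "output_zero_point":-128}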
@ -164,17 +181,9 @@ STATIC mp_obj_t int_py_tf_load(mp_obj_t path_obj, bool alloc_mode, bool helper_m
     uint32_t tensor_arena_size;
     uint8_t *tensor_arena = fb_alloc_all(&tensor_arena_size, FB_ALLOC_PREFER_SIZE);
 
-    if (libtf_get_input_data_hwc(tf_model->model_data,
-                                 tensor_arena,
-                                 tensor_arena_size,
-                                 &tf_model->height,
-                                 &tf_model->width,
-                                 &tf_model->channels,
-                                 &tf_model->signed_or_unsigned,
-                                 &tf_model->is_float) != 0) {
+    if (libtf_get_parameters(tf_model->model_data, tensor_arena, tensor_arena_size, &tf_model->params) != 0) {
         // Note can't use MP_ERROR_TEXT here...
-        mp_raise_msg(&mp_type_OSError, (mp_rom_error_text_t)
-                     py_tf_putchar_buffer - (PY_TF_PUTCHAR_BUFFER_LEN - py_tf_putchar_buffer_len));
+        mp_raise_msg(&mp_type_OSError, (mp_rom_error_text_t) py_tf_putchar_buffer);
     }
 
     fb_free(); // free fb_alloc_all()
@ -227,29 +236,49 @@ typedef struct py_tf_input_data_callback_data {
 
 STATIC void py_tf_input_data_callback(void *callback_data,
                                       void *model_input,
-                                      const unsigned int input_height,
-                                      const unsigned int input_width,
-                                      const unsigned int input_channels,
-                                      const bool signed_or_unsigned,
-                                      const bool is_float)
+                                      libtf_parameters_t *params)
 {
     py_tf_input_data_callback_data_t *arg = (py_tf_input_data_callback_data_t *) callback_data;
-    int shift = signed_or_unsigned ? 128 : 0;
-    float fscale = 1.0f / 255.0f;
 
-    float xscale = input_width / ((float) arg->roi->w);
-    float yscale = input_height / ((float) arg->roi->h);
+    // Disable checking input scaling and zero-point. Nets can be all over the place on the input
+    // scaling and zero-point but still work with the code below.
+
+    // if (params->input_datatype == LIBTF_DATATYPE_UINT8) {
+    //     if (fast_roundf(params->input_scale * GRAYSCALE_RANGE) != 1) {
+    //         mp_raise_msg(&mp_type_ValueError, MP_ERROR_TEXT("Expected model input scale to be 1/255!"));
+    //     }
+
+    //     if (params->input_zero_point != 0) {
+    //         mp_raise_msg(&mp_type_ValueError, MP_ERROR_TEXT("Expected model input zero point to be 0!"));
+    //     }
+    // }
+
+    // if (params->input_datatype == LIBTF_DATATYPE_INT8) {
+    //     if (fast_roundf(params->input_scale * GRAYSCALE_RANGE) != 1) {
+    //         mp_raise_msg(&mp_type_ValueError, MP_ERROR_TEXT("Expected model input scale to be 1/255!"));
+    //     }
+
+    //     if (params->input_zero_point != -GRAYSCALE_MID) {
+    //         mp_raise_msg(&mp_type_ValueError, MP_ERROR_TEXT("Expected model input zero point to be -128!"));
+    //     }
+    // }
+
+    int shift = (params->input_datatype == LIBTF_DATATYPE_INT8) ? GRAYSCALE_MID : 0;
+    float fscale = 1.0f / GRAYSCALE_RANGE;
+
+    float xscale = params->input_width / ((float) arg->roi->w);
+    float yscale = params->input_height / ((float) arg->roi->h);
     // MAX == KeepAspectRatioByExpanding - MIN == KeepAspectRatio
     float scale = IM_MAX(xscale, yscale);
 
     image_t dst_img;
-    dst_img.w = input_width;
-    dst_img.h = input_height;
+    dst_img.w = params->input_width;
+    dst_img.h = params->input_height;
     dst_img.data = (uint8_t *) model_input;
 
-    if (input_channels == 1) {
+    if (params->input_channels == 1) {
         dst_img.pixfmt = PIXFORMAT_GRAYSCALE;
-    } else if (input_channels == 3) {
+    } else if (params->input_channels == 3) {
         dst_img.pixfmt = PIXFORMAT_RGB565;
     } else {
         mp_raise_msg(&mp_type_ValueError, MP_ERROR_TEXT("Expected model input channels to be 1 or 3!"));
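A small plain-Python sketch of the conversion math used above (illustration only, not OpenMV API): for int8 models each uint8 pixel is XORed with GRAYSCALE_MID (0x80), which is exactly a subtract-by-128 reinterpreted as int8, while float models multiply by fscale = 1/255; in general a quantized value q decodes as (q - zero_point) * scale.

    def u8_to_s8(v):
        # XOR with 0x80 flips the top bit: reinterpreted as int8 this equals v - 128.
        x = v ^ 0x80
        return x - 256 if x > 127 else x

    assert u8_to_s8(0) == -128 and u8_to_s8(128) == 0 and u8_to_s8(255) == 127

    fscale = 1.0 / 255.0
    print(200 * fscale)  # float input path: maps [0, 255] into [0.0, 1.0]

    def dequantize(q, scale, zero_point):
        # How a quantized tensor value maps back to a real number.
        return (q - zero_point) * scale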
@ -259,10 +288,17 @@ STATIC void py_tf_input_data_callback(void *callback_data,
                     -1, 256, NULL, NULL, IMAGE_HINT_BILINEAR | IMAGE_HINT_BLACK_BACKGROUND,
                     NULL, NULL);
 
-    int size = (input_width * input_height) - 1;
+    int size = (params->input_width * params->input_height) - 1; // must be int per countdown loop
 
-    if (input_channels == 1) { // GRAYSCALE
-        if (!is_float) {
+    if (params->input_channels == 1) { // GRAYSCALE
+        if (params->input_datatype == LIBTF_DATATYPE_FLOAT) { // convert u8 -> f32
+            uint8_t *model_input_u8 = (uint8_t *) model_input;
+            float *model_input_f32 = (float *) model_input;
+
+            for (; size >= 0; size -= 1) {
+                model_input_f32[size] = model_input_u8[size] * fscale;
+            }
+        } else {
             if (shift) { // convert u8 -> s8
                 uint8_t *model_input_8 = (uint8_t *) model_input;
 
@ -273,31 +309,14 @@ STATIC void py_tf_input_data_callback(void *callback_data,
 #endif
 
                 for (; size >= 0; size -= 1) {
-                    model_input_8[size] ^= 0x80;
+                    model_input_8[size] ^= GRAYSCALE_MID;
                 }
             }
-        } else { // convert u8 -> f32
-            uint8_t *model_input_u8 = (uint8_t *) model_input;
-            float *model_input_f32 = (float *) model_input;
-
-            for (; size >= 0; size -= 1) {
-                model_input_f32[size] = model_input_u8[size] * fscale;
-            }
         }
-    } else if (input_channels == 3) { // RGB888
-        int rgb_size = size * 3;
+    } else if (params->input_channels == 3) { // RGB888
+        int rgb_size = size * 3; // must be int per countdown loop
 
-        if (!is_float) {
-            uint16_t *model_input_u16 = (uint16_t *) model_input;
-            uint8_t *model_input_8 = (uint8_t *) model_input;
-
-            for (; size >= 0; size -= 1, rgb_size -= 3) {
-                int pixel = model_input_u16[size];
-                model_input_8[rgb_size] = COLOR_RGB565_TO_R8(pixel) ^ shift;
-                model_input_8[rgb_size + 1] = COLOR_RGB565_TO_G8(pixel) ^ shift;
-                model_input_8[rgb_size + 2] = COLOR_RGB565_TO_B8(pixel) ^ shift;
-            }
-        } else {
+        if (params->input_datatype == LIBTF_DATATYPE_FLOAT) {
             uint16_t *model_input_u16 = (uint16_t *) model_input;
             float *model_input_f32 = (float *) model_input;
 
@@ -307,6 +326,16 @@ STATIC void py_tf_input_data_callback(void *callback_data,
                 model_input_f32[rgb_size + 1] = COLOR_RGB565_TO_G8(pixel) * fscale;
                 model_input_f32[rgb_size + 2] = COLOR_RGB565_TO_B8(pixel) * fscale;
             }
+        } else {
+            uint16_t *model_input_u16 = (uint16_t *) model_input;
+            uint8_t *model_input_8 = (uint8_t *) model_input;
+
+            for (; size >= 0; size -= 1, rgb_size -= 3) {
+                int pixel = model_input_u16[size];
+                model_input_8[rgb_size] = COLOR_RGB565_TO_R8(pixel) ^ shift;
+                model_input_8[rgb_size + 1] = COLOR_RGB565_TO_G8(pixel) ^ shift;
+                model_input_8[rgb_size + 2] = COLOR_RGB565_TO_B8(pixel) ^ shift;
+            }
         }
     }
 }
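Aside: the XOR with GRAYSCALE_MID (0x80) used throughout the input callback is how unsigned camera pixels become the signed bytes an int8-quantized model expects. Flipping the most significant bit of a uint8 produces the two's-complement int8 bit pattern of that value minus 128. A minimal plain-Python sketch of the identity (illustrative only, not OpenMV code):

def u8_to_s8_bits(x):
    # Flipping the MSB of a uint8 yields the int8 bit pattern of x - 128.
    y = x ^ 0x80
    return y - 256 if y > 127 else y

assert all(u8_to_s8_bits(x) == x - 128 for x in range(256))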
@@ -317,24 +346,29 @@ typedef struct py_tf_classify_output_data_callback_data {
 
 STATIC void py_tf_classify_output_data_callback(void *callback_data,
                                                 void *model_output,
-                                                const unsigned int output_height,
-                                                const unsigned int output_width,
-                                                const unsigned int output_channels,
-                                                const bool signed_or_unsigned,
-                                                const bool is_float)
+                                                libtf_parameters_t *params)
 {
     py_tf_classify_output_data_callback_data_t *arg = (py_tf_classify_output_data_callback_data_t *) callback_data;
-    int shift = signed_or_unsigned ? 128 : 0;
 
-    PY_ASSERT_TRUE_MSG(output_height == 1, "Expected model output height to be 1!");
-    PY_ASSERT_TRUE_MSG(output_width == 1, "Expected model output width to be 1!");
+    if (params->output_height != 1) {
+        mp_raise_msg(&mp_type_ValueError, MP_ERROR_TEXT("Expected model output height to be 1!"));
+    }
 
-    arg->out = mp_obj_new_list(output_channels, NULL);
-    for (unsigned int i = 0; i < output_channels; i++) {
-        if (!is_float) {
-            ((mp_obj_list_t *) arg->out)->items[i] = mp_obj_new_float((((uint8_t *) model_output)[i] ^ shift) / 255.0f);
-        } else {
-            ((mp_obj_list_t *) arg->out)->items[i] = mp_obj_new_float(((float *) model_output)[i]);
+    if (params->output_width != 1) {
+        mp_raise_msg(&mp_type_ValueError, MP_ERROR_TEXT("Expected model output width to be 1!"));
+    }
+
+    arg->out = mp_obj_new_list(params->output_channels, NULL);
+
+    if (params->output_datatype == LIBTF_DATATYPE_FLOAT) {
+        for (int i = 0, ii = params->output_channels; i < ii; i++) {
+            ((mp_obj_list_t *) arg->out)->items[i] =
+                mp_obj_new_float(((float *) model_output)[i]);
+        }
+    } else {
+        for (int i = 0, ii = params->output_channels; i < ii; i++) {
+            ((mp_obj_list_t *) arg->out)->items[i] =
+                mp_obj_new_float((((uint8_t *) model_output)[i] - params->output_zero_point) * params->output_scale);
         }
     }
 }
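The rewritten classify callback above drops the old fixed 1/255 scaling in favor of the model's own affine dequantization, (q - zero_point) * scale, which is what makes custom scaled/offset outputs work. A rough plain-Python equivalent of what each output channel now goes through (the function name is illustrative, not part of the module API):

def dequantize(q, zero_point, scale):
    # Map a raw quantized output byte back to a real-valued score.
    return (q - zero_point) * scale

# A typical int8 softmax head with scale 1/256 and zero point -128:
print(dequantize(127, -128, 1.0 / 256))  # ~0.996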
@@ -351,21 +385,30 @@ STATIC mp_obj_t py_tf_classify(uint n_args, const mp_obj_t *args, mp_map_t *kw_args)
     py_helper_keyword_rectangle_roi(arg_img, n_args, args, 2, kw_args, &roi);
 
     float arg_min_scale = py_helper_keyword_float(n_args, args, 3, kw_args, MP_OBJ_NEW_QSTR(MP_QSTR_min_scale), 1.0f);
-    PY_ASSERT_TRUE_MSG((0.0f < arg_min_scale) && (arg_min_scale <= 1.0f), "0 < min_scale <= 1");
+    if ((arg_min_scale <= 0.0f) || (1.0f < arg_min_scale)) {
+        mp_raise_msg(&mp_type_ValueError, MP_ERROR_TEXT("0 < min_scale <= 1"));
+    }
 
     float arg_scale_mul = py_helper_keyword_float(n_args, args, 4, kw_args, MP_OBJ_NEW_QSTR(MP_QSTR_scale_mul), 0.5f);
-    PY_ASSERT_TRUE_MSG((0.0f <= arg_scale_mul) && (arg_scale_mul < 1.0f), "0 <= scale_mul < 1");
+    if ((arg_scale_mul < 0.0f) || (1.0f <= arg_scale_mul)) {
+        mp_raise_msg(&mp_type_ValueError, MP_ERROR_TEXT("0 <= scale_mul < 1"));
+    }
 
     float arg_x_overlap = py_helper_keyword_float(n_args, args, 5, kw_args, MP_OBJ_NEW_QSTR(MP_QSTR_x_overlap), 0.0f);
-    PY_ASSERT_TRUE_MSG(((0.0f <= arg_x_overlap) && (arg_x_overlap < 1.0f))
-                       || (arg_x_overlap == -1.0f), "0 <= x_overlap < 1");
+    if ((arg_x_overlap != -1.f) && ((arg_x_overlap < 0.0f) || (1.0f <= arg_x_overlap))) {
+        mp_raise_msg(&mp_type_ValueError, MP_ERROR_TEXT("0 <= x_overlap < 1"));
+    }
 
     float arg_y_overlap = py_helper_keyword_float(n_args, args, 6, kw_args, MP_OBJ_NEW_QSTR(MP_QSTR_y_overlap), 0.0f);
-    PY_ASSERT_TRUE_MSG(((0.0f <= arg_y_overlap) && (arg_y_overlap < 1.0f))
-                       || (arg_y_overlap == -1.0f), "0 <= y_overlap < 1");
+    if ((arg_y_overlap != -1.0f) && ((arg_y_overlap < 0.0f) || (1.0f <= arg_y_overlap))) {
+        mp_raise_msg(&mp_type_ValueError, MP_ERROR_TEXT("0 <= y_overlap < 1"));
+    }
 
-    uint32_t tensor_arena_size = fb_avail() - (arg_model->width * arg_model->channels * 3);
-    uint8_t *tensor_arena = fb_alloc(tensor_arena_size, FB_ALLOC_PREFER_SPEED);
+    uint8_t *tensor_arena = fb_alloc(arg_model->params.tensor_arena_size, FB_ALLOC_PREFER_SPEED | FB_ALLOC_CACHE_ALIGN);
 
     mp_obj_t objects_list = mp_obj_new_list(0, NULL);
 
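For context, the min_scale and scale_mul arguments validated above control the scale pyramid that classify() slides the network across the ROI with: presumably the search starts at full scale and multiplies by scale_mul each pass until the scale falls below min_scale, which is what the 0 < min_scale <= 1 and 0 <= scale_mul < 1 bounds imply. A hedged sketch of that sequence:

def classify_scales(min_scale=1.0, scale_mul=0.5):
    # Yield each window scale a sliding-window classify pass would visit.
    scale = 1.0
    while scale >= min_scale:
        yield scale
        scale *= scale_mul

print(list(classify_scales(min_scale=0.2, scale_mul=0.5)))  # [1.0, 0.5, 0.25]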
@@ -400,14 +443,13 @@ STATIC mp_obj_t py_tf_classify(uint n_args, const mp_obj_t *args, mp_map_t *kw_args)
 
             if (libtf_invoke(arg_model->model_data,
                              tensor_arena,
-                             tensor_arena_size,
+                             &arg_model->params,
                              py_tf_input_data_callback,
                              &py_tf_input_data_callback_data,
                              py_tf_classify_output_data_callback,
                              &py_tf_classify_output_data_callback_data) != 0) {
                 // Note can't use MP_ERROR_TEXT here.
-                mp_raise_msg(&mp_type_OSError, (mp_rom_error_text_t)
-                             py_tf_putchar_buffer - (PY_TF_PUTCHAR_BUFFER_LEN - py_tf_putchar_buffer_len));
+                mp_raise_msg(&mp_type_OSError, (mp_rom_error_text_t) py_tf_putchar_buffer);
             }
 
             py_tf_classification_obj_t *o = m_new_obj(py_tf_classification_obj_t);
@@ -435,40 +477,68 @@ typedef struct py_tf_segment_output_data_callback_data {
 
 STATIC void py_tf_segment_output_data_callback(void *callback_data,
                                                void *model_output,
-                                               const unsigned int output_height,
-                                               const unsigned int output_width,
-                                               const unsigned int output_channels,
-                                               const bool signed_or_unsigned,
-                                               const bool is_float)
+                                               libtf_parameters_t *params)
 {
     py_tf_segment_output_data_callback_data_t *arg = (py_tf_segment_output_data_callback_data_t *) callback_data;
-    int shift = signed_or_unsigned ? 128 : 0;
 
-    arg->out = mp_obj_new_list(output_channels, NULL);
-    for (unsigned int i = 0; i < output_channels; i++) {
+    // Disable checking output scaling and zero-point. Nets can be all over the place on the output
+    // scaling and zero-point but still work with the code below.
+
+    // if (params->output_datatype == LIBTF_DATATYPE_UINT8) {
+    //     if (fast_roundf(params->output_scale * GRAYSCALE_RANGE) != 1) {
+    //         mp_raise_msg(&mp_type_ValueError, MP_ERROR_TEXT("Expected model output scale to be 1/255!"));
+    //     }
+
+    //     if (params->output_zero_point != 0) {
+    //         mp_raise_msg(&mp_type_ValueError, MP_ERROR_TEXT("Expected model output zero point to be 0!"));
+    //     }
+    // }
+
+    // if (params->output_datatype == LIBTF_DATATYPE_INT8) {
+    //     if (fast_roundf(params->output_scale * GRAYSCALE_RANGE) != 1) {
+    //         mp_raise_msg(&mp_type_ValueError, MP_ERROR_TEXT("Expected model output scale to be 1/255!"));
+    //     }
+
+    //     if (params->output_zero_point != -GRAYSCALE_MID) {
+    //         mp_raise_msg(&mp_type_ValueError, MP_ERROR_TEXT("Expected model output zero point to be -128!"));
+    //     }
+    // }
+
+    int shift = (params->output_datatype == LIBTF_DATATYPE_INT8) ? GRAYSCALE_MID : 0;
+
+    arg->out = mp_obj_new_list(params->output_channels, NULL);
+
+    for (int i = 0, ii = params->output_channels; i < ii; i++) {
         image_t img = {
-            .w = output_width,
-            .h = output_height,
+            .w = params->output_width,
+            .h = params->output_height,
             .pixfmt = PIXFORMAT_GRAYSCALE,
-            .pixels = xalloc(output_width * output_height * sizeof(uint8_t))
+            .pixels = xalloc(params->output_width * params->output_height * sizeof(uint8_t))
         };
 
         ((mp_obj_list_t *) arg->out)->items[i] = py_image_from_struct(&img);
-        for (unsigned int y = 0; i < output_height; y++) {
-            unsigned int row = y * output_width * output_channels;
+
+        for (int y = 0, yy = params->output_height, xx = params->output_width; y < yy; y++) {
+            int row = y * xx * ii;
             uint8_t *row_ptr = IMAGE_COMPUTE_GRAYSCALE_PIXEL_ROW_PTR(&img, y);
-            for (unsigned int x = 0; i < output_width; x++) {
-                unsigned int col = x * output_channels;
-                if (!is_float) {
-                    IMAGE_PUT_GRAYSCALE_PIXEL_FAST(row_ptr, x, ((uint8_t *) model_output)[row + col + i] ^ shift);
+
+            for (int x = 0; x < xx; x++) {
+                int col = x * ii;
+
+                if (params->output_datatype == LIBTF_DATATYPE_FLOAT) {
+                    IMAGE_PUT_GRAYSCALE_PIXEL_FAST(row_ptr, x,
+                                                   ((float *) model_output)[row + col + i] * GRAYSCALE_RANGE);
                 } else {
-                    IMAGE_PUT_GRAYSCALE_PIXEL_FAST(row_ptr, x, ((float *) model_output)[row + col + i] * 255);
+                    IMAGE_PUT_GRAYSCALE_PIXEL_FAST(row_ptr, x,
+                                                   ((uint8_t *) model_output)[row + col + i] ^ shift);
                 }
             }
         }
     }
 }
 
-STATIC mp_obj_t py_tf_segment(uint n_args, const mp_obj_t *args, mp_map_t *kw_args)
+STATIC mp_obj_t int_py_tf_segment(bool detecting_mode, uint n_args, const mp_obj_t *args, mp_map_t *kw_args)
 {
     fb_alloc_mark();
     py_tf_alloc_putchar_buffer();
@@ -479,8 +549,7 @@ STATIC mp_obj_t py_tf_segment(uint n_args, const mp_obj_t *args, mp_map_t *kw_args)
     rectangle_t roi;
     py_helper_keyword_rectangle_roi(arg_img, n_args, args, 2, kw_args, &roi);
 
-    uint32_t tensor_arena_size = fb_avail() - (arg_model->width * arg_model->channels * 3);
-    uint8_t *tensor_arena = fb_alloc(tensor_arena_size, FB_ALLOC_PREFER_SPEED);
+    uint8_t *tensor_arena = fb_alloc(arg_model->params.tensor_arena_size, FB_ALLOC_PREFER_SPEED | FB_ALLOC_CACHE_ALIGN);
 
     py_tf_input_data_callback_data_t py_tf_input_data_callback_data;
     py_tf_input_data_callback_data.img = arg_img;
@@ -490,45 +559,206 @@ STATIC mp_obj_t py_tf_segment(uint n_args, const mp_obj_t *args, mp_map_t *kw_args)
 
     if (libtf_invoke(arg_model->model_data,
                      tensor_arena,
-                     tensor_arena_size,
+                     &arg_model->params,
                      py_tf_input_data_callback,
                      &py_tf_input_data_callback_data,
                      py_tf_segment_output_data_callback,
                      &py_tf_segment_output_data_callback_data) != 0) {
         // Note can't use MP_ERROR_TEXT here.
-        mp_raise_msg(&mp_type_OSError, (mp_rom_error_text_t)
-                     py_tf_putchar_buffer - (PY_TF_PUTCHAR_BUFFER_LEN - py_tf_putchar_buffer_len));
+        mp_raise_msg(&mp_type_OSError, (mp_rom_error_text_t) py_tf_putchar_buffer);
     }
 
     fb_alloc_free_till_mark();
 
-    return py_tf_segment_output_data_callback_data.out;
+    if (!detecting_mode) {
+        return py_tf_segment_output_data_callback_data.out;
+    }
+
+    list_t thresholds;
+    list_init(&thresholds, sizeof(color_thresholds_list_lnk_data_t));
+    py_helper_keyword_thresholds(n_args, args, 3, kw_args, &thresholds);
+
+    if (!list_size(&thresholds)) {
+        color_thresholds_list_lnk_data_t lnk_data;
+        lnk_data.LMin = GRAYSCALE_MID;
+        lnk_data.LMax = GRAYSCALE_RANGE;
+        lnk_data.AMin = COLOR_A_MIN;
+        lnk_data.AMax = COLOR_A_MAX;
+        lnk_data.BMin = COLOR_B_MIN;
+        lnk_data.BMax = COLOR_B_MAX;
+        list_push_back(&thresholds, &lnk_data);
+    }
+
+    bool invert = py_helper_keyword_int(n_args, args, 4, kw_args, MP_OBJ_NEW_QSTR(MP_QSTR_invert), false);
+
+    mp_obj_list_t *img_list = (mp_obj_list_t *) py_tf_segment_output_data_callback_data.out;
+    mp_obj_list_t *out_list = mp_obj_new_list(img_list->len, NULL);
+
+    fb_alloc_mark();
+
+    float fscale = 1.f / GRAYSCALE_RANGE;
+    for (int i = 0, ii = img_list->len; i < ii; i++) {
+        image_t *img = py_image_cobj(img_list->items[i]);
+        float x_scale = roi.w / ((float) img->w);
+        float y_scale = roi.h / ((float) img->h);
+
+        list_t out;
+        imlib_find_blobs(&out, img, &((rectangle_t) {0, 0, img->w, img->h}), 1, 1,
+                         &thresholds, invert, 1, 1, false, 0,
+                         NULL, NULL, NULL, NULL, 0, 0);
+
+        mp_obj_list_t *objects_list = mp_obj_new_list(list_size(&out), NULL);
+        for (int j = 0, jj = list_size(&out); j < jj; j++) {
+            find_blobs_list_lnk_data_t lnk_data;
+            list_pop_front(&out, &lnk_data);
+
+            histogram_t hist;
+            hist.LBinCount = GRAYSCALE_RANGE + 1;
+            hist.ABinCount = 0;
+            hist.BBinCount = 0;
+            hist.LBins = fb_alloc(hist.LBinCount * sizeof(float), FB_ALLOC_NO_HINT);
+            hist.ABins = NULL;
+            hist.BBins = NULL;
+            imlib_get_histogram(&hist, img, &lnk_data.rect, &thresholds, invert, NULL);
+
+            statistics_t stats;
+            imlib_get_statistics(&stats, img->pixfmt, &hist);
+            fb_free(); // fb_alloc(hist.LBinCount * sizeof(float), FB_ALLOC_NO_HINT);
+
+            py_tf_classification_obj_t *o = m_new_obj(py_tf_classification_obj_t);
+            o->base.type = &py_tf_classification_type;
+            o->x = mp_obj_new_int(fast_floorf(lnk_data.rect.x * x_scale) + roi.x);
+            o->y = mp_obj_new_int(fast_floorf(lnk_data.rect.y * y_scale) + roi.y);
+            o->w = mp_obj_new_int(fast_floorf(lnk_data.rect.w * x_scale));
+            o->h = mp_obj_new_int(fast_floorf(lnk_data.rect.h * y_scale));
+            o->output = mp_obj_new_float(stats.LMean * fscale);
+            objects_list->items[j] = o;
+        }
+
+        out_list->items[i] = objects_list;
+    }
+
+    fb_alloc_free_till_mark();
+
+    return out_list;
+}
+
+STATIC mp_obj_t py_tf_segment(uint n_args, const mp_obj_t *args, mp_map_t *kw_args)
+{
+    return int_py_tf_segment(false, n_args, args, kw_args);
 }
 STATIC MP_DEFINE_CONST_FUN_OBJ_KW(py_tf_segment_obj, 2, py_tf_segment);
 
-mp_obj_t py_tf_len(mp_obj_t self_in) { return mp_obj_new_int(((py_tf_model_obj_t *) self_in)->model_data_len); }
-mp_obj_t py_tf_height(mp_obj_t self_in) { return mp_obj_new_int(((py_tf_model_obj_t *) self_in)->height); }
-mp_obj_t py_tf_width(mp_obj_t self_in) { return mp_obj_new_int(((py_tf_model_obj_t *) self_in)->width); }
-mp_obj_t py_tf_channels(mp_obj_t self_in) { return mp_obj_new_int(((py_tf_model_obj_t *) self_in)->channels); }
-mp_obj_t py_tf_signed(mp_obj_t self_in) { return mp_obj_new_int(((py_tf_model_obj_t *) self_in)->signed_or_unsigned); }
-mp_obj_t py_tf_is_float(mp_obj_t self_in) { return mp_obj_new_int(((py_tf_model_obj_t *) self_in)->is_float); }
+STATIC mp_obj_t py_tf_detect(uint n_args, const mp_obj_t *args, mp_map_t *kw_args)
+{
+    return int_py_tf_segment(true, n_args, args, kw_args);
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_KW(py_tf_detect_obj, 2, py_tf_detect);
+
+mp_obj_t py_tf_len(mp_obj_t self_in)
+{
+    return mp_obj_new_int(((py_tf_model_obj_t *) self_in)->model_data_len);
+}
 STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_tf_len_obj, py_tf_len);
-STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_tf_height_obj, py_tf_height);
-STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_tf_width_obj, py_tf_width);
-STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_tf_channels_obj, py_tf_channels);
-STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_tf_signed_obj, py_tf_signed);
-STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_tf_is_float_obj, py_tf_is_float);
+
+mp_obj_t py_tf_ram(mp_obj_t self_in)
+{
+    return mp_obj_new_int(((py_tf_model_obj_t *) self_in)->params.tensor_arena_size);
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_tf_ram_obj, py_tf_ram);
+
+mp_obj_t py_tf_input_height(mp_obj_t self_in)
+{
+    return mp_obj_new_int(((py_tf_model_obj_t *) self_in)->params.input_height);
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_tf_input_height_obj, py_tf_input_height);
+
+mp_obj_t py_tf_input_width(mp_obj_t self_in)
+{
+    return mp_obj_new_int(((py_tf_model_obj_t *) self_in)->params.input_width);
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_tf_input_width_obj, py_tf_input_width);
+
+mp_obj_t py_tf_input_channels(mp_obj_t self_in)
+{
+    return mp_obj_new_int(((py_tf_model_obj_t *) self_in)->params.input_channels);
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_tf_input_channels_obj, py_tf_input_channels);
+
+mp_obj_t py_tf_input_datatype(mp_obj_t self_in)
+{
+    const char *str = py_tf_map_datatype(((py_tf_model_obj_t *) self_in)->params.input_datatype);
+    return mp_obj_new_str(str, strlen(str));
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_tf_input_datatype_obj, py_tf_input_datatype);
+
+mp_obj_t py_tf_input_scale(mp_obj_t self_in)
+{
+    return mp_obj_new_float(((py_tf_model_obj_t *) self_in)->params.input_scale);
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_tf_input_scale_obj, py_tf_input_scale);
+
+mp_obj_t py_tf_input_zero_point(mp_obj_t self_in)
+{
+    return mp_obj_new_int(((py_tf_model_obj_t *) self_in)->params.input_zero_point);
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_tf_input_zero_point_obj, py_tf_input_zero_point);
+
+mp_obj_t py_tf_output_height(mp_obj_t self_in)
+{
+    return mp_obj_new_int(((py_tf_model_obj_t *) self_in)->params.output_height);
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_tf_output_height_obj, py_tf_output_height);
+
+mp_obj_t py_tf_output_width(mp_obj_t self_in)
+{
+    return mp_obj_new_int(((py_tf_model_obj_t *) self_in)->params.output_width);
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_tf_output_width_obj, py_tf_output_width);
+
+mp_obj_t py_tf_output_channels(mp_obj_t self_in)
+{
+    return mp_obj_new_int(((py_tf_model_obj_t *) self_in)->params.output_channels);
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_tf_output_channels_obj, py_tf_output_channels);
+
+mp_obj_t py_tf_output_datatype(mp_obj_t self_in)
+{
+    const char *str = py_tf_map_datatype(((py_tf_model_obj_t *) self_in)->params.output_datatype);
+    return mp_obj_new_str(str, strlen(str));
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_tf_output_datatype_obj, py_tf_output_datatype);
+
+mp_obj_t py_tf_output_scale(mp_obj_t self_in)
+{
+    return mp_obj_new_float(((py_tf_model_obj_t *) self_in)->params.output_scale);
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_tf_output_scale_obj, py_tf_output_scale);
+
+mp_obj_t py_tf_output_zero_point(mp_obj_t self_in)
+{
+    return mp_obj_new_int(((py_tf_model_obj_t *) self_in)->params.output_zero_point);
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_tf_output_zero_point_obj, py_tf_output_zero_point);
 
 STATIC const mp_rom_map_elem_t locals_dict_table[] = {
     { MP_ROM_QSTR(MP_QSTR_len), MP_ROM_PTR(&py_tf_len_obj) },
-    { MP_ROM_QSTR(MP_QSTR_height), MP_ROM_PTR(&py_tf_height_obj) },
-    { MP_ROM_QSTR(MP_QSTR_width), MP_ROM_PTR(&py_tf_width_obj) },
-    { MP_ROM_QSTR(MP_QSTR_channels), MP_ROM_PTR(&py_tf_channels_obj) },
-    { MP_ROM_QSTR(MP_QSTR_signed), MP_ROM_PTR(&py_tf_signed_obj) },
-    { MP_ROM_QSTR(MP_QSTR_is_float), MP_ROM_PTR(&py_tf_is_float_obj) },
-    { MP_ROM_QSTR(MP_QSTR_classify), MP_ROM_PTR(&py_tf_classify_obj) },
-    { MP_ROM_QSTR(MP_QSTR_segment), MP_ROM_PTR(&py_tf_segment_obj) }
+    { MP_ROM_QSTR(MP_QSTR_ram), MP_ROM_PTR(&py_tf_ram_obj) },
+    { MP_ROM_QSTR(MP_QSTR_input_height), MP_ROM_PTR(&py_tf_input_height_obj) },
+    { MP_ROM_QSTR(MP_QSTR_input_width), MP_ROM_PTR(&py_tf_input_width_obj) },
+    { MP_ROM_QSTR(MP_QSTR_input_channels), MP_ROM_PTR(&py_tf_input_channels_obj) },
+    { MP_ROM_QSTR(MP_QSTR_input_datatype), MP_ROM_PTR(&py_tf_input_datatype_obj) },
+    { MP_ROM_QSTR(MP_QSTR_input_scale), MP_ROM_PTR(&py_tf_input_scale_obj) },
+    { MP_ROM_QSTR(MP_QSTR_input_zero_point), MP_ROM_PTR(&py_tf_input_zero_point_obj) },
+    { MP_ROM_QSTR(MP_QSTR_output_height), MP_ROM_PTR(&py_tf_output_height_obj) },
+    { MP_ROM_QSTR(MP_QSTR_output_width), MP_ROM_PTR(&py_tf_output_width_obj) },
+    { MP_ROM_QSTR(MP_QSTR_output_channels), MP_ROM_PTR(&py_tf_output_channels_obj) },
+    { MP_ROM_QSTR(MP_QSTR_output_datatype), MP_ROM_PTR(&py_tf_output_datatype_obj) },
+    { MP_ROM_QSTR(MP_QSTR_output_scale), MP_ROM_PTR(&py_tf_output_scale_obj) },
+    { MP_ROM_QSTR(MP_QSTR_output_zero_point), MP_ROM_PTR(&py_tf_output_zero_point_obj) },
+    { MP_ROM_QSTR(MP_QSTR_classify), MP_ROM_PTR(&py_tf_classify_obj) },
+    { MP_ROM_QSTR(MP_QSTR_segment), MP_ROM_PTR(&py_tf_segment_obj) },
+    { MP_ROM_QSTR(MP_QSTR_detect), MP_ROM_PTR(&py_tf_detect_obj) }
 };
 
 STATIC MP_DEFINE_CONST_DICT(locals_dict, locals_dict_table);
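Worth noting how detect() scores each box above: after imlib_find_blobs() runs on a per-class heat map, the confidence attached to a blob is the mean grayscale value inside its rectangle (stats.LMean), rescaled from 0..255 to 0..1 by fscale. A small plain-Python recreation of that scoring step (heatmap_pixels stands in for the blob's pixels and is not a real API):

def blob_confidence(heatmap_pixels):
    # Mean heat-map intensity inside the blob, normalized to 0..1.
    return (sum(heatmap_pixels) / len(heatmap_pixels)) / 255.0

print(blob_confidence([200, 220, 255, 180]))  # ~0.84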
@@ -549,11 +779,13 @@ STATIC const mp_rom_map_elem_t globals_dict_table[] = {
     { MP_ROM_QSTR(MP_QSTR_free_from_fb), MP_ROM_PTR(&py_tf_free_from_fb_obj) },
     { MP_ROM_QSTR(MP_QSTR_classify), MP_ROM_PTR(&py_tf_classify_obj) },
     { MP_ROM_QSTR(MP_QSTR_segment), MP_ROM_PTR(&py_tf_segment_obj) },
+    { MP_ROM_QSTR(MP_QSTR_detect), MP_ROM_PTR(&py_tf_detect_obj) },
 #else
     { MP_ROM_QSTR(MP_QSTR_load), MP_ROM_PTR(&py_func_unavailable_obj) },
     { MP_ROM_QSTR(MP_QSTR_free_from_fb), MP_ROM_PTR(&py_func_unavailable_obj) },
     { MP_ROM_QSTR(MP_QSTR_classify), MP_ROM_PTR(&py_func_unavailable_obj) },
-    { MP_ROM_QSTR(MP_QSTR_segment), MP_ROM_PTR(&py_func_unavailable_obj) }
+    { MP_ROM_QSTR(MP_QSTR_segment), MP_ROM_PTR(&py_func_unavailable_obj) },
+    { MP_ROM_QSTR(MP_QSTR_detect), MP_ROM_PTR(&py_func_unavailable_obj) }
 #endif // IMLIB_ENABLE_TF
 };
 
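With libtf_parameters_t now stored on the model object, the shape and quantization metadata registered above is queryable from MicroPython. A quick sketch of the new introspection methods (the model filename is hypothetical; any TFLite file on flash works):

import tf

net = tf.load('trained.tflite', load_to_fb=True)  # 'trained.tflite' is a placeholder
print(net.ram())  # tensor arena size in bytes
print(net.input_width(), net.input_height(), net.input_channels())
print(net.input_datatype(), net.input_scale(), net.input_zero_point())
print(net.output_datatype(), net.output_scale(), net.output_zero_point())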
@@ -10,19 +10,19 @@
 */
 #ifndef __PY_TF_H__
 #define __PY_TF_H__
+#include "libtf.h"
 
-// PyTF model object handle
 typedef struct py_tf_model_obj {
     mp_obj_base_t base;
     unsigned char *model_data;
-    unsigned int model_data_len, height, width, channels;
-    bool signed_or_unsigned;
-    bool is_float;
+    unsigned int model_data_len;
+    libtf_parameters_t params;
 } py_tf_model_obj_t;
 
 // Log buffer
 #define PY_TF_PUTCHAR_BUFFER_LEN 1023
 extern char *py_tf_putchar_buffer;
+extern size_t py_tf_putchar_buffer_index;
 extern size_t py_tf_putchar_buffer_len;
 void py_tf_alloc_putchar_buffer();
 
@@ -111,8 +111,7 @@ mp_obj_t py_micro_speech_audio_callback(mp_obj_t self_in, mp_obj_t buf_in)
 }
 STATIC MP_DEFINE_CONST_FUN_OBJ_2(py_micro_speech_audio_callback_obj, py_micro_speech_audio_callback);
 
-STATIC void py_tf_input_callback(void *callback_data, void *model_input, const unsigned int input_height,
-                                 const unsigned int input_width, const unsigned int input_channels, const bool is_signed, const bool is_float)
+STATIC void py_tf_input_callback(void *callback_data, void *model_input, libtf_parameters_t *params)
 {
     // Copy feature buffer to input tensor
     for (int i = 0; i < kFeatureElementCount; i++) {
@@ -120,17 +119,25 @@ STATIC void py_tf_input_callback(void *callback_data, void *model_input, const unsigned int input_height,
     }
 }
 
-STATIC void py_tf_output_callback(void *callback_data, void *model_output, const unsigned int output_height,
-                                  const unsigned int output_width, const unsigned int output_channels, const bool is_signed, const bool is_float)
+STATIC void py_tf_output_callback(void *callback_data, void *model_output, libtf_parameters_t *params)
 {
     uint8_t *scores = (uint8_t *) callback_data;
-    PY_ASSERT_TRUE_MSG(output_height == 1, "Expected model output height to be 1!");
-    PY_ASSERT_TRUE_MSG(output_width == 1, "Expected model output width to be 1!");
-    PY_ASSERT_TRUE_MSG(output_channels == 4, "Expected model output channels to be 4!");
 
-    for (int i=0; i<output_channels; i++) {
-        scores[i] = (((uint8_t *) model_output)[i] ^ (is_signed ? 128 : 0));
-        debug_printf("%.2f ", (double)((((uint8_t *) model_output)[i] ^ (is_signed ? 128 : 0)) / 255.0f));
+    if (params->output_height != 1) {
+        mp_raise_msg(&mp_type_ValueError, MP_ERROR_TEXT("Expected model output height to be 1!"));
+    }
+
+    if (params->output_width != 1) {
+        mp_raise_msg(&mp_type_ValueError, MP_ERROR_TEXT("Expected model output width to be 1!"));
+    }
+
+    if (params->output_channels != 4) {
+        mp_raise_msg(&mp_type_ValueError, MP_ERROR_TEXT("Expected model output channels to be 4!"));
+    }
+
+    for (int i = 0, ii = params->output_channels; i < ii; i++) {
+        scores[i] = ((uint8_t *) model_output)[i] - params->output_zero_point;
+        debug_printf("%.2f ", (double) ((((uint8_t *) model_output)[i] - params->output_zero_point) * params->output_scale));
     }
 }
 
@@ -149,6 +156,15 @@ STATIC mp_obj_t py_micro_speech_listen(uint n_args, const mp_obj_t *args, mp_map_t *kw_args)
 
     uint32_t tensor_arena_size;
     uint8_t *tensor_arena = fb_alloc_all(&tensor_arena_size, FB_ALLOC_PREFER_SIZE);
+    libtf_parameters_t params;
+
+    if (libtf_get_parameters(arg_model->model_data, tensor_arena, tensor_arena_size, &params) != 0) {
+        mp_raise_msg(&mp_type_OSError, (mp_rom_error_text_t) py_tf_putchar_buffer);
+    }
+
+    fb_free(); // free fb_alloc_all()
+
+    tensor_arena = fb_alloc(params.tensor_arena_size, FB_ALLOC_PREFER_SPEED | FB_ALLOC_CACHE_ALIGN);
     int8_t spectrogram[kFeatureElementCount];
 
     uint32_t return_label = 0;
@@ -175,13 +191,12 @@ STATIC mp_obj_t py_micro_speech_listen(uint n_args, const mp_obj_t *args, mp_map_t *kw_args)
             // Run model on updated spectrogram
             if (libtf_invoke(arg_model->model_data,
                              tensor_arena,
-                             tensor_arena_size,
+                             &params,
                              py_tf_input_callback,
                              spectrogram,
                              py_tf_output_callback,
                              previous_scores[results_count]) != 0) {
-                mp_raise_msg(&mp_type_OSError, (mp_rom_error_text_t)
-                             py_tf_putchar_buffer - (PY_TF_PUTCHAR_BUFFER_LEN - py_tf_putchar_buffer_len));
+                mp_raise_msg(&mp_type_OSError, (mp_rom_error_text_t) py_tf_putchar_buffer);
             }
 
             // If we have enough samples calculate average scores.
@@ -272,4 +287,5 @@ const mp_obj_module_t micro_speech_module = {
 };
 
 MP_REGISTER_MODULE(MP_QSTR_micro_speech, micro_speech_module, MICROPY_PY_MICRO_SPEECH);
-#endif //MICROPY_PY_MICRO_SPEECH
+
+#endif // MICROPY_PY_MICRO_SPEECH