mirror of https://github.com/openmv/openmv.git
Add support for TensorFlow Lite for Microcontrollers
This has been a long time coming, but it's finally here: TensorFlow Lite now runs on the OpenMV Cam. Better yet, a person detection model is now built into all OpenMV Cams too! Our default code does image classification and supports multi-scale object detection using a sliding window. In a coming PR I will add MobileNet examples for the H7 with SDRAM and image segmentation support.
parent 1915b9142f
commit 6a9c0a370e
@@ -0,0 +1,49 @@
# TensorFlow Lite Person Detection Example
#
# Google's person detection model detects if a person is in view.
#
# In this example we slide the detector window over the image and get a list
# of activations. Note that using a CNN with a sliding window is extremely
# compute-expensive, so for an exhaustive search do not expect the CNN to be real-time.

import sensor, image, time, os, tf

sensor.reset()                         # Reset and initialize the sensor.
sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to GRAYSCALE (or RGB565).
sensor.set_framesize(sensor.QVGA)      # Set frame size to QVGA (320x240).
sensor.set_windowing((240, 240))       # Set 240x240 window.
sensor.skip_frames(time=2000)          # Let the camera adjust.

# Load the built-in person detection network (the network is in your OpenMV Cam's firmware).
net = tf.load('person_detection')
labels = ['unsure', 'person', 'no_person']

clock = time.clock()
while(True):
    clock.tick()

    img = sensor.snapshot()

    # net.classify() will run the network on an roi in the image (or on the whole image
    # if the roi is not specified). A classification score output vector will be generated
    # for each location. At each scale the detection window is moved around in the ROI
    # using x_overlap (0-1) and y_overlap (0-1) as a guide. If you set the overlap to 0.5
    # then each detection window will overlap the previous one by 50%. Note that the
    # computational workload goes WAY up with more overlap. Finally, for multi-scale
    # matching, after sliding the network around in the x/y dimensions the detection
    # window will shrink by scale_mul (0-1) down to min_scale (0-1). For example, if
    # scale_mul is 0.5 the detection window will shrink by 50% each iteration. Note that
    # at a lower scale there's even more area to search if x_overlap and y_overlap are small...

    # Setting x_overlap=-1 forces the window to always stay centered in the ROI in the
    # x direction. If y_overlap is not -1 the method will search in all vertical positions.

    # Setting y_overlap=-1 forces the window to always stay centered in the ROI in the
    # y direction. If x_overlap is not -1 the method will search in all horizontal positions.

    # The default settings just do one detection... change them to search the image...
    for obj in net.classify(img, min_scale=0.5, scale_mul=0.5, x_overlap=-1, y_overlap=-1):
        print("**********\nDetections at [x=%d,y=%d,w=%d,h=%d]" % obj.rect())
        for i in range(len(obj.output())):
            print("%s = %f" % (labels[i], obj.output()[i]))
        img.draw_rectangle(obj.rect())
        img.draw_string(obj.x()+3, obj.y()-1, labels[obj.output().index(max(obj.output()))], mono_space=False)
    print(clock.fps(), "fps")
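To make the multi-scale search described in the comments concrete, here is a rough pure-Python sketch of the window schedule those parameters imply. This is an illustration only: the sliding_windows() helper is hypothetical, not part of the firmware or the tf module.

def sliding_windows(roi_w, roi_h, min_scale=0.5, scale_mul=0.5, x_overlap=0.5, y_overlap=0.5):
    # Start at full scale and shrink the window by scale_mul down to min_scale.
    rects = []
    scale = 1.0
    while scale >= min_scale:
        w, h = int(roi_w * scale), int(roi_h * scale)
        # An overlap of 0.5 means each step advances by half a window.
        x_step = max(1, int(w * (1.0 - x_overlap)))
        y_step = max(1, int(h * (1.0 - y_overlap)))
        # An overlap of -1 pins the window to the ROI center on that axis.
        xs = [(roi_w - w) // 2] if x_overlap == -1 else list(range(0, roi_w - w + 1, x_step))
        ys = [(roi_h - h) // 2] if y_overlap == -1 else list(range(0, roi_h - h + 1, y_step))
        for y in ys:
            for x in xs:
                rects.append((x, y, w, h))
        scale *= scale_mul
    return rects

# The settings in the example above yield just two centered windows on a
# 240x240 ROI: one 240x240 and one 120x120.
print(len(sliding_windows(240, 240, x_overlap=-1, y_overlap=-1)))  # -> 2

Raising the overlaps or lowering min_scale grows the number of windows quickly, which is why the comments warn that an exhaustive search will not be real-time.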
@@ -0,0 +1,43 @@
# TensorFlow Lite Person Detection Example
#
# Google's person detection model detects if a person is in view.
#
# In this example we slide the detector window over the image and get a list
# of activations. Note that using a CNN with a sliding window is extremely
# compute-expensive, so for an exhaustive search do not expect the CNN to be real-time.

import sensor, image, time, os, tf

sensor.reset()                         # Reset and initialize the sensor.
sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to GRAYSCALE (or RGB565).
sensor.set_framesize(sensor.QVGA)      # Set frame size to QVGA (320x240).
sensor.set_windowing((240, 240))       # Set 240x240 window.
sensor.skip_frames(time=2000)          # Let the camera adjust.

# Load the built-in person detection network (the network is in your OpenMV Cam's firmware).
net = tf.load('person_detection')
labels = ['unsure', 'person', 'no_person']

clock = time.clock()
while(True):
    clock.tick()

    img = sensor.snapshot()

    # net.classify() will run the network on an roi in the image (or on the whole image
    # if the roi is not specified). A classification score output vector will be generated
    # for each location. At each scale the detection window is moved around in the ROI
    # using x_overlap (0-1) and y_overlap (0-1) as a guide. If you set the overlap to 0.5
    # then each detection window will overlap the previous one by 50%. Note that the
    # computational workload goes WAY up with more overlap. Finally, for multi-scale
    # matching, after sliding the network around in the x/y dimensions the detection
    # window will shrink by scale_mul (0-1) down to min_scale (0-1). For example, if
    # scale_mul is 0.5 the detection window will shrink by 50% each iteration. Note that
    # at a lower scale there's even more area to search if x_overlap and y_overlap are small...

    # The default settings just do one detection... change them to search the image...
    for obj in net.classify(img, min_scale=1.0, scale_mul=0.5, x_overlap=0.0, y_overlap=0.0):
        print("**********\nDetections at [x=%d,y=%d,w=%d,h=%d]" % obj.rect())
        for i in range(len(obj.output())):
            print("%s = %f" % (labels[i], obj.output()[i]))
        img.draw_rectangle(obj.rect())
        img.draw_string(obj.x()+3, obj.y()-1, labels[obj.output().index(max(obj.output()))], mono_space=False)
    print(clock.fps(), "fps")
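With these defaults the whole image is classified exactly once per frame, so each frame yields a single score vector. If you only want to act on confident results, you can threshold the best score before using the label. A small sketch building on the net/labels/img variables above; the 0.7 cutoff is an arbitrary illustrative choice, not anything the firmware mandates:

for obj in net.classify(img):
    scores = obj.output()                # one score per label, each 0.0-1.0
    best = scores.index(max(scores))     # index of the strongest label
    if labels[best] != 'unsure' and scores[best] > 0.7:  # skip low-confidence frames
        print("Confident detection: %s (%.2f)" % (labels[best], scores[best]))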
@@ -16,6 +16,7 @@ endif

 # Commands
 CC = $(Q)arm-none-eabi-gcc
+CXX = $(Q)arm-none-eabi-g++
 AS = $(Q)arm-none-eabi-as
 LD = $(Q)arm-none-eabi-ld
 AR = $(Q)arm-none-eabi-ar
@@ -50,6 +51,7 @@ MICROPY_DIR=micropython
 OMV_DIR=omv
 LEPTON_DIR=lepton
 MLX_DIR=mlx
+TENSORFLOW_DIR=libtf
 WINC1500_DIR=winc1500
 BOOTLDR_DIR=bootloader
 UVC_DIR=uvc
@@ -119,6 +121,7 @@ OMV_CFLAGS += -I$(TOP_DIR)/$(OMV_DIR)/img/
 OMV_CFLAGS += -I$(OMV_BOARD_CONFIG_DIR)
 OMV_CFLAGS += -I$(TOP_DIR)/$(LEPTON_DIR)/include/
 OMV_CFLAGS += -I$(TOP_DIR)/$(MLX_DIR)/include/
+OMV_CFLAGS += -I$(TOP_DIR)/$(TENSORFLOW_DIR)/$(CPU)/
 OMV_CFLAGS += -I$(TOP_DIR)/$(WINC1500_DIR)/include/

 UVC_CFLAGS = $(CFLAGS)
@@ -261,6 +264,7 @@ FIRM_OBJ += $(addprefix $(BUILD)/$(OMV_DIR)/py/, \
 	py_winc.o \
 	py_cpufreq.o \
 	py_nn.o \
+	py_tf.o \
 	)

@@ -448,6 +452,8 @@ ifeq ($(CUBEAI), 1)
 include $(TOP_DIR)/stm32cubeai/cube.mk
 endif

+FIRM_OBJ += $(wildcard $(TOP_DIR)/$(TENSORFLOW_DIR)/$(CPU)/*.a)
+
 # Bootloader object files
 BOOT_OBJ += $(wildcard $(BUILD)/$(BOOTLDR_DIR)/src/*.o)
 BOOT_OBJ += $(wildcard $(BUILD)/$(STHAL_DIR)/src/*.o)
src/libtf/cortex-m4/LICENSE (new file, 203 lines)
@@ -0,0 +1,203 @@
Copyright 2019 The TensorFlow Authors. All rights reserved.

                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright [yyyy] [name of copyright owner]

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
src/libtf/cortex-m4/README (new file, 5 lines)
@@ -0,0 +1,5 @@
You must link this library to your application with arm-none-eabi-g++ and have implemented puts().

C Compile Flags: -D __FPU_PRESENT=1 -DGEMMLOWP_ALLOW_SLOW_SCALAR_FALLBACK -DNDEBUG -DTF_LITE_DISABLE_X86_NEON -DTF_LITE_MCU_DEBUG_LOG -DTF_LITE_STATIC_MEMORY -MMD -O3 -Wall -Wextra -Wvla -Wno-missing-field-initializers -Wno-parentheses -Wno-sign-compare -Wno-strict-aliasing -Wno-type-limits -Wno-unused-but-set-variable -Wno-unused-parameter -Wno-unused-variable -Wno-write-strings -fdata-sections -ffunction-sections -fmessage-length=0 -fomit-frame-pointer -funsigned-char -fno-builtin -fno-delete-null-pointer-checks -fno-exceptions -fno-unwind-tables -mfloat-abi=hard -mlittle-endian -mthumb -mno-unaligned-access -nostdlib -Wno-pointer-sign -DARM_CMSIS_NN_M4 -DARM_MATH_CM4 -mcpu=cortex-m4 -mfpu=fpv4-sp-d16 -mtune=cortex-m4

CXX Compile Flags: -D __FPU_PRESENT=1 -DGEMMLOWP_ALLOW_SLOW_SCALAR_FALLBACK -DNDEBUG -DTF_LITE_DISABLE_X86_NEON -DTF_LITE_MCU_DEBUG_LOG -DTF_LITE_STATIC_MEMORY -MMD -O3 -Wall -Wextra -Wvla -Wno-missing-field-initializers -Wno-parentheses -Wno-sign-compare -Wno-strict-aliasing -Wno-type-limits -Wno-unused-but-set-variable -Wno-unused-parameter -Wno-unused-variable -Wno-write-strings -fdata-sections -ffunction-sections -fmessage-length=0 -fomit-frame-pointer -funsigned-char -fno-builtin -fno-delete-null-pointer-checks -fno-exceptions -fno-unwind-tables -mfloat-abi=hard -mlittle-endian -mthumb -mno-unaligned-access -nostdlib -std=c++11 -std=gnu++11 -fno-rtti -fpermissive -DARM_CMSIS_NN_M4 -DARM_MATH_CM4 -mcpu=cortex-m4 -mfpu=fpv4-sp-d16 -mtune=cortex-m4
src/libtf/cortex-m4/libtf.a (new binary file, not shown)
src/libtf/cortex-m4/libtf.h (new file, 61 lines)
@@ -0,0 +1,61 @@
/* This file is part of the OpenMV project.
 * Copyright (c) 2013-2019 Ibrahim Abdelkader <iabdalkader@openmv.io> & Kwabena W. Agyeman <kwagyeman@openmv.io>
 * This work is licensed under the MIT license, see the file LICENSE for details.
 */

#ifndef __LIBTF_H
#define __LIBTF_H

#ifdef __cplusplus
extern "C" {
#endif

// Call this first to get the shape of the model input.
// Returns 0 on success and 1 on failure.
// Errors are printed to stdout.
int libtf_get_input_data_hwc(const unsigned char *model_data,      // TensorFlow Lite binary model (8-bit quant).
                             unsigned char *tensor_arena,          // Scratch buffer - make it as big as you can.
                             const unsigned int tensor_arena_size, // Size of the above scratch buffer.
                             unsigned int *input_height,           // Height for the model.
                             unsigned int *input_width,            // Width for the model.
                             unsigned int *input_channels);        // Channels for the model (1 for grayscale8 and 3 for rgb888).

// Call this second to get the shape of the model output.
// Returns 0 on success and 1 on failure.
// Errors are printed to stdout.
int libtf_get_output_data_hwc(const unsigned char *model_data,      // TensorFlow Lite binary model (8-bit quant).
                              unsigned char *tensor_arena,          // Scratch buffer - make it as big as you can.
                              const unsigned int tensor_arena_size, // Size of the above scratch buffer.
                              unsigned int *output_height,          // Height for the model.
                              unsigned int *output_width,           // Width for the model.
                              unsigned int *output_channels);       // Channels for the model (1 for grayscale8 and 3 for rgb888).

// Callback to populate the model input data byte array (laid out in [height][width][channel] order).
typedef void (*libtf_input_data_callback_t)(void *callback_data,
                                            unsigned char *model_input,
                                            const unsigned int input_height,
                                            const unsigned int input_width,
                                            const unsigned int input_channels);

// Callback to use the model output data byte array (laid out in [height][width][channel] order).
typedef void (*libtf_output_data_callback_t)(void *callback_data,
                                             unsigned char *model_output,
                                             const unsigned int output_height,
                                             const unsigned int output_width,
                                             const unsigned int output_channels);

// Runs inference: fills the input via input_callback, invokes the model, then hands the output to output_callback.
// Returns 0 on success and 1 on failure.
// Errors are printed to stdout.
int libtf_invoke(const unsigned char *model_data,              // TensorFlow Lite binary model (8-bit quant).
                 unsigned char *tensor_arena,                  // Scratch buffer - make it as big as you can.
                 const unsigned int tensor_arena_size,         // Size of the above scratch buffer.
                 libtf_input_data_callback_t input_callback,   // Callback to populate the model input data byte array.
                 void *input_callback_data,                    // User data structure passed to input callback.
                 libtf_output_data_callback_t output_callback, // Callback to use the model output data byte array.
                 void *output_callback_data);                  // User data structure passed to output callback.

#ifdef __cplusplus
}
#endif

#endif // __LIBTF_H
src/libtf/cortex-m4/libtf_person_detect_model_data.a (new binary file, not shown)
src/libtf/cortex-m4/libtf_person_detect_model_data.h (new file, 27 lines)
@@ -0,0 +1,27 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

// This is a standard TensorFlow Lite model file that has been converted into a
// C data array, so it can be easily compiled into a binary for devices that
// don't have a file system. It was created using the command:
// xxd -i person_detect.tflite > person_detect_model_data.cc

#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_PERSON_DETECTION_PERSON_DETECT_MODEL_DATA_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_PERSON_DETECTION_PERSON_DETECT_MODEL_DATA_H_

extern const unsigned char g_person_detect_model_data[];
extern const int g_person_detect_model_data_len;

#endif // TENSORFLOW_LITE_EXPERIMENTAL_MICRO_EXAMPLES_PERSON_DETECTION_PERSON_DETECT_MODEL_DATA_H_
src/libtf/cortex-m7/LICENSE (new file, 203 lines)
@@ -0,0 +1,203 @@
[Identical to src/libtf/cortex-m4/LICENSE above: the standard Apache License 2.0 text.]
src/libtf/cortex-m7/README (new file, 5 lines)
@@ -0,0 +1,5 @@
You must link this library to your application with arm-none-eabi-g++ and have implemented puts().

C Compile Flags: -D __FPU_PRESENT=1 -DGEMMLOWP_ALLOW_SLOW_SCALAR_FALLBACK -DNDEBUG -DTF_LITE_DISABLE_X86_NEON -DTF_LITE_MCU_DEBUG_LOG -DTF_LITE_STATIC_MEMORY -MMD -O3 -Wall -Wextra -Wvla -Wno-missing-field-initializers -Wno-parentheses -Wno-sign-compare -Wno-strict-aliasing -Wno-type-limits -Wno-unused-but-set-variable -Wno-unused-parameter -Wno-unused-variable -Wno-write-strings -fdata-sections -ffunction-sections -fmessage-length=0 -fomit-frame-pointer -funsigned-char -fno-builtin -fno-delete-null-pointer-checks -fno-exceptions -fno-unwind-tables -mfloat-abi=hard -mlittle-endian -mthumb -mno-unaligned-access -nostdlib -Wno-pointer-sign -DARM_CMSIS_NN_M7 -DARM_MATH_CM7 -mcpu=cortex-m7 -mfpu=fpv5-sp-d16 -mtune=cortex-m7

CXX Compile Flags: -D __FPU_PRESENT=1 -DGEMMLOWP_ALLOW_SLOW_SCALAR_FALLBACK -DNDEBUG -DTF_LITE_DISABLE_X86_NEON -DTF_LITE_MCU_DEBUG_LOG -DTF_LITE_STATIC_MEMORY -MMD -O3 -Wall -Wextra -Wvla -Wno-missing-field-initializers -Wno-parentheses -Wno-sign-compare -Wno-strict-aliasing -Wno-type-limits -Wno-unused-but-set-variable -Wno-unused-parameter -Wno-unused-variable -Wno-write-strings -fdata-sections -ffunction-sections -fmessage-length=0 -fomit-frame-pointer -funsigned-char -fno-builtin -fno-delete-null-pointer-checks -fno-exceptions -fno-unwind-tables -mfloat-abi=hard -mlittle-endian -mthumb -mno-unaligned-access -nostdlib -std=c++11 -std=gnu++11 -fno-rtti -fpermissive -DARM_CMSIS_NN_M7 -DARM_MATH_CM7 -mcpu=cortex-m7 -mfpu=fpv5-sp-d16 -mtune=cortex-m7
src/libtf/cortex-m7/libtf.a (new binary file, not shown)
src/libtf/cortex-m7/libtf.h (new file, 61 lines)
@@ -0,0 +1,61 @@
[Identical to src/libtf/cortex-m4/libtf.h above.]
src/libtf/cortex-m7/libtf_person_detect_model_data.a (new binary file, not shown)
src/libtf/cortex-m7/libtf_person_detect_model_data.h (new file, 27 lines)
@@ -0,0 +1,27 @@
[Identical to src/libtf/cortex-m4/libtf_person_detect_model_data.h above.]
@@ -1 +1 @@
-Subproject commit 00069f12cf48d12f6a84c242cc33966008637dfa
+Subproject commit 452495f6c51233991eed7ac6b0f9ecd8011892ca
@@ -105,6 +105,7 @@ SRCS += $(addprefix py/, \
 	py_winc.c \
 	py_cpufreq.c \
 	py_nn.c \
+	py_tf.c \
 	)

 OBJS = $(addprefix $(BUILD)/, $(SRCS:.c=.o))
@@ -83,4 +83,7 @@
 // Enable LENET (200+ KB).
 //#define IMLIB_ENABLE_LENET

+// Enable Tensor Flow
+//#define IMLIB_ENABLE_TF
+
 #endif //__IMLIB_CONFIG_H__
@@ -128,6 +128,9 @@
 // Enable LENET (200+ KB).
 //#define IMLIB_ENABLE_LENET

+// Enable Tensor Flow
+//#define IMLIB_ENABLE_TF
+
 // Enable FAST (20+ KBs).
 //#define IMLIB_ENABLE_FAST
@@ -134,6 +134,9 @@
 // Enable CMSIS NN
 #define IMLIB_ENABLE_CNN

+// Enable Tensor Flow
+#define IMLIB_ENABLE_TF
+
 // Enable FAST (20+ KBs).
 #define IMLIB_ENABLE_FAST
@@ -69,7 +69,7 @@
 #define OMV_MSC_BUF_SIZE (2K) // USB MSC bot data
 #define OMV_VFS_BUF_SIZE (1K) // VFS sturct + FATFS file buffer (624 bytes)
 #define OMV_FFS_BUF_SIZE (32K) // Flash filesystem cache
-#define OMV_JPEG_BUF_SIZE (23 * 1024) // IDE JPEG buffer (header + data).
+#define OMV_JPEG_BUF_SIZE (22 * 1024) // IDE JPEG buffer (header + data).

 #define OMV_BOOT_ORIGIN 0x08000000
 #define OMV_BOOT_LENGTH 32K
@@ -134,6 +134,9 @@
 // Enable CMSIS NN
 #define IMLIB_ENABLE_CNN

+// Enable Tensor Flow
+#define IMLIB_ENABLE_TF
+
 // Enable FAST (20+ KBs).
 #define IMLIB_ENABLE_FAST
@@ -72,8 +72,8 @@

 #define OMV_FB_SIZE (400K) // FB memory: header + VGA/GS image
 #define OMV_FB_ALLOC_SIZE (96K) // minimum fb alloc size
-#define OMV_STACK_SIZE (7K)
-#define OMV_HEAP_SIZE (240K)
+#define OMV_STACK_SIZE (8K)
+#define OMV_HEAP_SIZE (237K)

 #define OMV_LINE_BUF_SIZE (3K) // Image line buffer round(640 * 2BPP * 2 buffers).
 #define OMV_MSC_BUF_SIZE (12K) // USB MSC bot data
@@ -134,6 +134,9 @@
 // Enable CMSIS NN
 #define IMLIB_ENABLE_CNN

+// Enable Tensor Flow
+#define IMLIB_ENABLE_TF
+
 // Enable FAST (20+ KBs).
 #define IMLIB_ENABLE_FAST
@@ -75,8 +75,8 @@

 #define OMV_FB_SIZE (30M) // FB memory: header + VGA/GS image
 #define OMV_FB_ALLOC_SIZE (1M) // minimum fb alloc size
-#define OMV_STACK_SIZE (7K)
-#define OMV_HEAP_SIZE (240K)
+#define OMV_STACK_SIZE (8K)
+#define OMV_HEAP_SIZE (237K)

 #define OMV_LINE_BUF_SIZE (11K) // Image line buffer round(2592 * 2BPP * 2 buffers).
 #define OMV_MSC_BUF_SIZE (12K) // USB MSC bot data
443
src/omv/py/py_tf.c
Normal file
443
src/omv/py/py_tf.c
Normal file
@ -0,0 +1,443 @@
|
||||
/* This file is part of the OpenMV project.
|
||||
* Copyright (c) 2013-2019 Ibrahim Abdelkader <iabdalkader@openmv.io> & Kwabena W. Agyeman <kwagyeman@openmv.io>
|
||||
* This work is licensed under the MIT license, see the file LICENSE for details.
|
||||
*/
|
||||
|
||||
#include <mp.h>
|
||||
#include "py_helper.h"
|
||||
#include "ff_wrapper.h"
|
||||
#include "libtf.h"
|
||||
#include "libtf_person_detect_model_data.h"
|
||||
|
||||
#ifdef IMLIB_ENABLE_TF
|
||||
|
||||
// TF Model Object
|
||||
typedef struct py_tf_model_obj {
|
||||
mp_obj_base_t base;
|
||||
unsigned char *model_data;
|
||||
unsigned int model_data_len, height, width, channels;
|
||||
} py_tf_model_obj_t;
|
||||
|
||||
STATIC void py_tf_model_print(const mp_print_t *print, mp_obj_t self_in, mp_print_kind_t kind)
|
||||
{
|
||||
py_tf_model_obj_t *self = self_in;
|
||||
mp_printf(print,
|
||||
"{\"len\":%d, \"height\":%d, \"width\":%d, \"channels\":%d}",
|
||||
self->model_data_len,
|
||||
self->height,
|
||||
self->width,
|
||||
self->channels);
|
||||
}
|
||||
|
||||
// TF Classification Object
|
||||
#define py_tf_classification_obj_size 5
|
||||
typedef struct py_tf_classification_obj {
|
||||
mp_obj_base_t base;
|
||||
mp_obj_t x, y, w, h, output;
|
||||
} py_tf_classification_obj_t;
|
||||
|
||||
STATIC void py_tf_classification_print(const mp_print_t *print, mp_obj_t self_in, mp_print_kind_t kind)
|
||||
{
|
||||
py_tf_classification_obj_t *self = self_in;
|
||||
mp_printf(print,
|
||||
"{\"x\":%d, \"y\":%d, \"w\":%d, \"h\":%d, \"output\":",
|
||||
mp_obj_get_int(self->x),
|
||||
mp_obj_get_int(self->y),
|
||||
mp_obj_get_int(self->w),
|
||||
mp_obj_get_int(self->h));
|
||||
mp_obj_print_helper(print, self->output, kind);
|
||||
mp_printf(print, "}");
|
||||
}
|
||||
|
||||
STATIC mp_obj_t py_tf_classification_subscr(mp_obj_t self_in, mp_obj_t index, mp_obj_t value)
|
||||
{
|
||||
if (value == MP_OBJ_SENTINEL) { // load
|
||||
py_tf_classification_obj_t *self = self_in;
|
||||
if (MP_OBJ_IS_TYPE(index, &mp_type_slice)) {
|
||||
mp_bound_slice_t slice;
|
||||
if (!mp_seq_get_fast_slice_indexes(py_tf_classification_obj_size, index, &slice)) {
|
||||
nlr_raise(mp_obj_new_exception_msg(&mp_type_OSError, "only slices with step=1 (aka None) are supported"));
|
||||
}
|
||||
mp_obj_tuple_t *result = mp_obj_new_tuple(slice.stop - slice.start, NULL);
|
||||
mp_seq_copy(result->items, &(self->x) + slice.start, result->len, mp_obj_t);
|
||||
return result;
|
||||
}
|
||||
switch (mp_get_index(self->base.type, py_tf_classification_obj_size, index, false)) {
|
||||
case 0: return self->x;
|
||||
case 1: return self->y;
|
||||
case 2: return self->w;
|
||||
case 3: return self->h;
|
||||
case 4: return self->output;
|
||||
}
|
||||
}
|
||||
return MP_OBJ_NULL; // op not supported
|
||||
}
|
||||
|
||||
mp_obj_t py_tf_classification_rect(mp_obj_t self_in)
|
||||
{
|
||||
return mp_obj_new_tuple(4, (mp_obj_t []) {((py_tf_classification_obj_t *) self_in)->x,
|
||||
((py_tf_classification_obj_t *) self_in)->y,
|
||||
((py_tf_classification_obj_t *) self_in)->w,
|
||||
((py_tf_classification_obj_t *) self_in)->h});
|
||||
}
|
||||
|
||||
mp_obj_t py_tf_classification_x(mp_obj_t self_in) { return ((py_tf_classification_obj_t *) self_in)->x; }
|
||||
mp_obj_t py_tf_classification_y(mp_obj_t self_in) { return ((py_tf_classification_obj_t *) self_in)->y; }
|
||||
mp_obj_t py_tf_classification_w(mp_obj_t self_in) { return ((py_tf_classification_obj_t *) self_in)->w; }
|
||||
mp_obj_t py_tf_classification_h(mp_obj_t self_in) { return ((py_tf_classification_obj_t *) self_in)->h; }
|
||||
mp_obj_t py_tf_classification_output(mp_obj_t self_in) { return ((py_tf_classification_obj_t *) self_in)->output; }
|
||||
|
||||
STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_tf_classification_rect_obj, py_tf_classification_rect);
|
||||
STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_tf_classification_x_obj, py_tf_classification_x);
|
||||
STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_tf_classification_y_obj, py_tf_classification_y);
|
||||
STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_tf_classification_w_obj, py_tf_classification_w);
|
||||
STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_tf_classification_h_obj, py_tf_classification_h);
|
||||
STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_tf_classification_output_obj, py_tf_classification_output);
|
||||
|
||||
STATIC const mp_rom_map_elem_t py_tf_classification_locals_dict_table[] = {
|
||||
{ MP_ROM_QSTR(MP_QSTR_rect), MP_ROM_PTR(&py_tf_classification_rect_obj) },
|
||||
{ MP_ROM_QSTR(MP_QSTR_x), MP_ROM_PTR(&py_tf_classification_x_obj) },
|
||||
{ MP_ROM_QSTR(MP_QSTR_y), MP_ROM_PTR(&py_tf_classification_y_obj) },
|
||||
{ MP_ROM_QSTR(MP_QSTR_w), MP_ROM_PTR(&py_tf_classification_w_obj) },
|
||||
{ MP_ROM_QSTR(MP_QSTR_h), MP_ROM_PTR(&py_tf_classification_h_obj) },
|
||||
{ MP_ROM_QSTR(MP_QSTR_output), MP_ROM_PTR(&py_tf_classification_output_obj) }
|
||||
};
|
||||
|
||||
STATIC MP_DEFINE_CONST_DICT(py_tf_classification_locals_dict, py_tf_classification_locals_dict_table);
|
||||
|
||||
static const mp_obj_type_t py_tf_classification_type = {
|
||||
{ &mp_type_type },
|
||||
.name = MP_QSTR_tf_classification,
|
||||
.print = py_tf_classification_print,
|
||||
.subscr = py_tf_classification_subscr,
|
||||
.locals_dict = (mp_obj_t) &py_tf_classification_locals_dict
|
||||
};
|
||||
|
||||
static const mp_obj_type_t py_tf_model_type;
|
||||
|
||||
typedef struct py_tf_input_data_callback_data {
|
||||
image_t *img;
|
||||
rectangle_t *roi;
|
||||
} py_tf_input_data_callback_data_t;
|
||||
|
||||
STATIC void py_tf_input_data_callback(void *callback_data,
|
||||
unsigned char *model_input,
|
||||
const unsigned int input_height,
|
||||
const unsigned int input_width,
|
||||
const unsigned int input_channels)
|
||||
{
|
||||
py_tf_input_data_callback_data_t *arg = (py_tf_input_data_callback_data_t *) callback_data;
|
||||
|
||||
float xscale = input_width / ((float) arg->roi->w);
|
||||
float yscale = input_height / ((float) arg->roi->h);
|
||||
// MAX == KeepAspectRationByExpanding - MIN == KeepAspectRatio
|
||||
float scale = IM_MAX(xscale, yscale), scale_inv = 1 / scale;
|
||||
float x_offset = ((arg->roi->w * scale) - input_width) / 2;
|
||||
float y_offset = ((arg->roi->h * scale) - input_height) / 2;
    switch (arg->img->bpp) {
        case IMAGE_BPP_BINARY: {
            for (int y = 0, yy = input_height; y < yy; y++) {
                uint32_t *row_ptr = IMAGE_COMPUTE_BINARY_PIXEL_ROW_PTR(arg->img, fast_floorf((y + y_offset) * scale_inv) + arg->roi->y);
                int row = input_width * y;
                for (int x = 0, xx = input_width; x < xx; x++) {
                    int pixel = IMAGE_GET_BINARY_PIXEL_FAST(row_ptr, fast_floorf((x + x_offset) * scale_inv) + arg->roi->x);
                    int index = row + x;
                    switch (input_channels) {
                        case 1: {
                            model_input[index] = COLOR_BINARY_TO_GRAYSCALE(pixel);
                            break;
                        }
                        case 3: {
                            int index_3 = index * 3;
                            pixel = COLOR_BINARY_TO_RGB565(pixel);
                            model_input[index_3 + 0] = COLOR_RGB565_TO_R8(pixel);
                            model_input[index_3 + 1] = COLOR_RGB565_TO_G8(pixel);
                            model_input[index_3 + 2] = COLOR_RGB565_TO_B8(pixel);
                            break;
                        }
                        default: {
                            break;
                        }
                    }
                }
            }
            break;
        }
        case IMAGE_BPP_GRAYSCALE: {
            for (int y = 0, yy = input_height; y < yy; y++) {
                uint8_t *row_ptr = IMAGE_COMPUTE_GRAYSCALE_PIXEL_ROW_PTR(arg->img, fast_floorf((y + y_offset) * scale_inv) + arg->roi->y);
                int row = input_width * y;
                for (int x = 0, xx = input_width; x < xx; x++) {
                    int pixel = IMAGE_GET_GRAYSCALE_PIXEL_FAST(row_ptr, fast_floorf((x + x_offset) * scale_inv) + arg->roi->x);
                    int index = row + x;
                    switch (input_channels) {
                        case 1: {
                            model_input[index] = pixel;
                            break;
                        }
                        case 3: {
                            int index_3 = index * 3;
                            pixel = COLOR_GRAYSCALE_TO_RGB565(pixel);
                            model_input[index_3 + 0] = COLOR_RGB565_TO_R8(pixel);
                            model_input[index_3 + 1] = COLOR_RGB565_TO_G8(pixel);
                            model_input[index_3 + 2] = COLOR_RGB565_TO_B8(pixel);
                            break;
                        }
                        default: {
                            break;
                        }
                    }
                }
            }
            break;
        }
        case IMAGE_BPP_RGB565: {
            for (int y = 0, yy = input_height; y < yy; y++) {
                uint16_t *row_ptr = IMAGE_COMPUTE_RGB565_PIXEL_ROW_PTR(arg->img, fast_floorf((y + y_offset) * scale_inv) + arg->roi->y);
                int row = input_width * y;
                for (int x = 0, xx = input_width; x < xx; x++) {
                    int pixel = IMAGE_GET_RGB565_PIXEL_FAST(row_ptr, fast_floorf((x + x_offset) * scale_inv) + arg->roi->x);
                    int index = row + x;
                    switch (input_channels) {
                        case 1: {
                            model_input[index] = COLOR_RGB565_TO_GRAYSCALE(pixel);
                            break;
                        }
                        case 3: {
                            int index_3 = index * 3;
                            model_input[index_3 + 0] = COLOR_RGB565_TO_R8(pixel);
                            model_input[index_3 + 1] = COLOR_RGB565_TO_G8(pixel);
                            model_input[index_3 + 2] = COLOR_RGB565_TO_B8(pixel);
                            break;
                        }
                        default: {
                            break;
                        }
                    }
                }
            }
            break;
        }
        default: {
            break;
        }
    }
}

typedef struct py_tf_classify_output_data_callback_data {
    mp_obj_t out;
} py_tf_classify_output_data_callback_data_t;
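// Carries the resulting MicroPython list out of the output callback below.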

STATIC void py_tf_classify_output_data_callback(void *callback_data,
                                                unsigned char *model_output,
                                                const unsigned int output_height,
                                                const unsigned int output_width,
                                                const unsigned int output_channels)
{
    py_tf_classify_output_data_callback_data_t *arg = (py_tf_classify_output_data_callback_data_t *) callback_data;

    PY_ASSERT_TRUE_MSG(output_height == 1, "Expected model output height to be 1!");
    PY_ASSERT_TRUE_MSG(output_width == 1, "Expected model output width to be 1!");

    arg->out = mp_obj_new_list(output_channels, NULL);
    for (unsigned int i = 0; i < output_channels; i++) {
        ((mp_obj_list_t *) arg->out)->items[i] = mp_obj_new_float(model_output[i] / 255.0f);
    }
}
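// Note: the output tensor is assumed to be uint8-quantized, so dividing each
// score by 255.0f above maps it to a confidence in [0, 1], one float per
// output channel (e.g. three for the built-in person-detection model).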

STATIC mp_obj_t py_tf_classify(uint n_args, const mp_obj_t *args, mp_map_t *kw_args)
{
    py_tf_model_obj_t *arg_model;
    image_t *arg_img = py_helper_arg_to_image_mutable(args[1]);

    rectangle_t roi;
    py_helper_keyword_rectangle_roi(arg_img, n_args, args, 2, kw_args, &roi);

    float arg_min_scale = py_helper_keyword_float(n_args, args, 4, kw_args, MP_OBJ_NEW_QSTR(MP_QSTR_min_scale), 1.0f);
    PY_ASSERT_TRUE_MSG((0.0f < arg_min_scale) && (arg_min_scale <= 1.0f), "0 < min_scale <= 1");

    float arg_scale_mul = py_helper_keyword_float(n_args, args, 5, kw_args, MP_OBJ_NEW_QSTR(MP_QSTR_scale_mul), 0.5f);
    PY_ASSERT_TRUE_MSG((0.0f <= arg_scale_mul) && (arg_scale_mul < 1.0f), "0 <= scale_mul < 1");

    float arg_x_overlap = py_helper_keyword_float(n_args, args, 6, kw_args, MP_OBJ_NEW_QSTR(MP_QSTR_x_overlap), 0.0f);
    PY_ASSERT_TRUE_MSG(((0.0f <= arg_x_overlap) && (arg_x_overlap < 1.0f)) || (arg_x_overlap == -1.0f), "0 <= x_overlap < 1 (or -1)");

    float arg_y_overlap = py_helper_keyword_float(n_args, args, 7, kw_args, MP_OBJ_NEW_QSTR(MP_QSTR_y_overlap), 0.0f);
    PY_ASSERT_TRUE_MSG(((0.0f <= arg_y_overlap) && (arg_y_overlap < 1.0f)) || (arg_y_overlap == -1.0f), "0 <= y_overlap < 1 (or -1)");

    fb_alloc_mark();
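    // Everything fb_alloc()'d from here on (model data read from a file and the
    // tensor arena below) is released in one shot by fb_alloc_free_till_mark()
    // at the end of this function.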

    if (MP_OBJ_IS_TYPE(args[0], &py_tf_model_type)) {
        arg_model = (py_tf_model_obj_t *) args[0];
    } else {
        const char *path = mp_obj_str_get_str(args[0]);
        arg_model = m_new_obj(py_tf_model_obj_t);
        arg_model->base.type = &py_tf_model_type;

        if (!strcmp(path, "person_detection")) {
            arg_model->model_data = (unsigned char *) g_person_detect_model_data;
            arg_model->model_data_len = g_person_detect_model_data_len;
        } else {
            FIL fp;
            file_read_open(&fp, path);
            arg_model->model_data_len = f_size(&fp);
            arg_model->model_data = fb_alloc(arg_model->model_data_len, FB_ALLOC_NO_HINT);
            read_data(&fp, arg_model->model_data, arg_model->model_data_len);
            file_close(&fp);
        }

        uint32_t tensor_arena_size;
        uint8_t *tensor_arena = fb_alloc_all(&tensor_arena_size, FB_ALLOC_PREFER_SIZE);

        PY_ASSERT_FALSE_MSG(libtf_get_input_data_hwc(arg_model->model_data,
                                                     tensor_arena,
                                                     tensor_arena_size,
                                                     &arg_model->height,
                                                     &arg_model->width,
                                                     &arg_model->channels),
                            "Unable to read model height, width, and channels!");

        fb_free(); // Free just the fb_alloc_all() above - not the model data fb_alloc()'d before it.
    }

    uint32_t tensor_arena_size;
    uint8_t *tensor_arena = fb_alloc_all(&tensor_arena_size, FB_ALLOC_PREFER_SIZE);

    mp_obj_t objects_list = mp_obj_new_list(0, NULL);

    for (float scale = 1.0f; scale >= arg_min_scale; scale *= arg_scale_mul) {
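        // Worked example with roi.h == 240 and scale == 0.4 (a 96 px window):
        // tiling with y_overlap == 0 starts at fmodf(240, 96) / 2 == 24 and fits
        // two windows with 24 px of slack on each side, while y_overlap == -1
        // centers one window at (240 - 96) / 2 == 72. The x loop works the same way.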
        // Either provide a small offset to center multiple detection windows or center the only detection window.
        for (int y = roi.y + ((arg_y_overlap != -1.0f) ? (fmodf(roi.h, (roi.h * scale)) / 2.0f) : ((roi.h - (roi.h * scale)) / 2.0f));
             // Finish when the detection window is outside of the ROI.
             (y + (roi.h * scale)) <= (roi.y + roi.h);
             // Step by an overlap amount accounting for scale or just terminate after one iteration.
             y += ((arg_y_overlap != -1.0f) ? (roi.h * scale * (1.0f - arg_y_overlap)) : roi.h)) {
            // Either provide a small offset to center multiple detection windows or center the only detection window.
            for (int x = roi.x + ((arg_x_overlap != -1.0f) ? (fmodf(roi.w, (roi.w * scale)) / 2.0f) : ((roi.w - (roi.w * scale)) / 2.0f));
                 // Finish when the detection window is outside of the ROI.
                 (x + (roi.w * scale)) <= (roi.x + roi.w);
                 // Step by an overlap amount accounting for scale or just terminate after one iteration.
                 x += ((arg_x_overlap != -1.0f) ? (roi.w * scale * (1.0f - arg_x_overlap)) : roi.w)) {

                rectangle_t new_roi;
                rectangle_init(&new_roi, x, y, roi.w * scale, roi.h * scale);

                if (rectangle_overlap(&roi, &new_roi)) { // Skip windows that degenerated to zero size and no longer overlap the ROI.
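                    // One inference per window: the input callback crops and
                    // scales this window into the input tensor, and the output
                    // callback converts the raw scores into a Python float list.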
                    py_tf_input_data_callback_data_t py_tf_input_data_callback_data;
                    py_tf_input_data_callback_data.img = arg_img;
                    py_tf_input_data_callback_data.roi = &new_roi;

                    py_tf_classify_output_data_callback_data_t py_tf_classify_output_data_callback_data;

                    PY_ASSERT_FALSE_MSG(libtf_invoke(arg_model->model_data,
                                                     tensor_arena,
                                                     tensor_arena_size,
                                                     py_tf_input_data_callback,
                                                     &py_tf_input_data_callback_data,
                                                     py_tf_classify_output_data_callback,
                                                     &py_tf_classify_output_data_callback_data),
                                        "Model classification failed!");

                    py_tf_classification_obj_t *o = m_new_obj(py_tf_classification_obj_t);
                    o->base.type = &py_tf_classification_type;
                    o->x = mp_obj_new_int(new_roi.x);
                    o->y = mp_obj_new_int(new_roi.y);
                    o->w = mp_obj_new_int(new_roi.w);
                    o->h = mp_obj_new_int(new_roi.h);
                    o->output = py_tf_classify_output_data_callback_data.out;
                    mp_obj_list_append(objects_list, o);
                }
            }
        }
    }

    fb_alloc_free_till_mark();

    return objects_list;
}
STATIC MP_DEFINE_CONST_FUN_OBJ_KW(py_tf_classify_obj, 2, py_tf_classify);
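// From MicroPython this surfaces as tf.classify(model_or_path, img, ...). A
// usage sketch, assuming the tf_classification accessors defined earlier in
// this file:
//
//   for obj in tf.classify("person_detection", img, min_scale=0.5, scale_mul=0.5):
//       print(obj.rect(), obj.output())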

mp_obj_t py_tf_len(mp_obj_t self_in) { return mp_obj_new_int(((py_tf_model_obj_t *) self_in)->model_data_len); }
mp_obj_t py_tf_height(mp_obj_t self_in) { return mp_obj_new_int(((py_tf_model_obj_t *) self_in)->height); }
mp_obj_t py_tf_width(mp_obj_t self_in) { return mp_obj_new_int(((py_tf_model_obj_t *) self_in)->width); }
mp_obj_t py_tf_channels(mp_obj_t self_in) { return mp_obj_new_int(((py_tf_model_obj_t *) self_in)->channels); }

STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_tf_len_obj, py_tf_len);
STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_tf_height_obj, py_tf_height);
STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_tf_width_obj, py_tf_width);
STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_tf_channels_obj, py_tf_channels);

STATIC const mp_rom_map_elem_t locals_dict_table[] = {
    { MP_ROM_QSTR(MP_QSTR_len),      MP_ROM_PTR(&py_tf_len_obj) },
    { MP_ROM_QSTR(MP_QSTR_height),   MP_ROM_PTR(&py_tf_height_obj) },
    { MP_ROM_QSTR(MP_QSTR_width),    MP_ROM_PTR(&py_tf_width_obj) },
    { MP_ROM_QSTR(MP_QSTR_channels), MP_ROM_PTR(&py_tf_channels_obj) },
    { MP_ROM_QSTR(MP_QSTR_classify), MP_ROM_PTR(&py_tf_classify_obj) }
};

STATIC MP_DEFINE_CONST_DICT(locals_dict, locals_dict_table);

STATIC const mp_obj_type_t py_tf_model_type = {
    { &mp_type_type },
    .name = MP_QSTR_tf_model,
    .print = py_tf_model_print,
    .locals_dict = (mp_obj_t) &locals_dict
};

STATIC mp_obj_t py_tf_load(mp_obj_t path_obj)
{
    const char *path = mp_obj_str_get_str(path_obj);
    py_tf_model_obj_t *tf_model = m_new_obj(py_tf_model_obj_t);
    tf_model->base.type = &py_tf_model_type;

    if (!strcmp(path, "person_detection")) {
        tf_model->model_data = (unsigned char *) g_person_detect_model_data;
        tf_model->model_data_len = g_person_detect_model_data_len;
    } else {
        FIL fp;
        file_read_open(&fp, path);
        tf_model->model_data_len = f_size(&fp);
        tf_model->model_data = xalloc(tf_model->model_data_len);
        read_data(&fp, tf_model->model_data, tf_model->model_data_len);
        file_close(&fp);
    }

    fb_alloc_mark();
    uint32_t tensor_arena_size;
    uint8_t *tensor_arena = fb_alloc_all(&tensor_arena_size, FB_ALLOC_PREFER_SIZE);

    PY_ASSERT_FALSE_MSG(libtf_get_input_data_hwc(tf_model->model_data,
                                                 tensor_arena,
                                                 tensor_arena_size,
                                                 &tf_model->height,
                                                 &tf_model->width,
                                                 &tf_model->channels),
                        "Unable to read model height, width, and channels!");

    fb_alloc_free_till_mark();

    return tf_model;
}
STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_tf_load_obj, py_tf_load);
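// From MicroPython: net = tf.load('person_detection') returns the model built
// into the firmware; any other string is treated as a filesystem path to a
// .tflite flatbuffer. net.height(), net.width(), and net.channels() then
// report the input tensor geometry read above.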

#endif // IMLIB_ENABLE_TF

STATIC const mp_rom_map_elem_t globals_dict_table[] = {
    { MP_ROM_QSTR(MP_QSTR___name__), MP_OBJ_NEW_QSTR(MP_QSTR_tf) },
#ifdef IMLIB_ENABLE_TF
    { MP_ROM_QSTR(MP_QSTR_load),     MP_ROM_PTR(&py_tf_load_obj) },
    { MP_ROM_QSTR(MP_QSTR_classify), MP_ROM_PTR(&py_tf_classify_obj) },
#else
    { MP_ROM_QSTR(MP_QSTR_load),     MP_ROM_PTR(&py_func_unavailable_obj) },
    { MP_ROM_QSTR(MP_QSTR_classify), MP_ROM_PTR(&py_func_unavailable_obj) }
#endif // IMLIB_ENABLE_TF
};
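// When the firmware is built without IMLIB_ENABLE_TF, tf.load() and
// tf.classify() still exist but resolve to py_func_unavailable_obj, which is
// expected to raise an error at call time rather than failing at import.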

STATIC MP_DEFINE_CONST_DICT(globals_dict, globals_dict_table);

const mp_obj_module_t tf_module = {
    .base = { &mp_type_module },
    .globals = (mp_obj_t) &globals_dict
};
@ -1174,3 +1174,30 @@ Q(draw_ir)
// duplicate Q(scale)
Q(pixformat)
// duplicate Q(copy_to_fb)

// TensorFlow Module
Q(tf)
// duplicate Q(load)
Q(classify)
// Model Object
Q(tf_model)
// duplicate Q(len)
// duplicate Q(height)
// duplicate Q(width)
Q(channels)

// Classify
// duplicate Q(classify)
// duplicate Q(roi)
// duplicate Q(min_scale)
// duplicate Q(scale_mul)
// duplicate Q(x_overlap)
// duplicate Q(y_overlap)

// Class Object
Q(tf_classification)
// duplicate Q(x)
// duplicate Q(y)
// duplicate Q(w)
// duplicate Q(h)
Q(output)