Merge pull request #1035 from openmv/remove_outdated_code

Remove outdated CMSIS-NN code.
Ibrahim Abd Elkader, 2020-12-16 22:33:04 +02:00 (committed by GitHub)
commit a8a8a268c9
6 changed files with 1 addition and 1198 deletions
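The removed py_nn.c below exposed all of this to MicroPython as the nn module. As context for what this commit drops, here is a minimal usage sketch reconstructed from those bindings; the model path and the sensor setup are illustrative assumptions, not part of this diff:

# Sketch of the MicroPython API removed by this commit, reconstructed from py_nn.c below.
# "/lenet.network" is a hypothetical model path.
import sensor, nn

sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)  # RGB565, grayscale and binary inputs were all handled
sensor.set_framesize(sensor.QVGA)

net = nn.load("/lenet.network")         # parses the binary network format (NETWORK_TYPE_CAFFE in nn.h)
img = sensor.snapshot()

# forward() scales and normalizes the ROI, runs the CMSIS-NN layers, and returns
# one score per output class, mapped from q7 to [0, 1] as (q + 128) / 255.
scores = net.forward(img, softmax=True)
print(scores)

The py_tf.o/py_tf.c entries kept in the Makefiles below suggest the TensorFlow-based tf module remains the supported inference path after this change.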


@@ -160,7 +160,6 @@ endif
OMV_CFLAGS += -I$(TOP_DIR)/$(OMV_DIR)/
OMV_CFLAGS += -I$(TOP_DIR)/$(OMV_DIR)/py/
OMV_CFLAGS += -I$(TOP_DIR)/$(OMV_DIR)/nn/
OMV_CFLAGS += -I$(TOP_DIR)/$(OMV_DIR)/img/
OMV_CFLAGS += -I$(OMV_BOARD_CONFIG_DIR)
OMV_CFLAGS += -I$(TOP_DIR)/$(LEPTON_DIR)/include/
@@ -303,10 +302,6 @@ FIRM_OBJ += $(addprefix $(BUILD)/$(OMV_DIR)/img/,\
selective_search.o \
)
FIRM_OBJ += $(addprefix $(BUILD)/$(OMV_DIR)/nn/,\
nn.o \
)
FIRM_OBJ += $(addprefix $(BUILD)/$(OMV_DIR)/py/, \
py_helper.o \
py_omv.o \
@@ -322,7 +317,6 @@ FIRM_OBJ += $(addprefix $(BUILD)/$(OMV_DIR)/py/, \
py_mjpeg.o \
py_winc.o \
py_cpufreq.o \
py_nn.o \
py_tf.o \
py_imu.o \
py_audio.o \

@@ -1 +1 @@
Subproject commit 43e91cacbb58da82f9faa3e5dcba6ad97856984f
Subproject commit ccd8cd3b4fc684500d56189c081056215aeddca7


@@ -85,11 +85,6 @@ SRCS += $(addprefix img/, \
selective_search.c \
)
SRCS += $(addprefix nn/, \
nn.c \
)
SRCS += $(addprefix py/, \
py_helper.c \
py_omv.c \
@@ -105,7 +100,6 @@ SRCS += $(addprefix py/, \
py_mjpeg.c \
py_winc.c \
py_cpufreq.c \
py_nn.c \
py_tf.c \
py_imu.c \
py_audio.c \


@@ -1,659 +0,0 @@
/*
* This file is part of the OpenMV project.
*
* Copyright (c) 2013-2019 Ibrahim Abdelkader <iabdalkader@openmv.io>
* Copyright (c) 2013-2019 Kwabena W. Agyeman <kwagyeman@openmv.io>
*
* This work is licensed under the MIT license, see the file LICENSE for details.
*
* CNN code.
*/
#include <stdio.h>
#include "nn.h"
#include "imlib.h"
#include "common.h"
#include "ff_wrapper.h"
#include "arm_math.h"
#include "arm_nnfunctions.h"
#include "omv_boardconfig.h"
#ifdef IMLIB_ENABLE_CNN
static const char *layer_to_str(layer_type_t type)
{
static const char *layers[] = {
"DATA", "CONV", "RELU", "POOL", "IP"
};
if (type >= sizeof(layers)/sizeof(layers[0])) {
return "Unknown layer";
} else {
return layers[type];
}
}
int nn_dump_network(nn_t *net)
{
layer_t *layer = net->layers;
printf("Net type: %4s Num layers: %lu Max layer: %lu Max col buf: %lu Max scratch buf: %lu\n",
net->type, net->n_layers, net->max_layer_size, net->max_colbuf_size, net->max_scrbuf_size);
while (layer != NULL) {
printf("Layer: %s Shape: [%lu, %lu, %lu, %lu] ",
layer_to_str(layer->type), layer->n, layer->c, layer->h, layer->w);
switch (layer->type) {
case LAYER_TYPE_DATA: {
data_layer_t *data_layer = (data_layer_t *) layer;
printf("r_mean: %lu g_mean: %lu b_mean: %lu scale: %lu\n",
data_layer->r_mean, data_layer->g_mean, data_layer->b_mean, data_layer->scale);
break;
}
case LAYER_TYPE_CONV: {
conv_layer_t *conv_layer = (conv_layer_t *) layer;
printf("l_shift: %lu r_shift:%lu k_size: %lu k_stride: %lu k_padding: %lu\n",
conv_layer->l_shift, conv_layer->r_shift,
conv_layer->krn_dim, conv_layer->krn_str, conv_layer->krn_pad);
break;
}
case LAYER_TYPE_RELU: {
// Nothing extra to print for a RELU layer.
printf("\n");
break;
}
case LAYER_TYPE_POOL: {
pool_layer_t *pool_layer = (pool_layer_t *) layer;
printf("k_size: %lu k_stride: %lu k_padding: %lu\n",
pool_layer->krn_dim, pool_layer->krn_str, pool_layer->krn_pad);
break;
}
case LAYER_TYPE_IP: {
ip_layer_t *ip_layer = (ip_layer_t*) layer;
printf("l_shift: %lu r_shift:%lu\n", ip_layer->l_shift, ip_layer->r_shift);
break;
}
}
layer = layer->next;
}
return 0;
}
int nn_load_network(nn_t *net, const char *path)
{
FIL fp;
int res = 0;
file_read_open(&fp, path);
file_buffer_on(&fp);
// Read network type
read_data(&fp, net->type, 4);
// Read number of layers
read_data(&fp, &net->n_layers, 4);
layer_t *prev_layer = NULL;
for (int i=0; i<net->n_layers; i++) {
layer_t *layer;
uint32_t layer_type;
// Read layer type
read_data(&fp, &layer_type, 4);
switch (layer_type) {
case LAYER_TYPE_DATA:
layer = xalloc0(sizeof(data_layer_t));
break;
case LAYER_TYPE_CONV:
layer = xalloc0(sizeof(conv_layer_t));
break;
case LAYER_TYPE_RELU:
layer = xalloc0(sizeof(relu_layer_t));
break;
case LAYER_TYPE_POOL:
layer = xalloc0(sizeof(pool_layer_t));
break;
case LAYER_TYPE_IP:
layer = xalloc0(sizeof(ip_layer_t));
break;
default:
res = -1;
goto error;
}
if (prev_layer == NULL) { // First layer
net->layers = layer;
} else {
layer->prev = prev_layer;
prev_layer->next = layer;
}
prev_layer = layer;
// Set type
layer->type = layer_type;
// Read layer shape (NCHW)
read_data(&fp, &layer->n, 4);
read_data(&fp, &layer->c, 4);
read_data(&fp, &layer->h, 4);
read_data(&fp, &layer->w, 4);
switch (layer_type) {
case LAYER_TYPE_DATA: {
data_layer_t *data_layer = (data_layer_t *) layer;
// Read data layer R, G, B mean and input scale
read_data(&fp, &data_layer->r_mean, 4);
read_data(&fp, &data_layer->g_mean, 4);
read_data(&fp, &data_layer->b_mean, 4);
read_data(&fp, &data_layer->scale, 4);
break;
}
case LAYER_TYPE_CONV: {
conv_layer_t *conv_layer = (conv_layer_t *) layer;
// Read layer l_shift, r_shift
read_data(&fp, &conv_layer->l_shift, 4);
read_data(&fp, &conv_layer->r_shift, 4);
// Read kernel dim, stride and padding
read_data(&fp, &conv_layer->krn_dim, 4);
read_data(&fp, &conv_layer->krn_pad, 4);
read_data(&fp, &conv_layer->krn_str, 4);
// Alloc and read weights array
read_data(&fp, &conv_layer->w_size, 4);
conv_layer->wt = xalloc(conv_layer->w_size);
read_data(&fp, conv_layer->wt, conv_layer->w_size);
// Alloc and read bias array
read_data(&fp, &conv_layer->b_size, 4);
conv_layer->bias = xalloc(conv_layer->b_size);
read_data(&fp, conv_layer->bias, conv_layer->b_size);
break;
}
case LAYER_TYPE_RELU: {
// Nothing to read for RELU layer
break;
}
case LAYER_TYPE_POOL: {
pool_layer_t *pool_layer = (pool_layer_t *) layer;
// Read pooling layer type
read_data(&fp, &pool_layer->ptype, 4);
// Read kernel dim, stride and padding
read_data(&fp, &pool_layer->krn_dim, 4);
read_data(&fp, &pool_layer->krn_pad, 4);
read_data(&fp, &pool_layer->krn_str, 4);
break;
}
case LAYER_TYPE_IP: {
ip_layer_t *ip_layer = (ip_layer_t *) layer;
// Read layer l_shift, r_shift
read_data(&fp, &ip_layer->l_shift, 4);
read_data(&fp, &ip_layer->r_shift, 4);
// Alloc and read weights array
read_data(&fp, &ip_layer->w_size, 4);
ip_layer->wt = xalloc(ip_layer->w_size);
read_data(&fp, ip_layer->wt, ip_layer->w_size);
// Alloc and read bias array
read_data(&fp, &ip_layer->b_size, 4);
ip_layer->bias = xalloc(ip_layer->b_size);
read_data(&fp, ip_layer->bias, ip_layer->b_size);
break;
}
}
}
layer_t *layer = net->layers;
while (layer != NULL) {
// The first layer is DATA and takes none of the branches below, so prev_layer *should* not be NULL where it is dereferenced.
prev_layer = layer->prev;
if (layer->type == LAYER_TYPE_IP) {
uint32_t fc_buffer_size = 2 * prev_layer->c * prev_layer->w * prev_layer->h;
net->max_colbuf_size = IM_MAX(net->max_colbuf_size, fc_buffer_size);
}
if (layer->type == LAYER_TYPE_CONV) {
conv_layer_t *conv_layer = (conv_layer_t *) layer;
uint32_t im2col_buffer_size = 2 * 2 * conv_layer->c * conv_layer->krn_dim * conv_layer->krn_dim;
net->max_colbuf_size = IM_MAX(net->max_colbuf_size, im2col_buffer_size);
}
if (layer->type == LAYER_TYPE_IP) {
uint32_t buffer_size = layer->c;
if (prev_layer->type == LAYER_TYPE_IP) {
buffer_size = buffer_size + prev_layer->c;
} else if (prev_layer->type == LAYER_TYPE_CONV || prev_layer->type == LAYER_TYPE_POOL) {
buffer_size = buffer_size + prev_layer->c * prev_layer->h * prev_layer->w;
}
net->max_scrbuf_size = IM_MAX(net->max_scrbuf_size, buffer_size);
}
if (layer->type == LAYER_TYPE_CONV || layer->type == LAYER_TYPE_POOL) {
uint32_t buffer_size = layer->c * layer->h * layer->w + prev_layer->c * prev_layer->h * prev_layer->w;
net->max_scrbuf_size = IM_MAX(net->max_scrbuf_size, buffer_size);
}
uint32_t layer_size = layer->c * layer->h * layer->w;
net->max_layer_size = IM_MAX(net->max_layer_size, layer_size);
if (layer->next == NULL) {
net->output_size = layer->c;
}
layer = layer->next;
}
// Alloc output buffer.
net->output_data = xalloc(net->output_size);
error:
file_buffer_off(&fp);
file_close(&fp);
return res;
}
#ifndef __SSAT
#define __SSAT(a, b) ({ __typeof__ (a) _a = (a); \
__typeof__ (b) _b = (b); \
_b = 1 << (_b - 1); \
_a = _a < (_b - 1) ? _a : (_b - 1); \
_a > (-_b) ? _a : (-_b); })
#endif
void nn_transform_input(data_layer_t *data_layer, image_t *img, q7_t *input_data, rectangle_t *roi)
{
int input_scale = data_layer->scale;
// Scale, convert and normalize input image.
int x_ratio = (int)((roi->w<<16)/data_layer->w)+1;
int y_ratio = (int)((roi->h<<16)/data_layer->h)+1;
if ((img->bpp == 2) && (data_layer->c == 3)) { // RGB565 to RGB888
for (int y=0, i=0; y<data_layer->h; y++) {
int sy = (y*y_ratio)>>16;
for (int x=0; x<data_layer->w; x++, i+=3) {
int sx = (x*x_ratio)>>16;
uint16_t p = IM_GET_RGB565_PIXEL(img, sx+roi->x, sy+roi->y);
input_data[i+0] = (q7_t)__SSAT((((((int) COLOR_RGB565_TO_R8(p))
- (int) data_layer->r_mean)<<7) + (1<<(input_scale-1))) >> input_scale, 8);
input_data[i+1] = (q7_t)__SSAT((((((int) COLOR_RGB565_TO_G8(p))
- (int) data_layer->g_mean)<<7) + (1<<(input_scale-1))) >> input_scale, 8);
input_data[i+2] = (q7_t)__SSAT((((((int) COLOR_RGB565_TO_B8(p))
- (int) data_layer->b_mean)<<7) + (1<<(input_scale-1))) >> input_scale, 8);
}
}
} else if ((img->bpp == 2) && (data_layer->c == 1)) { // RGB565 to GS
for (int y=0, i=0; y<data_layer->h; y++) {
int sy = (y*y_ratio)>>16;
for (int x=0; x<data_layer->w; x++, i++) {
int sx = (x*x_ratio)>>16;
uint16_t p = IM_GET_RGB565_PIXEL(img, sx+roi->x, sy+roi->y);
input_data[i] = (q7_t)__SSAT((((((int) COLOR_RGB565_TO_GRAYSCALE(p))
- (int) data_layer->r_mean)<<7) + (1<<(input_scale-1))) >> input_scale, 8);
}
}
} else if ((img->bpp == 1) && (data_layer->c == 3)) { // GS to RGB888
int mean = (int) ((0.30f * data_layer->r_mean) +
(0.59f * data_layer->g_mean) +
(0.11f * data_layer->b_mean));
for (int y=0, i=0; y<data_layer->h; y++) {
int sy = (y*y_ratio)>>16;
for (int x=0; x<data_layer->w; x++, i+=3) {
int sx = (x*x_ratio)>>16;
int p = (int) IMAGE_GET_GRAYSCALE_PIXEL(img, sx+roi->x, sy+roi->y);
input_data[i+0] = (q7_t)__SSAT((((p - (int) mean)<<7) + (1<<(input_scale-1))) >> input_scale, 8);
input_data[i+1] = (q7_t)__SSAT((((p - (int) mean)<<7) + (1<<(input_scale-1))) >> input_scale, 8);
input_data[i+2] = (q7_t)__SSAT((((p - (int) mean)<<7) + (1<<(input_scale-1))) >> input_scale, 8);
}
}
} else if ((img->bpp == 1) && (data_layer->c == 1)) { // GS to GS
for (int y=0, i=0; y<data_layer->h; y++) {
int sy = (y*y_ratio)>>16;
for (int x=0; x<data_layer->w; x++, i++) {
int sx = (x*x_ratio)>>16;
int p = (int) IMAGE_GET_GRAYSCALE_PIXEL(img, sx+roi->x, sy+roi->y);
input_data[i] = (q7_t)__SSAT((((p - (int) data_layer->r_mean)<<7) + (1<<(input_scale-1))) >> input_scale, 8);
}
}
} else if ((img->bpp == 0) && (data_layer->c == 3)) { // BINARY to RGB888
int mean = (int) ((0.30f * data_layer->r_mean) +
(0.59f * data_layer->g_mean) +
(0.11f * data_layer->b_mean));
for (int y=0, i=0; y<data_layer->h; y++) {
int sy = (y*y_ratio)>>16;
for (int x=0; x<data_layer->w; x++, i+=3) {
int sx = (x*x_ratio)>>16;
int p = (int) COLOR_BINARY_TO_GRAYSCALE(IMAGE_GET_BINARY_PIXEL(img, sx+roi->x, sy+roi->y));
input_data[i+0] = (q7_t)__SSAT((((p - (int) mean)<<7) + (1<<(input_scale-1))) >> input_scale, 8);
input_data[i+1] = (q7_t)__SSAT((((p - (int) mean)<<7) + (1<<(input_scale-1))) >> input_scale, 8);
input_data[i+2] = (q7_t)__SSAT((((p - (int) mean)<<7) + (1<<(input_scale-1))) >> input_scale, 8);
}
}
} else if ((img->bpp == 0) && (data_layer->c == 1)) { // BINARY to GS
for (int y=0, i=0; y<data_layer->h; y++) {
int sy = (y*y_ratio)>>16;
for (int x=0; x<data_layer->w; x++, i++) {
int sx = (x*x_ratio)>>16;
int p = (int) COLOR_BINARY_TO_GRAYSCALE(IMAGE_GET_BINARY_PIXEL(img, sx+roi->x, sy+roi->y));
input_data[i] = (q7_t)__SSAT((((p - (int) data_layer->r_mean)<<7) + (1<<(input_scale-1))) >> input_scale, 8);
}
}
}
}
int nn_run_network(nn_t *net, image_t *img, rectangle_t *roi, bool softmax)
{
uint32_t layer_idx = 0;
layer_t *layer = net->layers;
if (layer == NULL) {
printf("First layer is NULL!\n");
return -1;
}
if (layer->type != LAYER_TYPE_DATA) {
printf("First layer is not a DATA layer!\n");
return -1;
}
q7_t *input_data = NULL;
q7_t *input_buffer = NULL;
q7_t *output_buffer = NULL;
fb_alloc_mark();
q7_t *buffer1 = fb_alloc(net->max_scrbuf_size, FB_ALLOC_NO_HINT);
q7_t *buffer2 = buffer1 + net->max_layer_size;
q7_t *col_buffer = fb_alloc(net->max_colbuf_size, FB_ALLOC_NO_HINT);
while (layer != NULL) {
layer_t *prev_layer = layer->prev;
switch (layer->type) {
case LAYER_TYPE_DATA: {
data_layer_t *data_layer = (data_layer_t *) layer;
input_data = fb_alloc(data_layer->c * data_layer->h * data_layer->w, FB_ALLOC_NO_HINT);
nn_transform_input(data_layer, img, input_data, roi);
// Set image data as input buffer for the next layer.
input_buffer = input_data;
output_buffer = buffer1;
break;
}
case LAYER_TYPE_CONV: {
conv_func_t conv_func = NULL;
conv_func_nonsquare_t conv_func_nonsquare = NULL;
conv_layer_t *conv_layer = (conv_layer_t *) layer;
if (prev_layer->c % 4 != 0 ||
conv_layer->n % 2 != 0 || prev_layer->h % 2 != 0) {
if (prev_layer->c == 3) {
conv_func = arm_convolve_HWC_q7_RGB;
} else if (prev_layer->w == prev_layer->h) {
conv_func = arm_convolve_HWC_q7_basic;
} else {
conv_func_nonsquare = arm_convolve_HWC_q7_basic_nonsquare;
}
} else {
if (prev_layer->w == prev_layer->h) {
conv_func = arm_convolve_HWC_q7_fast;
} else {
conv_func_nonsquare = arm_convolve_HWC_q7_fast_nonsquare;
}
}
if (conv_func) {
conv_func(input_buffer, prev_layer->h, prev_layer->c, conv_layer->wt, conv_layer->c,
conv_layer->krn_dim, conv_layer->krn_pad, conv_layer->krn_str, conv_layer->bias,
conv_layer->l_shift, conv_layer->r_shift, output_buffer, conv_layer->h, (q15_t*)col_buffer, NULL);
} else {
conv_func_nonsquare(input_buffer, prev_layer->w, prev_layer->h, prev_layer->c, conv_layer->wt, conv_layer->c,
conv_layer->krn_dim, conv_layer->krn_dim, conv_layer->krn_pad, conv_layer->krn_pad, conv_layer->krn_str,
conv_layer->krn_str, conv_layer->bias, conv_layer->l_shift, conv_layer->r_shift, output_buffer,
conv_layer->w, conv_layer->h, (q15_t*)col_buffer, NULL);
}
break;
}
case LAYER_TYPE_RELU: {
relu_layer_t *relu_layer = (relu_layer_t *) layer;
arm_relu_q7(input_buffer, relu_layer->h * relu_layer->w * relu_layer->c);
break;
}
case LAYER_TYPE_POOL: {
pool_func_t pool_func = NULL;
pool_func_nonsquare_t pool_func_nonsquare = NULL;
pool_layer_t *pool_layer = (pool_layer_t *) layer;
if (pool_layer->ptype == POOL_TYPE_MAX) {
if (prev_layer->w == prev_layer->h) {
pool_func = arm_maxpool_q7_HWC;
} else {
pool_func_nonsquare = arm_maxpool_q7_HWC_nonsquare;
}
} else {
if (prev_layer->w == prev_layer->h) {
pool_func = arm_avepool_q7_HWC;
} else {
pool_func_nonsquare = arm_avepool_q7_HWC_nonsquare;
}
}
if (pool_func) {
pool_func(input_buffer, prev_layer->h, prev_layer->c, pool_layer->krn_dim,
pool_layer->krn_pad, pool_layer->krn_str, layer->w, col_buffer, output_buffer);
} else {
pool_func_nonsquare(input_buffer, prev_layer->w, prev_layer->h, prev_layer->c, pool_layer->krn_dim,
pool_layer->krn_pad, pool_layer->krn_str, layer->w, layer->h, col_buffer, output_buffer);
}
break;
}
case LAYER_TYPE_IP: {
ip_layer_t *ip_layer = (ip_layer_t*) layer;
arm_fully_connected_q7_opt(input_buffer, ip_layer->wt, prev_layer->c * prev_layer->h * prev_layer->w,
ip_layer->c, ip_layer->l_shift, ip_layer->r_shift, ip_layer->bias, output_buffer, (q15_t*)col_buffer);
break;
}
}
if (layer_idx++ > 0) {
if (input_buffer == input_data) {
// Image data has been processed
input_buffer = buffer2;
}
if (layer->type != LAYER_TYPE_RELU) {
// Switch buffers
q7_t *tmp_buffer = input_buffer;
input_buffer = output_buffer;
output_buffer = tmp_buffer;
}
// The next layer is the last one; direct its output into the network output buffer.
if (layer->next && layer->next->next == NULL) {
output_buffer = net->output_data;
}
}
layer = layer->next;
}
// Softmax output
if (softmax) {
arm_softmax_q7(net->output_data, net->output_size, net->output_data);
}
fb_alloc_free_till_mark();
return 0;
}
#define BUFFER_2STR(buffer)\
(buffer == buffer1) ? "buffer1":\
(buffer == buffer2) ? "buffer2":\
(buffer == input_data) ? "input_data":\
(buffer == net->output_data) ? "output_data": "???"
#define CONV_FUNC_2STR(conv_func)\
(conv_func == arm_convolve_HWC_q7_basic) ? "arm_convolve_HWC_q7_basic" :\
(conv_func == arm_convolve_HWC_q7_fast ) ? "arm_convolve_HWC_q7_fast":"arm_convolve_HWC_q7_RGB"
#define POOL_FUNC_2STR(pool_func)\
(pool_func == arm_maxpool_q7_HWC) ? "arm_maxpool_q7_HWC" : "arm_avepool_q7_HWC"
#define CONV_FUNC_NONSQ_2STR(conv_func)\
(conv_func == arm_convolve_HWC_q7_basic_nonsquare) ? "arm_convolve_HWC_q7_basic_nonsquare":\
"arm_convolve_HWC_q7_fast_nonsquare"
#define POOL_FUNC_NONSQ_2STR(pool_func)\
(pool_func == arm_maxpool_q7_HWC_nonsquare) ? "arm_maxpool_q7_HWC_nonsquare" : "arm_avepool_q7_HWC_nonsquare"
int nn_dry_run_network(nn_t *net, image_t *img, bool softmax)
{
uint32_t layer_idx = 0;
layer_t *layer = net->layers;
if (layer == NULL) {
printf("First layer is NULL!\n");
return -1;
}
if (layer->type != LAYER_TYPE_DATA) {
printf("First layer is not a DATA layer!\n");
return -1;
}
q7_t *input_data = NULL;
q7_t *input_buffer = NULL;
q7_t *output_buffer = NULL;
fb_alloc_mark();
q7_t *buffer1 = fb_alloc(net->max_scrbuf_size, FB_ALLOC_NO_HINT);
q7_t *buffer2 = buffer1 + net->max_layer_size;
while (layer != NULL) {
layer_t *prev_layer = layer->prev;
switch (layer->type) {
case LAYER_TYPE_DATA: {
data_layer_t *data_layer = (data_layer_t *) layer;
// Set image data as input buffer for the next layer.
input_buffer = input_data = fb_alloc(data_layer->c * data_layer->h * data_layer->w, FB_ALLOC_NO_HINT);
output_buffer = buffer1;
break;
}
case LAYER_TYPE_CONV: {
conv_func_t conv_func = NULL;
conv_func_nonsquare_t conv_func_nonsquare = NULL;
conv_layer_t *conv_layer = (conv_layer_t *) layer;
if (prev_layer->c % 4 != 0 ||
conv_layer->n % 2 != 0 || prev_layer->h % 2 != 0) {
if (prev_layer->c == 3) {
conv_func = arm_convolve_HWC_q7_RGB;
} else if (prev_layer->w == prev_layer->h) {
conv_func = arm_convolve_HWC_q7_basic;
} else {
conv_func_nonsquare = arm_convolve_HWC_q7_basic_nonsquare;
}
} else {
if (prev_layer->w == prev_layer->h) {
conv_func = arm_convolve_HWC_q7_fast;
} else {
conv_func_nonsquare = arm_convolve_HWC_q7_fast_nonsquare;
}
}
if (conv_func) {
printf("forward: %s(%s, %lu, %lu, %s, %lu, %lu, %lu, %lu, %s, %lu, %lu, %s, %lu, %s, %p);\n",
CONV_FUNC_2STR(conv_func), BUFFER_2STR(input_buffer),
prev_layer->h, prev_layer->c, "conv_wt", conv_layer->c,
conv_layer->krn_dim, conv_layer->krn_pad, conv_layer->krn_str,
"conv_bias", conv_layer->l_shift, conv_layer->r_shift,
BUFFER_2STR(output_buffer), conv_layer->h, "col_buffer", NULL);
} else {
printf("forward: %s(%s, %lu, %lu, %lu, %s, %lu, %lu, %lu, %lu, %lu, %lu, %lu, %s, %lu, %lu, \
%s, %lu, %lu, %s, %p);\n",
CONV_FUNC_NONSQ_2STR(conv_func_nonsquare), BUFFER_2STR(input_buffer),
prev_layer->w, prev_layer->h, prev_layer->c, "conv_wt", conv_layer->c,
conv_layer->krn_dim, conv_layer->krn_dim, conv_layer->krn_pad, conv_layer->krn_pad,
conv_layer->krn_str, conv_layer->krn_str, "conv_bias", conv_layer->l_shift, conv_layer->r_shift,
BUFFER_2STR(output_buffer), conv_layer->w, conv_layer->h, "col_buffer", NULL);
}
break;
}
case LAYER_TYPE_RELU: {
relu_layer_t *relu_layer = (relu_layer_t *) layer;
printf("forward: arm_relu_q7(%s, %lu*%lu*%lu);\n",
BUFFER_2STR(input_buffer), relu_layer->h, relu_layer->w, relu_layer->c);
break;
}
case LAYER_TYPE_POOL: {
pool_func_t pool_func = NULL;
pool_func_nonsquare_t pool_func_nonsquare = NULL;
pool_layer_t *pool_layer = (pool_layer_t *) layer;
if (pool_layer->ptype == POOL_TYPE_MAX) {
if (prev_layer->w == prev_layer->h) {
pool_func = arm_maxpool_q7_HWC;
} else {
pool_func_nonsquare = arm_maxpool_q7_HWC_nonsquare;
}
} else {
if (prev_layer->w == prev_layer->h) {
pool_func = arm_avepool_q7_HWC;
} else {
pool_func_nonsquare = arm_avepool_q7_HWC_nonsquare;
}
}
if (pool_func) {
printf("forward: %s(%s, %lu, %lu, %lu, %lu, %lu, %lu, %s, %s);\n",
POOL_FUNC_2STR(pool_func), BUFFER_2STR(input_buffer),
prev_layer->h, prev_layer->c, pool_layer->krn_dim,
pool_layer->krn_pad, pool_layer->krn_str, layer->w, "col_buffer", BUFFER_2STR(output_buffer));
} else {
printf("forward: %s(%s, %lu, %lu, %lu, %lu, %lu, %lu, %lu, %lu, %s, %s);\n",
POOL_FUNC_NONSQ_2STR(pool_func_nonsquare), BUFFER_2STR(input_buffer),
prev_layer->w, prev_layer->h, prev_layer->c, pool_layer->krn_dim,
pool_layer->krn_pad, pool_layer->krn_str, layer->w, layer->h, "col_buffer", BUFFER_2STR(output_buffer));
}
break;
}
case LAYER_TYPE_IP: {
ip_layer_t *ip_layer = (ip_layer_t*) layer;
printf("forward: arm_fully_connected_q7_opt(%s, %s, %lu, %lu, %lu, %lu, %s, %s, %s);\n",
BUFFER_2STR(input_buffer), "ip_wt", prev_layer->c * prev_layer->h * prev_layer->w,
ip_layer->c, ip_layer->l_shift, ip_layer->r_shift, "ip_bias", BUFFER_2STR(output_buffer), "col_buffer");
break;
}
}
if (layer_idx++ > 0) {
if (input_buffer == input_data) {
// Image data has been processed
input_buffer = buffer2;
}
if (layer->type != LAYER_TYPE_RELU) {
// Switch buffers
q7_t *tmp_buffer = input_buffer;
input_buffer = output_buffer;
output_buffer = tmp_buffer;
}
// The next layer is the last one; direct its output into the network output buffer.
if (layer->next && layer->next->next == NULL) {
output_buffer = net->output_data;
}
}
layer = layer->next;
}
fb_alloc_free_till_mark();
printf("\n");
return 0;
}
#endif //IMLIB_ENABLE_CNN
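The loader above defines the on-disk layout of these networks only implicitly, through its sequence of read_data() calls. For illustration (not a spec), a rough host-side sketch that peeks at the header and the first layer's common prologue; the field order follows nn_load_network(), while the function itself is made up:

# Rough sketch of the file layout consumed by nn_load_network() above.
# Integers are little-endian uint32 on this Cortex-M target. Only the 8-byte header
# and the first layer's common prologue are decoded; the type-specific fields
# (means/scale, shifts, kernel geometry, weight/bias blobs) follow each prologue
# and must be consumed before the next layer can be read.
import struct

LAYER_NAMES = {0: "DATA", 1: "CONV", 2: "RELU", 3: "POOL", 4: "IP"}

def peek_network(path):
    with open(path, "rb") as f:
        net_type = f.read(4)                                  # 4-byte network type tag
        (n_layers,) = struct.unpack("<I", f.read(4))          # number of layers
        ltype, n, c, h, w = struct.unpack("<5I", f.read(20))  # first layer: type + NCHW shape
    return net_type, n_layers, LAYER_NAMES.get(ltype, "?"), (n, c, h, w)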


@@ -1,118 +0,0 @@
/*
* This file is part of the OpenMV project.
*
* Copyright (c) 2013-2019 Ibrahim Abdelkader <iabdalkader@openmv.io>
* Copyright (c) 2013-2019 Kwabena W. Agyeman <kwagyeman@openmv.io>
*
* This work is licensed under the MIT license, see the file LICENSE for details.
*
* CNN code.
*/
#ifndef __NN_H__
#define __NN_H__
#include <stdint.h>
#include <imlib.h>
typedef enum {
LAYER_TYPE_DATA = 0,
LAYER_TYPE_CONV,
LAYER_TYPE_RELU,
LAYER_TYPE_POOL,
LAYER_TYPE_IP,
} layer_type_t;
typedef enum {
POOL_TYPE_MAX,
POOL_TYPE_AVE,
} pool_type_t;
typedef enum {
NETWORK_TYPE_CAFFE = 0,
} network_type_t;
#define NN_LAYER_BASE \
uint32_t type; \
uint32_t n, c, h, w;\
struct _layer *prev;\
struct _layer *next
typedef struct _layer {
NN_LAYER_BASE;
} layer_t;
typedef struct {
NN_LAYER_BASE;
uint32_t r_mean;
uint32_t g_mean;
uint32_t b_mean;
uint32_t scale;
} data_layer_t;
typedef struct {
NN_LAYER_BASE;
uint32_t l_shift;
uint32_t r_shift;
uint32_t krn_dim;
uint32_t krn_str;
uint32_t krn_pad;
uint32_t w_size;
uint32_t b_size;
int8_t *wt, *bias;
} conv_layer_t;
typedef struct {
NN_LAYER_BASE;
} relu_layer_t;
typedef struct {
NN_LAYER_BASE;
uint32_t ptype;
uint32_t krn_dim;
uint32_t krn_str;
uint32_t krn_pad;
} pool_layer_t;
typedef struct {
NN_LAYER_BASE;
uint32_t l_shift;
uint32_t r_shift;
uint32_t w_size;
uint32_t b_size;
int8_t *wt, *bias;
} ip_layer_t;
typedef struct {
uint8_t type[4];
uint32_t n_layers;
int8_t *output_data;
uint32_t output_size;
uint32_t max_layer_size;
uint32_t max_colbuf_size;
uint32_t max_scrbuf_size;
layer_t *layers;
} nn_t;
typedef arm_status (*conv_func_t) (const q7_t * Im_in, const uint16_t dim_im_in, const uint16_t ch_im_in,
const q7_t * wt, const uint16_t ch_im_out, const uint16_t dim_kernel, const uint16_t padding,
const uint16_t stride, const q7_t * bias, const uint16_t bias_shift, const uint16_t out_shift,
q7_t * Im_out, const uint16_t dim_im_out, q15_t * bufferA, q7_t * bufferB);
typedef arm_status (*conv_func_nonsquare_t) (const q7_t * Im_in, const uint16_t dim_im_in_x, const uint16_t dim_im_in_y,
const uint16_t ch_im_in, const q7_t * wt, const uint16_t ch_im_out, const uint16_t dim_kernel_x, const uint16_t dim_kernel_y,
const uint16_t padding_x, const uint16_t padding_y, const uint16_t stride_x, const uint16_t stride_y, const q7_t * bias,
const uint16_t bias_shift, const uint16_t out_shift, q7_t * Im_out, const uint16_t dim_im_out_x, const uint16_t dim_im_out_y,
q15_t * bufferA, q7_t * bufferB);
typedef void (*pool_func_t)(q7_t * Im_in, const uint16_t dim_im_in, const uint16_t ch_im_in,
const uint16_t dim_kernel, const uint16_t padding, const uint16_t stride,
const uint16_t dim_im_out, q7_t * bufferA, q7_t * Im_out);
typedef void (*pool_func_nonsquare_t)(q7_t * Im_in, const uint16_t dim_im_in_x, const uint16_t dim_im_in_y, const uint16_t ch_im_in,
const uint16_t dim_kernel, const uint16_t padding, const uint16_t stride,
const uint16_t dim_im_out_x, const uint16_t dim_im_out_y, q7_t * bufferA, q7_t * Im_out);
int nn_dump_network(nn_t *net);
int nn_load_network(nn_t *net, const char *path);
int nn_run_network(nn_t *net, image_t *img, rectangle_t *roi, bool softmax);
int nn_dry_run_network(nn_t *net, image_t *img, bool softmax);
#endif // __NN_H__


@@ -1,408 +0,0 @@
/*
* This file is part of the OpenMV project.
*
* Copyright (c) 2013-2019 Ibrahim Abdelkader <iabdalkader@openmv.io>
* Copyright (c) 2013-2019 Kwabena W. Agyeman <kwagyeman@openmv.io>
*
* This work is licensed under the MIT license, see the file LICENSE for details.
*
* NN Python module.
*/
#include <mp.h>
#include "nn.h"
#include "py_helper.h"
#include "py_image.h"
#include "omv_boardconfig.h"
#ifdef IMLIB_ENABLE_CNN
static const mp_obj_type_t py_net_type;
typedef struct _py_net_obj_t {
mp_obj_base_t base;
nn_t _cobj;
} py_net_obj_t;
void *py_net_cobj(mp_obj_t net_obj)
{
PY_ASSERT_TYPE(net_obj, &py_net_type);
return &((py_net_obj_t *)net_obj)->_cobj;
}
STATIC void py_net_print(const mp_print_t *print, mp_obj_t self_in, mp_print_kind_t kind)
{
py_net_obj_t *self = self_in;
nn_dump_network(py_net_cobj(self));
}
STATIC mp_obj_t py_net_forward(uint n_args, const mp_obj_t *args, mp_map_t *kw_args)
{
nn_t *net = py_net_cobj(args[0]);
image_t *img = py_helper_arg_to_image_mutable(args[1]);
rectangle_t roi;
py_helper_keyword_rectangle_roi(img, n_args, args, 2, kw_args, &roi);
bool softmax = py_helper_keyword_int(n_args, args, 3, kw_args, MP_OBJ_NEW_QSTR(MP_QSTR_softmax), false);
bool dry_run = py_helper_keyword_int(n_args, args, 4, kw_args, MP_OBJ_NEW_QSTR(MP_QSTR_dry_run), false);
mp_obj_t output_list = mp_obj_new_list(0, NULL);
if (dry_run == false) {
nn_run_network(net, img, &roi, softmax);
} else {
nn_dry_run_network(net, img, softmax);
}
for (int i=0; i<net->output_size; i++) {
mp_obj_list_append(output_list, mp_obj_new_float(((float) (net->output_data[i] + 128)) / 255));
}
return output_list;
}
STATIC MP_DEFINE_CONST_FUN_OBJ_KW(py_net_forward_obj, 2, py_net_forward);
// NN Class Object
#define py_nn_class_obj_size 6
typedef struct py_nn_class_obj {
mp_obj_base_t base;
mp_obj_t x, y, w, h, index, value;
} py_nn_class_obj_t;
static void py_nn_class_print(const mp_print_t *print, mp_obj_t self_in, mp_print_kind_t kind)
{
py_nn_class_obj_t *self = self_in;
mp_printf(print,
"{\"x\":%d, \"y\":%d, \"w\":%d, \"h\":%d, \"index\":%d, \"value\":%f}",
mp_obj_get_int(self->x),
mp_obj_get_int(self->y),
mp_obj_get_int(self->w),
mp_obj_get_int(self->h),
mp_obj_get_int(self->index),
(double) mp_obj_get_float(self->value));
}
static mp_obj_t py_nn_class_subscr(mp_obj_t self_in, mp_obj_t index, mp_obj_t value)
{
if (value == MP_OBJ_SENTINEL) { // load
py_nn_class_obj_t *self = self_in;
if (MP_OBJ_IS_TYPE(index, &mp_type_slice)) {
mp_bound_slice_t slice;
if (!mp_seq_get_fast_slice_indexes(py_nn_class_obj_size, index, &slice)) {
nlr_raise(mp_obj_new_exception_msg(&mp_type_OSError, "only slices with step=1 (aka None) are supported"));
}
mp_obj_tuple_t *result = mp_obj_new_tuple(slice.stop - slice.start, NULL);
mp_seq_copy(result->items, &(self->x) + slice.start, result->len, mp_obj_t);
return result;
}
switch (mp_get_index(self->base.type, py_nn_class_obj_size, index, false)) {
case 0: return self->x;
case 1: return self->y;
case 2: return self->w;
case 3: return self->h;
case 4: return self->index;
case 5: return self->value;
}
}
return MP_OBJ_NULL; // op not supported
}
mp_obj_t py_nn_class_rect(mp_obj_t self_in)
{
return mp_obj_new_tuple(4, (mp_obj_t []) {((py_nn_class_obj_t *) self_in)->x,
((py_nn_class_obj_t *) self_in)->y,
((py_nn_class_obj_t *) self_in)->w,
((py_nn_class_obj_t *) self_in)->h});
}
mp_obj_t py_nn_class_x(mp_obj_t self_in) { return ((py_nn_class_obj_t *) self_in)->x; }
mp_obj_t py_nn_class_y(mp_obj_t self_in) { return ((py_nn_class_obj_t *) self_in)->y; }
mp_obj_t py_nn_class_w(mp_obj_t self_in) { return ((py_nn_class_obj_t *) self_in)->w; }
mp_obj_t py_nn_class_h(mp_obj_t self_in) { return ((py_nn_class_obj_t *) self_in)->h; }
mp_obj_t py_nn_class_index(mp_obj_t self_in) { return ((py_nn_class_obj_t *) self_in)->index; }
mp_obj_t py_nn_class_value(mp_obj_t self_in) { return ((py_nn_class_obj_t *) self_in)->value; }
STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_nn_class_rect_obj, py_nn_class_rect);
STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_nn_class_x_obj, py_nn_class_x);
STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_nn_class_y_obj, py_nn_class_y);
STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_nn_class_w_obj, py_nn_class_w);
STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_nn_class_h_obj, py_nn_class_h);
STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_nn_class_index_obj, py_nn_class_index);
STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_nn_class_value_obj, py_nn_class_value);
STATIC const mp_rom_map_elem_t py_nn_class_locals_dict_table[] = {
{ MP_ROM_QSTR(MP_QSTR_rect), MP_ROM_PTR(&py_nn_class_rect_obj) },
{ MP_ROM_QSTR(MP_QSTR_x), MP_ROM_PTR(&py_nn_class_x_obj) },
{ MP_ROM_QSTR(MP_QSTR_y), MP_ROM_PTR(&py_nn_class_y_obj) },
{ MP_ROM_QSTR(MP_QSTR_w), MP_ROM_PTR(&py_nn_class_w_obj) },
{ MP_ROM_QSTR(MP_QSTR_h), MP_ROM_PTR(&py_nn_class_h_obj) },
{ MP_ROM_QSTR(MP_QSTR_index), MP_ROM_PTR(&py_nn_class_index_obj) },
{ MP_ROM_QSTR(MP_QSTR_value), MP_ROM_PTR(&py_nn_class_value_obj) }
};
STATIC MP_DEFINE_CONST_DICT(py_nn_class_locals_dict, py_nn_class_locals_dict_table);
static const mp_obj_type_t py_nn_class_type = {
{ &mp_type_type },
.name = MP_QSTR_nn_class,
.print = py_nn_class_print,
.subscr = py_nn_class_subscr,
.locals_dict = (mp_obj_t) &py_nn_class_locals_dict
};
typedef struct py_nn_class_obj_list_lnk_data {
rectangle_t rect;
int index;
float value;
int merge_number;
} py_nn_class_obj_list_lnk_data_t;
STATIC mp_obj_t py_net_search(uint n_args, const mp_obj_t *args, mp_map_t *kw_args)
{
nn_t *arg_net = py_net_cobj(args[0]);
image_t *arg_img = py_helper_arg_to_image_mutable(args[1]);
rectangle_t roi;
py_helper_keyword_rectangle_roi(arg_img, n_args, args, 2, kw_args, &roi);
float arg_threshold = py_helper_keyword_float(n_args, args, 3, kw_args, MP_OBJ_NEW_QSTR(MP_QSTR_threshold), 0.6);
PY_ASSERT_TRUE_MSG((0 <= arg_threshold) && (arg_threshold <= 1), "0 <= threshold <= 1");
float arg_min_scale = py_helper_keyword_float(n_args, args, 4, kw_args, MP_OBJ_NEW_QSTR(MP_QSTR_min_scale), 1.0);
PY_ASSERT_TRUE_MSG((0 < arg_min_scale) && (arg_min_scale <= 1), "0 < min_scale <= 1");
float arg_scale_mul = py_helper_keyword_float(n_args, args, 5, kw_args, MP_OBJ_NEW_QSTR(MP_QSTR_scale_mul), 0.5);
PY_ASSERT_TRUE_MSG((0 <= arg_scale_mul) && (arg_scale_mul < 1), "0 <= scale_mul < 1");
float arg_x_overlap = py_helper_keyword_float(n_args, args, 6, kw_args, MP_OBJ_NEW_QSTR(MP_QSTR_x_overlap), 0);
PY_ASSERT_TRUE_MSG(((0 <= arg_x_overlap) && (arg_x_overlap < 1)) || (arg_x_overlap == -1), "0 <= x_overlap < 1");
float arg_y_overlap = py_helper_keyword_float(n_args, args, 7, kw_args, MP_OBJ_NEW_QSTR(MP_QSTR_y_overlap), 0);
PY_ASSERT_TRUE_MSG(((0 <= arg_y_overlap) && (arg_y_overlap < 1)) || (arg_y_overlap == -1), "0 <= y_overlap < 1");
float arg_contrast_threshold = py_helper_keyword_float(n_args, args, 8, kw_args, MP_OBJ_NEW_QSTR(MP_QSTR_contrast_threshold), 1);
PY_ASSERT_TRUE_MSG(0 <= arg_contrast_threshold, "0 <= contrast_threshold");
bool softmax = py_helper_keyword_int(n_args, args, 9, kw_args, MP_OBJ_NEW_QSTR(MP_QSTR_softmax), false);
list_t out;
list_init(&out, sizeof(py_nn_class_obj_list_lnk_data_t));
for (float scale = 1; scale >= arg_min_scale; scale *= arg_scale_mul) {
// Either provide a subtle offset to center multiple detection windows or center the only detection window.
for (int y = roi.y + ((arg_y_overlap != -1) ? (fmodf(roi.h, (roi.h * scale)) / 2) : ((roi.h - (roi.h * scale)) / 2));
// Finish when the detection window is outside of the ROI.
(y + (roi.h * scale)) <= (roi.y + roi.h);
// Step by an overlap amount accounting for scale or just terminate after one iteration.
y += ((arg_y_overlap != -1) ? (roi.h * scale * (1 - arg_y_overlap)) : roi.h)) {
// Either provide a subtle offset to center multiple detection windows or center the only detection window.
for (int x = roi.x + ((arg_x_overlap != -1) ? (fmodf(roi.w, (roi.w * scale)) / 2) : ((roi.w - (roi.w * scale)) / 2));
// Finish when the detection window is outside of the ROI.
(x + (roi.w * scale)) <= (roi.x + roi.w);
// Step by an overlap amount accounting for scale or just terminate after one iteration.
x += ((arg_x_overlap != -1) ? (roi.w * scale * (1 - arg_x_overlap)) : roi.w)) {
rectangle_t new_roi;
rectangle_init(&new_roi, x, y, roi.w * scale, roi.h * scale);
if (rectangle_overlap(&roi, &new_roi)) {
int sum = 0;
int sum_2 = 0;
for (int b = new_roi.y, bb = new_roi.y + new_roi.h, bbb = fast_sqrtf(new_roi.h); b < bb; b += bbb) {
for (int a = new_roi.x, aa = new_roi.x + new_roi.w, aaa = fast_sqrtf(new_roi.w); a < aa; a += aaa) {
switch(arg_img->bpp) {
case IMAGE_BPP_BINARY: {
int pixel = COLOR_BINARY_TO_GRAYSCALE(IMAGE_GET_BINARY_PIXEL(arg_img, a, b));
sum += pixel;
sum_2 += pixel * pixel;
break;
}
case IMAGE_BPP_GRAYSCALE: {
int pixel = IMAGE_GET_GRAYSCALE_PIXEL(arg_img, a, b);
sum += pixel;
sum_2 += pixel * pixel;
break;
}
case IMAGE_BPP_RGB565: {
int pixel = COLOR_RGB565_TO_GRAYSCALE(IMAGE_GET_RGB565_PIXEL(arg_img, a, b));
sum += pixel;
sum_2 += pixel * pixel;
break;
}
}
}
}
int area = new_roi.w * new_roi.h;
int mean = sum / area;
int variance = (sum_2 / area) - (mean * mean);
if (fast_sqrtf(variance) >= arg_contrast_threshold) { // Skip flat regions...
nn_run_network(arg_net, arg_img, &new_roi, softmax);
int max_index = -1;
float max_value = -1;
for (int i=0; i<arg_net->output_size; i++) {
float value = ((float) (arg_net->output_data[i] + 128)) / 255;
if ((value >= arg_threshold) && (value > max_value)) {
max_index = i;
max_value = value;
}
}
if (max_index != -1) {
py_nn_class_obj_list_lnk_data_t lnk_data;
lnk_data.rect.x = new_roi.x;
lnk_data.rect.y = new_roi.y;
lnk_data.rect.w = new_roi.w;
lnk_data.rect.h = new_roi.h;
lnk_data.index = max_index;
lnk_data.value = max_value;
lnk_data.merge_number = 1;
list_push_back(&out, &lnk_data);
}
}
}
}
}
}
// Merge overlapping detections of the same class and average them.
for (;;) {
bool merge_occured = false;
list_t out_temp;
list_init(&out_temp, sizeof(py_nn_class_obj_list_lnk_data_t));
while (list_size(&out)) {
py_nn_class_obj_list_lnk_data_t lnk_data;
list_pop_front(&out, &lnk_data);
for (size_t k = 0, l = list_size(&out); k < l; k++) {
py_nn_class_obj_list_lnk_data_t tmp_data;
list_pop_front(&out, &tmp_data);
if ((lnk_data.index == tmp_data.index)
&& rectangle_overlap(&(lnk_data.rect), &(tmp_data.rect))) {
lnk_data.rect.x = ((lnk_data.rect.x * lnk_data.merge_number) + tmp_data.rect.x) / (lnk_data.merge_number + 1);
lnk_data.rect.y = ((lnk_data.rect.y * lnk_data.merge_number) + tmp_data.rect.y) / (lnk_data.merge_number + 1);
lnk_data.rect.w = ((lnk_data.rect.w * lnk_data.merge_number) + tmp_data.rect.w) / (lnk_data.merge_number + 1);
lnk_data.rect.h = ((lnk_data.rect.h * lnk_data.merge_number) + tmp_data.rect.h) / (lnk_data.merge_number + 1);
lnk_data.value = ((lnk_data.value * lnk_data.merge_number) + tmp_data.value) / (lnk_data.merge_number + 1);
lnk_data.merge_number += 1;
merge_occured = true;
} else {
list_push_back(&out, &tmp_data);
}
}
list_push_back(&out_temp, &lnk_data);
}
list_copy(&out, &out_temp);
if (!merge_occured) {
break;
}
}
// Determine the winner among overlapping detections of different classes.
for (;;) {
bool merge_occured = false;
list_t out_temp;
list_init(&out_temp, sizeof(py_nn_class_obj_list_lnk_data_t));
while (list_size(&out)) {
py_nn_class_obj_list_lnk_data_t lnk_data;
list_pop_front(&out, &lnk_data);
for (size_t k = 0, l = list_size(&out); k < l; k++) {
py_nn_class_obj_list_lnk_data_t tmp_data;
list_pop_front(&out, &tmp_data);
if ((lnk_data.index != tmp_data.index)
&& rectangle_overlap(&(lnk_data.rect), &(tmp_data.rect))) {
if (tmp_data.value > lnk_data.value) {
memcpy(&lnk_data, &tmp_data, sizeof(py_nn_class_obj_list_lnk_data_t));
}
merge_occured = true;
} else {
list_push_back(&out, &tmp_data);
}
}
list_push_back(&out_temp, &lnk_data);
}
list_copy(&out, &out_temp);
if (!merge_occured) {
break;
}
}
mp_obj_list_t *objects_list = mp_obj_new_list(list_size(&out), NULL);
for (size_t i = 0; list_size(&out); i++) {
py_nn_class_obj_list_lnk_data_t lnk_data;
list_pop_front(&out, &lnk_data);
py_nn_class_obj_t *o = m_new_obj(py_nn_class_obj_t);
o->base.type = &py_nn_class_type;
o->x = mp_obj_new_int(lnk_data.rect.x);
o->y = mp_obj_new_int(lnk_data.rect.y);
o->w = mp_obj_new_int(lnk_data.rect.w);
o->h = mp_obj_new_int(lnk_data.rect.h);
o->index = mp_obj_new_int(lnk_data.index);
o->value = mp_obj_new_float(lnk_data.value);
objects_list->items[i] = o;
}
return objects_list;
}
STATIC MP_DEFINE_CONST_FUN_OBJ_KW(py_net_search_obj, 2, py_net_search);
STATIC const mp_rom_map_elem_t locals_dict_table[] = {
{ MP_ROM_QSTR(MP_QSTR_forward), MP_ROM_PTR(&py_net_forward_obj) },
{ MP_ROM_QSTR(MP_QSTR_search), MP_ROM_PTR(&py_net_search_obj) }
};
STATIC MP_DEFINE_CONST_DICT(locals_dict, locals_dict_table);
static const mp_obj_type_t py_net_type = {
{ &mp_type_type },
.name = MP_QSTR_Net,
.print = py_net_print,
.locals_dict = (mp_obj_t) &locals_dict
};
static mp_obj_t py_nn_load(mp_obj_t path_obj)
{
const char *path = mp_obj_str_get_str(path_obj);
py_net_obj_t *net = m_new_obj(py_net_obj_t);
net->base.type = &py_net_type;
nn_load_network(py_net_cobj(net), path);
return net;
}
STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_nn_load_obj, py_nn_load);
#endif // IMLIB_ENABLE_CNN
STATIC const mp_rom_map_elem_t globals_dict_table[] = {
{ MP_ROM_QSTR(MP_QSTR___name__), MP_OBJ_NEW_QSTR(MP_QSTR_nn) },
#ifdef IMLIB_ENABLE_CNN
{ MP_ROM_QSTR(MP_QSTR_load), MP_ROM_PTR(&py_nn_load_obj) },
#else
{ MP_ROM_QSTR(MP_QSTR_load), MP_ROM_PTR(&py_func_unavailable_obj) }
#endif // IMLIB_ENABLE_CNN
};
STATIC MP_DEFINE_CONST_DICT(globals_dict, globals_dict_table);
const mp_obj_module_t nn_module = {
.base = { &mp_type_module },
.globals = (mp_obj_t) &globals_dict
};
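For completeness, a sketch of how the removed search() binding was typically driven, based only on the keyword arguments and the nn_class accessors defined above; the model path, the parameter values and the drawing call are illustrative assumptions:

# Sliding-window classification with the removed nn module, reconstructed from
# py_net_search() above. "/cifar10.network" and the parameters are illustrative.
import sensor, nn

net = nn.load("/cifar10.network")
img = sensor.snapshot()  # sensor setup as in the earlier sketch

# search() slides scaled windows over the ROI, skips low-contrast windows, then
# merges overlapping same-class detections and keeps the strongest of overlapping
# different-class detections, as implemented in the two merge loops above.
for obj in net.search(img, threshold=0.6, min_scale=0.5, scale_mul=0.7,
                      x_overlap=0.5, y_overlap=0.5, contrast_threshold=10):
    img.draw_rectangle(obj.rect())
    print(obj.index(), obj.value(), obj.rect())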