Add initial NN loading code.

iabdalkader 2018-05-19 23:50:07 +02:00
parent 97518da1df
commit 23993210dd
7 changed files with 468 additions and 1 deletion


@@ -97,6 +97,7 @@ CFLAGS += -I$(MP_BOARD_CONFIG_DIR)
CFLAGS += -I$(TOP_DIR)/$(OMV_DIR)/
CFLAGS += -I$(TOP_DIR)/$(OMV_DIR)/py/
CFLAGS += -I$(TOP_DIR)/$(OMV_DIR)/nn/
CFLAGS += -I$(TOP_DIR)/$(OMV_DIR)/img/
CFLAGS += -I$(OMV_BOARD_CONFIG_DIR)
CFLAGS += -I$(TOP_DIR)/$(LEPTON_DIR)/include/
@@ -212,6 +213,10 @@ FIRM_OBJ += $(addprefix $(BUILD)/$(OMV_DIR)/img/,\
cifar10_model.o \
)
FIRM_OBJ += $(addprefix $(BUILD)/$(OMV_DIR)/nn/,\
nn.o \
)
FIRM_OBJ += $(addprefix $(BUILD)/$(OMV_DIR)/py/, \
py_helper.o \
py_omv.o \
@@ -224,6 +229,7 @@ FIRM_OBJ += $(addprefix $(BUILD)/$(OMV_DIR)/py/, \
py_mjpeg.o \
py_winc.o \
py_cpufreq.o \
py_nn.o \
)

@@ -1 +1 @@
Subproject commit 106ac41ea678f4fb4eb18dd920d9774b342d1444
Subproject commit 8a34ce6e073985628bc7ed0148fb8dc1c5282c13


@@ -79,6 +79,11 @@ SRCS += $(addprefix img/, \
cifar10_model.c \
)
SRCS += $(addprefix nn/, \
nn.c \
)
SRCS += $(addprefix py/, \
py_helper.c \
py_omv.c \
@@ -91,6 +96,7 @@ SRCS += $(addprefix py/, \
py_mjpeg.c \
py_winc.c \
py_cpufreq.c \
py_nn.c \
)
OBJS = $(addprefix $(BUILD)/, $(SRCS:.c=.o))

src/omv/nn/nn.c (new file, 276 additions)

@@ -0,0 +1,276 @@
/* This file is part of the OpenMV project.
* Copyright (c) 2013-2017 Ibrahim Abdelkader <iabdalkader@openmv.io> & Kwabena W. Agyeman <kwagyeman@openmv.io>
* This work is licensed under the MIT license, see the file LICENSE for details.
*
* CNN code.
*
*/
#include "nn.h"
#include "imlib.h"
#include "ff_wrapper.h"
#include "omv_boardconfig.h"
#ifdef IMLIB_ENABLE_CNN
static const char *layer_to_str(layer_type_t type)
{
static const char *layers[] = {
"DATA", "CONV", "RELU", "POOL", "IP"
};
if (type >= sizeof(layers)/sizeof(layers[0])) {
return "Unknown layer";
} else {
return layers[type];
}
}
int nn_dump_network(nn_t *net)
{
layer_t *layer = net->layers;
printf("Net type: %4s Num layers: %lu Max layer: %lu Max col buf: %lu Max scratch buf: %lu\n",
net->type, net->n_layers, net->max_layer_size, net->max_colbuf_size, net->max_scrbuf_size);
while (layer != NULL) {
printf("Layer: %s Shape: [%lu, %lu, %lu] ",
layer_to_str(layer->type), layer->c, layer->h, layer->w);
switch (layer->type) {
case LAYER_TYPE_DATA: {
data_layer_t *data_layer = (data_layer_t *) layer;
printf("r_mean: %lu g_mean: %lu b_mean: %lu\n",
data_layer->r_mean, data_layer->g_mean, data_layer->b_mean);
break;
}
case LAYER_TYPE_CONV: {
conv_layer_t *conv_layer = (conv_layer_t *) layer;
printf("l_shift: %lu r_shift:%lu k_size: %lu k_stride: %lu k_padding: %lu\n",
conv_layer->l_shift, conv_layer->r_shift,
conv_layer->krn_dim, conv_layer->krn_str, conv_layer->krn_pad);
break;
}
case LAYER_TYPE_RELU: {
// Nothing to read for RELU layer
printf("\n");
//relu_layer_t *relu_layer = layer;
break;
}
case LAYER_TYPE_POOL: {
pool_layer_t *pool_layer = (pool_layer_t *) layer;
printf("k_size: %lu k_stride: %lu k_padding: %lu\n",
pool_layer->krn_dim, pool_layer->krn_str, pool_layer->krn_pad);
break;
}
case LAYER_TYPE_IP: {
ip_layer_t *ip_layer = (ip_layer_t*) layer;
printf("l_shift: %lu r_shift:%lu\n", ip_layer->l_shift, ip_layer->r_shift);
break;
}
}
layer = layer->next;
}
return 0;
}
int nn_load_network(nn_t *net, const char *path)
{
FIL fp;
int res = 0;
file_read_open(&fp, path);
file_buffer_on(&fp);
// Read network type
read_data(&fp, net->type, 4);
// Read number of layers
read_data(&fp, &net->n_layers, 4);
printf("Net type: %4s Num layers: %lu\n", net->type, net->n_layers);
layer_t *prev_layer = NULL;
for (int i=0; i<net->n_layers - 1; i++) {
layer_t *layer;
layer_type_t layer_type;
// Read layer type
read_data(&fp, &layer_type, 4);
switch (layer_type) {
case LAYER_TYPE_DATA:
layer = xalloc0(sizeof(data_layer_t));
break;
case LAYER_TYPE_CONV:
layer = xalloc0(sizeof(conv_layer_t));
break;
case LAYER_TYPE_RELU:
layer = xalloc0(sizeof(relu_layer_t));
break;
case LAYER_TYPE_POOL:
layer = xalloc0(sizeof(pool_layer_t));
break;
case LAYER_TYPE_IP:
layer = xalloc0(sizeof(ip_layer_t));
break;
default:
res = -1;
goto error;
}
if (prev_layer == NULL) { // First layer
net->layers = layer;
} else {
layer->prev = prev_layer;
prev_layer->next = layer;
}
prev_layer = layer;
// Set type
layer->type = layer_type;
// Read layer shape (c, h, w)
read_data(&fp, &layer->c, 4);
read_data(&fp, &layer->w, 4);
read_data(&fp, &layer->h, 4);
printf("Reading layer: %s Shape: [%lu, %lu, %lu] ",
layer_to_str(layer->type), layer->c, layer->h, layer->w);
switch (layer_type) {
case LAYER_TYPE_DATA: {
data_layer_t *data_layer = (data_layer_t *) layer;
// Read data layer R, G, B mean
read_data(&fp, &data_layer->r_mean, 4);
read_data(&fp, &data_layer->g_mean, 4);
read_data(&fp, &data_layer->b_mean, 4);
printf("r_mean: %lu g_mean: %lu b_mean: %lu\n",
data_layer->r_mean, data_layer->g_mean, data_layer->b_mean);
break;
}
case LAYER_TYPE_CONV: {
conv_layer_t *conv_layer = (conv_layer_t *) layer;
// Read layer l_shift, r_shift
read_data(&fp, &conv_layer->l_shift, 4);
read_data(&fp, &conv_layer->r_shift, 4);
// Read kernel dim, stride and padding
read_data(&fp, &conv_layer->krn_dim, 4);
read_data(&fp, &conv_layer->krn_str, 4);
read_data(&fp, &conv_layer->krn_pad, 4);
printf("l_shift: %lu r_shift:%lu k_size: %lu k_stride: %lu k_padding: %lu ",
conv_layer->l_shift, conv_layer->r_shift,
conv_layer->krn_dim, conv_layer->krn_str, conv_layer->krn_pad);
// Alloc and read weights array
read_data(&fp, &conv_layer->w_size, 4);
printf("weights: %lu ", conv_layer->w_size);
conv_layer->w = xalloc(conv_layer->w_size);
read_data(&fp, conv_layer->w, conv_layer->w_size);
// Alloc and read bias array
read_data(&fp, &conv_layer->b_size, 4);
printf("bias: %lu\n", conv_layer->b_size);
conv_layer->b = xalloc(conv_layer->b_size);
read_data(&fp, conv_layer->b, conv_layer->b_size);
break;
}
case LAYER_TYPE_RELU: {
// Nothing to read for RELU layer
printf("\n");
break;
}
case LAYER_TYPE_POOL: {
pool_layer_t *pool_layer = (pool_layer_t *) layer;
// Read pooling layer type
read_data(&fp, &pool_layer->type, 4);
// Read kernel dim, stride and padding
read_data(&fp, &pool_layer->krn_dim, 4);
read_data(&fp, &pool_layer->krn_str, 4);
read_data(&fp, &pool_layer->krn_pad, 4);
printf("k_size: %lu k_stride: %lu k_padding: %lu\n",
pool_layer->krn_dim, pool_layer->krn_str, pool_layer->krn_pad);
break;
}
case LAYER_TYPE_IP: {
ip_layer_t *ip_layer = (ip_layer_t *) layer;
// Read layer l_shift, r_shift
read_data(&fp, &ip_layer->l_shift, 4);
read_data(&fp, &ip_layer->r_shift, 4);
printf("l_shift: %lu r_shift:%lu ",
ip_layer->l_shift, ip_layer->r_shift);
// Alloc and read weights array
read_data(&fp, &ip_layer->w_size, 4);
printf("weights: %lu ", ip_layer->w_size);
ip_layer->w = xalloc(ip_layer->w_size);
read_data(&fp, ip_layer->w, ip_layer->w_size);
// Alloc and read bias array
read_data(&fp, &ip_layer->b_size, 4);
printf("bias %lu\n", ip_layer->b_size);
ip_layer->b = xalloc(ip_layer->b_size);
read_data(&fp, ip_layer->b, ip_layer->b_size);
break;
}
}
}
uint32_t max_layer_size = 0;
uint32_t max_colbuf_size = 0;
uint32_t max_scrbuf_size = 0;
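// Descriptive note (added comment): the loop below walks the completed layer list
// and sizes the runtime buffers. max_colbuf_size is the largest im2col/FC column
// buffer; for example, a CONV layer with c=32 and a 5x5 kernel needs
// 2*2*32*5*5 = 3200 bytes. max_scrbuf_size is the largest input+output scratch
// area of any CONV/POOL/IP layer, and max_layer_size is the largest layer volume (c*h*w).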
layer_t *layer = net->layers;
while (layer != NULL) {
// The first layer is DATA and is skipped by the checks below, so prev_layer *should* not be NULL where it is dereferenced.
prev_layer = layer->prev;
if (layer->type == LAYER_TYPE_IP) {
uint32_t fc_buffer_size = 2 * layer->c;
max_colbuf_size = IM_MAX(max_colbuf_size, fc_buffer_size);
}
if (layer->type == LAYER_TYPE_CONV) {
conv_layer_t *conv_layer = (conv_layer_t *) layer;
uint32_t im2col_buffer_size = 2 * 2 * layer->c * conv_layer->krn_dim * conv_layer->krn_dim;
max_colbuf_size = IM_MAX(max_colbuf_size, im2col_buffer_size);
}
if (layer->type == LAYER_TYPE_IP) {
uint32_t buffer_size = layer->c;
if (prev_layer->type == LAYER_TYPE_IP) {
buffer_size = buffer_size + prev_layer->c;
} else if (prev_layer->type == LAYER_TYPE_CONV || prev_layer->type == LAYER_TYPE_POOL) {
buffer_size = buffer_size + prev_layer->c * prev_layer->h * prev_layer->w;
}
max_scrbuf_size = IM_MAX(max_scrbuf_size, buffer_size);
}
if (layer->type == LAYER_TYPE_CONV || layer->type == LAYER_TYPE_POOL) {
uint32_t buffer_size = layer->c * layer->h * layer->w + prev_layer->c * prev_layer->h * prev_layer->w;
max_scrbuf_size = IM_MAX(max_scrbuf_size, buffer_size);
}
uint32_t layer_size = layer->c * layer->h * layer->w;
max_layer_size = IM_MAX(max_layer_size, layer_size);
layer = layer->next;
}
net->max_layer_size = max_layer_size;
net->max_colbuf_size = max_colbuf_size;
net->max_scrbuf_size = max_scrbuf_size;
printf("max layer size: %lu max col buf size: %lu max scratch buf size: %lu\n",
max_layer_size, max_colbuf_size, max_scrbuf_size);
error:
file_buffer_off(&fp);
file_close(&fp);
return res;
}
int nn_run_network(nn_t *net, image_t *image)
{
return 0;
}
#endif //IMLIB_ENABLE_CNN

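A minimal usage sketch (not part of this commit; the wrapper function and the file path are hypothetical). Note that nn_load_network() does not yet check the result of file_read_open(), so the caller should only pass paths known to exist.

/* Hypothetical caller: load a network file and dump its layers. */
#include "nn.h"

static nn_t net;

void nn_load_demo(void)
{
    // "/cifar10.network" is an example path only.
    if (nn_load_network(&net, "/cifar10.network") == 0) {
        nn_dump_network(&net);  // prints type, shape and parameters of each layer
    }
}
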
src/omv/nn/nn.h (new file, 88 additions)

@@ -0,0 +1,88 @@
/* This file is part of the OpenMV project.
* Copyright (c) 2013-2017 Ibrahim Abdelkader <iabdalkader@openmv.io> & Kwabena W. Agyeman <kwagyeman@openmv.io>
* This work is licensed under the MIT license, see the file LICENSE for details.
*
* CNN code.
*
*/
#ifndef __NN_H__
#define __NN_H__
#include <stdint.h>
#include <imlib.h>
typedef enum {
LAYER_TYPE_DATA = 0,
LAYER_TYPE_CONV,
LAYER_TYPE_RELU,
LAYER_TYPE_POOL,
LAYER_TYPE_IP,
} layer_type_t;
typedef enum {
POOL_TYPE_MAX,
POOL_TYPE_AVE,
} pool_type_t;
typedef enum {
NETWORK_TYPE_CAFFE = 0,
} network_type_t;
typedef struct _layer {
layer_type_t type;
uint32_t c, h, w;
struct _layer *prev;
struct _layer *next;
} layer_t;
typedef struct {
layer_t base;
uint32_t r_mean;
uint32_t g_mean;
uint32_t b_mean;
} data_layer_t;
typedef struct {
layer_t base;
uint32_t l_shift;
uint32_t r_shift;
uint32_t krn_dim;
uint32_t krn_str;
uint32_t krn_pad;
uint32_t w_size;
uint32_t b_size;
int8_t *w, *b;
} conv_layer_t;
typedef struct {
layer_t base;
} relu_layer_t;
typedef struct {
layer_t base;
pool_type_t type;
uint32_t krn_dim;
uint32_t krn_str;
uint32_t krn_pad;
} pool_layer_t;
typedef struct {
layer_t base;
uint32_t l_shift;
uint32_t r_shift;
uint32_t w_size;
uint32_t b_size;
int8_t *w, *b;
} ip_layer_t;
typedef struct {
uint8_t type[4];
uint32_t n_layers;
uint32_t max_layer_size;
uint32_t max_colbuf_size;
uint32_t max_scrbuf_size;
layer_t *layers;
} nn_t;
int nn_dump_network(nn_t *net);
int nn_load_network(nn_t *net, const char *path);
int nn_run_network(nn_t *net, image_t *image);
#endif // __NN_H__

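Each concrete layer struct in nn.h embeds layer_t as its first member, so the loader can keep a single linked list of layer_t pointers and downcast after checking the type field. A small illustrative sketch of that pattern follows (the function and the statistic it computes are examples, not part of the commit):

/* Illustrative only: total weight-blob bytes of all CONV layers in a loaded net. */
#include "nn.h"

static uint32_t conv_weight_bytes(nn_t *net)
{
    uint32_t total = 0;
    for (layer_t *layer = net->layers; layer != NULL; layer = layer->next) {
        if (layer->type == LAYER_TYPE_CONV) {
            // Safe cast: layer_t is the first member of conv_layer_t.
            conv_layer_t *conv = (conv_layer_t *) layer;
            total += conv->w_size;  // byte count read from the network file
        }
    }
    return total;
}
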
src/omv/py/py_nn.c (new file, 84 additions)

@@ -0,0 +1,84 @@
/*
* This file is part of the OpenMV project.
* Copyright (c) 2013/2014 Ibrahim Abdelkader <i.abdalkader@gmail.com>
* This work is licensed under the MIT license, see the file LICENSE for details.
*
* NN module.
*
*/
#include "mp.h"
#include "nn.h"
#include "imlib.h"
#include "xalloc.h"
#include "py_image.h"
#include "py_helper.h"
#include "py_assert.h"
#include "omv_boardconfig.h"
static const mp_obj_type_t py_net_type;
typedef struct _py_net_obj_t {
mp_obj_base_t base;
nn_t _cobj;
} py_net_obj_t;
void *py_net_cobj(mp_obj_t net)
{
PY_ASSERT_TYPE(net, &py_net_type);
return &((py_net_obj_t *)net)->_cobj;
}
STATIC mp_obj_t py_net_forward(uint n_args, const mp_obj_t *args, mp_map_t *kw_args)
{
nn_t *net = py_net_cobj(args[0]);
image_t *img = py_helper_arg_to_image_mutable(args[1]);
nn_run_network(net, img);
return mp_const_none;
}
STATIC void py_net_print(const mp_print_t *print, mp_obj_t self_in, mp_print_kind_t kind)
{
py_net_obj_t *self = self_in;
nn_dump_network(py_net_cobj(self));
}
STATIC MP_DEFINE_CONST_FUN_OBJ_KW(py_net_forward_obj, 2, py_net_forward);
static const mp_map_elem_t locals_dict_table[] = {
{ MP_OBJ_NEW_QSTR(MP_QSTR_forward), (mp_obj_t)&py_net_forward_obj},
{ NULL, NULL },
};
STATIC MP_DEFINE_CONST_DICT(locals_dict, locals_dict_table);
static const mp_obj_type_t py_net_type = {
{ &mp_type_type },
.name = MP_QSTR_Net,
.print = py_net_print,
.locals_dict = (mp_obj_t)&locals_dict,
};
static mp_obj_t py_nn_load(mp_obj_t path_obj)
{
py_net_obj_t *net = NULL;
const char *path = mp_obj_str_get_str(path_obj);
net = m_new_obj(py_net_obj_t);
net->base.type = &py_net_type;
nn_load_network(py_net_cobj(net), path);
return net;
}
STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_nn_load_obj, py_nn_load);
static const mp_map_elem_t globals_dict_table[] = {
{ MP_OBJ_NEW_QSTR(MP_QSTR___name__), MP_OBJ_NEW_QSTR(MP_QSTR_nn) },
{ MP_OBJ_NEW_QSTR(MP_QSTR_load), (mp_obj_t)&py_nn_load_obj },
};
STATIC MP_DEFINE_CONST_DICT(globals_dict, globals_dict_table);
const mp_obj_module_t nn_module = {
.base = { &mp_type_module },
.globals = (mp_obj_t)&globals_dict,
};


@@ -133,6 +133,13 @@ Q(MT9V034)
Q(LEPTON)
Q(value)
// NN Module
Q(load)
// Net
Q(Net)
Q(forward)
// C/SIF Resolutions
Q(QQCIF)
Q(QCIF)