NN: Remove hard-coded NNs.

iabdalkader 2018-05-25 22:50:28 +02:00
parent 280546d63e
commit d151f7e38d
9 changed files with 0 additions and 7017 deletions

View File

@@ -208,10 +208,6 @@ FIRM_OBJ += $(addprefix $(BUILD)/$(OMV_DIR)/img/,\
sincos_tab.o \
edge.o \
hog.o \
lenet.o \
lenet_model_num.o \
cifar10.o \
cifar10_model.o \
)
FIRM_OBJ += $(addprefix $(BUILD)/$(OMV_DIR)/nn/,\

View File

@@ -74,10 +74,6 @@ SRCS += $(addprefix img/, \
sincos_tab.c \
edge.c \
hog.c \
lenet.c \
lenet_model_num.c \
cifar10.c \
cifar10_model.c \
)
SRCS += $(addprefix nn/, \

View File

@@ -1,228 +0,0 @@
/* ----------------------------------------------------------------------
* Copyright (C) 2010-2018 Arm Limited. All rights reserved.
*
* Project: CMSIS NN Library
* Description: Convolutional Neural Network Example
* Target Processor: Cortex-M4/Cortex-M7
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* - Neither the name of Arm LIMITED nor the names of its contributors
* may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
* -------------------------------------------------------------------- */
/**
* Convolutional Neural Network Example
*
* Description:
* Demonstrates a convolutional neural network (CNN) built from convolution,
* ReLU activation, pooling and fully-connected functions.
*
* Model definition:
* The CNN used in this example is based on the CIFAR-10 example from Caffe [1]. The neural network
* consists of 3 convolution layers interspersed by ReLU activation and max pooling layers, followed
* by a fully-connected layer at the end. The input to the network is a 32x32 pixel color image,
* which will be classified into one of the 10 output classes.
* This example model implementation needs 32.3 KB to store weights, 40 KB for activations and
* 3.1 KB for storing the im2col data.
*
* Neural Network model definition:
* Variables Description:
* conv1_wt, conv2_wt, conv3_wt are convolution layer weight matrices
* conv1_bias, conv2_bias, conv3_bias are convolution layer bias arrays
* ip1_wt, ip1_bias point to fully-connected layer weights and biases
* input_data points to the input image data
* output_data points to the classification output
* col_buffer is a buffer to store the im2col output
* scratch_buffer is used to store the activation data (intermediate layer outputs)
*
* CMSIS NN Software Library Functions Used:
* - arm_convolve_HWC_q7_RGB()
* - arm_convolve_HWC_q7_fast()
* - arm_relu_q7()
* - arm_maxpool_q7_HWC()
* - arm_avepool_q7_HWC()
* - arm_fully_connected_q7_opt()
* - arm_fully_connected_q7()
*
* [1] https://github.com/BVLC/caffe
*/
#include <stdint.h>
#include <stdio.h>
#include "imlib.h"
#include "fb_alloc.h"
#include "arm_math.h"
#include "arm_nnfunctions.h"
#ifdef IMLIB_ENABLE_CNN
#define CONV1_IM_DIM (32)
#define CONV1_IM_CH (3)
#define CONV1_KER_DIM (5)
#define CONV1_PADDING (2)
#define CONV1_STRIDE (1)
#define CONV1_OUT_CH (32)
#define CONV1_OUT_DIM (32)
#define POOL1_KER_DIM (3)
#define POOL1_STRIDE (2)
#define POOL1_PADDING (0)
#define POOL1_OUT_DIM (16)
#define CONV2_IM_DIM (16)
#define CONV2_IM_CH (32)
#define CONV2_KER_DIM (5)
#define CONV2_PADDING (2)
#define CONV2_STRIDE (1)
#define CONV2_OUT_CH (16)
#define CONV2_OUT_DIM (16)
#define POOL2_KER_DIM (3)
#define POOL2_STRIDE (2)
#define POOL2_PADDING (0)
#define POOL2_OUT_DIM (8)
#define CONV3_IM_DIM (8)
#define CONV3_IM_CH (16)
#define CONV3_KER_DIM (5)
#define CONV3_PADDING (2)
#define CONV3_STRIDE (1)
#define CONV3_OUT_CH (32)
#define CONV3_OUT_DIM (8)
#define POOL3_KER_DIM (3)
#define POOL3_STRIDE (2)
#define POOL3_PADDING (0)
#define POOL3_OUT_DIM (4)
#define IP1_DIM (4*4*32)
#define IP1_IM_DIM (4)
#define IP1_IM_CH (32)
#define IP1_OUT (10)
#define CONV1_BIAS_LSHIFT (0)
#define CONV1_OUT_RSHIFT (11)
#define CONV2_BIAS_LSHIFT (0)
#define CONV2_OUT_RSHIFT (8)
#define CONV3_BIAS_LSHIFT (0)
#define CONV3_OUT_RSHIFT (8)
#define IP1_BIAS_LSHIFT (5)
#define IP1_OUT_RSHIFT (7)
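// Per the CMSIS-NN q7 convention, each layer accumulates
// acc = (bias << BIAS_LSHIFT) + sum(weight * input) in 32 bits, then
// narrows the result with out = __SSAT(acc >> OUT_RSHIFT, 8), so these
// shift pairs set the fixed-point scaling of every layer's output.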
// Model mean image, weights and biases.
extern const q7_t cifar10_mean_image[CONV1_IM_CH * CONV1_IM_DIM * CONV1_IM_DIM];
extern const q7_t cifar10_conv1_wt[CONV1_IM_CH * CONV1_KER_DIM * CONV1_KER_DIM * CONV1_OUT_CH];
extern const q7_t cifar10_conv1_bias[CONV1_OUT_CH];
extern const q7_t cifar10_conv2_wt[CONV2_IM_CH * CONV2_KER_DIM * CONV2_KER_DIM * CONV2_OUT_CH];
extern const q7_t cifar10_conv2_bias[CONV2_OUT_CH];
extern const q7_t cifar10_conv3_wt[CONV3_IM_CH * CONV3_KER_DIM * CONV3_KER_DIM * CONV3_OUT_CH];
extern const q7_t cifar10_conv3_bias[CONV3_OUT_CH];
extern const q7_t cifar10_ip1_wt[IP1_DIM * IP1_OUT];
extern const q7_t cifar10_ip1_bias[IP1_OUT];
int imlib_classify_object(image_t *img, int8_t *output_data)
{
//TODO: Load from file.
const q7_t *mean_image = cifar10_mean_image;
const q7_t *conv1_wt = cifar10_conv1_wt;
const q7_t *conv1_bias = cifar10_conv1_bias;
const q7_t *conv2_wt = cifar10_conv2_wt;
const q7_t *conv2_bias = cifar10_conv2_bias;
const q7_t *conv3_wt = cifar10_conv3_wt;
const q7_t *conv3_bias = cifar10_conv3_bias;
const q7_t *ip1_wt = cifar10_ip1_wt;
const q7_t *ip1_bias = cifar10_ip1_bias;
q7_t *input_data = fb_alloc0(CONV1_IM_CH * CONV1_IM_DIM * CONV1_IM_DIM);
q7_t *col_buffer = fb_alloc0(2 * 5 * 5 * 32 * 2);
q7_t *img_buffer1 = fb_alloc0(32 * 32 * 10 * 4);
q7_t *img_buffer2 = img_buffer1 + 32 * 32 * 32;
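// img_buffer1 holds the largest feature map (32*32*32 = 32 KB) and img_buffer2
// starts just past it, so the conv/pool stages below can ping-pong between the
// two regions inside the single 40 KB activation allocation.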
// Scale, convert, remove mean image and load input data.
int x_ratio = (int)((img->w<<16)/32)+1;
int y_ratio = (int)((img->h<<16)/32)+1;
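// 16.16 fixed point: (x * x_ratio) >> 16 maps each of the 32 output columns
// (and rows) back to a nearest-neighbor source pixel.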
for (int y=0, i=0; y<32; y++) {
int sy = (y*y_ratio)>>16;
for (int x=0; x<32; x++, i+=3) {
int sx = (x*x_ratio)>>16;
uint16_t p = IM_GET_RGB565_PIXEL(img, sx, sy);
input_data[i+0] = (int8_t) (((int) COLOR_RGB565_TO_R8(p)) - (int) mean_image[i+0]);
input_data[i+1] = (int8_t) (((int) COLOR_RGB565_TO_G8(p)) - (int) mean_image[i+1]);
input_data[i+2] = (int8_t) (((int) COLOR_RGB565_TO_B8(p)) - (int) mean_image[i+2]);
}
}
// conv1 input_data -> img_buffer1
arm_convolve_HWC_q7_RGB(input_data, CONV1_IM_DIM, CONV1_IM_CH, conv1_wt, CONV1_OUT_CH, CONV1_KER_DIM, CONV1_PADDING,
CONV1_STRIDE, conv1_bias, CONV1_BIAS_LSHIFT, CONV1_OUT_RSHIFT, img_buffer1, CONV1_OUT_DIM,
(q15_t *) col_buffer, NULL);
// pool1 img_buffer1 -> img_buffer2
arm_maxpool_q7_HWC(img_buffer1, CONV1_OUT_DIM, CONV1_OUT_CH, POOL1_KER_DIM,
POOL1_PADDING, POOL1_STRIDE, POOL1_OUT_DIM, NULL, img_buffer2);
arm_relu_q7(img_buffer2, CONV2_IM_DIM * CONV2_IM_DIM * CONV2_IM_CH);
// conv2 img_buffer2 -> img_buffer1
arm_convolve_HWC_q7_fast(img_buffer2, CONV2_IM_DIM, CONV2_IM_CH, conv2_wt, CONV2_OUT_CH, CONV2_KER_DIM,
CONV2_PADDING, CONV2_STRIDE, conv2_bias, CONV2_BIAS_LSHIFT, CONV2_OUT_RSHIFT, img_buffer1,
CONV2_OUT_DIM, (q15_t *) col_buffer, NULL);
arm_relu_q7(img_buffer1, CONV2_OUT_DIM * CONV2_OUT_DIM * CONV2_OUT_CH);
// pool2 img_buffer1 -> img_buffer2
arm_avepool_q7_HWC(img_buffer1, CONV2_OUT_DIM, CONV2_OUT_CH, POOL2_KER_DIM,
POOL2_PADDING, POOL2_STRIDE, POOL2_OUT_DIM, col_buffer, img_buffer2);
// conv3 img_buffer2 -> img_buffer1
arm_convolve_HWC_q7_fast(img_buffer2, CONV3_IM_DIM, CONV3_IM_CH, conv3_wt, CONV3_OUT_CH, CONV3_KER_DIM,
CONV3_PADDING, CONV3_STRIDE, conv3_bias, CONV3_BIAS_LSHIFT, CONV3_OUT_RSHIFT, img_buffer1,
CONV3_OUT_DIM, (q15_t *) col_buffer, NULL);
arm_relu_q7(img_buffer1, CONV3_OUT_DIM * CONV3_OUT_DIM * CONV3_OUT_CH);
// pool3 img_buffer1 -> img_buffer2
arm_avepool_q7_HWC(img_buffer1, CONV3_OUT_DIM, CONV3_OUT_CH, POOL3_KER_DIM,
POOL3_PADDING, POOL3_STRIDE, POOL3_OUT_DIM, col_buffer, img_buffer2);
#if 1
arm_fully_connected_q7_opt(img_buffer2, ip1_wt, IP1_DIM, IP1_OUT, IP1_BIAS_LSHIFT, IP1_OUT_RSHIFT, ip1_bias,
output_data, (q15_t *) img_buffer1);
#else
arm_fully_connected_q7(img_buffer2, ip1_wt, IP1_DIM, IP1_OUT, IP1_BIAS_LSHIFT, IP1_OUT_RSHIFT, ip1_bias,
output_data, (q15_t *) img_buffer1);
#endif
arm_softmax_q7(output_data, 10, output_data);
fb_free_all();
return 0;
}
#endif //IMLIB_ENABLE_CNN
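For reference, a minimal caller sketch showing how the removed entry point was consumed, assuming an RGB565 image_t; the classify_demo wrapper and the label table are illustrative (the labels follow the standard CIFAR-10 class order), not part of the firmware:

static const char *cifar10_labels[10] = {
    "airplane", "automobile", "bird", "cat", "deer",
    "dog", "frog", "horse", "ship", "truck"
};

void classify_demo(image_t *img)
{
    int8_t scores[10];                  // q7 softmax outputs, one per class
    imlib_classify_object(img, scores);
    int best = 0;
    for (int i = 1; i < 10; i++) {
        if (scores[i] > scores[best]) {
            best = i;
        }
    }
    // A q7 score converts to an approximate probability as score / 128.0f.
    printf("%s: %.2f\n", cifar10_labels[best], scores[best] / 128.0f);
}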

File diff suppressed because one or more lines are too long

View File

@@ -1362,57 +1362,4 @@ void imlib_find_barcodes(list_t *out, image_t *ptr, rectangle_t *roi);
void imlib_phasecorrelate(image_t *img0, image_t *img1, rectangle_t *roi0, rectangle_t *roi1, bool logpolar, bool fix_rotation_scale,
float *x_translation, float *y_translation, float *rotation, float *scale, float *response);
// LeNet (CNN for character recognition)
#define LENGTH_KERNEL 5
#define LENGTH_FEATURE0 32
#define LENGTH_FEATURE1 (LENGTH_FEATURE0 - LENGTH_KERNEL + 1)
#define LENGTH_FEATURE2 (LENGTH_FEATURE1 >> 1)
#define LENGTH_FEATURE3 (LENGTH_FEATURE2 - LENGTH_KERNEL + 1)
#define LENGTH_FEATURE4 (LENGTH_FEATURE3 >> 1)
#define LENGTH_FEATURE5 (LENGTH_FEATURE4 - LENGTH_KERNEL + 1)
#define INPUT 1
#define LAYER1 6
#define LAYER2 6
#define LAYER3 16
#define LAYER4 16
#define LAYER5 120
#define LENET_INPUT_W (28)
#define LENET_INPUT_H (28)
#define LENET_OUTPUT_SIZE (10)
#define LENET_PADDING_SIZE (2)
#define LENET_MODEL_SIZE (51902)
typedef struct lenet5 {
float weight0_1[INPUT][LAYER1][LENGTH_KERNEL][LENGTH_KERNEL];
float weight2_3[LAYER2][LAYER3][LENGTH_KERNEL][LENGTH_KERNEL];
float weight4_5[LAYER4][LAYER5][LENGTH_KERNEL][LENGTH_KERNEL];
float weight5_6[LAYER5 * LENGTH_FEATURE5 * LENGTH_FEATURE5][LENET_OUTPUT_SIZE];
float bias0_1[LAYER1];
float bias2_3[LAYER3];
float bias4_5[LAYER5];
float bias5_6[LENET_OUTPUT_SIZE];
} lenet5_t;
typedef struct lenet5_feature {
float input[INPUT][LENGTH_FEATURE0][LENGTH_FEATURE0];
float layer1[LAYER1][LENGTH_FEATURE1][LENGTH_FEATURE1];
float layer2[LAYER2][LENGTH_FEATURE2][LENGTH_FEATURE2];
float layer3[LAYER3][LENGTH_FEATURE3][LENGTH_FEATURE3];
float layer4[LAYER4][LENGTH_FEATURE4][LENGTH_FEATURE4];
float layer5[LAYER5][LENGTH_FEATURE5][LENGTH_FEATURE5];
float output[LENET_OUTPUT_SIZE];
} lenet5_feature_t;
// LeNet pretrained models.
extern const float lenet_model_num[LENET_MODEL_SIZE];
uint8_t lenet_predict(lenet5_t *lenet, image_t *src, rectangle_t *roi, float *conf);
// CMSIS CNN
int imlib_classify_object(image_t *img, int8_t *output_data);
#endif //__IMLIB_H__
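As a quick sanity check (hypothetical, not part of the header), the LENGTH_FEATURE chain resolves to the classic LeNet-5 layer sizes for the zero-padded 32x32 input:

#include <assert.h>
static_assert(LENGTH_FEATURE1 == 28, "conv1: 32 - 5 + 1");
static_assert(LENGTH_FEATURE2 == 14, "pool1: 28 / 2");
static_assert(LENGTH_FEATURE3 == 10, "conv2: 14 - 5 + 1");
static_assert(LENGTH_FEATURE4 == 5,  "pool2: 10 / 2");
static_assert(LENGTH_FEATURE5 == 1,  "conv3: 5 - 5 + 1");
// Hence weight5_6 maps LAYER5 * 1 * 1 = 120 activations onto the 10 outputs.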

View File

@@ -1,150 +0,0 @@
/* This file is part of the OpenMV project.
* Copyright (c) 2013-2017 Ibrahim Abdelkader <iabdalkader@openmv.io> & Kwabena W. Agyeman <kwagyeman@openmv.io>
* This work is licensed under the MIT license, see the file LICENSE for details.
*/
/*
 * 2016-04-20
 * Based on Yann Lecun's paper, "Gradient-Based Learning Applied to Document Recognition".
 */
#include "imlib.h"
#include "fb_alloc.h"
#ifdef IMLIB_ENABLE_LENET
#define GETLENGTH(array) (sizeof(array)/sizeof(*(array)))
#define GETCOUNT(array) (sizeof(array)/sizeof(float))
#define FOREACH(i,count) for (int i = 0; i < count; ++i)
#define CONVOLUTE_VALID(input,output,weight) \
{ \
FOREACH(o0,GETLENGTH(output)) \
FOREACH(o1,GETLENGTH(*(output))) \
FOREACH(w0,GETLENGTH(weight)) \
FOREACH(w1,GETLENGTH(*(weight))) \
(output)[o0][o1] += (input)[o0 + w0][o1 + w1] * (weight)[w0][w1]; \
}
#define CONVOLUTION_FORWARD(input,output,weight,bias,action) \
{ \
for (int x = 0; x < GETLENGTH(weight); ++x) \
for (int y = 0; y < GETLENGTH(*weight); ++y) \
CONVOLUTE_VALID(input[x], output[y], weight[x][y]); \
FOREACH(j, GETLENGTH(output)) \
FOREACH(i, GETCOUNT(output[j])) \
((float *)output[j])[i] = action(((float *)output[j])[i] + bias[j]); \
}
#define SUBSAMP_MAX_FORWARD(input,output) \
{ \
const int len0 = GETLENGTH(*(input)) / GETLENGTH(*(output)); \
const int len1 = GETLENGTH(**(input)) / GETLENGTH(**(output)); \
FOREACH(i, GETLENGTH(output)) \
FOREACH(o0, GETLENGTH(*(output))) \
FOREACH(o1, GETLENGTH(**(output))) \
{ \
int x0 = 0, x1 = 0, ismax; \
FOREACH(l0, len0) \
FOREACH(l1, len1) \
{ \
ismax = input[i][o0*len0 + l0][o1*len1 + l1] > input[i][o0*len0 + x0][o1*len1 + x1];\
x0 += ismax * (l0 - x0); \
x1 += ismax * (l1 - x1); \
} \
output[i][o0][o1] = input[i][o0*len0 + x0][o1*len1 + x1]; \
} \
}
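// In SUBSAMP_MAX_FORWARD above, the branchless update keeps (x0, x1) tracking
// the running argmax within each len0 x len1 pooling window, avoiding
// data-dependent branches.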
#define DOT_PRODUCT_FORWARD(input,output,weight,bias,action) \
{ \
for (int x = 0; x < GETLENGTH(weight); ++x) \
for (int y = 0; y < GETLENGTH(*weight); ++y) \
((float *)output)[y] += ((float *)input)[x] * weight[x][y]; \
FOREACH(j, GETLENGTH(bias)) \
((float *)output)[j] = action(((float *)output)[j] + bias[j]); \
}
float relu(float x)
{
return x*(x > 0);
}
float relugrad(float y)
{
return y > 0;
}
static void forward(lenet5_t *lenet, lenet5_feature_t *features, float(*action)(float))
{
CONVOLUTION_FORWARD(features->input, features->layer1, lenet->weight0_1, lenet->bias0_1, action);
SUBSAMP_MAX_FORWARD(features->layer1, features->layer2);
CONVOLUTION_FORWARD(features->layer2, features->layer3, lenet->weight2_3, lenet->bias2_3, action);
SUBSAMP_MAX_FORWARD(features->layer3, features->layer4);
CONVOLUTION_FORWARD(features->layer4, features->layer5, lenet->weight4_5, lenet->bias4_5, action);
DOT_PRODUCT_FORWARD(features->layer5, features->output, lenet->weight5_6, lenet->bias5_6, action);
}
static inline void load_input(lenet5_feature_t *features, image_t *src, rectangle_t *r)
{
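// Whiten the ROI (subtract its mean, divide by its standard deviation) and
// write it into the zero-initialized 32x32 input plane; the 28x28 ROI sits
// inside the 2-pixel LENET_PADDING_SIZE border.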
float mean = 0, std = 0;
float (*layer0)[LENGTH_FEATURE0][LENGTH_FEATURE0] = features->input;
for (int k=r->y; k<r->y+r->h; k++) {
for (int j=r->x; j<r->x+r->w; j++) {
uint8_t p = imlib_get_pixel(src, j, k);
mean += p;
std += p * p;
}
}
mean /= (r->w*r->h);
std = fast_sqrtf(std / (r->w*r->h) - mean*mean);
for (int k=r->y; k<r->y+r->h; k++) {
for (int j=r->x; j<r->x+r->w; j++) {
layer0[0][(k-r->y) + LENET_PADDING_SIZE][(j-r->x)+ LENET_PADDING_SIZE] = (imlib_get_pixel(src, j, k) - mean) / std;
}
}
}
static inline void softmax(float *input, float *loss, int label, int count)
{
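// 1 / sum_j(exp(x_j - x_i)) equals exp(x_i) / sum_j(exp(x_j)): the softmax
// probability of class i computed in an overflow-safe form. The remainder of
// the function folds in the cross-entropy gradient used during training.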
float inner = 0;
for (int i = 0; i < count; ++i)
{
float res = 0;
for (int j = 0; j < count; ++j)
{
res += expf(input[j] - input[i]);
}
loss[i] = 1. / res;
inner -= loss[i] * loss[i];
}
inner += loss[label];
for (int i = 0; i < count; ++i)
{
loss[i] *= (i == label) - loss[i] - inner;
}
}
uint8_t lenet_predict(lenet5_t *lenet, image_t *src, rectangle_t *roi, float *conf)
{
lenet5_feature_t *features = fb_alloc0(sizeof(*features));
load_input(features, src, roi);
forward(lenet, features, relu);
uint8_t result = 0;
float *output = (float *)features->output;
float maxvalue = *output;
for (uint8_t i=1; i < LENET_OUTPUT_SIZE; ++i) {
if (output[i] > maxvalue) {
maxvalue = output[i];
result = i;
}
}
*conf = output[result];
fb_free();
return result;
}
#endif //IMLIB_ENABLE_LENET
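For readability, a plain-function sketch of what the CONVOLUTE_VALID macro expands to for one input/output channel pair; the function form is hypothetical, the shipped code used the macro:

static void convolve_valid(const float *in, int in_w,
                           float *out, int out_h, int out_w,
                           const float *wt, int k)
{
    // "Valid" 2D correlation: no padding, results accumulate into out.
    for (int o0 = 0; o0 < out_h; o0++) {
        for (int o1 = 0; o1 < out_w; o1++) {
            for (int w0 = 0; w0 < k; w0++) {
                for (int w1 = 0; w1 < k; w1++) {
                    out[o0 * out_w + o1] +=
                        in[(o0 + w0) * in_w + (o1 + w1)] * wt[w0 * k + w1];
                }
            }
        }
    }
}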

File diff suppressed because it is too large

View File

@@ -4980,54 +4980,6 @@ static mp_obj_t py_image_find_displacement(uint n_args, const mp_obj_t *args, mp
STATIC MP_DEFINE_CONST_FUN_OBJ_KW(py_image_find_displacement_obj, 2, py_image_find_displacement);
#endif // IMLIB_ENABLE_FIND_DISPLACEMENT
#ifdef IMLIB_ENABLE_LENET
static mp_obj_t py_image_find_number(uint n_args, const mp_obj_t *args, mp_map_t *kw_args)
{
image_t *arg_img = py_helper_arg_to_image_grayscale(args[0]);
rectangle_t roi;
py_helper_keyword_rectangle_roi(arg_img, n_args, args, 1, kw_args, &roi);
// Make sure ROI is bigger than or equal to template size
PY_ASSERT_TRUE_MSG((roi.w == LENET_INPUT_W && roi.h == LENET_INPUT_H),
"Region of interest must be 28x28!");
int r = 0;
float c = 0.0f;
lenet5_t *lenet = (lenet5_t*) lenet_model_num;
r = lenet_predict(lenet, arg_img, &roi, &c);
mp_obj_t ret_obj[2] = {
mp_obj_new_int(r),
mp_obj_new_float(c),
};
return mp_obj_new_tuple(2, ret_obj);
}
STATIC MP_DEFINE_CONST_FUN_OBJ_KW(py_image_find_number_obj, 1, py_image_find_number);
#endif //IMLIB_ENABLE_LENET
#ifdef IMLIB_ENABLE_CNN
static mp_obj_t py_image_classify_object(uint n_args, const mp_obj_t *args, mp_map_t *kw_args)
{
image_t *arg_img = py_helper_arg_to_image_color(args[0]);
rectangle_t roi;
py_helper_keyword_rectangle_roi(arg_img, n_args, args, 1, kw_args, &roi);
int8_t output_data[10];
imlib_classify_object(arg_img, output_data);
mp_obj_t output_list = mp_obj_new_list(0, NULL);
for (int i=0; i<10; i++) {
mp_obj_list_append(output_list, mp_obj_new_int(output_data[i]));
}
return output_list;
}
STATIC MP_DEFINE_CONST_FUN_OBJ_KW(py_image_classify_object_obj, 1, py_image_classify_object);
#endif //IMLIB_ENABLE_CNN
static mp_obj_t py_image_find_template(uint n_args, const mp_obj_t *args, mp_map_t *kw_args)
{
image_t *arg_img = py_helper_arg_to_image_grayscale(args[0]);
@@ -5437,16 +5389,6 @@ static const mp_rom_map_elem_t locals_dict_table[] = {
{MP_ROM_QSTR(MP_QSTR_find_displacement), MP_ROM_PTR(&py_image_find_displacement_obj)},
#else
{MP_ROM_QSTR(MP_QSTR_find_displacement), MP_ROM_PTR(&py_image_unavailable_obj)},
#endif
#ifdef IMLIB_ENABLE_LENET
{MP_ROM_QSTR(MP_QSTR_find_number), MP_ROM_PTR(&py_image_find_number_obj)},
#else
{MP_ROM_QSTR(MP_QSTR_find_number), MP_ROM_PTR(&py_image_unavailable_obj)},
#endif
#ifdef IMLIB_ENABLE_CNN
{MP_ROM_QSTR(MP_QSTR_classify_object), MP_ROM_PTR(&py_image_classify_object_obj)},
#else
{MP_ROM_QSTR(MP_QSTR_classify_object), MP_ROM_PTR(&py_image_unavailable_obj)},
#endif
{MP_ROM_QSTR(MP_QSTR_find_template), MP_ROM_PTR(&py_image_find_template_obj)},
{MP_ROM_QSTR(MP_QSTR_find_features), MP_ROM_PTR(&py_image_find_features_obj)},

View File

@@ -958,12 +958,6 @@ Q(displacement)
// duplicate Q(scale)
Q(response)
// LENET
Q(find_number)
// CMSIS CNN
Q(classify_object)
// Image Writer
Q(ImageWriter)
// Image Writer Object