Merge pull request #355 from kwagyeman/nn_upgrade
NN: Clean up and support sliding window.
This commit is contained in: commit 59eabdf13d
@ -0,0 +1,53 @@
# CIFAR-10 Search Just Center Example
#
# The CIFAR-10 network is a convolutional neural network trained to classify its field of view
# into several different object types; it works on RGB video data.
#
# In this example we slide the CIFAR-10 detector window over the image and get a list of activations
# where there might be an object. Note that using a CNN with a sliding window is extremely
# computationally expensive, so for an exhaustive search do not expect the CNN to be real-time.

import sensor, image, time, os, nn

sensor.reset()                          # Reset and initialize the sensor.
sensor.set_pixformat(sensor.RGB565)     # Set pixel format to RGB565 (or GRAYSCALE)
sensor.set_framesize(sensor.QVGA)       # Set frame size to QVGA (320x240)
sensor.set_windowing((128, 128))        # Set 128x128 window.
sensor.skip_frames(time=750)            # Don't let autogain run very long.
sensor.set_auto_gain(False)             # Turn off autogain.
sensor.set_auto_exposure(False)         # Turn off auto exposure.

# Load cifar10 network (You can get the network from OpenMV IDE).
net = nn.load('/cifar10.network')
# Faster, smaller and less accurate.
# net = nn.load('/cifar10_fast.network')
labels = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']

clock = time.clock()
while(True):
    clock.tick()

    img = sensor.snapshot()

    # net.search() will search an roi in the image for the network (or the whole image if the roi is not
    # specified). At each location to look in the image, if one of the classifier outputs is larger than
    # threshold the location and label will be stored in an object list and returned. At each scale the
    # detection window is moved around in the ROI using x_overlap (0-1) and y_overlap (0-1) as a guide.
    # If you set the overlap to 0.5 then each detection window will overlap the previous one by 50%. Note
    # that the computational work load goes WAY up the more overlap. Finally, for multi-scale matching, after
    # sliding the network around in the x/y dimensions the detection window will shrink by scale_mul (0-1)
    # down to min_scale (0-1). For example, if scale_mul is 0.5 the detection window will shrink by 50%.
    # Note that at a lower scale there's even more area to search if x_overlap and y_overlap are small...
    # contrast_threshold skips running the CNN in areas that are flat.

    # Setting x_overlap=-1 forces the window to stay centered in the ROI in the x direction always. If
    # y_overlap is not -1 the method will search in all vertical positions.

    # Setting y_overlap=-1 forces the window to stay centered in the ROI in the y direction always. If
    # x_overlap is not -1 the method will search in all horizontal positions.
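
    # As a rough sanity check (assuming the 128x128 sensor window above with the whole image as the
    # ROI): x_overlap=-1 and y_overlap=-1 test a single centered window per scale, and with
    # min_scale=0.4 and scale_mul=0.8 the scales visited are 1.0, 0.8, 0.64, 0.512 and 0.4096, so the
    # CNN runs up to 5 times per frame (fewer wherever contrast_threshold skips flat windows).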

    for obj in net.search(img, threshold=0.6, min_scale=0.4, scale_mul=0.8, \
            x_overlap=-1, y_overlap=-1, contrast_threshold=0.5):
        print("Detected %s - Confidence %f%%" % (labels[obj.index()], obj.value()))
        img.draw_rectangle(obj.rect(), color=(255, 0, 0))
    print(clock.fps())
@ -0,0 +1,47 @@
# CIFAR-10 Search Whole Window Example
#
# The CIFAR-10 network is a convolutional neural network trained to classify its field of view
# into several different object types; it works on RGB video data.
#
# In this example we slide the CIFAR-10 detector window over the image and get a list of activations
# where there might be an object. Note that using a CNN with a sliding window is extremely
# computationally expensive, so for an exhaustive search do not expect the CNN to be real-time.

import sensor, image, time, os, nn

sensor.reset()                          # Reset and initialize the sensor.
sensor.set_pixformat(sensor.RGB565)     # Set pixel format to RGB565 (or GRAYSCALE)
sensor.set_framesize(sensor.QVGA)       # Set frame size to QVGA (320x240)
sensor.set_windowing((128, 128))        # Set 128x128 window.
sensor.skip_frames(time=750)            # Don't let autogain run very long.
sensor.set_auto_gain(False)             # Turn off autogain.
sensor.set_auto_exposure(False)         # Turn off auto exposure.

# Load cifar10 network (You can get the network from OpenMV IDE).
net = nn.load('/cifar10.network')
# Faster, smaller and less accurate.
# net = nn.load('/cifar10_fast.network')
labels = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']

clock = time.clock()
while(True):
    clock.tick()

    img = sensor.snapshot()

    # net.search() will search an roi in the image for the network (or the whole image if the roi is not
    # specified). At each location to look in the image, if one of the classifier outputs is larger than
    # threshold the location and label will be stored in an object list and returned. At each scale the
    # detection window is moved around in the ROI using x_overlap (0-1) and y_overlap (0-1) as a guide.
    # If you set the overlap to 0.5 then each detection window will overlap the previous one by 50%. Note
    # that the computational work load goes WAY up the more overlap. Finally, for multi-scale matching, after
    # sliding the network around in the x/y dimensions the detection window will shrink by scale_mul (0-1)
    # down to min_scale (0-1). For example, if scale_mul is 0.5 the detection window will shrink by 50%.
    # Note that at a lower scale there's even more area to search if x_overlap and y_overlap are small...
    # contrast_threshold skips running the CNN in areas that are flat.
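
    # A quick count (assuming the 128x128 window above with the whole image as the ROI): min_scale=0.5
    # and scale_mul=0.5 give two scales (1.0 and 0.5). The full-size window fits once, and the 64x64
    # window stepped with 50% overlap forms a 3x3 grid, so the CNN runs up to 1 + 9 = 10 times per
    # frame (fewer wherever contrast_threshold skips flat regions).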

    for obj in net.search(img, threshold=0.6, min_scale=0.5, scale_mul=0.5, \
            x_overlap=0.5, y_overlap=0.5, contrast_threshold=0.5):
        print("Detected %s - Confidence %f%%" % (labels[obj.index()], obj.value()))
        img.draw_rectangle(obj.rect(), color=(255, 0, 0))
    print(clock.fps())
@ -0,0 +1,51 @@
# LeNet Search Just Center Example
#
# LeNet is a convolutional neural network designed to classify its field of view into digits 0-9.
#
# In this example we slide the LeNet detector window over the image and get a list of activations
# where there might be an object. Note that using a CNN with a sliding window is extremely
# computationally expensive, so for an exhaustive search do not expect the CNN to be real-time.

import sensor, image, time, os, nn

sensor.reset()                          # Reset and initialize the sensor.
sensor.set_pixformat(sensor.GRAYSCALE)  # Set pixel format to GRAYSCALE (or RGB565)
sensor.set_framesize(sensor.QVGA)       # Set frame size to QVGA (320x240)
sensor.set_windowing((128, 128))        # Set 128x128 window.
sensor.skip_frames(time=500)            # Don't let autogain run very long.
sensor.set_auto_gain(False)             # Turn off autogain.
sensor.set_auto_exposure(False)         # Turn off auto exposure.

# Load lenet network (You can get the network from OpenMV IDE).
net = nn.load('/lenet.network')
labels = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']

clock = time.clock()
while(True):
    clock.tick()

    img = sensor.snapshot()
    tmp_img = img.copy().binary([(150, 255)], invert=True)

    # net.search() will search an roi in the image for the network (or the whole image if the roi is not
    # specified). At each location to look in the image, if one of the classifier outputs is larger than
    # threshold the location and label will be stored in an object list and returned. At each scale the
    # detection window is moved around in the ROI using x_overlap (0-1) and y_overlap (0-1) as a guide.
    # If you set the overlap to 0.5 then each detection window will overlap the previous one by 50%. Note
    # that the computational work load goes WAY up the more overlap. Finally, for multi-scale matching, after
    # sliding the network around in the x/y dimensions the detection window will shrink by scale_mul (0-1)
    # down to min_scale (0-1). For example, if scale_mul is 0.5 the detection window will shrink by 50%.
    # Note that at a lower scale there's even more area to search if x_overlap and y_overlap are small...
    # contrast_threshold skips running the CNN in areas that are flat.

    # Setting x_overlap=-1 forces the window to stay centered in the ROI in the x direction always. If
    # y_overlap is not -1 the method will search in all vertical positions.

    # Setting y_overlap=-1 forces the window to stay centered in the ROI in the y direction always. If
    # x_overlap is not -1 the method will search in all horizontal positions.

    for obj in net.search(tmp_img, threshold=0.8, min_scale=0.4, scale_mul=0.8, \
            x_overlap=-1, y_overlap=-1, contrast_threshold=0.5):
        print("Detected %s - Confidence %f%%" % (labels[obj.index()], obj.value()))
        img.draw_rectangle(obj.rect())
    print(clock.fps())
@ -0,0 +1,45 @@
# LeNet Search Whole Window Example
#
# LeNet is a convolutional neural network designed to classify its field of view into digits 0-9.
#
# In this example we slide the LeNet detector window over the image and get a list of activations
# where there might be an object. Note that using a CNN with a sliding window is extremely
# computationally expensive, so for an exhaustive search do not expect the CNN to be real-time.

import sensor, image, time, os, nn

sensor.reset()                          # Reset and initialize the sensor.
sensor.set_pixformat(sensor.GRAYSCALE)  # Set pixel format to GRAYSCALE (or RGB565)
sensor.set_framesize(sensor.QVGA)       # Set frame size to QVGA (320x240)
sensor.set_windowing((128, 128))        # Set 128x128 window.
sensor.skip_frames(time=500)            # Don't let autogain run very long.
sensor.set_auto_gain(False)             # Turn off autogain.
sensor.set_auto_exposure(False)         # Turn off auto exposure.

# Load lenet network (You can get the network from OpenMV IDE).
net = nn.load('/lenet.network')
labels = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']

clock = time.clock()
while(True):
    clock.tick()

    img = sensor.snapshot()
    tmp_img = img.copy().binary([(150, 255)], invert=True)

    # net.search() will search an roi in the image for the network (or the whole image if the roi is not
    # specified). At each location to look in the image, if one of the classifier outputs is larger than
    # threshold the location and label will be stored in an object list and returned. At each scale the
    # detection window is moved around in the ROI using x_overlap (0-1) and y_overlap (0-1) as a guide.
    # If you set the overlap to 0.5 then each detection window will overlap the previous one by 50%. Note
    # that the computational work load goes WAY up the more overlap. Finally, for multi-scale matching, after
    # sliding the network around in the x/y dimensions the detection window will shrink by scale_mul (0-1)
    # down to min_scale (0-1). For example, if scale_mul is 0.5 the detection window will shrink by 50%.
    # Note that at a lower scale there's even more area to search if x_overlap and y_overlap are small...
    # contrast_threshold skips running the CNN in areas that are flat.

    for obj in net.search(tmp_img, threshold=0.9, min_scale=0.5, scale_mul=0.5, \
            x_overlap=0.5, y_overlap=0.5, contrast_threshold=0.5):
        print("Detected %s - Confidence %f%%" % (labels[obj.index()], obj.value()))
        img.draw_rectangle(obj.rect())
    print(clock.fps())
@ -427,6 +427,69 @@ extern "C"
                                        q15_t * bufferA,
                                        q7_t * bufferB);

  /**
   * @brief Fast Q15 convolution function (non-square shape)
   * @param[in]       Im_in         pointer to input tensor
   * @param[in]       dim_im_in_x   input tensor dimension x
   * @param[in]       dim_im_in_y   input tensor dimension y
   * @param[in]       ch_im_in      number of input tensor channels
   * @param[in]       wt            pointer to kernel weights
   * @param[in]       ch_im_out     number of filters, i.e., output tensor channels
   * @param[in]       dim_kernel_x  filter kernel size x
   * @param[in]       dim_kernel_y  filter kernel size y
   * @param[in]       padding_x     padding size x
   * @param[in]       padding_y     padding size y
   * @param[in]       stride_x      convolution stride x
   * @param[in]       stride_y      convolution stride y
   * @param[in]       bias          pointer to bias
   * @param[in]       bias_shift    amount of left-shift for bias
   * @param[in]       out_shift     amount of right-shift for output
   * @param[in,out]   Im_out        pointer to output tensor
   * @param[in]       dim_im_out_x  output tensor dimension x
   * @param[in]       dim_im_out_y  output tensor dimension y
   * @param[in,out]   bufferA       pointer to buffer space for input
   * @param[in,out]   bufferB       pointer to buffer space for output
   * @return     The function returns either
   * <code>ARM_MATH_SIZE_MISMATCH</code> or <code>ARM_MATH_SUCCESS</code> based on the outcome of size checking.
   *
   * @details
   *
   * <b>Buffer size:</b>
   *
   * bufferA size: 2*ch_im_in*dim_kernel_x*dim_kernel_y
   *
   * bufferB size: 0
   *
   * <b>Input dimension constraints:</b>
   *
   * ch_im_in is multiple of 2
   *
   * ch_im_out is multiple of 2
   *
   */

    arm_status
    arm_convolve_HWC_q15_fast_nonsquare(const q15_t * Im_in,
                                        const uint16_t dim_im_in_x,
                                        const uint16_t dim_im_in_y,
                                        const uint16_t ch_im_in,
                                        const q15_t * wt,
                                        const uint16_t ch_im_out,
                                        const uint16_t dim_kernel_x,
                                        const uint16_t dim_kernel_y,
                                        const uint16_t padding_x,
                                        const uint16_t padding_y,
                                        const uint16_t stride_x,
                                        const uint16_t stride_y,
                                        const q15_t * bias,
                                        const uint16_t bias_shift,
                                        const uint16_t out_shift,
                                        q15_t * Im_out,
                                        const uint16_t dim_im_out_x,
                                        const uint16_t dim_im_out_y,
                                        q15_t * bufferA,
                                        q7_t * bufferB);
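
  /*
   * Example invocation (illustrative values only, not part of the CMSIS-NN API): a 1x1
   * convolution over a 10x8 Q15 feature map with 4 input and 8 output channels. bufferA must
   * hold 2*ch_im_in*dim_kernel_x*dim_kernel_y q15_t entries, and both channel counts must be
   * even or the call returns ARM_MATH_SIZE_MISMATCH.
   *
   *   q15_t buf_a[2 * 4 * 1 * 1];
   *   arm_status s = arm_convolve_HWC_q15_fast_nonsquare(in, 10, 8, 4, weights, 8,
   *                                                      1, 1, 0, 0, 1, 1, bias, 0, 9,
   *                                                      out, 10, 8, buf_a, NULL);
   */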

  /**
   * @brief Q7 depthwise separable convolution function
   * @param[in]       Im_in         pointer to input tensor
@ -80,7 +80,7 @@ void arm_nn_activations_direct_q7(q7_t * data, uint16_t size, uint16_t int_width
    while (i)
    {
        in = *pIn++;
        out = lookup_table[(uint8_t) in >> shift_size];
        out = lookup_table[(uint8_t) (in >> shift_size)];
        *pOut++ = out;
        i--;
    }

@ -0,0 +1,265 @@
|
||||
/*
|
||||
* Copyright (C) 2010-2018 Arm Limited or its affiliates. All rights reserved.
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the License); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an AS IS BASIS, WITHOUT
|
||||
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
/* ----------------------------------------------------------------------
|
||||
* Project: CMSIS NN Library
|
||||
 * Title:        arm_convolve_HWC_q15_fast_nonsquare.c
|
||||
* Description: Fast Q15 version of convolution
|
||||
*
|
||||
* $Date: 24. May 2018
|
||||
* $Revision: V.1.0.0
|
||||
*
|
||||
* Target Processor: Cortex-M cores
|
||||
*
|
||||
* -------------------------------------------------------------------- */
|
||||
|
||||
#include "arm_math.h"
|
||||
#include "arm_nnfunctions.h"
|
||||
|
||||
/**
|
||||
* @ingroup groupNN
|
||||
*/
|
||||
|
||||
/**
|
||||
* @addtogroup NNConv
|
||||
* @{
|
||||
*/
|
||||
|
||||
/**
|
||||
 * @brief Fast Q15 convolution function (non-square shape)
|
||||
* @param[in] Im_in pointer to input tensor
|
||||
 * @param[in]       dim_im_in_x   input tensor dimension x
 * @param[in]       dim_im_in_y   input tensor dimension y
|
||||
* @param[in] ch_im_in number of input tensor channels
|
||||
* @param[in] wt pointer to kernel weights
|
||||
* @param[in] ch_im_out number of filters, i.e., output tensor channels
|
||||
* @param[in] dim_kernel_x filter kernel size x
|
||||
* @param[in] dim_kernel_y filter kernel size y
|
||||
* @param[in] padding_x padding size x
|
||||
* @param[in] padding_y padding size y
|
||||
* @param[in] stride_x convolution stride x
|
||||
* @param[in] stride_y convolution stride y
|
||||
* @param[in] bias pointer to bias
|
||||
* @param[in] bias_shift amount of left-shift for bias
|
||||
* @param[in] out_shift amount of right-shift for output
|
||||
* @param[in,out] Im_out pointer to output tensor
|
||||
* @param[in] dim_im_out_x output tensor dimension x
|
||||
* @param[in] dim_im_out_y output tensor dimension y
|
||||
* @param[in,out] bufferA pointer to buffer space for input
|
||||
* @param[in,out] bufferB pointer to buffer space for output
|
||||
* @return The function returns either
|
||||
* <code>ARM_MATH_SIZE_MISMATCH</code> or <code>ARM_MATH_SUCCESS</code> based on the outcome of size checking.
|
||||
*
|
||||
* @details
|
||||
*
|
||||
* <b>Buffer size:</b>
|
||||
*
|
||||
 * bufferA size: 2*ch_im_in*dim_kernel_x*dim_kernel_y
|
||||
*
|
||||
* bufferB size: 0
|
||||
*
|
||||
* <b>Input dimension constraints:</b>
|
||||
*
|
||||
* ch_im_in is multiple of 2
|
||||
*
|
||||
 * ch_im_out is multiple of 2
|
||||
*
|
||||
*/
|
||||
|
||||
arm_status
|
||||
arm_convolve_HWC_q15_fast_nonsquare(const q15_t * Im_in,
|
||||
const uint16_t dim_im_in_x,
|
||||
const uint16_t dim_im_in_y,
|
||||
const uint16_t ch_im_in,
|
||||
const q15_t * wt,
|
||||
const uint16_t ch_im_out,
|
||||
const uint16_t dim_kernel_x,
|
||||
const uint16_t dim_kernel_y,
|
||||
const uint16_t padding_x,
|
||||
const uint16_t padding_y,
|
||||
const uint16_t stride_x,
|
||||
const uint16_t stride_y,
|
||||
const q15_t * bias,
|
||||
const uint16_t bias_shift,
|
||||
const uint16_t out_shift,
|
||||
q15_t * Im_out,
|
||||
const uint16_t dim_im_out_x,
|
||||
const uint16_t dim_im_out_y,
|
||||
q15_t * bufferA,
|
||||
q7_t * bufferB)
|
||||
{
|
||||
|
||||
#if defined (ARM_MATH_DSP)
|
||||
int16_t i_out_y, i_out_x, i_ker_y, i_ker_x;
|
||||
|
||||
q15_t *pBuffer = bufferA;
|
||||
q15_t *im_buffer = bufferA;
|
||||
q15_t *pOut = Im_out;
|
||||
|
||||
if (ch_im_in % 2 != 0 || ch_im_out % 2 != 0)
|
||||
{
|
||||
/* check if the input dimension meets the constraints */
|
||||
return ARM_MATH_SIZE_MISMATCH;
|
||||
}
|
||||
|
||||
/* Run the following code for Cortex-M4 and Cortex-M7 */
|
||||
|
||||
/* This part implements the im2col function */
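/* Explanatory note: im2col unrolls the receptive field of each output pixel into one column of
 * bufferA (zero-filling out-of-bounds taps), so the convolution below reduces to a matrix
 * multiply between the filter rows and the buffered columns. */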
|
||||
for (i_out_y = 0; i_out_y < dim_im_out_y; i_out_y++)
|
||||
{
|
||||
for (i_out_x = 0; i_out_x < dim_im_out_x; i_out_x++)
|
||||
{
|
||||
for (i_ker_y = i_out_y * stride_y - padding_y; i_ker_y < i_out_y * stride_y - padding_y + dim_kernel_y; i_ker_y++)
|
||||
{
|
||||
for (i_ker_x = i_out_x * stride_x - padding_x; i_ker_x < i_out_x * stride_x - padding_x + dim_kernel_x; i_ker_x++)
|
||||
{
|
||||
if (i_ker_y < 0 || i_ker_y >= dim_im_in_y || i_ker_x < 0 || i_ker_x >= dim_im_in_x)
|
||||
{
|
||||
/* arm_fill_q15(0, pBuffer, ch_im_in); */
|
||||
memset(pBuffer, 0, sizeof(q15_t)*ch_im_in);
|
||||
} else
|
||||
{
|
||||
/* arm_copy_q15((q15_t *) Im_in + (i_ker_y * dim_im_in_x + i_ker_x) * ch_im_in, pBuffer, ch_im_in); */
|
||||
memcpy(pBuffer, (q15_t *) Im_in + (i_ker_y * dim_im_in_x + i_ker_x) * ch_im_in, sizeof(q15_t)*ch_im_in);
|
||||
}
|
||||
pBuffer += ch_im_in;
|
||||
}
|
||||
}
|
||||
|
||||
if (i_out_x & 0x1)
|
||||
{
|
||||
int i;
|
||||
/* initialize the matrix pointers for A */
|
||||
const q15_t *pA = wt;
|
||||
|
||||
/* set up the second output pointers */
|
||||
q15_t *pOut2 = pOut + ch_im_out;
|
||||
|
||||
/* this loop over rows in A */
|
||||
for (i = 0; i < ch_im_out; i += 2)
|
||||
{
|
||||
/* setup pointers for B */
|
||||
q15_t *pB = im_buffer;
|
||||
const q15_t *pB2 = pB + ch_im_in * dim_kernel_y * dim_kernel_x;
|
||||
|
||||
/* align the second pointer for A */
|
||||
const q15_t *pA2 = pA + ch_im_in * dim_kernel_y * dim_kernel_x;
|
||||
|
||||
/* init the sum with bias */
|
||||
q31_t sum = ((q31_t)bias[i] << bias_shift) + NN_ROUND(out_shift);
|
||||
q31_t sum2 = ((q31_t)bias[i] << bias_shift) + NN_ROUND(out_shift);
|
||||
q31_t sum3 = ((q31_t)bias[i + 1] << bias_shift) + NN_ROUND(out_shift);
|
||||
q31_t sum4 = ((q31_t)bias[i + 1] << bias_shift) + NN_ROUND(out_shift);
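/* Explanatory note: the bias is pre-shifted into the accumulator's fixed-point format, and
 * NN_ROUND(out_shift) adds the 0.5-LSB offset consumed by the final right shift by out_shift,
 * so the outputs are rounded rather than truncated. */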
|
||||
|
||||
uint16_t colCnt = ch_im_in * dim_kernel_y * dim_kernel_x >> 1;
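/* Each __SIMD32 read consumes two q15 values per operand, so the vector loop below runs over
 * half the im2col column length; any odd leftover element is handled by the scalar loop that
 * follows. */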
|
||||
/* accumulate over the vector */
|
||||
while (colCnt)
|
||||
{
|
||||
q31_t inA1 = *__SIMD32(pA)++;
|
||||
q31_t inB1 = *__SIMD32(pB)++;
|
||||
q31_t inA2 = *__SIMD32(pA2)++;
|
||||
q31_t inB2 = *__SIMD32(pB2)++;
|
||||
|
||||
sum = __SMLAD(inA1, inB1, sum);
|
||||
sum2 = __SMLAD(inA1, inB2, sum2);
|
||||
sum3 = __SMLAD(inA2, inB1, sum3);
|
||||
sum4 = __SMLAD(inA2, inB2, sum4);
|
||||
|
||||
colCnt--;
|
||||
} /* while over colCnt */
|
||||
colCnt = ch_im_in * dim_kernel_y * dim_kernel_x & 0x1;
|
||||
while (colCnt)
|
||||
{
|
||||
q15_t inA1 = *pA++;
|
||||
q15_t inB1 = *pB++;
|
||||
q15_t inA2 = *pA2++;
|
||||
q15_t inB2 = *pB2++;
|
||||
|
||||
sum += inA1 * inB1;
|
||||
sum2 += inA1 * inB2;
|
||||
sum3 += inA2 * inB1;
|
||||
sum4 += inA2 * inB2;
|
||||
colCnt--;
|
||||
} /* while over colCnt */
|
||||
*pOut++ = (q15_t) __SSAT(sum >> out_shift, 16);
|
||||
*pOut++ = (q15_t) __SSAT(sum3 >> out_shift, 16);
|
||||
*pOut2++ = (q15_t) __SSAT(sum2 >> out_shift, 16);
|
||||
*pOut2++ = (q15_t) __SSAT(sum4 >> out_shift, 16);
|
||||
|
||||
/* skip the row computed with A2 */
|
||||
pA += ch_im_in * dim_kernel_y * dim_kernel_x;
|
||||
} /* for over ch_im_out */
|
||||
|
||||
pOut += ch_im_out;
|
||||
/* counter reset */
|
||||
pBuffer = im_buffer;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#else
|
||||
/* Run the following code as reference implementation for Cortex-M0 and Cortex-M3 */
|
||||
uint16_t i, j, k, l, m, n;
|
||||
int conv_out;
|
||||
signed char in_row, in_col;
|
||||
|
||||
if (ch_im_in % 2 != 0 || ch_im_out % 2 != 0)
|
||||
{
|
||||
/* check if the input dimension meets the constraints */
|
||||
return ARM_MATH_SIZE_MISMATCH;
|
||||
}
|
||||
|
||||
for (i = 0; i < ch_im_out; i++)
|
||||
{
|
||||
for (j = 0; j < dim_im_out_y; j++)
|
||||
{
|
||||
for (k = 0; k < dim_im_out_x; k++)
|
||||
{
|
||||
conv_out = ((q31_t)bias[i] << bias_shift) + NN_ROUND(out_shift);
|
||||
for (m = 0; m < dim_kernel_y; m++)
|
||||
{
|
||||
for (n = 0; n < dim_kernel_x; n++)
|
||||
{
|
||||
in_row = stride_y * j + m - padding_y;
|
||||
in_col = stride_x * k + n - padding_x;
|
||||
if (in_row >= 0 && in_col >= 0 && in_row < dim_im_in_y && in_col < dim_im_in_x)
|
||||
{
|
||||
for (l = 0; l < ch_im_in; l++)
|
||||
{
|
||||
conv_out +=
|
||||
Im_in[(in_row * dim_im_in_x + in_col) * ch_im_in +
|
||||
l] * wt[i * ch_im_in * dim_kernel_x * dim_kernel_y + (m * dim_kernel_x +
|
||||
n) * ch_im_in + l];
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
Im_out[i + (j * dim_im_out_x + k) * ch_im_out] = (q15_t) __SSAT((conv_out >> out_shift), 16);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#endif /* ARM_MATH_DSP */
|
||||
|
||||
/* Return to application */
|
||||
return ARM_MATH_SUCCESS;
|
||||
}
|
||||
|
||||
/**
|
||||
* @} end of NNConv group
|
||||
*/
|
||||
@ -273,19 +273,27 @@ error:
|
||||
return res;
|
||||
}
|
||||
|
||||
void nn_transform_input(data_layer_t *data_layer, image_t *img, q7_t *input_data)
|
||||
#ifndef __SSAT
|
||||
#define __SSAT(a, b) ({ __typeof__ (a) _a = (a); \
|
||||
__typeof__ (b) _b = (b); \
|
||||
_b = 1 << (_b - 1); \
|
||||
_a = _a < (_b - 1) ? _a : (_b - 1); \
|
||||
_a > (-_b) ? _a : (-_b); })
|
||||
#endif
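/* The fallback above mirrors the ARM __SSAT intrinsic: it clamps a to the signed b-bit range
 * [-(2^(b-1)), 2^(b-1) - 1], so builds without the CMSIS intrinsic saturate the same way the
 * Cortex-M instruction does. */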
|
||||
|
||||
void nn_transform_input(data_layer_t *data_layer, image_t *img, q7_t *input_data, rectangle_t *roi)
|
||||
{
|
||||
int input_scale = data_layer->scale;
|
||||
// scale, convert and normalize input image.
|
||||
int x_ratio = (int)((img->w<<16)/data_layer->w)+1;
|
||||
int y_ratio = (int)((img->h<<16)/data_layer->h)+1;
|
||||
// Scale, convert and normalize input image.
|
||||
int x_ratio = (int)((roi->w<<16)/data_layer->w)+1;
|
||||
int y_ratio = (int)((roi->h<<16)/data_layer->h)+1;
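// Explanatory note: x_ratio/y_ratio are 16.16 fixed-point step sizes used to nearest-neighbor
// scale the ROI to the network's input width/height; each sampled pixel is then mean-subtracted,
// scaled to Q7 (<<7), rounded and right-shifted by input_scale in the loops below.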
|
||||
|
||||
if (img->bpp == 2 && data_layer->c == 3) { //RGB565 to RGB888
|
||||
if ((img->bpp == 2) && (data_layer->c == 3)) { // RGB565 to RGB888
|
||||
for (int y=0, i=0; y<data_layer->h; y++) {
|
||||
int sy = (y*y_ratio)>>16;
|
||||
for (int x=0; x<data_layer->w; x++, i+=3) {
|
||||
int sx = (x*x_ratio)>>16;
|
||||
uint16_t p = IM_GET_RGB565_PIXEL(img, sx, sy);
|
||||
uint16_t p = IM_GET_RGB565_PIXEL(img, sx+roi->x, sy+roi->y);
|
||||
input_data[i+0] = (q7_t)__SSAT((((((int) COLOR_RGB565_TO_R8(p))
|
||||
- (int) data_layer->r_mean)<<7) + (1<<(input_scale-1))) >> input_scale, 8);
|
||||
input_data[i+1] = (q7_t)__SSAT((((((int) COLOR_RGB565_TO_G8(p))
|
||||
@ -294,17 +302,17 @@ void nn_transform_input(data_layer_t *data_layer, image_t *img, q7_t *input_data
|
||||
- (int) data_layer->b_mean)<<7) + (1<<(input_scale-1))) >> input_scale, 8);
|
||||
}
|
||||
}
|
||||
} else if (img->bpp == 2 && data_layer->c == 1) { //RGB565 to GS
|
||||
} else if ((img->bpp == 2) && (data_layer->c == 1)) { // RGB565 to GS
|
||||
for (int y=0, i=0; y<data_layer->h; y++) {
|
||||
int sy = (y*y_ratio)>>16;
|
||||
for (int x=0; x<data_layer->w; x++, i++) {
|
||||
int sx = (x*x_ratio)>>16;
|
||||
uint16_t p = IM_GET_RGB565_PIXEL(img, sx, sy);
|
||||
uint16_t p = IM_GET_RGB565_PIXEL(img, sx+roi->x, sy+roi->y);
|
||||
input_data[i] = (q7_t)__SSAT((((((int) COLOR_RGB565_TO_GRAYSCALE(p))
|
||||
- (int) data_layer->r_mean)<<7) + (1<<(input_scale-1))) >> input_scale, 8);
|
||||
}
|
||||
}
|
||||
} else if (img->bpp == 1 && data_layer->c == 3) { //GS to RGB88
|
||||
} else if ((img->bpp == 1) && (data_layer->c == 3)) { // GS to RGB888
|
||||
int mean = (int) ((0.30f * data_layer->r_mean) +
|
||||
(0.59f * data_layer->g_mean) +
|
||||
(0.11f * data_layer->b_mean));
|
||||
@ -312,25 +320,48 @@ void nn_transform_input(data_layer_t *data_layer, image_t *img, q7_t *input_data
|
||||
int sy = (y*y_ratio)>>16;
|
||||
for (int x=0; x<data_layer->w; x++, i+=3) {
|
||||
int sx = (x*x_ratio)>>16;
|
||||
int p = (int) IMAGE_GET_GRAYSCALE_PIXEL(img, sx, sy);
|
||||
int p = (int) IMAGE_GET_GRAYSCALE_PIXEL(img, sx+roi->x, sy+roi->y);
|
||||
input_data[i+0] = (q7_t)__SSAT((((p - (int) mean)<<7) + (1<<(input_scale-1))) >> input_scale, 8);
|
||||
input_data[i+1] = (q7_t)__SSAT((((p - (int) mean)<<7) + (1<<(input_scale-1))) >> input_scale, 8);
|
||||
input_data[i+2] = (q7_t)__SSAT((((p - (int) mean)<<7) + (1<<(input_scale-1))) >> input_scale, 8);
|
||||
}
|
||||
}
|
||||
} else if (img->bpp == 1 && data_layer->c == 1) { //GS to GS
|
||||
} else if ((img->bpp == 1) && (data_layer->c == 1)) { // GS to GS
|
||||
for (int y=0, i=0; y<data_layer->h; y++) {
|
||||
int sy = (y*y_ratio)>>16;
|
||||
for (int x=0; x<data_layer->w; x++, i++) {
|
||||
int sx = (x*x_ratio)>>16;
|
||||
int p = (int) IMAGE_GET_GRAYSCALE_PIXEL(img, sx, sy);
|
||||
int p = (int) IMAGE_GET_GRAYSCALE_PIXEL(img, sx+roi->x, sy+roi->y);
|
||||
input_data[i] = (q7_t)__SSAT((((p - (int) data_layer->r_mean)<<7) + (1<<(input_scale-1))) >> input_scale, 8);
|
||||
}
|
||||
}
|
||||
} else if ((img->bpp == 0) && (data_layer->c == 3)) { // BINARY to RGB88
|
||||
int mean = (int) ((0.30f * data_layer->r_mean) +
|
||||
(0.59f * data_layer->g_mean) +
|
||||
(0.11f * data_layer->b_mean));
|
||||
for (int y=0, i=0; y<data_layer->h; y++) {
|
||||
int sy = (y*y_ratio)>>16;
|
||||
for (int x=0; x<data_layer->w; x++, i+=3) {
|
||||
int sx = (x*x_ratio)>>16;
|
||||
int p = (int) COLOR_BINARY_TO_GRAYSCALE(IMAGE_GET_BINARY_PIXEL(img, sx+roi->x, sy+roi->y));
|
||||
input_data[i+0] = (q7_t)__SSAT((((p - (int) mean)<<7) + (1<<(input_scale-1))) >> input_scale, 8);
|
||||
input_data[i+1] = (q7_t)__SSAT((((p - (int) mean)<<7) + (1<<(input_scale-1))) >> input_scale, 8);
|
||||
input_data[i+2] = (q7_t)__SSAT((((p - (int) mean)<<7) + (1<<(input_scale-1))) >> input_scale, 8);
|
||||
}
|
||||
}
|
||||
} else if ((img->bpp == 0) && (data_layer->c == 1)) { // BINARY to GS
|
||||
for (int y=0, i=0; y<data_layer->h; y++) {
|
||||
int sy = (y*y_ratio)>>16;
|
||||
for (int x=0; x<data_layer->w; x++, i++) {
|
||||
int sx = (x*x_ratio)>>16;
|
||||
int p = (int) COLOR_BINARY_TO_GRAYSCALE(IMAGE_GET_BINARY_PIXEL(img, sx+roi->x, sy+roi->y));
|
||||
input_data[i] = (q7_t)__SSAT((((p - (int) data_layer->r_mean)<<7) + (1<<(input_scale-1))) >> input_scale, 8);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
int nn_run_network(nn_t *net, image_t *img, bool softmax)
|
||||
int nn_run_network(nn_t *net, image_t *img, rectangle_t *roi, bool softmax)
|
||||
{
|
||||
uint32_t layer_idx = 0;
|
||||
layer_t *layer = net->layers;
|
||||
@ -360,7 +391,7 @@ int nn_run_network(nn_t *net, image_t *img, bool softmax)
|
||||
case LAYER_TYPE_DATA: {
|
||||
data_layer_t *data_layer = (data_layer_t *) layer;
|
||||
input_data = fb_alloc(data_layer->c * data_layer->h * data_layer->w);
|
||||
nn_transform_input(data_layer, img, input_data);
|
||||
nn_transform_input(data_layer, img, input_data, roi);
|
||||
// Set image data as input buffer for the next layer.
|
||||
input_buffer = input_data;
|
||||
output_buffer = buffer1;
|
||||
|
||||
@ -100,6 +100,6 @@ typedef void (*pool_func_t)(q7_t * Im_in, const uint16_t dim_im_in, const uint16
|
||||
|
||||
int nn_dump_network(nn_t *net);
|
||||
int nn_load_network(nn_t *net, const char *path);
|
||||
int nn_run_network(nn_t *net, image_t *img, bool softmax);
|
||||
int nn_run_network(nn_t *net, image_t *img, rectangle_t *roi, bool softmax);
|
||||
int nn_dry_run_network(nn_t *net, image_t *img, bool softmax);
|
||||
#endif //#define __CNN_H__
|
||||
|
||||
@ -1,22 +1,15 @@
|
||||
/*
|
||||
* This file is part of the OpenMV project.
|
||||
* Copyright (c) 2013/2014 Ibrahim Abdelkader <i.abdalkader@gmail.com>
|
||||
/* This file is part of the OpenMV project.
|
||||
* Copyright (c) 2013-2018 Ibrahim Abdelkader <iabdalkader@openmv.io> & Kwabena W. Agyeman <kwagyeman@openmv.io>
|
||||
* This work is licensed under the MIT license, see the file LICENSE for details.
|
||||
*
|
||||
* NN module.
|
||||
*
|
||||
*/
|
||||
#include "mp.h"
|
||||
#include <mp.h>
|
||||
#include "nn.h"
|
||||
#include "imlib.h"
|
||||
#include "xalloc.h"
|
||||
#include "py_image.h"
|
||||
#include "py_helper.h"
|
||||
#include "py_assert.h"
|
||||
#include "py_image.h"
|
||||
#include "omv_boardconfig.h"
|
||||
#include <stdbool.h>
|
||||
|
||||
#ifdef IMLIB_ENABLE_CNN
|
||||
|
||||
static const mp_obj_type_t py_net_type;
|
||||
|
||||
typedef struct _py_net_obj_t {
|
||||
@ -24,31 +17,10 @@ typedef struct _py_net_obj_t {
|
||||
nn_t _cobj;
|
||||
} py_net_obj_t;
|
||||
|
||||
void *py_net_cobj(mp_obj_t net)
|
||||
void *py_net_cobj(mp_obj_t net_obj)
|
||||
{
|
||||
PY_ASSERT_TYPE(net, &py_net_type);
|
||||
return &((py_net_obj_t *)net)->_cobj;
|
||||
}
|
||||
|
||||
STATIC mp_obj_t py_net_forward(uint n_args, const mp_obj_t *args, mp_map_t *kw_args)
|
||||
{
|
||||
nn_t *net = py_net_cobj(args[0]);
|
||||
image_t *img = py_helper_arg_to_image_mutable(args[1]);
|
||||
|
||||
mp_obj_t output_list = mp_obj_new_list(0, NULL);
|
||||
bool softmax = py_helper_keyword_int(n_args, args, 2, kw_args, MP_OBJ_NEW_QSTR(MP_QSTR_softmax), false);
|
||||
bool dry_run = py_helper_keyword_int(n_args, args, 3, kw_args, MP_OBJ_NEW_QSTR(MP_QSTR_dry_run), false);
|
||||
|
||||
if (dry_run == false) {
|
||||
nn_run_network(net, img, softmax);
|
||||
} else {
|
||||
nn_dry_run_network(net, img, softmax);
|
||||
}
|
||||
|
||||
for (int i=0; i<net->output_size; i++) {
|
||||
mp_obj_list_append(output_list, mp_obj_new_int(net->output_data[i]));
|
||||
}
|
||||
return output_list;
|
||||
PY_ASSERT_TYPE(net_obj, &py_net_type);
|
||||
return &((py_net_obj_t *)net_obj)->_cobj;
|
||||
}
|
||||
|
||||
STATIC void py_net_print(const mp_print_t *print, mp_obj_t self_in, mp_print_kind_t kind)
|
||||
@ -57,12 +29,343 @@ STATIC void py_net_print(const mp_print_t *print, mp_obj_t self_in, mp_print_kin
|
||||
nn_dump_network(py_net_cobj(self));
|
||||
}
|
||||
|
||||
STATIC mp_obj_t py_net_forward(uint n_args, const mp_obj_t *args, mp_map_t *kw_args)
|
||||
{
|
||||
nn_t *net = py_net_cobj(args[0]);
|
||||
image_t *img = py_helper_arg_to_image_mutable(args[1]);
|
||||
|
||||
rectangle_t roi;
|
||||
py_helper_keyword_rectangle_roi(img, n_args, args, 2, kw_args, &roi);
|
||||
|
||||
bool softmax = py_helper_keyword_int(n_args, args, 3, kw_args, MP_OBJ_NEW_QSTR(MP_QSTR_softmax), false);
|
||||
bool dry_run = py_helper_keyword_int(n_args, args, 4, kw_args, MP_OBJ_NEW_QSTR(MP_QSTR_dry_run), false);
|
||||
|
||||
mp_obj_t output_list = mp_obj_new_list(0, NULL);
|
||||
fb_alloc_mark();
|
||||
|
||||
if (dry_run == false) {
|
||||
nn_run_network(net, img, &roi, softmax);
|
||||
} else {
|
||||
nn_dry_run_network(net, img, softmax);
|
||||
}
|
||||
|
||||
for (int i=0; i<net->output_size; i++) {
|
||||
mp_obj_list_append(output_list, mp_obj_new_int(net->output_data[i]));
|
||||
}
|
||||
|
||||
fb_alloc_free_till_mark();
|
||||
return output_list;
|
||||
}
|
||||
STATIC MP_DEFINE_CONST_FUN_OBJ_KW(py_net_forward_obj, 2, py_net_forward);
|
||||
|
||||
// NN Class Object
|
||||
#define py_nn_class_obj_size 6
|
||||
typedef struct py_nn_class_obj {
|
||||
mp_obj_base_t base;
|
||||
mp_obj_t x, y, w, h, index, value;
|
||||
} py_nn_class_obj_t;
|
||||
|
||||
static const mp_map_elem_t locals_dict_table[] = {
|
||||
{ MP_OBJ_NEW_QSTR(MP_QSTR_forward), (mp_obj_t)&py_net_forward_obj},
|
||||
{ NULL, NULL },
|
||||
static void py_nn_class_print(const mp_print_t *print, mp_obj_t self_in, mp_print_kind_t kind)
|
||||
{
|
||||
py_nn_class_obj_t *self = self_in;
|
||||
mp_printf(print,
|
||||
"{\"x\":%d, \"y\":%d, \"w\":%d, \"h\":%d, \"index\":%d, \"value\":%f}",
|
||||
mp_obj_get_int(self->x),
|
||||
mp_obj_get_int(self->y),
|
||||
mp_obj_get_int(self->w),
|
||||
mp_obj_get_int(self->h),
|
||||
mp_obj_get_int(self->index),
|
||||
(double) mp_obj_get_float(self->value));
|
||||
}
|
||||
|
||||
static mp_obj_t py_nn_class_subscr(mp_obj_t self_in, mp_obj_t index, mp_obj_t value)
|
||||
{
|
||||
if (value == MP_OBJ_SENTINEL) { // load
|
||||
py_nn_class_obj_t *self = self_in;
|
||||
if (MP_OBJ_IS_TYPE(index, &mp_type_slice)) {
|
||||
mp_bound_slice_t slice;
|
||||
if (!mp_seq_get_fast_slice_indexes(py_nn_class_obj_size, index, &slice)) {
|
||||
nlr_raise(mp_obj_new_exception_msg(&mp_type_OSError, "only slices with step=1 (aka None) are supported"));
|
||||
}
|
||||
mp_obj_tuple_t *result = mp_obj_new_tuple(slice.stop - slice.start, NULL);
|
||||
mp_seq_copy(result->items, &(self->x) + slice.start, result->len, mp_obj_t);
|
||||
return result;
|
||||
}
|
||||
switch (mp_get_index(self->base.type, py_nn_class_obj_size, index, false)) {
|
||||
case 0: return self->x;
|
||||
case 1: return self->y;
|
||||
case 2: return self->w;
|
||||
case 3: return self->h;
|
||||
case 4: return self->index;
|
||||
case 5: return self->value;
|
||||
}
|
||||
}
|
||||
return MP_OBJ_NULL; // op not supported
|
||||
}
|
||||
|
||||
mp_obj_t py_nn_class_rect(mp_obj_t self_in)
|
||||
{
|
||||
return mp_obj_new_tuple(4, (mp_obj_t []) {((py_nn_class_obj_t *) self_in)->x,
|
||||
((py_nn_class_obj_t *) self_in)->y,
|
||||
((py_nn_class_obj_t *) self_in)->w,
|
||||
((py_nn_class_obj_t *) self_in)->h});
|
||||
}
|
||||
|
||||
mp_obj_t py_nn_class_x(mp_obj_t self_in) { return ((py_nn_class_obj_t *) self_in)->x; }
|
||||
mp_obj_t py_nn_class_y(mp_obj_t self_in) { return ((py_nn_class_obj_t *) self_in)->y; }
|
||||
mp_obj_t py_nn_class_w(mp_obj_t self_in) { return ((py_nn_class_obj_t *) self_in)->w; }
|
||||
mp_obj_t py_nn_class_h(mp_obj_t self_in) { return ((py_nn_class_obj_t *) self_in)->h; }
|
||||
mp_obj_t py_nn_class_index(mp_obj_t self_in) { return ((py_nn_class_obj_t *) self_in)->index; }
|
||||
mp_obj_t py_nn_class_value(mp_obj_t self_in) { return ((py_nn_class_obj_t *) self_in)->value; }
|
||||
|
||||
STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_nn_class_rect_obj, py_nn_class_rect);
|
||||
STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_nn_class_x_obj, py_nn_class_x);
|
||||
STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_nn_class_y_obj, py_nn_class_y);
|
||||
STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_nn_class_w_obj, py_nn_class_w);
|
||||
STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_nn_class_h_obj, py_nn_class_h);
|
||||
STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_nn_class_index_obj, py_nn_class_index);
|
||||
STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_nn_class_value_obj, py_nn_class_value);
|
||||
|
||||
STATIC const mp_rom_map_elem_t py_nn_class_locals_dict_table[] = {
|
||||
{ MP_ROM_QSTR(MP_QSTR_rect), MP_ROM_PTR(&py_nn_class_rect_obj) },
|
||||
{ MP_ROM_QSTR(MP_QSTR_x), MP_ROM_PTR(&py_nn_class_x_obj) },
|
||||
{ MP_ROM_QSTR(MP_QSTR_y), MP_ROM_PTR(&py_nn_class_y_obj) },
|
||||
{ MP_ROM_QSTR(MP_QSTR_w), MP_ROM_PTR(&py_nn_class_w_obj) },
|
||||
{ MP_ROM_QSTR(MP_QSTR_h), MP_ROM_PTR(&py_nn_class_h_obj) },
|
||||
{ MP_ROM_QSTR(MP_QSTR_index), MP_ROM_PTR(&py_nn_class_index_obj) },
|
||||
{ MP_ROM_QSTR(MP_QSTR_value), MP_ROM_PTR(&py_nn_class_value_obj) }
|
||||
};
|
||||
|
||||
STATIC MP_DEFINE_CONST_DICT(py_nn_class_locals_dict, py_nn_class_locals_dict_table);
|
||||
|
||||
static const mp_obj_type_t py_nn_class_type = {
|
||||
{ &mp_type_type },
|
||||
.name = MP_QSTR_nn_class,
|
||||
.print = py_nn_class_print,
|
||||
.subscr = py_nn_class_subscr,
|
||||
.locals_dict = (mp_obj_t) &py_nn_class_locals_dict
|
||||
};
|
||||
|
||||
typedef struct py_nn_class_obj_list_lnk_data {
|
||||
rectangle_t rect;
|
||||
int index;
|
||||
float value;
|
||||
int merge_number;
|
||||
} py_nn_class_obj_list_lnk_data_t;
|
||||
|
||||
STATIC mp_obj_t py_net_search(uint n_args, const mp_obj_t *args, mp_map_t *kw_args)
|
||||
{
|
||||
nn_t *arg_net = py_net_cobj(args[0]);
|
||||
image_t *arg_img = py_helper_arg_to_image_mutable(args[1]);
|
||||
|
||||
rectangle_t roi;
|
||||
py_helper_keyword_rectangle_roi(arg_img, n_args, args, 2, kw_args, &roi);
|
||||
|
||||
float arg_threshold = py_helper_keyword_float(n_args, args, 3, kw_args, MP_OBJ_NEW_QSTR(MP_QSTR_threshold), 0.6);
|
||||
PY_ASSERT_TRUE_MSG((0 <= arg_threshold) && (arg_threshold <= 1), "0 <= threshold <= 1");
|
||||
|
||||
float arg_min_scale = py_helper_keyword_float(n_args, args, 4, kw_args, MP_OBJ_NEW_QSTR(MP_QSTR_min_scale), 1.0);
|
||||
PY_ASSERT_TRUE_MSG((0 < arg_min_scale) && (arg_min_scale <= 1), "0 < min_scale <= 1");
|
||||
|
||||
float arg_scale_mul = py_helper_keyword_float(n_args, args, 5, kw_args, MP_OBJ_NEW_QSTR(MP_QSTR_scale_mul), 0.5);
|
||||
PY_ASSERT_TRUE_MSG((0 <= arg_scale_mul) && (arg_scale_mul < 1), "0 <= scale_mul < 1");
|
||||
|
||||
float arg_x_overlap = py_helper_keyword_float(n_args, args, 6, kw_args, MP_OBJ_NEW_QSTR(MP_QSTR_x_overlap), 0);
|
||||
PY_ASSERT_TRUE_MSG(((0 <= arg_x_overlap) && (arg_x_overlap < 1)) || (arg_x_overlap == -1), "0 <= x_overlap < 1");
|
||||
|
||||
float arg_y_overlap = py_helper_keyword_float(n_args, args, 7, kw_args, MP_OBJ_NEW_QSTR(MP_QSTR_y_overlap), 0);
|
||||
PY_ASSERT_TRUE_MSG(((0 <= arg_y_overlap) && (arg_y_overlap < 1)) || (arg_y_overlap == -1), "0 <= y_overlap < 1");
|
||||
|
||||
float arg_contrast_threshold = py_helper_keyword_float(n_args, args, 8, kw_args, MP_OBJ_NEW_QSTR(MP_QSTR_contrast_threshold), 1);
|
||||
PY_ASSERT_TRUE_MSG(0 <= arg_contrast_threshold, "0 <= contrast_threshold");
|
||||
|
||||
bool softmax = py_helper_keyword_int(n_args, args, 9, kw_args, MP_OBJ_NEW_QSTR(MP_QSTR_softmax), false);
|
||||
|
||||
list_t out;
|
||||
list_init(&out, sizeof(py_nn_class_obj_list_lnk_data_t));
|
||||
|
||||
fb_alloc_mark();
|
||||
|
||||
for (float scale = 1; scale >= arg_min_scale; scale *= arg_scale_mul) {
|
||||
// Either provide a subtle offset to center multiple detection windows or center the only detection window.
|
||||
for (int y = roi.y + ((arg_y_overlap != -1) ? (fmodf(roi.h, (roi.h * scale)) / 2) : ((roi.h - (roi.h * scale)) / 2));
|
||||
// Finish when the detection window is outside of the ROI.
|
||||
(y + (roi.h * scale)) <= (roi.y + roi.h);
|
||||
// Step by an overlap amount accounting for scale or just terminate after one iteration.
|
||||
y += ((arg_y_overlap != -1) ? (roi.h * scale * (1 - arg_y_overlap)) : roi.h)) {
|
||||
// Either provide a subtle offset to center multiple detection windows or center the only detection window.
|
||||
for (int x = roi.x + ((arg_x_overlap != -1) ? (fmodf(roi.w, (roi.w * scale)) / 2) : ((roi.w - (roi.w * scale)) / 2));
|
||||
// Finish when the detection window is outside of the ROI.
|
||||
(x + (roi.w * scale)) <= (roi.x + roi.w);
|
||||
// Step by an overlap amount accounting for scale or just terminate after one iteration.
|
||||
x += ((arg_x_overlap != -1) ? (roi.w * scale * (1 - arg_x_overlap)) : roi.w)) {
|
||||
rectangle_t new_roi;
|
||||
rectangle_init(&new_roi, x, y, roi.w * scale, roi.h * scale);
|
||||
if (rectangle_overlap(&roi, &new_roi)) {
|
||||
|
||||
int sum = 0;
|
||||
int sum_2 = 0;
|
||||
for (int b = new_roi.y, bb = new_roi.y + new_roi.h, bbb = fast_sqrtf(new_roi.h); b < bb; b += bbb) {
|
||||
for (int a = new_roi.x, aa = new_roi.x + new_roi.w, aaa = fast_sqrtf(new_roi.w); a < aa; a += aaa) {
|
||||
switch(arg_img->bpp) {
    case IMAGE_BPP_BINARY: {
        int pixel = COLOR_BINARY_TO_GRAYSCALE(IMAGE_GET_BINARY_PIXEL(arg_img, a, b));
        sum += pixel;
        sum_2 += pixel * pixel;
        break;
    }
    case IMAGE_BPP_GRAYSCALE: {
        int pixel = IMAGE_GET_GRAYSCALE_PIXEL(arg_img, a, b);
        sum += pixel;
        sum_2 += pixel * pixel;
        break;
    }
    case IMAGE_BPP_RGB565: {
        int pixel = COLOR_RGB565_TO_GRAYSCALE(IMAGE_GET_RGB565_PIXEL(arg_img, a, b));
        sum += pixel;
        sum_2 += pixel * pixel;
        break;
    }
}
|
||||
}
|
||||
}
|
||||
|
||||
int area = new_roi.w * new_roi.h;
|
||||
int mean = sum / area;
|
||||
int variance = (sum_2 / area) - (mean * mean);
|
||||
|
||||
if (fast_sqrtf(variance) >= arg_contrast_threshold) { // Skip flat regions...
|
||||
nn_run_network(arg_net, arg_img, &new_roi, softmax);
|
||||
|
||||
int max_index = -1;
|
||||
float max_value = -1;
|
||||
for (int i=0; i<arg_net->output_size; i++) {
|
||||
float value = ((float) (arg_net->output_data[i] + 128)) / 255;
|
||||
if ((value >= arg_threshold) && (value > max_value)) {
|
||||
max_index = i;
|
||||
max_value = value;
|
||||
}
|
||||
}
|
||||
|
||||
if (max_index != -1) {
|
||||
py_nn_class_obj_list_lnk_data_t lnk_data;
|
||||
lnk_data.rect.x = new_roi.x;
|
||||
lnk_data.rect.y = new_roi.y;
|
||||
lnk_data.rect.w = new_roi.w;
|
||||
lnk_data.rect.h = new_roi.h;
|
||||
lnk_data.index = max_index;
|
||||
lnk_data.value = max_value;
|
||||
lnk_data.merge_number = 1;
|
||||
list_push_back(&out, &lnk_data);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fb_alloc_free_till_mark();
|
||||
|
||||
// Merge all overlapping and same detections and average them.
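// merge_number counts how many detections have been folded into lnk_data, so each merge below
// updates a running average of the rectangle coordinates and the classification score.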
|
||||
|
||||
for (;;) {
|
||||
bool merge_occured = false;
|
||||
|
||||
list_t out_temp;
|
||||
list_init(&out_temp, sizeof(py_nn_class_obj_list_lnk_data_t));
|
||||
|
||||
while (list_size(&out)) {
|
||||
py_nn_class_obj_list_lnk_data_t lnk_data;
|
||||
list_pop_front(&out, &lnk_data);
|
||||
|
||||
for (size_t k = 0, l = list_size(&out); k < l; k++) {
|
||||
py_nn_class_obj_list_lnk_data_t tmp_data;
|
||||
list_pop_front(&out, &tmp_data);
|
||||
|
||||
if ((lnk_data.index == tmp_data.index)
|
||||
&& rectangle_overlap(&(lnk_data.rect), &(tmp_data.rect))) {
|
||||
lnk_data.rect.x = ((lnk_data.rect.x * lnk_data.merge_number) + tmp_data.rect.x) / (lnk_data.merge_number + 1);
|
||||
lnk_data.rect.y = ((lnk_data.rect.y * lnk_data.merge_number) + tmp_data.rect.y) / (lnk_data.merge_number + 1);
|
||||
lnk_data.rect.w = ((lnk_data.rect.w * lnk_data.merge_number) + tmp_data.rect.w) / (lnk_data.merge_number + 1);
|
||||
lnk_data.rect.h = ((lnk_data.rect.h * lnk_data.merge_number) + tmp_data.rect.h) / (lnk_data.merge_number + 1);
|
||||
lnk_data.value = ((lnk_data.value * lnk_data.merge_number) + tmp_data.value) / (lnk_data.merge_number + 1);
|
||||
lnk_data.merge_number += 1;
|
||||
merge_occured = true;
|
||||
} else {
|
||||
list_push_back(&out, &tmp_data);
|
||||
}
|
||||
}
|
||||
|
||||
list_push_back(&out_temp, &lnk_data);
|
||||
}
|
||||
|
||||
list_copy(&out, &out_temp);
|
||||
|
||||
if (!merge_occured) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
// Determine the winner between overlapping different class detections.
|
||||
|
||||
for (;;) {
|
||||
bool merge_occured = false;
|
||||
|
||||
list_t out_temp;
|
||||
list_init(&out_temp, sizeof(py_nn_class_obj_list_lnk_data_t));
|
||||
|
||||
while (list_size(&out)) {
|
||||
py_nn_class_obj_list_lnk_data_t lnk_data;
|
||||
list_pop_front(&out, &lnk_data);
|
||||
|
||||
for (size_t k = 0, l = list_size(&out); k < l; k++) {
|
||||
py_nn_class_obj_list_lnk_data_t tmp_data;
|
||||
list_pop_front(&out, &tmp_data);
|
||||
|
||||
if ((lnk_data.index != tmp_data.index)
|
||||
&& rectangle_overlap(&(lnk_data.rect), &(tmp_data.rect))) {
|
||||
if (tmp_data.value > lnk_data.value) {
|
||||
memcpy(&lnk_data, &tmp_data, sizeof(py_nn_class_obj_list_lnk_data_t));
|
||||
}
|
||||
|
||||
merge_occured = true;
|
||||
} else {
|
||||
list_push_back(&out, &tmp_data);
|
||||
}
|
||||
}
|
||||
|
||||
list_push_back(&out_temp, &lnk_data);
|
||||
}
|
||||
|
||||
list_copy(&out, &out_temp);
|
||||
|
||||
if (!merge_occured) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
mp_obj_list_t *objects_list = mp_obj_new_list(list_size(&out), NULL);
|
||||
|
||||
for (size_t i = 0; list_size(&out); i++) {
|
||||
py_nn_class_obj_list_lnk_data_t lnk_data;
|
||||
list_pop_front(&out, &lnk_data);
|
||||
|
||||
py_nn_class_obj_t *o = m_new_obj(py_nn_class_obj_t);
|
||||
o->base.type = &py_nn_class_type;
|
||||
o->x = mp_obj_new_int(lnk_data.rect.x);
|
||||
o->y = mp_obj_new_int(lnk_data.rect.y);
|
||||
o->w = mp_obj_new_int(lnk_data.rect.w);
|
||||
o->h = mp_obj_new_int(lnk_data.rect.h);
|
||||
o->index = mp_obj_new_int(lnk_data.index);
|
||||
o->value = mp_obj_new_float(lnk_data.value);
|
||||
|
||||
objects_list->items[i] = o;
|
||||
}
|
||||
|
||||
return objects_list;
|
||||
}
|
||||
STATIC MP_DEFINE_CONST_FUN_OBJ_KW(py_net_search_obj, 2, py_net_search);
|
||||
|
||||
STATIC const mp_rom_map_elem_t locals_dict_table[] = {
|
||||
{ MP_ROM_QSTR(MP_QSTR_forward), MP_ROM_PTR(&py_net_forward_obj) },
|
||||
{ MP_ROM_QSTR(MP_QSTR_search), MP_ROM_PTR(&py_net_search_obj) }
|
||||
};
|
||||
|
||||
STATIC MP_DEFINE_CONST_DICT(locals_dict, locals_dict_table);
|
||||
@ -71,28 +374,28 @@ static const mp_obj_type_t py_net_type = {
|
||||
{ &mp_type_type },
|
||||
.name = MP_QSTR_Net,
|
||||
.print = py_net_print,
|
||||
.locals_dict = (mp_obj_t)&locals_dict,
|
||||
.locals_dict = (mp_obj_t) &locals_dict
|
||||
};
|
||||
|
||||
static mp_obj_t py_nn_load(mp_obj_t path_obj)
|
||||
{
|
||||
py_net_obj_t *net = NULL;
|
||||
const char *path = mp_obj_str_get_str(path_obj);
|
||||
net = m_new_obj(py_net_obj_t);
|
||||
py_net_obj_t *net = m_new_obj(py_net_obj_t);
|
||||
net->base.type = &py_net_type;
|
||||
nn_load_network(py_net_cobj(net), path);
|
||||
return net;
|
||||
}
|
||||
|
||||
STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_nn_load_obj, py_nn_load);
|
||||
|
||||
#endif // IMLIB_ENABLE_CNN
|
||||
|
||||
static const mp_map_elem_t globals_dict_table[] = {
|
||||
{ MP_OBJ_NEW_QSTR(MP_QSTR___name__), MP_OBJ_NEW_QSTR(MP_QSTR_nn) },
|
||||
STATIC const mp_rom_map_elem_t globals_dict_table[] = {
|
||||
{ MP_ROM_QSTR(MP_QSTR___name__), MP_OBJ_NEW_QSTR(MP_QSTR_nn) },
|
||||
#ifdef IMLIB_ENABLE_CNN
|
||||
{ MP_OBJ_NEW_QSTR(MP_QSTR_load), (mp_obj_t)&py_nn_load_obj },
|
||||
{ MP_ROM_QSTR(MP_QSTR_load), MP_ROM_PTR(&py_nn_load_obj) },
|
||||
#else
|
||||
{ MP_OBJ_NEW_QSTR(MP_QSTR_load), (mp_obj_t)&py_func_unavailable_obj },
|
||||
{ MP_ROM_QSTR(MP_QSTR_load), MP_ROM_PTR(&py_func_unavailable_obj) }
|
||||
#endif // IMLIB_ENABLE_CNN
|
||||
};
|
||||
|
||||
@ -100,5 +403,5 @@ STATIC MP_DEFINE_CONST_DICT(globals_dict, globals_dict_table);
|
||||
|
||||
const mp_obj_module_t nn_module = {
|
||||
.base = { &mp_type_module },
|
||||
.globals = (mp_obj_t)&globals_dict,
|
||||
.globals = (mp_obj_t) &globals_dict
|
||||
};
|
||||
|
||||
@ -138,10 +138,31 @@ Q(load)

// Net
Q(Net)

// Forward
Q(forward)
Q(dry_run)
Q(softmax)

// Search
// duplicate Q(search)
// duplicate Q(roi)
// duplicate Q(threshold)
Q(min_scale)
Q(scale_mul)
Q(x_overlap)
Q(y_overlap)
Q(contrast_threshold)
// duplicate Q(softmax)

// NN Class
Q(nn_class)
// duplicate Q(x)
// duplicate Q(y)
// duplicate Q(w)
// duplicate Q(h)
// duplicate Q(index)
// duplicate Q(value)

// C/SIF Resolutions
Q(QQCIF)
Q(QCIF)