Mirror of https://github.com/openmv/openmv.git
Synced 2025-11-04 14:49:50 +08:00

Merge pull request #2707 from openmv/add_stai_libs
lib: Add STAI library and ML backend.
Commit c89787552a

86  lib/stai/libstai/LICENSE.md  (new file)
@@ -0,0 +1,86 @@
This software component is provided to you as part of a software package and
applicable license terms are in the Package_license file. If you received this
software component outside of a package or without applicable license terms,
the terms of the SLA0044 license shall apply and are fully reproduced below:

SLA0044 Rev5/February 2018

Software license agreement

ULTIMATE LIBERTY SOFTWARE LICENSE AGREEMENT

BY INSTALLING, COPYING, DOWNLOADING, ACCESSING OR OTHERWISE USING THIS SOFTWARE
OR ANY PART THEREOF (AND THE RELATED DOCUMENTATION) FROM STMICROELECTRONICS
INTERNATIONAL N.V, SWISS BRANCH AND/OR ITS AFFILIATED COMPANIES
(STMICROELECTRONICS), THE RECIPIENT, ON BEHALF OF HIMSELF OR HERSELF, OR ON
BEHALF OF ANY ENTITY BY WHICH SUCH RECIPIENT IS EMPLOYED AND/OR ENGAGED AGREES
TO BE BOUND BY THIS SOFTWARE LICENSE AGREEMENT.

Under STMicroelectronics’ intellectual property rights, the redistribution,
reproduction and use in source and binary forms of the software or any part
thereof, with or without modification, are permitted provided that the following
conditions are met:

1. Redistribution of source code (modified or not) must retain any copyright
notice, this list of conditions and the disclaimer set forth below as items 10
and 11.

2. Redistributions in binary form, except as embedded into microcontroller or
microprocessor device manufactured by or for STMicroelectronics or a software
update for such device, must reproduce any copyright notice provided with the
binary code, this list of conditions, and the disclaimer set forth below as
items 10 and 11, in documentation and/or other materials provided with the
distribution.

3. Neither the name of STMicroelectronics nor the names of other contributors to
this software may be used to endorse or promote products derived from this
software or part thereof without specific written permission.

4. This software or any part thereof, including modifications and/or derivative
works of this software, must be used and execute solely and exclusively on or in
combination with a microcontroller or microprocessor device manufactured by or
for STMicroelectronics.

5. No use, reproduction or redistribution of this software partially or totally
may be done in any manner that would subject this software to any Open Source
Terms. “Open Source Terms” shall mean any open source license which requires as
part of distribution of software that the source code of such software is
distributed therewith or otherwise made available, or open source license that
substantially complies with the Open Source definition specified at
www.opensource.org and any other comparable open source license such as for
example GNU General Public License (GPL), Eclipse Public License (EPL), Apache
Software License, BSD license or MIT license.

6. STMicroelectronics has no obligation to provide any maintenance, support or
updates for the software.

7. The software is and will remain the exclusive property of STMicroelectronics
and its licensors. The recipient will not take any action that jeopardizes
STMicroelectronics and its licensors' proprietary rights or acquire any rights
in the software, except the limited rights specified hereunder.

8. The recipient shall comply with all applicable laws and regulations affecting
the use of the software or any part thereof including any applicable export
control law or regulation.

9. Redistribution and use of this software or any part thereof other than as
permitted under this license is void and will automatically terminate your
rights under this license.

10. THIS SOFTWARE IS PROVIDED BY STMICROELECTRONICS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS, IMPLIED OR STATUTORY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NON-INFRINGEMENT OF THIRD PARTY INTELLECTUAL PROPERTY RIGHTS, WHICH ARE
DISCLAIMED TO THE FULLEST EXTENT PERMITTED BY LAW. IN NO EVENT SHALL
STMICROELECTRONICS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

11. EXCEPT AS EXPRESSLY PERMITTED HEREUNDER, NO LICENSE OR OTHER RIGHTS, WHETHER
EXPRESS OR IMPLIED, ARE GRANTED UNDER ANY PATENT OR OTHER INTELLECTUAL PROPERTY
RIGHTS OF STMICROELECTRONICS OR ANY THIRD PARTY.
100293  lib/stai/libstai/include/ATON.h  (new file; diff suppressed because it is too large)

31  lib/stai/libstai/include/ai_common_config.h  (new file)
@@ -0,0 +1,31 @@
/**
 ******************************************************************************
 * @file ai_common_config.h
 * @author AST Embedded Analytics Research Platform
 * @brief header file of AI platform common compile configuration defines
 ******************************************************************************
 * @attention
 *
 * Copyright (c) 2018 STMicroelectronics.
 * All rights reserved.
 *
 * This software is licensed under terms that can be found in the LICENSE file
 * in the root directory of this software component.
 * If no LICENSE file comes with this software, it is provided AS-IS.
 *
 ******************************************************************************
 */
#ifndef AI_COMMON_CONFIG_H
#define AI_COMMON_CONFIG_H

/*!
 * @defgroup layers Layers Compilation Config Definitions
 * @brief definition
 *
 */

#define HAS_PROFILE_FLOAT
#define HAS_PROFILE_FIXED


#endif /*AI_COMMON_CONFIG_H*/
71  lib/stai/libstai/include/ai_datatypes.h  (new file)
@@ -0,0 +1,71 @@

#ifndef AI_DATATYPES_H
#define AI_DATATYPES_H
/**
 ******************************************************************************
 * @file ai_datatypes.h
 * @author AST Embedded Analytics Research Platform
 * @brief Definitions of AI platform private APIs types
 ******************************************************************************
 * @attention
 *
 * Copyright (c) 2017 STMicroelectronics.
 * All rights reserved.
 *
 * This software is licensed under terms that can be found in the LICENSE file
 * in the root directory of this software component.
 * If no LICENSE file comes with this software, it is provided AS-IS.
 *
 ******************************************************************************
 */
#include <string.h>
#include "ai_platform.h"
#include "ai_platform_interface.h"

/*!
 * @defgroup datatypes Platform Interface Datatypes
 * @brief Data structures used by AI platform to implement neural networks
 *
 */

/** Count Variable Number of Arguments (up to 64 elements) *******************/
#define AI_NUMARGS(...) \
  PP_NARG_(__VA_ARGS__,PP_RSEQ_N())
#define PP_NARG_(...) \
  PP_ARG_N(__VA_ARGS__)
#define PP_ARG_N( \
  _1, _2, _3, _4, _5, _6, _7, _8, _9,_10, \
  _11,_12,_13,_14,_15,_16,_17,_18,_19,_20, \
  _21,_22,_23,_24,_25,_26,_27,_28,_29,_30, \
  _31,_32,_33,_34,_35,_36,_37,_38,_39,_40, \
  _41,_42,_43,_44,_45,_46,_47,_48,_49,_50, \
  _51,_52,_53,_54,_55,_56,_57,_58,_59,_60, \
  _61,_62,_63,N,...) N
#define PP_RSEQ_N() \
  63,62,61,60, \
  59,58,57,56,55,54,53,52,51,50, \
  49,48,47,46,45,44,43,42,41,40, \
  39,38,37,36,35,34,33,32,31,30, \
  29,28,27,26,25,24,23,22,21,20, \
  19,18,17,16,15,14,13,12,11,10, \
  9,8,7,6,5,4,3,2,1,0
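/* Editor's note (illustrative example, not part of the committed ST header):
 * AI_NUMARGS() counts its arguments at preprocessing time by appending the
 * reversed sequence 63..0 and letting PP_ARG_N() select the 64th token that
 * follows. For instance AI_NUMARGS(x, y) expands to
 * PP_ARG_N(x, y, 63, 62, ..., 1, 0), where the two leading arguments shift
 * the sequence so that N lands on 2. A quick compile-time check, assuming a
 * C11 toolchain for _Static_assert, would be:
 *
 *   _Static_assert(AI_NUMARGS(a) == 1, "one argument");
 *   _Static_assert(AI_NUMARGS(a, b, c) == 3, "three arguments");
 */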


/*****************************************************************************/
#define AI_PTR_ALIGN(ptr, alignment) \
  ((((ai_uptr)(ptr))+((ai_uptr)(alignment)-1))&(~((ai_uptr)(alignment)-1)))
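/* Editor's note (illustrative, not part of the committed ST header):
 * AI_PTR_ALIGN() rounds an address up to the next multiple of `alignment`,
 * which is assumed to be a power of two. For example, with alignment = 8:
 *
 *   AI_PTR_ALIGN(0x1001, 8) -> 0x1008
 *   AI_PTR_ALIGN(0x1008, 8) -> 0x1008   (already aligned, left unchanged)
 *
 * The expression adds (alignment - 1) and then clears the low bits with the
 * inverted mask, the usual round-up idiom for power-of-two alignments.
 */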


/*!
 * @typedef ai_offset
 * @ingroup ai_datatypes_internal
 * @brief Generic index offset type
 */
typedef int32_t ai_offset;


AI_API_DECLARE_BEGIN

AI_API_DECLARE_END

#endif /* AI_DATATYPES_H */

151  lib/stai/libstai/include/ai_datatypes_defines.h  (new file)
@@ -0,0 +1,151 @@
/**
 ******************************************************************************
 * @file ai_datatypes_defines.h
 * @author AST Embedded Analytics Research Platform
 * @brief Definitions of AI platform private APIs types
 ******************************************************************************
 * @attention
 *
 * Copyright (c) 2017 STMicroelectronics.
 * All rights reserved.
 *
 * This software is licensed under terms that can be found in the LICENSE file
 * in the root directory of this software component.
 * If no LICENSE file comes with this software, it is provided AS-IS.
 *
 ******************************************************************************
 */
#ifndef AI_DATATYPES_DEFINES_H
#define AI_DATATYPES_DEFINES_H

#include "ai_platform.h"
#include "core_assert.h"


/*!
 * @defgroup datatypes_defines Internal Datatypes Defines Header
 * @brief Data structures used internally to implement neural networks
 *
 */

/* define to track datatypes used by codegen */
#define AI_INTERNAL_API          /* AI_INTERNAL_API */

#define AI_CONST                 const
#define AI_STATIC                static
#define AI_STATIC_CONST          static const

/******************************************************************************/
/* NOP operation used by codegen */
#define AI_NOP                   /* NOP */

#define AI_WRAP_FUNC(fn_)        do { fn_ } while (0);

#define AI_CAT(a, ...)           AI_PRIMITIVE_CAT(a, __VA_ARGS__)
#define AI_PRIMITIVE_CAT(a, ...) a ## __VA_ARGS__

/******************************************************************************/
#define AI_ASSERT(expr) \
  CORE_ASSERT(expr)


/******************************************************************************/
#define AI_NO_PACKED_STRUCTS

/* Macro for defining packed structures (compiler dependent).
 * This just reduces memory requirements, but is not required.
 */
#if defined(AI_NO_PACKED_STRUCTS)
  /* Disable struct packing */
  #define AI_PACKED_STRUCT_START  /* AI_PACKED_STRUCT_START */
  #define AI_PACKED_STRUCT_END    /* AI_PACKED_STRUCT_END */
  #define AI_PACKED               /* AI_PACKED */
#elif defined(__GNUC__) || defined(__clang__)
  /* For GCC and clang */
  #define AI_PACKED_STRUCT_START  /* AI_PACKED_STRUCT_START */
  #define AI_PACKED_STRUCT_END    /* AI_PACKED_STRUCT_END */
  #define AI_PACKED               __attribute__((packed))
#elif defined(__ICCARM__) || defined (__IAR_SYSTEMS_ICC__) || defined(__CC_ARM)
  /* For IAR ARM and Keil MDK-ARM compilers */
  #define AI_PACKED_STRUCT_START  _Pragma("pack(push, 1)")
  #define AI_PACKED_STRUCT_END    _Pragma("pack(pop)")
  #define AI_PACKED               /* AI_PACKED */
#elif defined(_MSC_VER) && (_MSC_VER >= 1500)
  /* For Microsoft Visual C++ */
  #define AI_PACKED_STRUCT_START  __pragma(pack(push, 1))
  #define AI_PACKED_STRUCT_END    __pragma(pack(pop))
  #define AI_PACKED               /* AI_PACKED */
#else
  /* Unknown compiler */
  #define AI_PACKED_STRUCT_START  /* AI_PACKED_STRUCT_START */
  #define AI_PACKED_STRUCT_END    /* AI_PACKED_STRUCT_END */
  #define AI_PACKED               /* AI_PACKED */
#endif /* AI_NO_PACKED_STRUCTS */
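/* Editor's note (illustrative sketch, not part of the committed ST header):
 * the three macros are meant to bracket a struct declaration, e.g.
 *
 *   AI_PACKED_STRUCT_START
 *   typedef struct AI_PACKED {
 *     ai_u8  tag;
 *     ai_u32 size;
 *   } my_packed_record;    // hypothetical type, used here only as an example
 *   AI_PACKED_STRUCT_END
 *
 * With AI_NO_PACKED_STRUCTS defined (as above) all three expand to nothing and
 * the struct keeps its natural alignment; on GCC/Clang AI_PACKED adds
 * __attribute__((packed)), while IAR/Keil and MSVC rely on the pack() pragma
 * pair instead.
 */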

/******************************************************************************/
#define AI_STRINGIFY_ARG(contents)      # contents
#define AI_STRINGIFY(macro_or_string)   AI_STRINGIFY_ARG (macro_or_string)

/******************************************************************************/
#if defined(_MSC_VER)
  #define AI_DECLARE_STATIC         static __inline
  // #define AI_FORCE_INLINE        static __forceinline
  #define AI_FORCE_INLINE           static __inline
  #define AI_HINT_INLINE            static __inline
  #define AI_ALIGNED_TYPE(type, x)  type __declspec(align(x))
  #define AI_INTERFACE_ENTRY        __declspec(dllexport)
#elif defined(__ICCARM__) || defined (__IAR_SYSTEMS_ICC__)
  #define AI_DECLARE_STATIC         static inline
  // #define AI_FORCE_INLINE        static _Pragma("inline=forced") // TODO: check this definition!
  #define AI_FORCE_INLINE           static inline
  #define AI_HINT_INLINE            static inline
  #define AI_ALIGNED_TYPE(type, x)  type
  #define AI_INTERFACE_ENTRY        /* AI_INTERFACE_ENTRY */
#elif defined(__GNUC__)
  #define AI_DECLARE_STATIC         static __inline
  #define AI_FORCE_INLINE           static __inline
  #define AI_HINT_INLINE            static __inline
  #define AI_ALIGNED_TYPE(type, x)  type __attribute__ ((aligned(x)))
  #define AI_INTERFACE_ENTRY        /* AI_INTERFACE_ENTRY */
#else /* _MSC_VER */
  #define AI_DECLARE_STATIC         static __inline
  // #define AI_FORCE_INLINE        static __forceinline
  #define AI_FORCE_INLINE           static __inline
  #define AI_HINT_INLINE            static __inline
  #define AI_ALIGNED_TYPE(type, x)  type __attribute__ ((aligned(x)))
  #define AI_INTERFACE_ENTRY        __attribute__((visibility("default")))
#endif /* _MSC_VER */

/******************************************************************************/
#define AI_ALIGN_MASKED(value, mask)    ( ((value)+(mask))&(~(mask)) )

#define AI_GET_VERSION_STRING(major, minor, micro) \
  AI_STRINGIFY_ARG(major) "." \
  AI_STRINGIFY_ARG(minor) "." \
  AI_STRINGIFY_ARG(micro) \
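
/* Editor's note (illustrative, not part of the committed ST header):
 * AI_STRINGIFY_ARG() turns its argument into a string literal, so for example
 *
 *   AI_GET_VERSION_STRING(9, 0, 0)
 *
 * pastes the adjacent literals "9" "." "0" "." "0", which the compiler merges
 * into the single string "9.0.0". The trailing backslash above simply
 * continues the macro onto the following empty line.
 */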


#define AI_PACK_TENSORS_PTR(...) \
  AI_PACK(__VA_ARGS__)

#define AI_PACK_INFO(size_)  (ai_tensor_info[1]) { { \
  .buffer = (ai_buffer[size_])AI_STRUCT_INIT, \
  .state = (ai_tensor_state[size_])AI_STRUCT_INIT, \
} }

#define AI_CR                       "\r\n"

#if (defined HAS_AI_DEBUG || defined HAS_DEBUG_LIB)
#include <stdio.h>
#define AI_DEBUG(...)               __VA_ARGS__
#define AI_DEBUG_PRINT(fmt, ...)    { printf(fmt, ##__VA_ARGS__); }
#else
#define AI_DEBUG(...)               AI_WRAP_FUNC(/*AI_DEBUG*/)
#define AI_DEBUG_PRINT(fmt, ...)    AI_WRAP_FUNC(/*AI_DEBUG_PRINT*/)
#endif

#define AI_FLAG_SET(mask, flag)     (mask) |= (flag)
#define AI_FLAG_UNSET(mask, flag)   (mask) &= (~(flag))
#define AI_FLAG_IS_SET(mask, flag)  ((flag)==((mask)&(flag)))
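/* Editor's note (illustrative, not part of the committed ST header): typical
 * use of the flag helpers on a plain bit-mask variable:
 *
 *   ai_u32 flags = 0x0;
 *   AI_FLAG_SET(flags, AI_FMT_FLAG_CONST);         // flags |= bit
 *   if (AI_FLAG_IS_SET(flags, AI_FMT_FLAG_CONST))  // true here
 *     AI_FLAG_UNSET(flags, AI_FMT_FLAG_CONST);     // flags &= ~bit
 *
 * AI_FLAG_IS_SET() checks that all bits of `flag` are present, since it
 * compares the masked value back against `flag` itself. AI_FMT_FLAG_CONST is
 * defined in ai_datatypes_format.h later in this diff.
 */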

#endif /*AI_DATATYPES_DEFINES_H*/

514  lib/stai/libstai/include/ai_datatypes_format.h  (new file)
@@ -0,0 +1,514 @@
/**
 ******************************************************************************
 * @file ai_datatypes_format.h
 * @author AST Embedded Analytics Research Platform
 * @brief Definitions of AI platform private format handling routines
 ******************************************************************************
 * @attention
 *
 * Copyright (c) 2017 STMicroelectronics.
 * All rights reserved.
 *
 * This software is licensed under terms that can be found in the LICENSE file
 * in the root directory of this software component.
 * If no LICENSE file comes with this software, it is provided AS-IS.
 *
 ******************************************************************************
 */
#ifndef AI_DATATYPES_FORMAT_H
#define AI_DATATYPES_FORMAT_H

#include "ai_platform.h"
#include "ai_datatypes_defines.h"


/*!
 * @defgroup ai_datatypes_format Definitions and macros of array and buffer formats
 * @brief Type definition and implementation of internal @ref ai_array and
 * @ref ai_buffer formats.
 * @details The library handles two different kinds of formats: an internal
 * format that is part of the @ref ai_array struct and is a packed 32-bit
 * representation of the format attributes, and a public format (used in public
 * APIs) associated with the @ref ai_buffer struct, defined as an enum in
 * @ref ai_platform.h. Converters are provided in this header file to convert
 * from one format representation to the other.
 * Some MSB bits are reserved in both formats to encode bit flags that declare
 * special attributes. Three flags are currently implemented in both formats:
 * @ref AI_BUFFER_FMT_FLAG_CONST and @ref AI_FMT_FLAG_CONST, used to tag
 * read-only memory buffers, @ref AI_BUFFER_FMT_FLAG_STATIC and
 * @ref AI_FMT_FLAG_STATIC to mark statically allocated memory buffers, and
 * @ref AI_FMT_FLAG_SCRATCH_BUFFER to tag temporary scratch buffers.
 * All the formats are declared in a tuple-organized table header named
 * @ref formats_list.h that enumerates all the formats available for the
 * library. A new format can be added easily by adding a new FMT_ENTRY() as
 * required. The preprocessor automatically generates the code for handling the
 * format according to this tuple entry. A rationale for the methodology can
 * be found here:
 * - https://codecraft.co/2012/10/29/how-enums-spread-disease-and-how-to-cure-it/
 *
 * The 32-bit internal format fields are organized as follows:
 *
 *   MSB                                                                LSB
 *   31            25 24       23     21       17        14        7      0
 *   /------------------------------------------------------------------/
 *   / ATTR. FLAGS  | COMPLEX | SIGN | LDIV |  TYPE  | PMASK | BITS | FBITS /
 *   /------------------------------------------------------------------/
 * Where:
 * - FLAGS: reserved bits used to store additional format attributes (e.g.
 *   I/O / STATIC flags, etc.)
 * - COMPLEX: 1 bit marking the format as a complex type
 * - SIGN: 1 bit marking the format as a signed type
 * - LDIV: 2 bits holding a log2 value used to compute the element size for
 *   some special formats such as the compressed ones. It is a shift factor,
 *   usually set to zero
 * - TYPE: 4 bits marking the format "family" type. Five families are
 *   currently coded:
 *   @ref AI_FMT_FLOAT (float types)
 *   @ref AI_FMT_Q (fixed-point types in Qm.n format)
 *   @ref AI_FMT_BOOL (boolean type)
 *   @ref AI_FMT_LUT_FLOAT (compressed float lookup formats)
 *   @ref AI_FMT_LUT_Q (compressed Qm.n lookup formats)
 * - PMASK: 3 bits padding mask used to set the optional dimension for padding,
 *   to handle special aligned formats, e.g. a 1-bit format.
 *   Usually this is set to 0x0
 * - BITS: 7 bits giving the total number of bits of the element, padding bits
 *   excluded. The bits are thus = sign bit + fractional bits + integer bits.
 *   The number of integer bits can thus be obtained using the @ref
 *   AI_FMT_GET_IBITS() macro.
 * - FBITS: 7 bits giving the number of fractional bits in the format
 *
 *
 * A reference code snippet for usage is the test unit that uses this header:
 *
 * \include test/test_lcut_formats.cpp
 *
 */

/*!
 * Format bitfields definition. NOTE: 7 MSB are masked off for (optional)
 * attribute setting using flags. See @ref AI_FMT_FLAG_CONST, which is used to
 * mark data as constant / read-only.
 */

/*! 1 bit field marking the format as a complex type */
#define _FMT_COMPLEX_MASK      (0x1)
#define _FMT_COMPLEX_BITS      (24)

/*! 1 bit sign info */
#define _FMT_SIGN_MASK         (0x1)
#define _FMT_SIGN_BITS         (23)

/*! fractional bits field (i.e. for Q formats see @ref AI_FMT_Q) */
#define _FMT_FBITS_MASK        (0x7F)
#define _FMT_FBITS_BITS        (0)
#define _FMT_FBITS_BIAS        ((_FMT_FBITS_MASK+1) >> 1)

/*! TOTAL number of bits (fractional+integer+sign) (excluded padding ones) */
#define _FMT_BITS_MASK         (0x7F)
#define _FMT_BITS_BITS         (7)
#define _FMT_BITS_BIAS         (0)

/*! Padding bits for handling formats not aligned to multiples of 8 bits */
#define _FMT_PMASK_MASK        (0x7)
#define _FMT_PMASK_BITS        (14)

/*! bits reserved for identifying the family format, e.g. float, fixed-point..*/
#define _FMT_TYPE_MASK         (0xF)
#define _FMT_TYPE_BITS         (17)

#define _FMT_LDIV_MASK         (0x3)
#define _FMT_LDIV_BITS         (21)


/******************************************************************************/
#define AI_FMT_OBJ(fmt_)       ((ai_array_format)(fmt_))

/*!
 * Only 25 LSB bits are used for storing actual format bits. 7 bits are reserved
 * for format attributes, see @ref AI_FMT_FLAG_CONST flag
 */
#define AI_FMT_FLAG_BITS            (25)
#define AI_FMT_MASK                 ((0x1<<AI_FMT_FLAG_BITS)-1)

#define AI_FMT_FLAG_CONST           (0x1<<30)
#define AI_FMT_FLAG_STATIC          (0x1<<29)
#define AI_FMT_FLAG_SCRATCH_BUFFER  (0x1<<28)
#define AI_FMT_FLAG_IS_IO           (0x1<<27)
#define AI_FMT_FLAG_VISITED         (0x1<<26)

/******************************************************************************/
/*!
 * Format "Class" type: this identifies the family of the format:
 * float, integer, fixed point (i.e. Q format), compressed via lookup table
 */
#define AI_FMT_NONE                 (0x0)
#define AI_FMT_FLOAT                (0x1)
#define AI_FMT_Q                    (0x2)
#define AI_FMT_BOOL                 (0x3)
#define AI_FMT_LUT_Q                (0x4)
#define AI_FMT_LUT_FLOAT            (0x8)

#define AI_FMT_QMASK \
  ( (_FMT_FBITS_MASK<<_FMT_FBITS_BITS) | \
    (_FMT_BITS_MASK<<_FMT_BITS_BITS) | \
    (_FMT_PMASK_MASK<<_FMT_PMASK_BITS) )

#define AI_FMT_BINARY_MASK \
  (AI_FMT_MASK & (~(_FMT_SIGN_MASK<<_FMT_SIGN_BITS)))

#define AI_FMT_IS_BINARY(val_) \
  (((val_) & AI_FMT_BINARY_MASK) == AI_ARRAY_FORMAT_U1)

#define AI_FMT_GET(val_) \
  ( (AI_FMT_OBJ(val_)) & AI_FMT_MASK )

#define AI_FMT_MASK_Q(val_) \
  ( AI_FMT_OBJ(val_) & (~(AI_FMT_QMASK)) )

#define AI_FMT_GET_Q(val_) \
  ( AI_FMT_MASK_Q(val_) | AI_FMT_SET_BITS(0) | AI_FMT_SET_FBITS(0) )

#define AI_FMT_GET_FLAGS(val_) \
  ( ((AI_FMT_OBJ(val_)) & (~AI_FMT_MASK)) >> AI_FMT_FLAG_BITS )

#define AI_FMT_SAME(fmt1_, fmt2_) \
  ( AI_FMT_GET(fmt1_) == AI_FMT_GET(fmt2_) )

#define _FMT_SET(val, mask, bits)  AI_FMT_OBJ(((val)&(mask))<<(bits))
#define _FMT_GET(fmt, mask, bits)  ((AI_FMT_OBJ(fmt)>>(bits))&(mask))

#define AI_FMT_SET_COMPLEX(val)  _FMT_SET(val, _FMT_COMPLEX_MASK, _FMT_COMPLEX_BITS)
#define AI_FMT_GET_COMPLEX(fmt)  _FMT_GET(fmt, _FMT_COMPLEX_MASK, _FMT_COMPLEX_BITS)
#define AI_FMT_SET_SIGN(val)     _FMT_SET(val, _FMT_SIGN_MASK, _FMT_SIGN_BITS)
#define AI_FMT_GET_SIGN(fmt)     _FMT_GET(fmt, _FMT_SIGN_MASK, _FMT_SIGN_BITS)
#define AI_FMT_SET_PMASK(val)    _FMT_SET(val, _FMT_PMASK_MASK, _FMT_PMASK_BITS)
#define AI_FMT_GET_PMASK(fmt)    _FMT_GET(fmt, _FMT_PMASK_MASK, _FMT_PMASK_BITS)
#define AI_FMT_SET_TYPE(val)     _FMT_SET(val, _FMT_TYPE_MASK, _FMT_TYPE_BITS)
#define AI_FMT_GET_TYPE(fmt)     _FMT_GET(fmt, _FMT_TYPE_MASK, _FMT_TYPE_BITS)
#define AI_FMT_SET_LDIV(val)     _FMT_SET(val, _FMT_LDIV_MASK, _FMT_LDIV_BITS)
#define AI_FMT_GET_LDIV(fmt)     _FMT_GET(fmt, _FMT_LDIV_MASK, _FMT_LDIV_BITS)


#define AI_FMT_SET_BITS(val) \
  _FMT_SET((val) + _FMT_BITS_BIAS, _FMT_BITS_MASK, _FMT_BITS_BITS)
#define AI_FMT_GET_BITS(fmt) \
  ((ai_i8)_FMT_GET(fmt, _FMT_BITS_MASK, _FMT_BITS_BITS) - _FMT_BITS_BIAS)
#define AI_FMT_SET_FBITS(val) \
  _FMT_SET((val) + _FMT_FBITS_BIAS, _FMT_FBITS_MASK, _FMT_FBITS_BITS)
#define AI_FMT_GET_FBITS(fmt) \
  ((ai_i8)_FMT_GET(fmt, _FMT_FBITS_MASK, _FMT_FBITS_BITS) - _FMT_FBITS_BIAS)

/*!
 * The total number of bits for a given format is supposed to be the sum of the
 * bits + padding bits. This means that the number of integer bits is derived
 * as follows: int_bits = bits - fbits (fractional bits) - 1 (for the sign)
 */
#define AI_FMT_GET_BITS_SIZE(fmt_) \
  AI_FMT_GET_BITS(fmt_)

/*! Macro used to compute the integer bits for a format */
#define AI_FMT_GET_IBITS(fmt_) \
  ((ai_i16)AI_FMT_GET_BITS(fmt_)-AI_FMT_GET_FBITS(fmt_)-AI_FMT_GET_SIGN(fmt_))
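/* Editor's note (illustrative worked example, not part of the committed ST
 * header): for a signed 8-bit fixed-point value in Q0.7 (1 sign bit plus 7
 * fractional bits) the packed descriptor would carry TYPE = AI_FMT_Q,
 * SIGN = 1, BITS = 8 and FBITS = 7, roughly:
 *
 *   fmt = AI_FMT_SET_TYPE(AI_FMT_Q) | AI_FMT_SET_SIGN(1) |
 *         AI_FMT_SET_BITS(8) | AI_FMT_SET_FBITS(7);
 *
 * The accessors then give AI_FMT_GET_BITS(fmt) == 8, AI_FMT_GET_FBITS(fmt) == 7
 * and AI_FMT_GET_IBITS(fmt) == 8 - 7 - 1 == 0, matching the Q0.7 layout. The
 * predefined AI_ARRAY_FORMAT_* constants below are built the same way from
 * formats_list.h.
 */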

/*! ai_buffer format handlers section *****************************************/

#define AI_BUFFER_FMT_MASK_Q(fmt_) \
  ( AI_BUFFER_FMT_OBJ(fmt_) & 0xFFFFC000 )

#define AI_BUFFER_FMT_GET_Q(fmt_) \
  ( AI_BUFFER_FMT_MASK_Q(fmt_) | AI_BUFFER_FMT_SET_FBITS(0) | \
    AI_BUFFER_FMT_SET_FBITS(0) )

#define AI_BUFFER_FMT_SET_Q(bits_, fbits_) \
  AI_BUFFER_FMT_SET(AI_BUFFER_FMT_TYPE_Q, 1, 0, bits_, fbits_)

#define AI_BUFFER_FMT_IS_Q(fmt_) \
  ( (AI_BUFFER_FMT_TYPE_Q==AI_BUFFER_FMT_GET_TYPE(fmt_)) && \
    (1==AI_BUFFER_FMT_GET_SIGN(fmt_)) )

#define AI_BUFFER_FMT_SET_UQ(bits_, fbits_) \
  AI_BUFFER_FMT_SET(AI_BUFFER_FMT_TYPE_Q, 0, 0, bits_, fbits_)

#define AI_BUFFER_FMT_IS_UQ(fmt_) \
  ( (AI_BUFFER_FMT_TYPE_Q==AI_BUFFER_FMT_GET_TYPE(fmt_)) && \
    (0==AI_BUFFER_FMT_GET_SIGN(fmt_)) )

/*! Q ai_array format handlers ************************************************/
#define AI_ARRAY_FMT_Q(bits_, fbits_) \
  ( AI_FMT_MASK_Q(AI_ARRAY_FORMAT_Q) | AI_FMT_SET_BITS(bits_) | AI_FMT_SET_FBITS(fbits_) )

#define AI_ARRAY_FMT_SET_Q(bits_, fbits_) \
  AI_ARRAY_FMT_Q(bits_, fbits_)

#define AI_ARRAY_FMT_IS_Q(fmt_) \
  ( AI_FMT_GET(AI_FMT_MASK_Q(AI_ARRAY_FORMAT_Q))==AI_FMT_GET(AI_FMT_MASK_Q(fmt_)) )

#define AI_ARRAY_FMT_UQ(bits_, fbits_) \
  ( AI_FMT_MASK_Q(AI_ARRAY_FORMAT_UQ) | AI_FMT_SET_BITS(bits_) | AI_FMT_SET_FBITS(fbits_) )

#define AI_ARRAY_FMT_SET_UQ(bits_, fbits_) \
  AI_ARRAY_FMT_UQ(bits_, fbits_)

#define AI_ARRAY_FMT_IS_UQ(fmt_) \
  ( AI_FMT_GET(AI_FMT_MASK_Q(AI_ARRAY_FORMAT_UQ))==AI_FMT_GET(AI_FMT_MASK_Q(fmt_)) )

AI_DEPRECATED
/* Alias for AI_ARRAY_FMT_SET_Q */
#define AI_ARRAY_FMT_SET_SQ(bits_, fbits_) \
  AI_ARRAY_FMT_SET_Q(bits_, fbits_)

AI_DEPRECATED
/* Alias for AI_ARRAY_FMT_IS_Q */
#define AI_ARRAY_FMT_IS_SQ(fmt_) \
  AI_ARRAY_FMT_IS_Q(fmt_)

/*! ai_array section **********************************************************/
#define AI_ARRAY_FMT_ENTRY(name_) \
  AI_CONCAT(AI_ARRAY_FORMAT_, name_)

#define AI_ARRAY_FMT_NAME(fmt_) \
  ai_array_fmt_name(fmt_)

#define AI_ARRAY_FMT_VALID(fmt_) \
  ai_array_fmt_valid(fmt_)

#define AI_ARRAY_FMT_EXPORTED(fmt_) \
  ai_array_fmt_exported(fmt_)

#define AI_ARRAY_FMT_GET_FORMATS(formats_) \
  ai_array_fmt_get_formats(formats_)

#define AI_ARRAY_TO_BUFFER_FMT(fmt_) \
  ai_array_to_buffer_fmt(fmt_)

#define AI_ARRAY_GET_BYTE_SIZE(fmt_, count_) \
  ai_array_get_byte_size(fmt_, count_)

#define AI_ARRAY_GET_DATA_BYTE_SIZE(fmt_, count_) \
  ai_array_get_data_byte_size(fmt_, count_)

#define AI_ARRAY_GET_ELEMS_FROM_SIZE(fmt_, size_) \
  ai_array_get_elems_from_size(fmt_, size_)


/* Compile sanity checks for formats field consistency */
#if (AI_FMT_MASK != AI_BUFFER_FMT_MASK)
#error "AI_FMT_MASK != AI_BUFFER_FMT_MASK"
#endif
#if (AI_FMT_NONE != AI_BUFFER_FMT_TYPE_NONE)
#error "AI_FMT_NONE != AI_BUFFER_FMT_TYPE_NONE"
#endif
#if (AI_FMT_FLOAT != AI_BUFFER_FMT_TYPE_FLOAT)
#error "AI_FMT_FLOAT != AI_BUFFER_FMT_TYPE_FLOAT"
#endif
#if (AI_FMT_Q != AI_BUFFER_FMT_TYPE_Q)
#error "AI_FMT_Q != AI_BUFFER_FMT_TYPE_Q"
#endif
#if (AI_FMT_BOOL != AI_BUFFER_FMT_TYPE_BOOL)
#error "AI_FMT_BOOL != AI_BUFFER_FMT_TYPE_BOOL"
#endif
#if (AI_FMT_FLAG_CONST != AI_BUFFER_FMT_FLAG_CONST)
#error "AI_FMT_FLAG_CONST != AI_BUFFER_FMT_FLAG_CONST"
#endif
#if (AI_FMT_FLAG_STATIC != AI_BUFFER_FMT_FLAG_STATIC)
#error "AI_FMT_FLAG_STATIC != AI_BUFFER_FMT_FLAG_STATIC"
#endif
#if (AI_FMT_FLAG_IS_IO != AI_BUFFER_FMT_FLAG_IS_IO)
#error "AI_FMT_FLAG_IS_IO != AI_BUFFER_FMT_FLAG_IS_IO"
#endif
#if (AI_FMT_FLAG_STATIC != AI_BUFFER_FMT_FLAG_PERSISTENT)
#error "AI_FMT_FLAG_STATIC != AI_BUFFER_FMT_FLAG_PERSISTENT"
#endif


AI_API_DECLARE_BEGIN

/*!
 * @typedef ai_array_format
 * @ingroup ai_datatypes_format
 * @brief Generic Data Format Specifier for @ref ai_array (32bits packed info)
 */
typedef int32_t ai_array_format;

/*!
 * @enum internal data format enums
 * @ingroup ai_datatypes_format
 * @brief Generic Data Format Specifier (32bits packed info)
 */
typedef enum {
#define FMT_ENTRY(exp_, name_, type_id_, sign_bit_, complex_bit_, \
  pmask_, bits_, fbits_, ldiv_bits_) \
  AI_ARRAY_FMT_ENTRY(name_) = (AI_FMT_SET_COMPLEX(complex_bit_) | \
                               AI_FMT_SET_SIGN(sign_bit_) | \
                               AI_FMT_SET_BITS(bits_) | \
                               AI_FMT_SET_FBITS(fbits_) | \
                               AI_FMT_SET_PMASK(pmask_) | \
                               AI_FMT_SET_TYPE(type_id_) | \
                               AI_FMT_SET_LDIV(ldiv_bits_)),
#include "formats_list.h"
} ai_array_format_entry;
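/* Editor's note (illustrative sketch, not part of the committed ST header):
 * this is the classic X-macro pattern. formats_list.h is expected to contain
 * one FMT_ENTRY(...) tuple per supported format; because FMT_ENTRY is defined
 * immediately above, including the list here turns each tuple into one
 * enumerator, conceptually something like
 *
 *   FMT_ENTRY(1, FLOAT, AI_FMT_FLOAT, 1, 0, 0x0, 32, 0, 0)
 *     -> AI_ARRAY_FORMAT_FLOAT = <packed 32-bit descriptor>,
 *
 * (the tuple values shown are hypothetical). The same list can be re-included
 * elsewhere with a different FMT_ENTRY definition, e.g. to build the name
 * table returned by ai_array_fmt_name().
 */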

/*!
 * @brief Get a human readable string from the format ID value
 * @ingroup ai_datatypes_format
 * @param[in] type the @ref ai_array_format to print out
 * @return a string with a human readable name of the format
 */
AI_INTERNAL_API
const char* ai_array_fmt_name(const ai_array_format type);

/*!
 * @brief Check if an @ref ai_array_format is exportable to an @ref ai_buffer_format
 * @ingroup ai_datatypes_format
 * @param[in] type the ai_array_format to check
 * @return true if the format is exportable, false otherwise
 */
AI_INTERNAL_API
ai_bool ai_array_fmt_exported(const ai_array_format type);

/*!
 * @brief Check if @ref ai_array_format is a valid format present in the list of
 * supported formats
 * @ingroup ai_datatypes_format
 * @param[in] type the ai_array_format to check
 * @return true if the format is valid, false otherwise
 */
AI_INTERNAL_API
ai_bool ai_array_fmt_valid(const ai_array_format type);

/*!
 * @brief Get the complete list of supported @ref ai_array_format formats
 * @ingroup ai_datatypes_format
 * @param[out] formats a pointer to an array with all supported formats listed
 * @return the number of supported formats
 */
AI_INTERNAL_API
ai_size ai_array_fmt_get_formats(const ai_array_format** formats);

/*! ai_buffer section *********************************************************
 * Only 25 LSB bits are used for storing actual format bits. 7 bits are reserved
 * for format attributes, see @ref AI_FMT_FLAG_CONST flag
 */

#define AI_BUFFER_FMT_ENTRY(name_) \
  AI_CONCAT(AI_BUFFER_FORMAT_, name_)

#define AI_BUFFER_FMT_NAME(type_) \
  ai_buffer_fmt_name(type_)

#define AI_BUFFER_FMT_VALID(type_) \
  ai_buffer_fmt_valid(type_)

#define AI_BUFFER_FMT_GET_FORMATS(formats_) \
  ai_buffer_fmt_get_formats(formats_)

#define AI_BUFFER_TO_ARRAY_FMT(fmt_) \
  ai_buffer_to_array_fmt(fmt_)

#define AI_BUFFER_GET_BITS_SIZE(fmt) \
  AI_ARRAY_GET_BITS_SIZE(AI_BUFFER_TO_ARRAY_FMT(fmt))


/*!
 * @brief Get a human readable string from the format ID value
 * @ingroup ai_datatypes_format
 * @param[in] type the @ref ai_buffer_format to print out
 * @return a string with a human readable name of the format
 */
AI_INTERNAL_API
const char* ai_buffer_fmt_name(
  const ai_buffer_format type);

/*!
 * @brief Check if @ref ai_buffer_format is a valid format present in the list
 * of supported formats
 * @ingroup ai_datatypes_format
 * @param[in] type the @ref ai_buffer_format to check
 * @return true if the format is valid, false otherwise
 */
AI_INTERNAL_API
ai_bool ai_buffer_fmt_valid(
  const ai_buffer_format type);

/*!
 * @brief Get the complete list of supported @ref ai_buffer_format formats
 * @ingroup ai_datatypes_format
 * @param[out] formats a pointer to an array with all supported formats listed
 * @return the number of supported formats
 */
AI_INTERNAL_API
ai_size ai_buffer_fmt_get_formats(
  const ai_buffer_format** formats);

/*! Conversions section *******************************************************/
/*!
 * @brief Convert from ai_array_format to ai_buffer_format.
 * @ingroup ai_datatypes_format
 * @param fmt the input ai_array_format to convert
 * @return the converted format as an ai_buffer_format
 */
AI_INTERNAL_API
ai_buffer_format ai_array_to_buffer_fmt(
  const ai_array_format fmt);

/*!
 * @brief Convert from ai_buffer_format to ai_array_format.
 * @ingroup ai_datatypes_format
 * @param fmt the input ai_buffer_format to convert
 * @return the converted format as an ai_array_format
 */
AI_INTERNAL_API
ai_array_format ai_buffer_to_array_fmt(
  const ai_buffer_format fmt);

/** helpers section ***********************************************************/
/*!
 * @brief Computes the size in bytes given an ai_array_format and number of
 * array elements.
 * @details This routine computes, from the number of elements of the array, its
 * size in bytes. If the array is referred to by a tensor structure, it is the
 * task of the latter to handle per-dimension padding (e.g. to align odd rows in
 * a 4-bit matrix). At array level the padding elements MUST be included in the
 * number of elements.
 * @ingroup ai_datatypes_format
 * @param[in] fmt the input array format as an ai_array_format
 * @param[in] count the number of elements stored in the data array
 * @return the size in bytes of the array given the specific format and number
 * of elements (including padding elements)
 */
AI_INTERNAL_API
ai_size ai_array_get_byte_size(
  const ai_array_format fmt, const ai_size count);

/*!
 * @brief Computes the size in bytes given an ai_array_format and number of
 * array elements of the data fields (e.g. LUT table size excluded).
 * @details This routine computes, from the number of elements of the array, its
 * size in bytes. If the array is referred to by a tensor structure, it is the
 * task of the latter to handle per-dimension padding (e.g. to align odd rows in
 * a 4-bit matrix). At array level the padding elements MUST be included in the
 * number of elements.
 * @ingroup ai_datatypes_format
 * @param[in] fmt the input array format as an ai_array_format
 * @param[in] count the number of elements stored in the data array
 * @return the size in bytes of the array given the specific format and number
 * of elements (including padding elements)
 */
AI_INTERNAL_API
ai_size ai_array_get_data_byte_size(
  const ai_array_format fmt, const ai_size count);

/*!
 * @brief Computes the number of elements from an ai_array_format and
 * the size in bytes of the array.
 * @ingroup ai_datatypes_format
 * @param fmt the input array format as an ai_array_format
 * @param size the size in bytes of the array
 * @return the number of elements that could be stored given the format
 */
AI_INTERNAL_API
ai_size ai_array_get_elems_from_size(
  const ai_array_format fmt, const ai_size byte_size);

AI_API_DECLARE_END

#endif /*AI_DATATYPES_FORMAT_H*/

417  lib/stai/libstai/include/ai_datatypes_internal.h  (new file)
@@ -0,0 +1,417 @@
/**
 ******************************************************************************
 * @file ai_datatypes_internal.h
 * @author AST Embedded Analytics Research Platform
 * @brief Definitions of AI platform private APIs types
 ******************************************************************************
 * @attention
 *
 * Copyright (c) 2017 STMicroelectronics.
 * All rights reserved.
 *
 * This software is licensed under terms that can be found in the LICENSE file
 * in the root directory of this software component.
 * If no LICENSE file comes with this software, it is provided AS-IS.
 *
 ******************************************************************************
 */
#ifndef AI_DATATYPES_INTERNAL_H
#define AI_DATATYPES_INTERNAL_H

#include "ai_datatypes.h"
#include "ai_datatypes_defines.h"

/*!
 * @defgroup datatypes_internal Internal Datatypes
 * @brief Data structures used internally to implement neural networks
 *
 * The layers are defined as structs; a generic layer type defines the basic
 * layer parameters and type-specific parameters are handled by specializations
 * implemented as a C union. The layers also keep a pointer to the parent
 * network and the next layer in the network.
 * The inputs, outputs and parameters are tensors with a hard-coded maximum
 * dimension of 4. Tensors are floating point arrays with a notion of size.
 * The network is a linked list of layers, and thus it stores only the pointer
 * to the first layer.
 */

/*!
 * @section Offsets
 * @ingroup datatypes_internal
 * Macros to handle (byte) stride addressing on tensors. The `AI_PTR` macro
 * is used to always cast a pointer to a byte array. The `AI_OFFSET_X` macros
 * are used to compute (byte) offsets of, respectively, adjacent row elements,
 * col elements, channel elements and `channel_in` elements.
 * @{
 */

/*! AI_STORAGE_KLASS SECTION ************************************/
#define AI_STORAGE_KLASS_TYPE(s_) \
  ( (s_)->type )

#define AI_STORAGE_KLASS_SIZE(s_) \
  ( (s_)->size )

#define AI_STORAGE_KLASS_DATA(s_, type_) \
  ( (type_*)((s_)->data) )

#define AI_STORAGE_KLASS_COPY(dst_, dst_type_, src_, src_type_) \
{ \
  AI_ASSERT(AI_STORAGE_KLASS_SIZE(src_)>=AI_STORAGE_KLASS_SIZE(dst_)) \
  AI_STORAGE_KLASS_SIZE(dst_) = AI_STORAGE_KLASS_SIZE(src_); \
  for (ai_size i=0; i<AI_STORAGE_KLASS_SIZE(dst_); i++ ) { \
    AI_STORAGE_KLASS_DATA(dst_, dst_type_)[i] = \
      AI_STORAGE_KLASS_DATA(src_, src_type_)[i]; \
  } \
}

#define AI_STORAGE_KLASS_DUMP(s_, pfx_, post_, fmt_, type_) \
{ \
  AI_ASSERT(s_) \
  AI_DEBUG_PRINT(pfx_, AI_STORAGE_KLASS_SIZE(s_)) \
  for ( ai_u32 i=0; i<AI_STORAGE_KLASS_SIZE(s_); i++ ) { \
    if ( (i % 8)==0 ) { AI_DEBUG_PRINT("\n ") } \
    AI_DEBUG_PRINT(fmt_, AI_STORAGE_KLASS_DATA(s_, type_)[i]) \
  } \
  AI_DEBUG_PRINT(post_) \
}

/*! AI_SHAPES SECTION ************************************/
#define AI_SHAPE_2D_H(shape_) \
  AI_SHAPE_ELEM(shape_, AI_SHAPE_2D_HEIGHT)

#define AI_SHAPE_2D_W(shape_) \
  AI_SHAPE_ELEM(shape_, AI_SHAPE_2D_WIDTH)

#define AI_SHAPE_ELEM(shape_, pos_) \
  AI_STORAGE_KLASS_DATA(shape_, ai_shape_dimension)[pos_]

#define AI_SHAPE_GET_ELEM(shape_, pos_) \
  (((pos_) < AI_SHAPE_SIZE(shape_)) ? AI_SHAPE_ELEM(shape_, pos_) : 1)

#define AI_SHAPE_SET_ELEM(shape_, pos_, val_) \
  if ((pos_) < AI_SHAPE_SIZE(shape_)) { AI_SHAPE_ELEM(shape_, pos_) = (val_); }

#define AI_SHAPE_TYPE(shape_) \
  AI_STORAGE_KLASS_TYPE(shape_)

#define AI_SHAPE_SIZE(shape_) \
  AI_STORAGE_KLASS_SIZE(shape_)

#define AI_SHAPE_CLONE(dst_, src_) \
  AI_STORAGE_KLASS_COPY(dst_, ai_shape_dimension, src_, ai_shape_dimension)

#define AI_SHAPE_BCAST_CLONE(dst_, src_) \
{ \
  for (ai_size i = 0; i < AI_SHAPE_SIZE(dst_); i++) { \
    AI_SHAPE_SET_ELEM(dst_, i, AI_SHAPE_GET_ELEM(src_, i)); \
  } \
}

//#define AI_SHAPE_BATCH(shape_)  AI_SHAPE_ELEM((shape_), AI_SHAPE_BATCH_CHANNEL)
#define AI_SHAPE_H(shape_)        AI_SHAPE_ELEM((shape_), AI_SHAPE_HEIGHT)
#define AI_SHAPE_W(shape_)        AI_SHAPE_ELEM((shape_), AI_SHAPE_WIDTH)
#define AI_SHAPE_CH(shape_)       AI_SHAPE_ELEM((shape_), AI_SHAPE_CHANNEL)
#define AI_SHAPE_IN_CH(shape_)    AI_SHAPE_ELEM((shape_), AI_SHAPE_IN_CHANNEL)
#define AI_SHAPE_D(shape_)        ((AI_SHAPE_SIZE((shape_)) > AI_SHAPE_DEPTH) \
  ? AI_SHAPE_ELEM((shape_), AI_SHAPE_DEPTH) : 1)
#define AI_SHAPE_E(shape_)        ((AI_SHAPE_SIZE((shape_)) > AI_SHAPE_EXTENSION) \
  ? AI_SHAPE_ELEM((shape_), AI_SHAPE_EXTENSION) : 1)
#define AI_SHAPE_T(shape_)        AI_SHAPE_ELEM((shape_), AI_SHAPE_TIME)

#define AI_CONV_SHAPE_H           AI_SHAPE_W
#define AI_CONV_SHAPE_W           AI_SHAPE_CH
#define AI_CONV_SHAPE_CH          AI_SHAPE_H
#define AI_CONV_SHAPE_IN_CH       AI_SHAPE_IN_CH

/*! AI_STRIDES SECTION ***********************************/
#define AI_STRIDE_2D_H(stride_) \
  AI_STRIDE_ELEM((stride_), AI_SHAPE_2D_HEIGHT)

#define AI_STRIDE_2D_W(stride_) \
  AI_STRIDE_ELEM((stride_), AI_SHAPE_2D_WIDTH)

#define AI_STRIDE_ELEM(stride_, pos_) \
  AI_STORAGE_KLASS_DATA(stride_, ai_stride_dimension)[pos_]

#define AI_STRIDE_GET_ELEM(stride_, pos_) \
  (((pos_) < AI_STRIDE_SIZE(stride_)) ? AI_STRIDE_ELEM(stride_, pos_) : 0)
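/* Editor's note (not part of the committed ST header): the out-of-range
 * defaults differ deliberately. AI_SHAPE_GET_ELEM() falls back to 1, so a
 * missing dimension behaves like a broadcastable size-1 axis, while
 * AI_STRIDE_GET_ELEM() falls back to 0, so stepping along a missing axis does
 * not move the data pointer. E.g. cloning a 4-D shape into a 5-D one with
 * AI_SHAPE_BCAST_CLONE() pads the extra axis with 1, and the matching stride
 * entry stays 0.
 */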

#define AI_STRIDE_SET_ELEM(stride_, pos_, val_) \
  if ((pos_) < AI_STRIDE_SIZE(stride_)) AI_STRIDE_ELEM(stride_, pos_) = (val_);

#define AI_STRIDE_TYPE(stride_) \
  AI_STORAGE_KLASS_TYPE(stride_)

#define AI_STRIDE_SIZE(stride_) \
  AI_STORAGE_KLASS_SIZE(stride_)

#define AI_STRIDE_CLONE(dst_, src_) \
  AI_STORAGE_KLASS_COPY(dst_, ai_stride_dimension, src_, ai_stride_dimension)

#define AI_STRIDE_BCAST_CLONE(dst_, src_) \
{ \
  for (ai_size i=0; i<AI_STRIDE_SIZE(dst_); i++) { \
    AI_STRIDE_SET_ELEM(dst_, i, AI_STRIDE_GET_ELEM(src_, i)); \
  } \
}

//#define AI_STRIDE_BATCH(stride)  AI_STRIDE_ELEM((stride), AI_SHAPE_BATCH_CHANNEL)
#define AI_STRIDE_H(stride)        AI_STRIDE_ELEM((stride), AI_SHAPE_HEIGHT)
#define AI_STRIDE_W(stride)        AI_STRIDE_ELEM((stride), AI_SHAPE_WIDTH)
#define AI_STRIDE_CH(stride)       AI_STRIDE_ELEM((stride), AI_SHAPE_CHANNEL)
#define AI_STRIDE_IN_CH(stride)    AI_STRIDE_ELEM((stride), AI_SHAPE_IN_CHANNEL)
#define AI_STRIDE_D(stride)        ((AI_STRIDE_SIZE((stride)) >= 5) ? AI_STRIDE_ELEM((stride), AI_SHAPE_DEPTH) : 0)
#define AI_STRIDE_E(stride)        ((AI_STRIDE_SIZE((stride)) == 6) ? AI_STRIDE_ELEM((stride), AI_SHAPE_EXTENSION) : 0)
#define AI_STRIDE_T(stride)        AI_STRIDE_ELEM((stride), AI_SHAPE_TIME)

#define AI_STRIDE_SET_H(stride, val)      AI_STRIDE_SET_ELEM((stride), AI_SHAPE_HEIGHT, val)
#define AI_STRIDE_SET_W(stride, val)      AI_STRIDE_SET_ELEM((stride), AI_SHAPE_WIDTH, val)
#define AI_STRIDE_SET_CH(stride, val)     AI_STRIDE_SET_ELEM((stride), AI_SHAPE_CHANNEL, val)
#define AI_STRIDE_SET_IN_CH(stride, val)  AI_STRIDE_SET_ELEM((stride), AI_SHAPE_IN_CHANNEL, val)
#define AI_STRIDE_SET_D(stride, val)      if (AI_STRIDE_SIZE((stride)) >= 5) AI_STRIDE_SET_ELEM((stride), AI_SHAPE_DEPTH, val)
#define AI_STRIDE_SET_E(stride, val)      if (AI_STRIDE_SIZE((stride)) == 6) AI_STRIDE_SET_ELEM((stride), AI_SHAPE_EXTENSION, val)

/*! AI_TENSORS SECTION ***********************************/
#define AI_TENSOR_KLASS(tensor_) \
  ((tensor_) ? (tensor_)->klass : NULL)

#define AI_TENSOR_SHAPE(tensor_) \
  (&((tensor_)->shape))

#define AI_TENSOR_STRIDE(tensor_) \
  (&((tensor_)->stride))

#define AI_TENSOR_INFO(tensor_) \
  (&((tensor_)->info))

#define AI_TENSOR_ARRAY(tensor_) \
  ((tensor_) ? (tensor_)->data : NULL)

#define AI_TENSOR_ID(tensor_) \
  ((tensor_) ? AI_TENSOR_INFO(tensor_)->id : 0)

#define AI_TENSOR_FLAGS(tensor_) \
  ((tensor_) ? AI_TENSOR_INFO(tensor_)->flags : 0)

#define AI_TENSOR_DATA_SIZE(tensor_) \
  ((tensor_) ? AI_TENSOR_INFO(tensor_)->data_size : 0)

/*! AI_OFFSETS SECTION ***********************************/
//#define AI_OFFSET_BATCH(b, stride)  ((ai_ptr_offset)(b) * AI_STRIDE_BATCH(stride))
#define AI_OFFSET_H(y, stride)        ((ai_ptr_offset)(y) * AI_STRIDE_H(stride))
#define AI_OFFSET_W(x, stride)        ((ai_ptr_offset)(x) * AI_STRIDE_W(stride))
#define AI_OFFSET_CH(ch, stride)      ((ai_ptr_offset)(ch) * AI_STRIDE_CH(stride))
#define AI_OFFSET_IN_CH(in_ch, stride) ((ai_ptr_offset)(in_ch) * \
  AI_STRIDE_IN_CH(stride))
#define AI_OFFSET_D(d, stride)        ((ai_ptr_offset)(d) * AI_STRIDE_D(stride))
#define AI_OFFSET_E(e, stride)        ((ai_ptr_offset)(e) * AI_STRIDE_E(stride))

#define AI_OFFSET_5D(y, x, d, e, ch, stride) ( \
  AI_OFFSET_H((y), (stride)) + AI_OFFSET_W((x), (stride)) + \
  AI_OFFSET_D((d), (stride)) + AI_OFFSET_E((e), (stride)) + \
  AI_OFFSET_CH((ch), (stride)) )

#define AI_OFFSET(y, x, ch, z, stride) ( \
  AI_OFFSET_H((y), (stride)) + AI_OFFSET_W((x), (stride)) + \
  AI_OFFSET_CH((ch), (stride)) + \
  ((AI_STRIDE_SIZE((stride)) == 4) ? AI_OFFSET_IN_CH((z), (stride)) : AI_OFFSET_D((z), (stride))) )
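/* Editor's note (illustrative, not part of the committed ST header): strides
 * are expressed in bytes, so AI_OFFSET() is simply a dot product between
 * coordinates and strides. For a float32 HWC tensor with H=4, W=4, CH=3 the
 * contiguous strides are CH=4, W=12 and H=48 bytes, and the element at
 * (y=1, x=2, ch=0) lives at 1*48 + 2*12 + 0*4 = 72 bytes past the base
 * pointer.
 */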

/*! @} */

#define AI_GET_CONV_OUT_SIZE(in_size, filt_size, pad_l, pad_r, filt_stride) \
  ((((in_size) - (filt_size) + (pad_l) + (pad_r)) / (filt_stride)) + 1)
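/* Editor's note (illustrative, not part of the committed ST header): this is
 * the usual convolution output-size formula,
 * out = (in - kernel + pad_l + pad_r) / stride + 1, with integer (floor)
 * division. E.g. a 224-wide input, a 3-wide kernel, padding 1 on each side and
 * stride 2 gives (224 - 3 + 1 + 1) / 2 + 1 = 112.
 */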
|
||||
|
||||
|
||||
/** Tensors datatypes defines handlers ****************************************/
|
||||
#define AI_TENSOR_SIZE(tensor_) \
|
||||
get_tensor_size(tensor_, true)
|
||||
|
||||
#define AI_TENSOR_SIZE_UNPAD(tensor_) \
|
||||
get_tensor_size(tensor_, false)
|
||||
|
||||
#define AI_TENSOR_BYTE_SIZE(tensor_) \
|
||||
get_tensor_byte_size(tensor_)
|
||||
|
||||
|
||||
/******************************************************************************/
|
||||
#define AI_PLATFORM_VERSION_INIT(major_, minor_, micro_) \
|
||||
{ .major = (major_), .minor = (minor_), .micro = (micro_), .reserved = 0x0 }
|
||||
|
||||
|
||||
/** Integer tensor info extraction ********************************************/
|
||||
#define AI_INTQ_INFO_LIST_SCALE_ARRAY(list_, type_) \
|
||||
( ((list_) && (list_)->info) \
|
||||
? ((type_*)((list_)->info->scale)) : NULL )
|
||||
|
||||
#define AI_INTQ_INFO_LIST_ZEROPOINT_ARRAY(list_, type_) \
|
||||
( ((list_) && (list_)->info) \
|
||||
? ((type_*)((list_)->info->zeropoint)) : NULL )
|
||||
|
||||
#define AI_KLASS_GET_INTQ_INFO_LIST(tensor_) \
|
||||
((ai_intq_info_list*)((tensor_)->klass))
|
||||
|
||||
|
||||
AI_API_DECLARE_BEGIN
|
||||
|
||||
/*!
|
||||
* @brief Check whether 2 shapes have identical dimensions.
|
||||
* @ingroup datatypes_internal
|
||||
* @param shape0 the 1st tensor shape to compare
|
||||
* @param shape1 the 2nd tensor shape to compare
|
||||
* @return true if shape0 and shape1 have same dimensions. false otherwise
|
||||
*/
|
||||
AI_DECLARE_STATIC
|
||||
ai_bool ai_shape_is_same(
|
||||
const ai_shape* shape0, const ai_shape* shape1)
|
||||
{
|
||||
AI_ASSERT(shape0 && shape1)
|
||||
if (AI_SHAPE_SIZE(shape0) != AI_SHAPE_SIZE(shape1))
|
||||
return false;
|
||||
ai_size dim = AI_SHAPE_SIZE(shape0);
|
||||
while ( dim>0 ) {
|
||||
dim--;
|
||||
if ( AI_SHAPE_ELEM(shape0, dim)!=AI_SHAPE_ELEM(shape1, dim) )
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
/*!
|
||||
* @brief Check whether the shapes is 1*1*1... for a scalar value content.
|
||||
* @ingroup datatypes_internal
|
||||
* @param shape the tensor shape to evaluate
|
||||
* @return true if shape0 is scalar false otherwise
|
||||
*/
|
||||
AI_DECLARE_STATIC
|
||||
ai_bool ai_shape_is_scalar(
|
||||
const ai_shape* shape0)
|
||||
{
|
||||
ai_size dim = AI_SHAPE_SIZE(shape0);
|
||||
while (dim>0) {
|
||||
dim--;
|
||||
if (AI_SHAPE_ELEM(shape0, dim) != 1)
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
/*!
|
||||
* @brief Check if shape0 is a subshape of shape1
|
||||
* @ingroup datatypes_internal
|
||||
* @param shape0 the 1st tensor shape to compare
|
||||
* @param shape1 the 2nd tensor shape to compare
|
||||
* @return true if shape0 is a subshape of shape1 (all shape0 dimensions are
|
||||
* smallers or equal of the shape1 ones). false otherwise
|
||||
*/
|
||||
AI_DECLARE_STATIC
|
||||
ai_bool ai_shape_is_subshape(
|
||||
const ai_shape* shape0, const ai_shape* shape1)
|
||||
{
|
||||
AI_ASSERT(shape0 && shape1)
|
||||
AI_ASSERT(AI_SHAPE_SIZE(shape0)==AI_SHAPE_SIZE(shape1))
|
||||
ai_size dim = AI_SHAPE_SIZE(shape0);
|
||||
while (dim) {
|
||||
dim--;
|
||||
if ( AI_SHAPE_ELEM(shape0, dim)>AI_SHAPE_ELEM(shape1, dim) )
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
/*!
|
||||
* @brief Computes the total size of a tensor given its dimensions.
|
||||
* @ingroup datatypes_internal
|
||||
* @param shape the tensor shape
|
||||
*/
|
||||
AI_DECLARE_STATIC
|
||||
ai_size ai_shape_get_size(const ai_shape* shape)
|
||||
{
|
||||
AI_ASSERT(shape)
|
||||
ai_size dim = AI_SHAPE_SIZE(shape);
|
||||
AI_ASSERT(dim > 0)
|
||||
ai_size size = 1;
|
||||
while (dim>0) {
|
||||
dim--;
|
||||
size *= AI_SHAPE_ELEM(shape, dim);
|
||||
}
|
||||
return size;
|
||||
}
|
||||
|
||||
/*!
|
||||
* @brief Computes the size of the input image discarding the channels.
|
||||
* @ingroup datatypes_internal
|
||||
* @param shape the tensor shape
|
||||
*/
|
||||
AI_DECLARE_STATIC
|
||||
ai_size ai_shape_get_npixels(const ai_shape* shape)
|
||||
{
|
||||
AI_ASSERT(shape)
|
||||
const ai_size npixels = AI_SHAPE_W(shape) * AI_SHAPE_H(shape);
|
||||
return npixels;
|
||||
}
|
||||
|
||||
/** APIs Section *************************************************************/
|
||||
/*!
|
||||
* @brief Get packed version from major, minor, micro representaion.
|
||||
* @ingroup datatypes_internal
|
||||
* @param major major version value
|
||||
* @param minor minor version value
|
||||
* @param micro micro version value
|
||||
* @return a packed version info obtained serializing input values
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
ai_version ai_version_get(const ai_u8 major, const ai_u8 minor, const ai_u8 micro);
|
||||
|
||||
/*!
|
||||
* @brief Get un-packed version from packed version representaion.
|
||||
* @ingroup datatypes_internal
|
||||
* @param version a packed varsion info
|
||||
* @return struct with de-serialized major, minor, micro values
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
ai_platform_version ai_platform_version_get(const ai_version version);
|
||||
|
||||
/*!
|
||||
* @brief Map from ai_buffer data struct to ai_array data struct.
|
||||
* @ingroup datatypes_internal
|
||||
* @param buf a pointer to the ai_buffer to be mapped to ai_array
|
||||
* @return an initialized @ref ai_array struct representing same data
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
ai_array ai_from_buffer_to_array(const ai_buffer* buf);
|
||||
|
||||
/*!
|
||||
* @brief Map from ai_array data struct to ai_buffer data struct.
|
||||
* @ingroup datatypes_internal
|
||||
* @param array a pointer to the ai_array to be mapped to ai_buffer
|
||||
* @return an initialized @ref ai_buffer struct representing same data
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
ai_buffer ai_from_array_to_buffer(const ai_array* array);
|
||||
|
||||
/*!
|
||||
* @brief get the total number of elements of a n-dimensional tensor.
|
||||
* @ingroup datatypes_internal
|
||||
* @param t a pointer to an @ref ai_tensor
|
||||
* @param with_padding when true it considers also padded elements
|
||||
* @return the number of elements of the tensor (with/without padded ones)
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
ai_size get_tensor_size(const ai_tensor* t, const ai_bool with_padding);
|
||||
|
||||
/*!
|
||||
* @brief get the total size in bytes of elements of a n-dimensional tensor (excluding padded ones).
|
||||
* @ingroup datatypes_internal
|
||||
* @param t a pointer to an @ref ai_tensor
|
||||
* @return the total size in bytes of elements of the tensor (excluding padded ones)
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
ai_size get_tensor_byte_size(const ai_tensor* t);
|
||||
|
||||
|
||||
AI_API_DECLARE_END
|
||||
|
||||
#endif /*AI_DATATYPES_INTERNAL_H*/
|
||||
262
lib/stai/libstai/include/ai_layer_custom_interface.h
Normal file
262
lib/stai/libstai/include/ai_layer_custom_interface.h
Normal file
@ -0,0 +1,262 @@
|
||||
/**
|
||||
******************************************************************************
|
||||
* @file ai_layer_custom_interface.h
|
||||
* @author AST Embedded Analytics Research Platform
|
||||
* @brief Definitions of AI platform custom layers interface APIs
|
||||
******************************************************************************
|
||||
* @attention
|
||||
*
|
||||
* Copyright (c) 2021 STMicroelectronics.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This software is licensed under terms that can be found in the LICENSE file
|
||||
* in the root directory of this software component.
|
||||
* If no LICENSE file comes with this software, it is provided AS-IS.
|
||||
*
|
||||
******************************************************************************
|
||||
*/
|
||||
#ifndef AI_LAYER_CUSTOM_INTERFACE_H
|
||||
#define AI_LAYER_CUSTOM_INTERFACE_H
|
||||
|
||||
#include "ai_platform.h"
|
||||
#include "ai_platform_interface.h"
|
||||
|
||||
#include "layers_custom.h"
|
||||
|
||||
#define INTQ_SCALE_FLOAT (AI_BUFFER_META_FLAG_SCALE_FLOAT)
|
||||
#define INTQ_ZEROPOINT_U8 (AI_BUFFER_META_FLAG_ZEROPOINT_U8)
|
||||
#define INTQ_ZEROPOINT_S8 (AI_BUFFER_META_FLAG_ZEROPOINT_S8)
|
||||
#define INTQ_ZEROPOINT_U16 (AI_BUFFER_META_FLAG_ZEROPOINT_U16)
|
||||
#define INTQ_ZEROPOINT_S16 (AI_BUFFER_META_FLAG_ZEROPOINT_S16)
|
||||
|
||||
#define AI_TENSOR_HEIGHT (3)
|
||||
#define AI_TENSOR_WIDTH (2)
|
||||
#define AI_TENSOR_CHANNEL (1)
|
||||
#define AI_TENSOR_IN_CHANNEL (0)
|
||||
|
||||
|
||||
AI_API_DECLARE_BEGIN
|
||||
|
||||
typedef enum {
|
||||
TYPE_NONE = 0x0,
|
||||
TYPE_FLOAT,
|
||||
TYPE_BOOL,
|
||||
TYPE_INTEGER,
|
||||
TYPE_SIGNED,
|
||||
TYPE_UNSIGNED,
|
||||
} ai_tensor_type;
|
||||
|
||||
typedef struct {
|
||||
ai_tensor_type type;
|
||||
ai_i8 bits;
|
||||
ai_i8 fbits;
|
||||
} ai_tensor_format;
|
||||
|
||||
typedef struct {
|
||||
ai_u16 flags; /*!< optional flags to store intq info attributes */
|
||||
ai_u16 size; /*!< number of elements in the intq_info list */
|
||||
ai_float* scale; /*!< array of scale factors */
|
||||
union {
|
||||
ai_u8* zeropoint_u8; /*!< array of zeropoints as unsigned */
|
||||
ai_i8* zeropoint_s8; /*!< array of zeropoints as signed */
|
||||
};
|
||||
} ai_tensor_intq_info;
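The fields above follow the usual affine quantization convention, real = scale * (quantized - zeropoint). A minimal per-tensor int8 sketch (hypothetical helper, not part of the header; it assumes the flags select the signed-8 zeropoint variant):

static ai_float dequantize_s8_example(const ai_tensor_intq_info* qi, const ai_i8 q)
{
  /* Use the first scale/zero-point pair (per-tensor quantization). */
  return qi->scale[0] * (ai_float)(q - qi->zeropoint_s8[0]);
}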
|
||||
|
||||
|
||||
/****************************************************************************
|
||||
** Layer Custom Interface APIs
|
||||
****************************************************************************/
|
||||
/*!
|
||||
* @brief acquire the custom layer from its handle
|
||||
* @ingroup ai_layer_custom_interface
|
||||
* @param layer an opaque handler to the custom layer
|
||||
* @return a pointer to ai_layer_custom if found and valid, else NULL
|
||||
*/
|
||||
AI_INTERFACE_TYPE
|
||||
ai_layer_custom* ai_layer_custom_get(
|
||||
ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief release the custom layer provided its handle
|
||||
* @ingroup ai_layer_custom_interface
|
||||
* @param layer an opaque handler to the custom layer to release
|
||||
*/
|
||||
AI_INTERFACE_TYPE
|
||||
void ai_layer_custom_release(
|
||||
ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief get the number of input tensors of a custom layer
|
||||
* @ingroup ai_layer_custom_interface
|
||||
* @param layer an opaque handler to the custom layer
|
||||
* @return the number of input tensors of the layer. 0 if no input tensors or error
|
||||
*/
|
||||
AI_INTERFACE_TYPE
|
||||
ai_size ai_layer_get_tensor_in_size(
|
||||
const ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief get the number of output tensors of a custom layer
|
||||
* @ingroup ai_layer_custom_interface
|
||||
* @param layer an opaque handler to the custom layer
|
||||
* @return the number of output tensors of the layer. 0 if no output tensors or error
|
||||
*/
|
||||
AI_INTERFACE_TYPE
|
||||
ai_size ai_layer_get_tensor_out_size(
|
||||
const ai_layer* layer);
|
||||
|
||||
|
||||
/*!
|
||||
* @brief get the number of weight tensors of a custom layer
|
||||
* @ingroup ai_layer_custom_interface
|
||||
* @param layer an opaque handler to the custom layer
|
||||
* @return the number of weight tensors of the layer. 0 if no weight tensors or error
|
||||
*/
|
||||
AI_INTERFACE_TYPE
|
||||
ai_size ai_layer_get_tensor_weights_size(
|
||||
const ai_layer* layer);
|
||||
|
||||
|
||||
/*!
|
||||
* @brief get the n-th (at index pos) input tensor pointer from a layer
|
||||
* @ingroup ai_layer_custom_interface
|
||||
* @param layer an opaque handler to the layer
|
||||
* @param pos the index position in the tensor list
|
||||
* @return a pointer to the tensor if found; NULL if invalid or out of range
|
||||
*/
|
||||
AI_INTERFACE_TYPE
|
||||
ai_tensor* ai_layer_get_tensor_in(
|
||||
const ai_layer* layer, const ai_u16 pos);
|
||||
|
||||
/*!
|
||||
* @brief get the n-th (at index pos) output tensor pointer from a layer
|
||||
* @ingroup ai_layer_custom_interface
|
||||
* @param layer an opaque handler to the layer
|
||||
* @param pos the index position in the tensor list
|
||||
* @return a pointer to the tensor if found; NULL if invalid or out of range
|
||||
*/
|
||||
AI_INTERFACE_TYPE
|
||||
ai_tensor* ai_layer_get_tensor_out(
|
||||
const ai_layer* layer, const ai_u16 pos);
|
||||
|
||||
|
||||
/*!
|
||||
* @brief get the n-th (at index pos) weight tensor pointer from a layer
|
||||
* @ingroup ai_layer_custom_interface
|
||||
* @param layer an opaque handler to the layer
|
||||
* @param pos the index position in the tensor list
|
||||
* @return a pointer to the tensor if found; NULL if invalid or out of range
|
||||
*/
|
||||
AI_INTERFACE_TYPE
|
||||
ai_tensor* ai_layer_get_tensor_weights(
|
||||
const ai_layer* layer, const ai_u16 pos);
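Taken together, the size and indexed getters above let a custom layer body walk its tensor lists. A hedged sketch (hypothetical function; ai_tensor_get_data_size is declared further down in this header):

/* Count the elements across all input tensors of a custom layer. */
static ai_size custom_layer_total_input_elems(const ai_layer* layer)
{
  ai_size total = 0;
  const ai_size n_in = ai_layer_get_tensor_in_size(layer);
  for (ai_u16 i = 0; i < (ai_u16)n_in; i++) {
    const ai_tensor* t = ai_layer_get_tensor_in(layer, i);
    if (t) {
      total += ai_tensor_get_data_size(t);
    }
  }
  return total;
}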
|
||||
|
||||
|
||||
/**** Layer Tensors APIs ***************************************************/
|
||||
/*!
|
||||
* @brief check if the tensor has integer quantization information @ref ai_tensor_intq_info
|
||||
* @ingroup ai_layer_custom_interface
|
||||
* @param tensor a pointer to the tensor
|
||||
* @return true if the tensor has integer quantization information, false otherwise
|
||||
*/
|
||||
AI_INTERFACE_TYPE
|
||||
ai_bool ai_tensor_has_intq(
|
||||
const ai_tensor* t);
|
||||
|
||||
/*!
|
||||
* @brief get the tensor integer quantization information @ref ai_tensor_intq_info
|
||||
* @ingroup ai_layer_custom_interface
|
||||
* @param tensor a pointer to the tensor
|
||||
* @return the integer quantization information as a struct @ref ai_tensor_intq_info
|
||||
*/
|
||||
AI_INTERFACE_TYPE
|
||||
ai_tensor_intq_info ai_tensor_get_intq(
|
||||
const ai_tensor* t);
|
||||
|
||||
/*!
|
||||
* @brief get the format of the tensor, see @ref ai_tensor_format
|
||||
* @ingroup ai_layer_custom_interface
|
||||
* @param tensor a pointer to the tensor
|
||||
* @return the tensor format
|
||||
*/
|
||||
AI_INTERFACE_TYPE
|
||||
ai_tensor_format ai_tensor_get_format(
|
||||
const ai_tensor* t);
|
||||
|
||||
/**** Shapes Getters ****/
|
||||
/*!
|
||||
* @brief get the dimensionality of the tensor shape
|
||||
* @ingroup ai_layer_custom_interface
|
||||
* @param tensor a pointer to the tensor
|
||||
* @return the dimensionality of the tensor shape
|
||||
*/
|
||||
AI_INTERFACE_TYPE
|
||||
ai_size ai_tensor_get_shape_size(
|
||||
const ai_tensor* t);
|
||||
|
||||
/*!
|
||||
* @brief get the value of the shape dimension at index pos
|
||||
* @ingroup ai_layer_custom_interface
|
||||
* @param tensor a pointer to the tensor
|
||||
* @return the value of the tensor shape dimension at index pos
|
||||
*/
|
||||
AI_INTERFACE_TYPE
|
||||
ai_shape_dimension ai_tensor_get_shape(
|
||||
const ai_tensor* t, const ai_u16 pos);
|
||||
|
||||
/**** Strides Getters ****/
|
||||
/*!
|
||||
* @brief get the dimensionality of the tensor strides
|
||||
* @ingroup ai_layer_custom_interface
|
||||
* @param tensor a pointer to the tensor
|
||||
* @return the dimensionality of the tensor strides @ref ai_stride
|
||||
*/
|
||||
AI_INTERFACE_TYPE
|
||||
ai_size ai_tensor_get_stride_size(
|
||||
const ai_tensor* t);
|
||||
|
||||
/*!
|
||||
* @brief get the value of the stride dimension at index pos
|
||||
* @ingroup ai_layer_custom_interface
|
||||
* @param tensor a pointer to the tensor
|
||||
* @return the value of the tensor stride dimension at index pos
|
||||
*/
|
||||
AI_INTERFACE_TYPE
|
||||
ai_stride_dimension ai_tensor_get_stride(
|
||||
const ai_tensor* t, const ai_u16 pos);
|
||||
|
||||
/**** Data Storage Getters ****/
|
||||
/*!
|
||||
* @brief get tensor storage data buffer pointer
|
||||
* @ingroup ai_layer_custom_interface
|
||||
* @param tensor a pointer to the tensor
|
||||
* @return a pointer to the tensor data buffer, set to NULL if error
|
||||
*/
|
||||
AI_INTERFACE_TYPE
|
||||
ai_any_ptr ai_tensor_get_data(
|
||||
const ai_tensor* t);
|
||||
|
||||
/*!
|
||||
* @brief get number of tensor elements
|
||||
* @ingroup ai_layer_custom_interface
|
||||
* @param tensor a pointer to the tensor
|
||||
* @return the number of tensor elements or 0 if error
|
||||
*/
|
||||
AI_INTERFACE_TYPE
|
||||
ai_size ai_tensor_get_data_size(
|
||||
const ai_tensor* t);
|
||||
|
||||
/*!
|
||||
* @brief get the size in bytes of the tensor data buffer
|
||||
* @ingroup ai_layer_custom_interface
|
||||
* @param tensor a pointer to the tensor
|
||||
* @return the size in bytes of the tensor data buffer. 0 if error
|
||||
*/
|
||||
AI_INTERFACE_TYPE
|
||||
ai_size ai_tensor_get_data_byte_size(
|
||||
const ai_tensor* t);
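A sketch combining the data getters above (hypothetical helper; the ai_any_ptr member used to reach the raw pointer is an assumption taken from ai_platform_interface.h and may differ between versions):

/* Sum the elements of a float32 tensor. */
static ai_float tensor_sum_f32_example(const ai_tensor* t)
{
  const ai_any_ptr any = ai_tensor_get_data(t);
  const ai_float* data = (const ai_float*)any.ptr;  /* '.ptr' member name assumed */
  const ai_size n = ai_tensor_get_data_size(t);
  ai_float sum = 0.0f;
  for (ai_size i = 0; i < n; i++) {
    sum += data[i];
  }
  return sum;
}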
|
||||
|
||||
|
||||
AI_API_DECLARE_END
|
||||
|
||||
#endif /*AI_LAYER_CUSTOM_INTERFACE_H*/
|
||||
62
lib/stai/libstai/include/ai_lite.h
Normal file
@ -0,0 +1,62 @@
|
||||
/**
|
||||
******************************************************************************
|
||||
* @file ai_lite.h
|
||||
* @author STMicroelectronics
|
||||
* @brief Definitions and implementations of runtime-lite public APIs
|
||||
******************************************************************************
|
||||
* @attention
|
||||
*
|
||||
* Copyright (c) 2022 STMicroelectronics.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This software is licensed under terms that can be found in the LICENSE file
|
||||
* in the root directory of this software component.
|
||||
* If no LICENSE file comes with this software, it is provided AS-IS.
|
||||
*
|
||||
******************************************************************************
|
||||
*/
|
||||
#ifndef AI_LITE_H
|
||||
#define AI_LITE_H
|
||||
|
||||
#include "ai_platform.h"
|
||||
#include "stai.h"
|
||||
|
||||
#define LITE_API_ENTRY \
|
||||
/* LITE_API_ENTRY */
|
||||
|
||||
#define LITE_GRAPH_INIT(_inputs, _outputs, _activations, _weights, _cb, _cb_cookie) { \
|
||||
.inputs = (stai_ptr*)(_inputs), \
|
||||
.outputs = (stai_ptr*)(_outputs), \
|
||||
.activations = (stai_ptr*)(_activations), \
|
||||
.weights = (const stai_ptr*)(_weights), \
|
||||
.cb = (_cb), \
|
||||
.cb_cookie = (_cb_cookie), \
|
||||
}
|
||||
|
||||
|
||||
STAI_API_DECLARE_BEGIN
|
||||
|
||||
typedef enum {
|
||||
LITE_OK = 0x0,
|
||||
LITE_KO_INPUTS = (0x1 << 0),
|
||||
LITE_KO_OUTPUTS = (0x1 << 1),
|
||||
LITE_KO_WEIGHTS = (0x1 << 2),
|
||||
LITE_KO_ACTIVATIONS = (0x1 << 3),
|
||||
LITE_KO_GRAPH = (0x1 << 4),
|
||||
LITE_KO_API = (0x1 << 5),
|
||||
} lite_result;
|
||||
|
||||
|
||||
typedef struct {
|
||||
stai_ptr* inputs;
|
||||
stai_ptr* outputs;
|
||||
stai_ptr* activations;
|
||||
const stai_ptr* weights;
|
||||
const stai_event_cb cb;
|
||||
void* cb_cookie;
|
||||
} lite_graph;
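A hedged sketch of how generated code might instantiate a lite_graph with the LITE_GRAPH_INIT macro defined above; all buffer symbols below are placeholders, and stai_ptr / stai_event_cb come from stai.h:

/* One input, one output, one activation arena and one weights blob (placeholders). */
static stai_ptr g_inputs[1];
static stai_ptr g_outputs[1];
static stai_ptr g_activations[1];
static const stai_ptr g_weights[1] = { 0 };

static lite_graph g_graph = LITE_GRAPH_INIT(
  g_inputs, g_outputs, g_activations, g_weights,
  0 /* cb */, 0 /* cb_cookie */);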
|
||||
|
||||
|
||||
STAI_API_DECLARE_END
|
||||
|
||||
#endif /* AI_LITE_H */
|
||||
44
lib/stai/libstai/include/ai_lite_inspect.h
Normal file
@ -0,0 +1,44 @@
|
||||
/**
|
||||
******************************************************************************
|
||||
* @file ai_lite_inspect.h
|
||||
* @author STMicroelectronics
|
||||
* @brief Definitions and implementations of runtime-lite inspection routines
|
||||
******************************************************************************
|
||||
* @attention
|
||||
*
|
||||
* Copyright (c) 2021 STMicroelectronics.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This software is licensed under terms that can be found in the LICENSE file
|
||||
* in the root directory of this software component.
|
||||
* If no LICENSE file comes with this software, it is provided AS-IS.
|
||||
*
|
||||
******************************************************************************
|
||||
*/
|
||||
#ifndef AI_LITE_INSPECT_H
|
||||
#define AI_LITE_INSPECT_H
|
||||
#include "ai_platform.h"
|
||||
|
||||
// #define HAS_LITE_INSPECT
|
||||
|
||||
#ifdef HAS_LITE_INSPECT
|
||||
#include "stai_debug.h"
|
||||
|
||||
#define LITE_INSPECT_CB(flags, node_id, data_ptr, data_size, data_fmt, data_id, data_pos) { \
|
||||
if (graph->cb) { \
|
||||
graph->cb((const void*)(graph->cb_cookie), \
|
||||
(const stai_flags)(flags), \
|
||||
(const int32_t)(node_id), (const void*)(data_ptr), (const int32_t)(data_size), \
|
||||
(const int32_t)(data_fmt), (const int32_t)(data_id), (const int32_t)(data_pos)); \
|
||||
} \
|
||||
}
|
||||
|
||||
#else
|
||||
|
||||
#define LITE_INSPECT_CB(flags, node_id, data_ptr, data_size, data_fmt, data_id, data_pos) { \
|
||||
do { /* LITE_INSPECT_CB() */ } while (0); \
|
||||
}
|
||||
|
||||
#endif /* HAS_LITE_INSPECT */
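A hedged sketch of how generated lite code might emit an inspection event through the macro above. The enabled form expands to graph->cb(...), so a lite_graph* named exactly graph must be in scope (lite_graph comes from ai_lite.h); the node and format identifiers below are placeholders:

static void inspect_example(lite_graph* graph, const void* out_data, int32_t out_bytes)
{
  /* Compiled out to a no-op unless HAS_LITE_INSPECT is defined. */
  LITE_INSPECT_CB(0 /* flags */, 42 /* node_id */,
                  out_data, out_bytes,
                  0 /* data_fmt */, 0 /* data_id */, 0 /* data_pos */);
  (void)graph; (void)out_data; (void)out_bytes;
}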
|
||||
|
||||
#endif /* AI_LITE_INSPECT_H */
|
||||
125
lib/stai/libstai/include/ai_lite_interface.h
Normal file
@ -0,0 +1,125 @@
|
||||
/**
|
||||
******************************************************************************
|
||||
* @file ai_lite_interface.h
|
||||
* @author STMicroelectronics
|
||||
* @brief Definitions and implementations of runtime-lite codegen APIs
|
||||
******************************************************************************
|
||||
* @attention
|
||||
*
|
||||
* Copyright (c) 2022 STMicroelectronics.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This software is licensed under terms that can be found in the LICENSE file
|
||||
* in the root directory of this software component.
|
||||
* If no LICENSE file comes with this software, it is provided AS-IS.
|
||||
*
|
||||
******************************************************************************
|
||||
*/
|
||||
#ifndef AI_LITE_INTERFACE_H
|
||||
#define AI_LITE_INTERFACE_H
|
||||
|
||||
#include "ai_lite.h"
|
||||
#include "core_assert.h"
|
||||
|
||||
|
||||
/*****************************************************************************/
|
||||
/* Generic Codegen Section */
|
||||
// #ifdef HAS_LOG
|
||||
#if 0
|
||||
#include "core_log.h"
|
||||
|
||||
#define LITE_GRAPH_START(_graph_name) \
|
||||
AI_LOG_DEBUG("[LITE GRAPH START] : " _graph_name)
|
||||
|
||||
#define LITE_GRAPH_END(_graph_name) \
|
||||
AI_LOG_DEBUG("[LITE GRAPH END] : " _graph_name)
|
||||
|
||||
#else
|
||||
|
||||
#define LITE_GRAPH_START(_graph_name) \
|
||||
/* LITE_GRAPH_START() */
|
||||
|
||||
#define LITE_GRAPH_END(_graph_name) \
|
||||
/* LITE_GRAPH_END() */
|
||||
|
||||
#endif /* HAS_LOG */
|
||||
|
||||
#define LITE_ASSERT(expr) \
|
||||
CORE_ASSERT(expr)
|
||||
|
||||
|
||||
/*****************************************************************************/
|
||||
#if defined(_MSC_VER)
|
||||
#define LITE_DECLARE_STATIC static __inline
|
||||
#define LITE_HINT_INLINE static __inline
|
||||
#define LITE_FORCE_INLINE static __inline
|
||||
#elif defined(__ICCARM__) || defined (__IAR_SYSTEMS_ICC__)
|
||||
#define LITE_DECLARE_STATIC static inline
|
||||
#define LITE_HINT_INLINE static inline
|
||||
#define LITE_FORCE_INLINE static inline
|
||||
#elif defined(__GNUC__)
|
||||
#define LITE_DECLARE_STATIC static __inline
|
||||
#define LITE_HINT_INLINE static __inline
|
||||
#define LITE_FORCE_INLINE static __inline
|
||||
#else
|
||||
#define LITE_DECLARE_STATIC static __inline
|
||||
#define LITE_HINT_INLINE static __inline
|
||||
#define LITE_FORCE_INLINE static __inline
|
||||
#endif /* _MSC_VER */
|
||||
|
||||
#define LITE_API_ENTRY /* LITE_API_ENTRY */
|
||||
|
||||
#define LITE_PACK(...) \
|
||||
__VA_ARGS__
|
||||
|
||||
#define LITE_UNUSED(_elem) \
|
||||
((void)(_elem));
|
||||
|
||||
|
||||
/*****************************************************************************/
|
||||
/* Arrays Section */
|
||||
|
||||
#define LITE_ARRAY_VALUES(...) \
|
||||
{ LITE_PACK(__VA_ARGS__) }
|
||||
|
||||
#define LITE_ARRAY_DATA(_array, _type) \
|
||||
((_type*)(_array)->data)
|
||||
|
||||
#define LITE_ARRAY_DATA_START(_array, _type) \
|
||||
((_type*)(_array)->data_start)
|
||||
|
||||
/*****************************************************************************/
|
||||
/* Tensors Section */
|
||||
|
||||
#define LITE_TENSOR_ARRAY(_tensor, _pos) \
|
||||
(((_tensor)->data) + (_pos))
|
||||
|
||||
/*****************************************************************************/
|
||||
/* Tensors List Section */
|
||||
|
||||
#define LITE_TENSOR_LIST(_chain, _pos) \
|
||||
(&(_chain)->chain[_pos])
|
||||
|
||||
#define LITE_TENSOR_IN(_chain, _pos) \
|
||||
(LITE_TENSOR_LIST(_chain, 0)->tensor[_pos])
|
||||
|
||||
#define LITE_TENSOR_OUT(_chain, _pos) \
|
||||
(LITE_TENSOR_LIST(_chain, 1)->tensor[_pos])
|
||||
|
||||
#define LITE_TENSOR_WEIGHTS(_chain, _pos) \
|
||||
(LITE_TENSOR_LIST(_chain, 2)->tensor[_pos])
|
||||
|
||||
#define LITE_TENSOR_SCRATCHS(_chain, _pos) \
|
||||
(LITE_TENSOR_LIST(_chain, 3)->tensor[_pos])
|
||||
|
||||
/*****************************************************************************/
|
||||
#define LITE_LAYER_ACQUIRE(name_, cast_type_, ptr_) \
|
||||
LITE_ASSERT(ptr_) \
|
||||
AI_CONCAT(ai_layer_, cast_type_)* name_ = \
|
||||
(AI_CONCAT(ai_layer_, cast_type_)*)(ptr_);
|
||||
|
||||
#define LITE_LAYER_RELEASE(name_, cast_type_) \
|
||||
/* LITE_LAYER_RELEASE() */
|
||||
|
||||
|
||||
#endif /* AI_LITE_INTERFACE_H */
|
||||
272
lib/stai/libstai/include/ai_lite_math_helpers.h
Normal file
@ -0,0 +1,272 @@
|
||||
#ifndef AI_LITE_MATH_HELPERS_H
|
||||
#define AI_LITE_MATH_HELPERS_H
|
||||
/**
|
||||
******************************************************************************
|
||||
* @file ai_lite_math_helpers.h
|
||||
* @author STMicroelectronics
|
||||
* @brief Math helpers routines header file for lite APIs.
|
||||
******************************************************************************
|
||||
* @attention
|
||||
*
|
||||
* Copyright (c) 2022 STMicroelectronics.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This software is licensed under terms that can be found in the LICENSE file
|
||||
* in the root directory of this software component.
|
||||
* If no LICENSE file comes with this software, it is provided AS-IS.
|
||||
*
|
||||
******************************************************************************
|
||||
*/
|
||||
#include <math.h>
|
||||
#include <limits.h>
|
||||
#include <stdint.h>
|
||||
|
||||
#include "ai_platform.h"
|
||||
#include "ai_platform_interface.h"
|
||||
#include "ai_datatypes_defines.h"
|
||||
|
||||
#define AI_FLOAT_TOLERANCE (6.19209290e-5F) /* Used for small calculation
|
||||
noise issues */
|
||||
#define AI_FLOAT_EPSILON (1.19209290e-7F)
|
||||
#define AI_I8_EPSILON (0.00787401F) /* 1/(2^7 - 1) */
|
||||
#define AI_I16_EPSILON (3.051851e-5F) /* 1/(2^15 - 1) */
|
||||
|
||||
#define AI_FLT_MAX (3.40282346638528859812e+38f)
|
||||
|
||||
#define AI_MIN(x,y) ( ((x)<(y)) ? (x) : (y) )
|
||||
#define AI_MAX(x,y) ( ((x)>(y)) ? (x) : (y) )
|
||||
#define AI_SIGN(x) (((x)>0) ? 1 : -1)
|
||||
#define AI_CLAMP(x, min, max) AI_MIN(AI_MAX(x,min), max)
|
||||
#define AI_ABS(x) fabsf(x)
|
||||
#define AI_ABS_DIFF(x, y) ( ((x)>(y)) ? ((x)-(y)) : ((y)-(x)) )
|
||||
#define AI_NEG(x) ( -1 * (x) )
|
||||
#define AI_NOT(x) ( ((x)==true) ? false : true)
|
||||
#define AI_RECIPROCAL(x) ( 1.0f / (x) )
|
||||
#define AI_CEIL(x) ceilf(x)
|
||||
#define AI_FLOOR(x) floorf(x)
|
||||
#define AI_FLOOR_DIV(x, y) AI_FLOOR((x)/(y)) /* floor division: x // y */
|
||||
#define AI_FLOOR_MOD(x, y) fmodf(x, y)
|
||||
#define AI_ROUND(x) roundf(x)
|
||||
#define AI_POW(x,y) powf(x, y)
|
||||
|
||||
#define AI_SQUARED_DIFF(x, y) (((x)-(y)) * ((x)-(y)))
|
||||
|
||||
#define AI_FLOAT_NEGATIVE_HALF (-0.5f + AI_FLOAT_EPSILON)
|
||||
#define AI_FLOAT_POSITIVE_HALF (0.5f)
|
||||
|
||||
|
||||
#define AI_MATH_ACOS(x) acosf(x)
|
||||
#define AI_MATH_ACOSH(x) acoshf(x)
|
||||
#define AI_MATH_ASIN(x) asinf(x)
|
||||
#define AI_MATH_ASINH(x) asinhf(x)
|
||||
#define AI_MATH_ATAN(x) atanf(x)
|
||||
#define AI_MATH_ATANH(x) atanhf(x)
|
||||
#define AI_MATH_COS(x) cosf(x)
|
||||
#define AI_MATH_COSH(x) coshf(x)
|
||||
#define AI_MATH_ERF(x) erff(x)
|
||||
#define AI_MATH_EXP(x) expf(x)
|
||||
#define AI_MATH_LOG(x) logf(x)
|
||||
#define AI_MATH_POW(x, e) powf((x), (e))
|
||||
#define AI_MATH_RSQRT(x) (1.0f / AI_MATH_SQRT(x))
|
||||
#define AI_MATH_SIN(x) sinf(x)
|
||||
#define AI_MATH_SINH(x) sinhf(x)
|
||||
#define AI_MATH_SQRT(x) ai_math_sqrt(x)
|
||||
#define AI_MATH_TAN(x) tanf(x)
|
||||
#define AI_MATH_TANH(x) tanhf(x)
|
||||
#define AI_MATH_SQUARE(x) AI_MATH_POW(x, 2.0f)
|
||||
|
||||
#define AI_MATH_RELU_TEST(x, thr, min, max) \
|
||||
(((x)<=(thr)) ? (min) : (max))
|
||||
|
||||
#define AI_MATH_CLIP_LINEAR_REMAP(x, alpha, beta) \
|
||||
(AI_MAX(0, AI_MIN(1, ((x) * (alpha) + (beta)))))
|
||||
|
||||
#define AI_MATH_RELU_GENERIC(x, thr, alpha, max) \
|
||||
AI_MATH_RELU_TEST(x, max, AI_MATH_RELU_GENERIC_NO_MAX(x, thr, alpha), max)
|
||||
|
||||
#define AI_MATH_RELU_GENERIC_NO_MAX(x, thr, alpha) \
|
||||
AI_MATH_RELU_TEST(x, thr, ((alpha)*((x)-(thr))), x)
|
||||
|
||||
#define AI_MATH_RELU_THRESHOLDED(x, thr) \
|
||||
AI_MATH_RELU_TEST(x, thr, 0, (x))
|
||||
|
||||
#define AI_MATH_LEAKY_RELU(x, neg_slope, pos_slope) \
|
||||
AI_MATH_RELU_TEST(x, 0, (x)*(neg_slope), (x)*(pos_slope))
|
||||
// ( ((x)>0) ? (x)*(pos_slope) : (x)*(neg_slope) )
|
||||
|
||||
#define AI_MATH_PRELU(x, slope) \
|
||||
AI_MATH_RELU_TEST(x, 0, (x)*(slope), (x))
|
||||
// AI_MATH_LEAKY_RELU(x, slope, 1)
|
||||
|
||||
#define AI_MATH_RELU(x) \
|
||||
AI_MATH_RELU_TEST(x, 0, 0, x)
|
||||
// AI_MAX(x, 0)
|
||||
|
||||
#define AI_MATH_ELU(x, alpha) \
|
||||
(AI_MAX(0.0f, (x)) + AI_MIN(0.0f, (alpha) * (AI_MATH_EXP(x)-1.0f)))
|
||||
|
||||
#define AI_MATH_SELU(x, alpha, scale) \
|
||||
((scale)*AI_MATH_ELU(x, alpha))
|
||||
|
||||
#define AI_MATH_SCALED_TANH(x, alpha, beta) \
|
||||
((alpha)*AI_MATH_TANH((beta)*(x)))
|
||||
|
||||
#define AI_MATH_SIGMOID(x) \
|
||||
(1.0f / (1.0f + AI_MATH_EXP(-(x))))
|
||||
|
||||
#define AI_MATH_LOGISTIC(x)\
|
||||
(x < 0) ? (1.0f -(1.0f / (1.0f + AI_MATH_EXP(-AI_ABS(x))))) :\
|
||||
(1.0f / (1.0f + AI_MATH_EXP(-AI_ABS(x))))
|
||||
|
||||
#define AI_MATH_HARD_SIGMOID(x, alpha, beta) \
|
||||
AI_MATH_CLIP_LINEAR_REMAP(x, alpha, beta)
|
||||
|
||||
#define AI_MATH_GELU_NO_APPROXIMATE(x) \
|
||||
((x / 2.0f) * (1.0f + AI_MATH_ERF(x/AI_MATH_SQRT(2.0f))))
|
||||
|
||||
#define AI_MATH_GELU_APPROXIMATE(x) \
|
||||
((x / 2.0f) * (1.0f + AI_MATH_TANH(AI_MATH_SQRT(2.0f/PI)*(x + 0.044715f * AI_MATH_POW(x, 3.0f)))))
|
||||
|
||||
#define AI_MATH_GELU(x, approximate) \
|
||||
(((bool)approximate) ? AI_MATH_GELU_APPROXIMATE(x) : AI_MATH_GELU_NO_APPROXIMATE(x))
|
||||
|
||||
|
||||
|
||||
|
||||
/* Formula with higher accuracy */
|
||||
#define AI_MATH_SWISH(x) \
|
||||
((x) * AI_MATH_SIGMOID(x))
|
||||
|
||||
#define AI_MATH_HARD_SWISH(x) \
|
||||
((x) * AI_MATH_CLIP_LINEAR_REMAP(x, 1.0f/6, 0.5f))
|
||||
|
||||
#define AI_MATH_SOFT_PLUS(x) \
|
||||
AI_MATH_LOG(1.0f + AI_MATH_EXP(x))
|
||||
|
||||
#define AI_MATH_SOFT_SIGN(x) \
|
||||
((x) / (1.0f + AI_ABS(x)))
|
||||
|
||||
/*!
|
||||
* @brief Round float x to the nearest integer (breaking +- 0.5 ties to the nearest even integer)
|
||||
*/
|
||||
static inline ai_i32 ai_round_f2i_t2e(ai_float x)
|
||||
{
|
||||
x += x >= 0.0f ? 0.5f : -0.5f;
|
||||
ai_i32 i32_x = (ai_i32)x;
|
||||
|
||||
if (((ai_float)i32_x == x) && ((i32_x & 0x1) != 0)) {
|
||||
ai_i32 to_nearest_even = i32_x < 0 ? 1 : -1;
|
||||
i32_x += to_nearest_even;
|
||||
}
|
||||
return i32_x;
|
||||
}
|
||||
|
||||
static inline ai_u32 ai_round_f2u_t2e(ai_float x)
|
||||
{
|
||||
x += 0.5f;
|
||||
ai_u32 u32_x = (ai_u32)x;
|
||||
|
||||
if (((ai_float)u32_x) == x && ((u32_x & 0x1) != 0)) {
|
||||
u32_x -= 1;
|
||||
}
|
||||
return u32_x;
|
||||
}
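A quick sanity sketch of the ties-to-even behaviour implemented by the helpers above (hypothetical test code, not part of the header):

#include <assert.h>

static void round_ties_to_even_example(void)
{
  assert(ai_round_f2i_t2e(2.5f)  == 2);   /* tie broken toward even 2  */
  assert(ai_round_f2i_t2e(3.5f)  == 4);   /* tie broken toward even 4  */
  assert(ai_round_f2i_t2e(-2.5f) == -2);  /* negative tie lands on even */
  assert(ai_round_f2i_t2e(2.4f)  == 2);   /* non-tie rounds normally   */
}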
|
||||
|
||||
|
||||
AI_API_DECLARE_BEGIN
|
||||
|
||||
/*!
|
||||
* @typedef ai_vec4_float
|
||||
* @ingroup ai_datatypes_internal
|
||||
* @brief 32bit X 4 float (optimization for embedded MCU)
|
||||
*/
|
||||
typedef struct {
|
||||
ai_float a1;
|
||||
ai_float a2;
|
||||
ai_float a3;
|
||||
ai_float a4;
|
||||
} ai_vec4_float;
|
||||
|
||||
|
||||
#define AI_VEC4_FLOAT(ptr_) \
|
||||
_get_vec4_float((ai_handle)(ptr_))
|
||||
|
||||
|
||||
AI_DECLARE_STATIC
|
||||
ai_vec4_float _get_vec4_float(const ai_handle fptr)
|
||||
{
|
||||
return *((const ai_vec4_float*)fptr);
|
||||
}
|
||||
|
||||
/*****************************************************************************/
|
||||
typedef struct {
|
||||
ai_u16 numRows; /**< number of rows of the matrix. */
|
||||
ai_u16 numCols; /**< number of columns of the matrix. */
|
||||
ai_float *pData; /**< points to the data of the matrix. */
|
||||
} ai_matrix_f32;
|
||||
|
||||
/*!
|
||||
* @brief general 2D matrix initialization
|
||||
* @ingroup ai_lite_math_helpers
|
||||
* @param S pointer to S matrix
|
||||
* @param nRows number of rows of S matrix
|
||||
* @param nColumns number of columns of S matrix
|
||||
* @param pData pointer to S matrix data
|
||||
*/
|
||||
AI_INTERFACE_ENTRY
|
||||
void st_mat_init_f32(ai_matrix_f32* S,
|
||||
const uint16_t nRows,
|
||||
const uint16_t nColumns,
|
||||
float* pData);
|
||||
|
||||
|
||||
/*!
|
||||
* @brief general 2D matrix multiplication on float values
|
||||
* @ingroup ai_lite_math_helpers
|
||||
* @param pSrcA pointer to A matrix
|
||||
* @param pSrcB pointer to B matrix
|
||||
* @param pSrcC pointer to C matrix/array
|
||||
* @param alpha multiplier of A*B product
|
||||
* @param beta multiplier of C
|
||||
* @param tA flag for A transpose
|
||||
* @param tB flag for B transpose
|
||||
* @param pDstY matrix result
|
||||
* @return ARM_MATH_SUCCESS in case of success, ARM_MATH_SIZE_MISMATCH otherwise
|
||||
*/
|
||||
AI_INTERFACE_ENTRY
|
||||
uint32_t st_mat_gemm_f32(const ai_matrix_f32* pSrcA,
|
||||
const ai_matrix_f32* pSrcB,
|
||||
const ai_matrix_f32* pSrcC,
|
||||
const float alpha, const float beta,
|
||||
const int8_t tA, const int8_t tB,
|
||||
ai_matrix_f32 * pDstY);
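A hedged usage sketch for the two matrix helpers above; it assumes pDstY receives alpha*A*B + beta*C, that st_mat_init_f32 only fills the descriptor (buffers are caller-owned), and that passing a zero-filled C with beta = 0 is acceptable:

static void gemm_example(void)
{
  float a_data[6] = { 1, 2, 3, 4, 5, 6 };   /* A is 2x3 (row-major assumed) */
  float b_data[6] = { 1, 0, 0, 1, 1, 1 };   /* B is 3x2 */
  float c_data[4] = { 0 };                  /* C is 2x2, all zeros */
  float y_data[4];                          /* Y is 2x2 */
  ai_matrix_f32 A, B, C, Y;

  st_mat_init_f32(&A, 2, 3, a_data);
  st_mat_init_f32(&B, 3, 2, b_data);
  st_mat_init_f32(&C, 2, 2, c_data);
  st_mat_init_f32(&Y, 2, 2, y_data);

  /* Y = 1.0 * A * B + 0.0 * C, no transposes; returns the success code documented above. */
  const uint32_t status = st_mat_gemm_f32(&A, &B, &C, 1.0f, 0.0f, 0, 0, &Y);
  (void)status;
}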
|
||||
|
||||
/*!
|
||||
* @brief platform optimized square root on a float value
|
||||
* @ingroup ai_lite_math_helpers
|
||||
* @param x input value
|
||||
* @return square root of the value
|
||||
*/
|
||||
AI_INTERFACE_ENTRY
|
||||
float ai_math_sqrt(const float x);
|
||||
|
||||
#endif /*AI_LITE_MATH_HELPERS_H*/
|
||||
572
lib/stai/libstai/include/ai_math_helpers.h
Normal file
@ -0,0 +1,572 @@
|
||||
#ifndef AI_MATH_HELPERS_H
|
||||
#define AI_MATH_HELPERS_H
|
||||
/**
|
||||
******************************************************************************
|
||||
* @file ai_math_helpers.h
|
||||
* @author AST Embedded Analytics Research Platform
|
||||
* @brief Math helpers routines header file.
|
||||
******************************************************************************
|
||||
* @attention
|
||||
*
|
||||
* Copyright (c) 2017 STMicroelectronics.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This software is licensed under terms that can be found in the LICENSE file
|
||||
* in the root directory of this software component.
|
||||
* If no LICENSE file comes with this software, it is provided AS-IS.
|
||||
*
|
||||
******************************************************************************
|
||||
*/
|
||||
#include "ai_lite_math_helpers.h"
|
||||
|
||||
//#if defined(HAS_X86) || defined(__CC_ARM) || defined(CM4) || defined(CM7)
|
||||
#define _AI_CONV_2D_LOOP_UNROLLING_OPTIM
|
||||
//#endif
|
||||
|
||||
#define STM32_DOT_INLINE_OPTIM
|
||||
|
||||
/* Modes for element wise integer optimized implementation */
|
||||
#define AI_ELTWISE_NO_SCALAR (0)
|
||||
#define AI_ELTWISE_SCALAR1 (1)
|
||||
#define AI_ELTWISE_SCALAR2 (2)
|
||||
#define AI_ELTWISE_SCALAR_CH1 (3)
|
||||
#define AI_ELTWISE_SCALAR_CH2 (4)
|
||||
|
||||
|
||||
AI_API_DECLARE_BEGIN
|
||||
|
||||
#if defined(STM32_DOT_INLINE_OPTIM)
|
||||
|
||||
AI_DECLARE_STATIC
|
||||
void __ai_math_dot_array(
|
||||
ai_float* out,
|
||||
const ai_float* data0,
|
||||
const ai_float* data1,
|
||||
ai_size data_size)
|
||||
{
|
||||
ai_register ai_float sum = 0.0f; /* Temporary result storage */
|
||||
|
||||
/* Run the below code for Cortex-M4 and Cortex-M3 */
|
||||
|
||||
#if defined(_AI_CONV_2D_LOOP_UNROLLING_OPTIM)
|
||||
/* First part of the processing with loop unrolling. Compute 16 outputs at a time.
|
||||
** a second loop below computes the remaining 1 to 15 samples. */
|
||||
while (data_size >= 16u) {
|
||||
ai_register ai_vec4_float ch_in_f = AI_VEC4_FLOAT(data1);
|
||||
ai_register ai_vec4_float weights_in_f = AI_VEC4_FLOAT(data0);
|
||||
sum += weights_in_f.a1 * ch_in_f.a1;
|
||||
sum += weights_in_f.a2 * ch_in_f.a2;
|
||||
sum += weights_in_f.a3 * ch_in_f.a3;
|
||||
sum += weights_in_f.a4 * ch_in_f.a4;
|
||||
data1 += 4;
|
||||
data0 += 4;
|
||||
|
||||
ch_in_f = AI_VEC4_FLOAT(data1);
|
||||
weights_in_f = AI_VEC4_FLOAT(data0);
|
||||
sum += weights_in_f.a1 * ch_in_f.a1;
|
||||
sum += weights_in_f.a2 * ch_in_f.a2;
|
||||
sum += weights_in_f.a3 * ch_in_f.a3;
|
||||
sum += weights_in_f.a4 * ch_in_f.a4;
|
||||
data1 += 4;
|
||||
data0 += 4;
|
||||
|
||||
ch_in_f = AI_VEC4_FLOAT(data1);
|
||||
weights_in_f = AI_VEC4_FLOAT(data0);
|
||||
sum += weights_in_f.a1 * ch_in_f.a1;
|
||||
sum += weights_in_f.a2 * ch_in_f.a2;
|
||||
sum += weights_in_f.a3 * ch_in_f.a3;
|
||||
sum += weights_in_f.a4 * ch_in_f.a4;
|
||||
data1 += 4;
|
||||
data0 += 4;
|
||||
|
||||
ch_in_f = AI_VEC4_FLOAT(data1);
|
||||
weights_in_f = AI_VEC4_FLOAT(data0);
|
||||
sum += weights_in_f.a1 * ch_in_f.a1;
|
||||
sum += weights_in_f.a2 * ch_in_f.a2;
|
||||
sum += weights_in_f.a3 * ch_in_f.a3;
|
||||
sum += weights_in_f.a4 * ch_in_f.a4;
|
||||
data1 += 4;
|
||||
data0 += 4;
|
||||
data_size -= 16u;
|
||||
}
|
||||
|
||||
/* Second stage of loop unrolling. Compute 4 outputs at a time.
|
||||
** a second loop below computes the remaining 1 to 3 samples. */
|
||||
while (data_size >= 4u) {
|
||||
/* C = A[0]* B[0] + A[1]* B[1] + A[2]* B[2] + .....+ A[blockSize-1]* B[blockSize-1] */
|
||||
/* Calculate dot product and then store the result in a temporary buffer */
|
||||
sum += (*data0++) * (*data1++);
|
||||
sum += (*data0++) * (*data1++);
|
||||
sum += (*data0++) * (*data1++);
|
||||
sum += (*data0++) * (*data1++);
|
||||
|
||||
/* Decrement the loop counter */
|
||||
data_size -= 4u;
|
||||
}
|
||||
#endif
|
||||
while (data_size > 0u) {
|
||||
/* C = A[0]* B[0] + A[1]* B[1] + A[2]* B[2] + .....+ A[blockSize-1]* B[blockSize-1] */
|
||||
/* Calculate dot product and then store the result in a temporary buffer. */
|
||||
sum += (*data0++) * (*data1++);
|
||||
|
||||
/* Decrement the loop counter */
|
||||
data_size--;
|
||||
}
|
||||
|
||||
/* Directly accumulate the result back in the destination buffer */
|
||||
*out += sum;
|
||||
}
|
||||
|
||||
|
||||
#undef AI_MATH_DOT_ARRAY
|
||||
#define AI_MATH_DOT_ARRAY(dst, src0, src1, size) \
|
||||
{ __ai_math_dot_array(dst, src0, src1, size); }
|
||||
|
||||
#else /* STM32_DOT_INLINE_OPTIM */
|
||||
|
||||
#undef AI_MATH_DOT_ARRAY
|
||||
#define AI_MATH_DOT_ARRAY(dst, src0, src1, size) \
|
||||
{ ai_math_dot_array(dst, src0, src1, size); }
|
||||
|
||||
#endif
|
||||
|
||||
|
||||
/*!
|
||||
* @defgroup math_helpers Math helpers
|
||||
* @brief Common math functions
|
||||
*
|
||||
* Math functions are mapped to the underlying platform through those utility
|
||||
* functions. On x86 and ARM v7 they are mapped to the float math functions in
|
||||
* the C99 standard library; on MCUs they are mapped to the ARM DSP functions.
|
||||
*/
|
||||
|
||||
/*!
|
||||
* @brief platform optimized dot product of float vectors
|
||||
*
|
||||
* Computes the dot product between vectors and adds the result to out.
|
||||
* @ingroup math_helpers
|
||||
* @param out scalar result of the dot product
|
||||
* @param data0 the first float vector
|
||||
* @param data1 the second float vector
|
||||
* @param data_size the size of both vectors
|
||||
*/
|
||||
AI_INTERFACE_ENTRY
|
||||
void ai_math_dot_array(
|
||||
ai_float* out,
|
||||
const ai_float* data0,
|
||||
const ai_float* data1,
|
||||
const ai_size data_size);
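Since the helper accumulates into its destination (both the prototype documented above and the inlined __ai_math_dot_array variant earlier in this file add to *out), the output must be primed before the call. A minimal sketch:

static void dot_array_example(void)
{
  const ai_float v0[4] = { 1.0f, 2.0f, 3.0f, 4.0f };
  const ai_float v1[4] = { 1.0f, 1.0f, 1.0f, 1.0f };
  ai_float acc = 0.0f;                 /* start from 0 (or from a bias value) */
  AI_MATH_DOT_ARRAY(&acc, v0, v1, 4);  /* acc == 10.0f afterwards */
  (void)acc;
}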
|
||||
/*!
|
||||
* @brief inverse error function (erfinv) of a float value
|
||||
* @ingroup math_helpers
|
||||
* @param x input value
|
||||
* @return inverse error function of the value
|
||||
*/
|
||||
AI_INTERFACE_ENTRY ai_float ai_math_erfinv(const ai_float x);
|
||||
|
||||
/*!
|
||||
* @brief platform optimized exponential on a float value
|
||||
* @ingroup math_helpers
|
||||
* @param x input value
|
||||
* @return exponential of the value
|
||||
*/
|
||||
AI_INTERFACE_ENTRY ai_float ai_math_exp(const ai_float x);
|
||||
|
||||
/*!
|
||||
* @brief platform logical not
|
||||
* @ingroup math_helpers
|
||||
* @param x input value
|
||||
* @return not of the value
|
||||
*/
|
||||
AI_INTERFACE_ENTRY ai_bool ai_logical_not(const ai_bool x);
|
||||
|
||||
/*!
|
||||
* @brief platform optimized pow on a float value
|
||||
* @ingroup math_helpers
|
||||
* @param x input value
|
||||
* @param e input value
|
||||
* @return pow of the value ^ e
|
||||
*/
|
||||
AI_INTERFACE_ENTRY ai_float ai_math_pow(const ai_float x, const ai_float e);
|
||||
|
||||
/*!
|
||||
* @brief platform optimized hyperbolic tangent on a float value
|
||||
* @ingroup math_helpers
|
||||
* @param x input value
|
||||
* @return hyperbolic tangent of the value
|
||||
*/
|
||||
AI_INTERFACE_ENTRY ai_float ai_math_tanh(const ai_float x);
|
||||
|
||||
/*!
|
||||
* @brief platform optimized relu on a float value
|
||||
* @ingroup math_helpers
|
||||
* @param x input value
|
||||
* @return relu of the value (x if x > 0, else 0)
|
||||
*/
|
||||
AI_INTERFACE_ENTRY ai_float ai_math_relu(const ai_float x);
|
||||
|
||||
/*!
|
||||
* @brief platform optimized parametric relu on a float value
|
||||
* @ingroup math_helpers
|
||||
* @param x input value
|
||||
* @param slope input value
|
||||
* @return parametric relu of the value
|
||||
*/
|
||||
AI_INTERFACE_ENTRY ai_float ai_math_prelu(const ai_float x, const ai_float slope);
|
||||
|
||||
/*!
|
||||
* @brief platform optimized parametric sigmoid on a float value
|
||||
* @ingroup math_helpers
|
||||
* @param x input value
|
||||
* @return sigmoid of the value
|
||||
*/
|
||||
AI_INTERFACE_ENTRY ai_float ai_math_sigmoid(const ai_float x);
|
||||
|
||||
/*!
|
||||
* @brief platform optimized parametric hard sigmoid on a float value
|
||||
* @ingroup math_helpers
|
||||
* @param x input value
|
||||
* @return hard sigmoid of the value
|
||||
*/
|
||||
AI_INTERFACE_ENTRY ai_float ai_math_hard_sigmoid(const ai_float x); // const ai_float alpha, const ai_float beta);
|
||||
|
||||
/*!
|
||||
* @brief platform optimized parametric swish on a float value
|
||||
* @ingroup math_helpers
|
||||
* @param x input value
|
||||
* @return swish of the value
|
||||
*/
|
||||
AI_INTERFACE_ENTRY ai_float ai_math_swish(const ai_float x);
|
||||
|
||||
/*!
|
||||
* @brief platform optimized parametric hard_swish on a float value
|
||||
* @ingroup math_helpers
|
||||
* @param x input value
|
||||
* @return hard_swish of the value
|
||||
*/
|
||||
AI_INTERFACE_ENTRY ai_float ai_math_hard_swish(const ai_float x);
|
||||
|
||||
/*!
|
||||
* @brief platform optimized parametric gelu on a float value
|
||||
* @ingroup math_helpers
|
||||
* @param x input value
|
||||
* @param approximate when true, use the approximate (tanh-based) formula
|
||||
* @return gelu of the value
|
||||
*/
|
||||
AI_INTERFACE_ENTRY ai_float ai_math_gelu(const ai_float x, const ai_bool approximate);
|
||||
|
||||
/*!
|
||||
* @brief platform optimized parametric sign function on a float value
|
||||
* @ingroup math_helpers
|
||||
* @param x input value
|
||||
* @return sign of the value
|
||||
*/
|
||||
AI_INTERFACE_ENTRY ai_float ai_math_sign(const ai_float x);
|
||||
|
||||
/*!
|
||||
* @brief optimized parametric rectified linear unit on a float value
|
||||
* @ingroup math_helpers
|
||||
* @param x input value
|
||||
* @param slope parameter value
|
||||
* @return x if x is positive and x*slope otherwise
|
||||
*/
|
||||
AI_INTERFACE_ENTRY ai_float ai_fast_prelu(const ai_float x, const ai_float slope);
|
||||
|
||||
AI_INTERFACE_ENTRY void ai_div(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_div_buffer(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_div_f32(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_div_buffer_f32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_div_s32(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_div_buffer_s32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_div_s16(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_div_buffer_s16(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_div_s8(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_div_buffer_s8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_div_u32(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_div_buffer_u32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_div_u16(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_div_buffer_u16(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_div_u8(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_div_buffer_u8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_div_buffer_INT8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop,
|
||||
const ai_handle pScale1, const ai_handle pZp1, const ai_handle pScale2, const ai_handle pZp2,
|
||||
const ai_handle pScaleout, const ai_handle pZpout, const ai_i32 scalar_op);
|
||||
AI_INTERFACE_ENTRY void ai_div_buffer_UINT8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop,
|
||||
const ai_handle pScale1, const ai_handle pZp1, const ai_handle pScale2, const ai_handle pZp2,
|
||||
const ai_handle pScaleout, const ai_handle pZpout, const ai_i32 scalar_op);
|
||||
|
||||
AI_INTERFACE_ENTRY void ai_bitshift_right(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_bitshift_right_buffer(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_bitshift_right_u32(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_bitshift_right_buffer_u32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_bitshift_right_u16(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_bitshift_right_buffer_u16(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_bitshift_right_u8(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_bitshift_right_buffer_u8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_bitshift_left(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_bitshift_left_buffer(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_bitshift_left_u32(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_bitshift_left_buffer_u32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_bitshift_left_u16(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_bitshift_left_buffer_u16(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_bitshift_left_u8(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_bitshift_left_buffer_u8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
|
||||
AI_INTERFACE_ENTRY void ai_floor_div(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_floor_div_buffer(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_floor_mod(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_floor_mod_buffer(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
|
||||
AI_INTERFACE_ENTRY void ai_mod(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_mod_buffer(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_mod_f32(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_mod_buffer_f32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_mod_s32(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_mod_buffer_s32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_mod_s16(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_mod_buffer_s16(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_mod_s8(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_mod_buffer_s8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_mod_u32(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_mod_buffer_u32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_mod_u16(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_mod_buffer_u16(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_mod_u8(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_mod_buffer_u8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
|
||||
AI_INTERFACE_ENTRY void ai_max(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_max_buffer(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_max_f32(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_max_buffer_f32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_max_s32(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_max_buffer_s32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_max_s16(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_max_buffer_s16(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_max_s8(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_max_buffer_s8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_max_u32(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_max_buffer_u32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_max_u16(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_max_buffer_u16(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_max_u8(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_max_buffer_u8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_max_buffer_INT8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop,
|
||||
const ai_handle pScale1, const ai_handle pZp1, const ai_handle pScale2, const ai_handle pZp2,
|
||||
const ai_handle pScaleout, const ai_handle pZpout, const ai_i32 scalar_op);
|
||||
AI_INTERFACE_ENTRY void ai_max_buffer_UINT8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop,
|
||||
const ai_handle pScale1, const ai_handle pZp1, const ai_handle pScale2, const ai_handle pZp2,
|
||||
const ai_handle pScaleout, const ai_handle pZpout, const ai_i32 scalar_op);
|
||||
|
||||
AI_INTERFACE_ENTRY void ai_min(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_min_buffer(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_min_f32(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_min_buffer_f32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_min_s32(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_min_buffer_s32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_min_s16(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_min_buffer_s16(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_min_s8(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_min_buffer_s8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_min_u32(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_min_buffer_u32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_min_u16(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_min_buffer_u16(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_min_u8(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_min_buffer_u8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_min_buffer_INT8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop,
|
||||
const ai_handle pScale1, const ai_handle pZp1, const ai_handle pScale2, const ai_handle pZp2,
|
||||
const ai_handle pScaleout, const ai_handle pZpout, const ai_i32 scalar_op);
|
||||
AI_INTERFACE_ENTRY void ai_min_buffer_UINT8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop,
|
||||
const ai_handle pScale1, const ai_handle pZp1, const ai_handle pScale2, const ai_handle pZp2,
|
||||
const ai_handle pScaleout, const ai_handle pZpout, const ai_i32 scalar_op);
|
||||
|
||||
AI_INTERFACE_ENTRY void ai_mul(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_mul_buffer(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_mul_f32(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_mul_buffer_f32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_mul_s32(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_mul_buffer_s32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_mul_s16(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_mul_buffer_s16(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_mul_s8(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_mul_buffer_s8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_mul_u32(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_mul_buffer_u32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_mul_u16(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_mul_buffer_u16(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_mul_u8(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_mul_buffer_u8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_mul_buffer_INT8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop,
|
||||
const ai_handle pScale1, const ai_handle pZp1, const ai_handle pScale2, const ai_handle pZp2,
|
||||
const ai_handle pScaleout, const ai_handle pZpout, const ai_i32 scalar_op);
|
||||
AI_INTERFACE_ENTRY void ai_mul_buffer_UINT8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop,
|
||||
const ai_handle pScale1, const ai_handle pZp1, const ai_handle pScale2, const ai_handle pZp2,
|
||||
const ai_handle pScaleout, const ai_handle pZpout, const ai_i32 scalar_op);
|
||||
|
||||
AI_INTERFACE_ENTRY void ai_pow(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_pow_buffer(ai_handle out, const ai_handle b, const ai_handle e, const ai_size loop);
|
||||
|
||||
AI_INTERFACE_ENTRY void ai_sub(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_sub_buffer(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_sub_f32(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_sub_buffer_f32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_sub_s32(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_sub_buffer_s32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_sub_s16(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_sub_buffer_s16(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_sub_s8(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_sub_buffer_s8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_sub_u32(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_sub_buffer_u32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_sub_u16(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_sub_buffer_u16(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_sub_u8(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_sub_buffer_u8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_sub_buffer_INT8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop,
|
||||
const ai_handle pScale1, const ai_handle pZp1, const ai_handle pScale2, const ai_handle pZp2,
|
||||
const ai_handle pScaleout, const ai_handle pZpout, const ai_i32 scalar_op);
|
||||
AI_INTERFACE_ENTRY void ai_sub_buffer_UINT8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop,
|
||||
const ai_handle pScale1, const ai_handle pZp1, const ai_handle pScale2, const ai_handle pZp2,
|
||||
const ai_handle pScaleout, const ai_handle pZpout, const ai_i32 scalar_op);
|
||||
|
||||
AI_INTERFACE_ENTRY void ai_sum(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_sum_buffer(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_sum_f32(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_sum_buffer_f32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_sum_s32(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_sum_buffer_s32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_sum_s16(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_sum_buffer_s16(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_sum_s8(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_sum_buffer_s8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_sum_u32(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_sum_buffer_u32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_sum_u16(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_sum_buffer_u16(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_sum_u8(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_sum_buffer_u8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_sum_buffer_INT8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop,
|
||||
const ai_handle pScale1, const ai_handle pZp1, const ai_handle pScale2, const ai_handle pZp2,
|
||||
const ai_handle pScaleout, const ai_handle pZpout, const ai_i32 scalar_op);
|
||||
AI_INTERFACE_ENTRY void ai_sum_buffer_UINT8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop,
|
||||
const ai_handle pScale1, const ai_handle pZp1, const ai_handle pScale2, const ai_handle pZp2,
|
||||
const ai_handle pScaleout, const ai_handle pZpout, const ai_i32 scalar_op);
|
||||
|
||||
AI_INTERFACE_ENTRY void ai_and(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_and_buffer(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_or(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_or_buffer(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_xor(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_xor_buffer(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
|
||||
AI_INTERFACE_ENTRY void ai_greater(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_greater_buffer(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_greater_f32(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_greater_buffer_f32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_greater_s32(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_greater_buffer_s32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_greater_s16(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_greater_buffer_s16(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_greater_s8(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_greater_buffer_s8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_greater_u32(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_greater_buffer_u32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_greater_u16(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_greater_buffer_u16(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_greater_u8(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_greater_buffer_u8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
|
||||
AI_INTERFACE_ENTRY void ai_greater_or_equal(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_greater_or_equal_buffer(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_greater_or_equal_f32(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_greater_or_equal_buffer_f32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_greater_or_equal_s32(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_greater_or_equal_buffer_s32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_greater_or_equal_s16(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_greater_or_equal_buffer_s16(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_greater_or_equal_s8(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_greater_or_equal_buffer_s8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_greater_or_equal_u32(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_greater_or_equal_buffer_u32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_greater_or_equal_u16(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_greater_or_equal_buffer_u16(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_greater_or_equal_u8(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_greater_or_equal_buffer_u8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
|
||||
AI_INTERFACE_ENTRY void ai_less(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_less_buffer(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_less_f32(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_less_buffer_f32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_less_s32(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_less_buffer_s32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_less_s16(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_less_buffer_s16(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_less_s8(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_less_buffer_s8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_less_u32(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_less_buffer_u32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_less_u16(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_less_buffer_u16(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_less_u8(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_less_buffer_u8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
|
||||
AI_INTERFACE_ENTRY void ai_less_or_equal(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_less_or_equal_buffer(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_less_or_equal_f32(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_less_or_equal_buffer_f32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_less_or_equal_s32(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_less_or_equal_buffer_s32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_less_or_equal_s16(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_less_or_equal_buffer_s16(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_less_or_equal_s8(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_less_or_equal_buffer_s8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_less_or_equal_u32(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_less_or_equal_buffer_u32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_less_or_equal_u16(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_less_or_equal_buffer_u16(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_less_or_equal_u8(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_less_or_equal_buffer_u8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
|
||||
AI_INTERFACE_ENTRY void ai_equal(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_equal_buffer(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_equal_f32(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_equal_buffer_f32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_equal_s32(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_equal_buffer_s32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_equal_s16(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_equal_buffer_s16(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_equal_s8(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_equal_buffer_s8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_equal_u32(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_equal_buffer_u32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_equal_u16(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_equal_buffer_u16(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_equal_u8(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_equal_buffer_u8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
|
||||
AI_INTERFACE_ENTRY void ai_not_equal(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_not_equal_buffer(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_not_equal_f32(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_not_equal_buffer_f32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_not_equal_s32(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_not_equal_buffer_s32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_not_equal_s16(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_not_equal_buffer_s16(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_not_equal_s8(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_not_equal_buffer_s8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_not_equal_u32(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_not_equal_buffer_u32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_not_equal_u16(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_not_equal_buffer_u16(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
AI_INTERFACE_ENTRY void ai_not_equal_u8(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_not_equal_buffer_u8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
|
||||
AI_INTERFACE_ENTRY void ai_squared_diff(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_squared_diff_buffer(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
|
||||
AI_INTERFACE_ENTRY void ai_atan2(ai_handle out, const ai_handle a, const ai_handle b);
|
||||
AI_INTERFACE_ENTRY void ai_atan2_buffer(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
|
||||
|
||||
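/*
 * Editor's note - illustrative sketch, not part of the original header. Assuming the
 * *_buffer_* variants apply the named operation element-wise over `loop` consecutive
 * elements (as the scalar variants do for a single element), generated code could
 * invoke the s8 sum helper like this:
 *
 *   ai_i8 a[4]   = { 10, 20, 30, 40 };
 *   ai_i8 b[4]   = {  1,  2,  3,  4 };
 *   ai_i8 out[4] = { 0 };
 *
 *   ai_sum_buffer_s8(AI_HANDLE_PTR(out), AI_HANDLE_PTR(a), AI_HANDLE_PTR(b), 4);
 *   // expected result: out = { 11, 22, 33, 44 }
 */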
AI_API_DECLARE_END
|
||||
|
||||
#endif /* AI_MATH_HELPERS_H */
|
||||
985
lib/stai/libstai/include/ai_platform.h
Normal file
@ -0,0 +1,985 @@
|
||||
/**
|
||||
******************************************************************************
|
||||
* @file ai_platform.h
|
||||
* @author AST Embedded Analytics Research Platform
|
||||
* @brief Definitions of AI platform public APIs types
|
||||
******************************************************************************
|
||||
* @attention
|
||||
*
|
||||
* Copyright (c) 2017 STMicroelectronics.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This software is licensed under terms that can be found in the LICENSE file
|
||||
* in the root directory of this software component.
|
||||
* If no LICENSE file comes with this software, it is provided AS-IS.
|
||||
*
|
||||
******************************************************************************
|
||||
*/
|
||||
#ifndef AI_PLATFORM_H
|
||||
#define AI_PLATFORM_H
|
||||
|
||||
#include <stdint.h>
|
||||
#include <stddef.h>
|
||||
|
||||
#define __STDC_FORMAT_MACROS 1
|
||||
#include <inttypes.h>
|
||||
|
||||
#ifndef AI_PLATFORM_API_MAJOR
|
||||
#define AI_PLATFORM_API_MAJOR (1)
|
||||
#endif
|
||||
#ifndef AI_PLATFORM_API_MINOR
|
||||
#define AI_PLATFORM_API_MINOR (2)
|
||||
#endif
|
||||
#ifndef AI_PLATFORM_API_MICRO
|
||||
#define AI_PLATFORM_API_MICRO (0)
|
||||
#endif
|
||||
|
||||
#define AI_PLATFORM_API_VERSION \
|
||||
AI_VERSION(AI_PLATFORM_API_MAJOR, \
|
||||
AI_PLATFORM_API_MINOR, \
|
||||
AI_PLATFORM_API_MICRO)
|
||||
|
||||
|
||||
#ifndef AI_TOOLS_API_VERSION_MAJOR
|
||||
#define AI_TOOLS_API_VERSION_MAJOR (1)
|
||||
#endif
|
||||
#ifndef AI_TOOLS_API_VERSION_MINOR
|
||||
#define AI_TOOLS_API_VERSION_MINOR (5)
|
||||
#endif
|
||||
#ifndef AI_TOOLS_API_VERSION_MICRO
|
||||
#define AI_TOOLS_API_VERSION_MICRO (0)
|
||||
#endif
|
||||
|
||||
/*****************************************************************************/
|
||||
#define AI_TOOLS_API_VERSION \
|
||||
AI_VERSION(AI_TOOLS_API_VERSION_MAJOR, \
|
||||
AI_TOOLS_API_VERSION_MINOR, \
|
||||
AI_TOOLS_API_VERSION_MICRO)
|
||||
|
||||
#define AI_TOOLS_API_VERSION_1_3 \
|
||||
AI_VERSION(1, 3, 0)
|
||||
|
||||
#define AI_TOOLS_API_VERSION_1_4 \
|
||||
AI_VERSION(1, 4, 0)
|
||||
|
||||
#define AI_TOOLS_API_VERSION_1_5 \
|
||||
AI_VERSION(1, 5, 0)
|
||||
|
||||
/*****************************************************************************/
|
||||
#ifdef __cplusplus
|
||||
#define AI_API_DECLARE_BEGIN extern "C" {
|
||||
#define AI_API_DECLARE_END }
|
||||
#define ai_register /* register */
|
||||
#else
|
||||
#include <stdbool.h>
|
||||
#define AI_API_DECLARE_BEGIN /* AI_API_DECLARE_BEGIN */
|
||||
#define AI_API_DECLARE_END /* AI_API_DECLARE_END */
|
||||
#define ai_register register
|
||||
#endif
|
||||
|
||||
/*****************************************************************************/
|
||||
#define AI_FLAG_NONE (0x0)
|
||||
|
||||
/*****************************************************************************/
|
||||
AI_API_DECLARE_BEGIN
|
||||
|
||||
/*!
|
||||
* @typedef ai_flags
|
||||
* @ingroup ai_platform
|
||||
* @brief bitmask for flags management
|
||||
*/
|
||||
typedef uint32_t ai_flags;
|
||||
|
||||
/*****************************************************************************/
|
||||
#define AI_CONCAT_ARG(a, b) a ## b
|
||||
#define AI_CONCAT(a, b) AI_CONCAT_ARG(a, b)
|
||||
|
||||
/*! AI_CAST SECTION ***********************************/
|
||||
#define AI_CAST(type_, expr_) ((type_)(expr_))
|
||||
|
||||
/*****************************************************************************/
|
||||
#define AI_MAGIC_SIGNATURE \
|
||||
(0xa1facade)
|
||||
|
||||
#define AI_PACK(...) \
|
||||
__VA_ARGS__
|
||||
|
||||
/*****************************************************************************/
|
||||
#define AI_SHAPE_BCWH (0x01u)
|
||||
|
||||
/*!
|
||||
* @typedef ai_shape_dimension
|
||||
* @ingroup ai_platform
|
||||
* @brief shape dimension type to be used in shape related structs @ref ai_buffer_shape
|
||||
*/
|
||||
typedef uint32_t ai_shape_dimension;
|
||||
|
||||
/*****************************************************************************/
|
||||
#if defined(_MSC_VER)
|
||||
#define AI_API_ENTRY __declspec(dllexport)
|
||||
#define AI_ALIGNED(x) /* AI_ALIGNED(x) */
|
||||
#elif defined(__ICCARM__) || defined (__IAR_SYSTEMS_ICC__)
|
||||
#define AI_API_ENTRY /* AI_API_ENTRY */
|
||||
#define AI_ALIGNED(x) AI_CONCAT(AI_ALIGNED_,x)
|
||||
#define AI_ALIGNED_1 _Pragma("data_alignment = 1")
|
||||
#define AI_ALIGNED_2 _Pragma("data_alignment = 2")
|
||||
#define AI_ALIGNED_4 _Pragma("data_alignment = 4")
|
||||
#define AI_ALIGNED_8 _Pragma("data_alignment = 8")
|
||||
#define AI_ALIGNED_16 _Pragma("data_alignment = 16")
|
||||
#define AI_ALIGNED_32 _Pragma("data_alignment = 32")
|
||||
#elif defined(__CC_ARM)
|
||||
#define AI_API_ENTRY __attribute__((visibility("default")))
|
||||
#define AI_ALIGNED(x) __attribute__((aligned (x)))
|
||||
/* Keil disallows anonymous union initialization by default */
|
||||
#pragma anon_unions
|
||||
#elif defined(__GNUC__)
|
||||
//#define AI_API_ENTRY __attribute__((visibility("default")))
|
||||
#define AI_API_ENTRY /* AI_API_ENTRY */
|
||||
#define AI_ALIGNED(x) __attribute__((aligned(x)))
|
||||
#else
|
||||
/* Dynamic libraries are not supported by the compiler */
|
||||
#define AI_API_ENTRY /* AI_API_ENTRY */
|
||||
#define AI_ALIGNED(x) /* AI_ALIGNED(x) */
|
||||
#endif
|
||||
|
||||
#define AI_HANDLE_PTR(ptr_) ((ai_handle)(ptr_))
|
||||
#define AI_HANDLE_NULL AI_HANDLE_PTR(NULL)
|
||||
|
||||
#define AI_HANDLE_FUNC_PTR(func) ((ai_handle_func)(func))
|
||||
|
||||
#define AI_UNUSED(x) (void)(x);
|
||||
|
||||
#define AI_DEPRECATED /* AI_DEPRECATED */
|
||||
|
||||
#define AI_LEGACY /* AI_LEGACY */
|
||||
|
||||
#define AI_MAGIC_MARKER (0xA1FACADE)
|
||||
|
||||
|
||||
#if defined(__cplusplus)
|
||||
#define AI_STRUCT_INIT {}
|
||||
#define AI_C_ARRAY_INIT {}
|
||||
#else
|
||||
#define AI_STRUCT_INIT {0}
|
||||
#define AI_C_ARRAY_INIT {0}
|
||||
#endif
|
||||
|
||||
#define AI_ERROR_FMT AIU32_FMT
|
||||
|
||||
#define AI_IS_UNSIGNED(type) \
|
||||
((((type)0) - 1) > 0)
|
||||
|
||||
#define AI_CUSTOM_SIZE(type) \
|
||||
(ai_custom_type_signature)((AI_IS_UNSIGNED(type)) \
|
||||
? (0x80|(sizeof(type)&0x7f)) : (sizeof(type)&0x7f))
|
||||
|
||||
/*! network buffers struct handlers *******************************************/
|
||||
#ifdef __cplusplus
|
||||
|
||||
#define AI_NETWORK_PARAMS_INIT(params_, activations_) \
|
||||
{ \
|
||||
{{ params_, activations_ }} \
|
||||
}
|
||||
|
||||
#define AI_NETWORK_BUFFERS_INIT(weights_buffers_, activations_buffers_) \
|
||||
{ \
|
||||
AI_MAGIC_SIGNATURE, AI_PACK(weights_buffers_), AI_PACK(activations_buffers_) \
|
||||
}
|
||||
|
||||
#else
|
||||
|
||||
#define AI_NETWORK_PARAMS_INIT(params_, activations_) \
|
||||
{ \
|
||||
.params = params_, \
|
||||
.activations = activations_ \
|
||||
}
|
||||
|
||||
#define AI_NETWORK_BUFFERS_INIT(weights_buffers_, activations_buffers_) \
|
||||
{ \
|
||||
.map_signature = AI_MAGIC_SIGNATURE, \
|
||||
.map_weights = AI_PACK(weights_buffers_), \
|
||||
.map_activations = AI_PACK(activations_buffers_) \
|
||||
}
|
||||
|
||||
#endif // __cplusplus
|
||||
|
||||
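/*
 * Editor's note - illustrative sketch, not part of the original header. Application-side
 * use of the two initializers above, assuming `params_buf` / `acts_buf` are ai_buffer
 * descriptors and `weights_map` / `acts_map` are ai_buffer_array objects prepared by the
 * generated network code or by the application:
 *
 *   ai_network_params params   = AI_NETWORK_PARAMS_INIT(params_buf, acts_buf);
 *   ai_network_buffers buffers = AI_NETWORK_BUFFERS_INIT(weights_map, acts_map);
 *
 * The second form also stamps the structure with AI_MAGIC_SIGNATURE so the runtime can
 * sanity-check the mapping it receives.
 */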
|
||||
/*! binary padded bits macro helpers *****************************************/
|
||||
#define AI_PBITS_MASK \
|
||||
(0x1F)
|
||||
|
||||
#define AI_PBITS_SHIFTS \
|
||||
(5)
|
||||
|
||||
#define AI_PBITS_PADDED_BYTES_COUNT(bits_) \
|
||||
(((ai_u32)(bits_) + 7) >> 3)
|
||||
|
||||
#define AI_PBITS_PADDED_WORDS_COUNT(bits_) \
|
||||
(((ai_size)(bits_) + AI_PBITS_MASK) >> AI_PBITS_SHIFTS)
|
||||
|
||||
#define AI_PBITS_GET_WORD(word_ptr_, bits_) \
|
||||
(((ai_pbits*)(word_ptr_)) + ((bits_) >> AI_PBITS_SHIFTS))
|
||||
|
||||
#define AI_PAD_CHANNELS(format_, channels_) \
|
||||
((AI_BUFFER_FMT_GET_BITS(format_)==1) ? (AI_PBITS_PADDED_WORDS_COUNT(channels_) << AI_PBITS_SHIFTS) : (channels_))
|
||||
|
||||
|
||||
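/*
 * Editor's note - worked example, not part of the original header. For a binary (1-bit)
 * format such as AI_BUFFER_FORMAT_U1 with 50 channels:
 *
 *   AI_PBITS_PADDED_BYTES_COUNT(50) = (50 + 7)  >> 3 = 7    packed bytes
 *   AI_PBITS_PADDED_WORDS_COUNT(50) = (50 + 31) >> 5 = 2    32-bit words
 *   AI_PAD_CHANNELS(fmt, 50)        = 2 << 5          = 64  padded channel slots
 *
 * i.e. 1-bit channels are padded up to a multiple of 32 so that they always occupy
 * whole ai_pbits words.
 */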
/*! ai_intq_info struct handlers *********************************************/
|
||||
#define INTQ_CONST const
|
||||
// #define INTQ_CONST
|
||||
|
||||
#define AI_INTQ_INFO_LIST(list_) \
|
||||
((list_)->info)
|
||||
|
||||
#define AI_INTQ_INFO_LIST_FLAGS(list_) \
|
||||
((list_) ? (list_)->flags : 0)
|
||||
|
||||
#define AI_INTQ_INFO_LIST_SIZE(list_) \
|
||||
((list_) ? (list_)->size : 0)
|
||||
|
||||
#define AI_HAS_INTQ_INFO_LIST(list_) \
|
||||
((list_) ? (((list_)->info) && ((list_)->size>0)) : false)
|
||||
|
||||
#define AI_INTQ_INFO_LIST_SCALE(list_, type_, pos_) \
|
||||
(((list_) && (list_)->info && ((pos_)<(list_)->size)) \
|
||||
? ((type_*)((list_)->info->scale))[(pos_)] : 1.0f)
|
||||
|
||||
#define AI_INTQ_INFO_LIST_ZEROPOINT(list_, type_, pos_) \
|
||||
(((list_) && (list_)->info && ((pos_)<(list_)->size)) \
|
||||
? ((type_*)((list_)->info->zeropoint))[(pos_)] : 0)
|
||||
|
||||
/*! ai_buffer format handlers ************************************************/
|
||||
|
||||
/*!
|
||||
* @enum buffer format definition
|
||||
* @ingroup ai_platform
|
||||
*
|
||||
* 32 bit signed format list.
|
||||
*/
|
||||
typedef int32_t ai_buffer_format;
|
||||
|
||||
/*! ai_buffer_meta flags & macros ********************************************/
|
||||
#define AI_BUFFER_META_HAS_INTQ_INFO (0x1U << 0)
|
||||
#define AI_BUFFER_META_FLAG_SCALE_FLOAT (0x1U << 0)
|
||||
#define AI_BUFFER_META_FLAG_ZEROPOINT_U8 (0x1U << 1)
|
||||
#define AI_BUFFER_META_FLAG_ZEROPOINT_S8 (0x1U << 2)
|
||||
#define AI_BUFFER_META_FLAG_ZEROPOINT_U16 (0x1U << 3)
|
||||
#define AI_BUFFER_META_FLAG_ZEROPOINT_S16 (0x1U << 4)
|
||||
|
||||
/*! ai_buffer format variable flags & macros *********************************/
|
||||
#define AI_BUFFER_FMT_MASK (0x01FFFFFF)
|
||||
#define AI_BUFFER_FMT_TYPE_NONE (0x0)
|
||||
#define AI_BUFFER_FMT_TYPE_FLOAT (0x1)
|
||||
#define AI_BUFFER_FMT_TYPE_Q (0x2)
|
||||
#define AI_BUFFER_FMT_TYPE_BOOL (0x3)
|
||||
|
||||
#define AI_BUFFER_FMT_FLAG_CONST (0x1U<<30)
|
||||
#define AI_BUFFER_FMT_FLAG_STATIC (0x1U<<29)
|
||||
#define AI_BUFFER_FMT_FLAG_IS_IO (0x1U<<27)
|
||||
#define AI_BUFFER_FMT_FLAG_PERSISTENT (0x1U<<29)
|
||||
|
||||
|
||||
#define AI_BUFFER_FMT_PACK(value_, mask_, bits_) \
|
||||
( ((value_) & (mask_)) << (bits_) )
|
||||
|
||||
#define AI_BUFFER_FMT_UNPACK(fmt_, mask_, bits_) \
|
||||
( (AI_BUFFER_FMT_OBJ(fmt_) >> (bits_)) & (mask_) )
|
||||
|
||||
#define AI_BUFFER_FMT_OBJ(fmt_) \
|
||||
((ai_buffer_format)(fmt_))
|
||||
|
||||
#define AI_BUFFER_FMT_GET_FLOAT(fmt_) \
|
||||
AI_BUFFER_FMT_UNPACK(fmt_, 0x1, 24)
|
||||
|
||||
#define AI_BUFFER_FMT_GET_SIGN(fmt_) \
|
||||
AI_BUFFER_FMT_UNPACK(fmt_, 0x1, 23)
|
||||
|
||||
#define AI_BUFFER_FMT_GET_TYPE(fmt_) \
|
||||
AI_BUFFER_FMT_UNPACK(fmt_, 0xF, 17)
|
||||
|
||||
#define AI_BUFFER_FMT_GET_BITS(fmt_) \
|
||||
AI_BUFFER_FMT_UNPACK(fmt_, 0x7F, 7)
|
||||
|
||||
#define AI_BUFFER_FMT_SET_BITS(bits_) \
|
||||
AI_BUFFER_FMT_PACK((bits_), 0x7F, 7)
|
||||
|
||||
#define AI_BUFFER_FMT_GET_FBITS(fmt_) \
|
||||
( (ai_i8)AI_BUFFER_FMT_UNPACK(fmt_, 0x7F, 0) - 64 )
|
||||
|
||||
#define AI_BUFFER_FMT_SET_FBITS(fbits_) \
|
||||
AI_BUFFER_FMT_PACK((fbits_)+64, 0x7F, 0)
|
||||
|
||||
#define AI_BUFFER_FMT_SET(type_id_, sign_bit_, float_bit_, bits_, fbits_) \
|
||||
AI_BUFFER_FMT_OBJ( \
|
||||
AI_BUFFER_FMT_PACK(0, 0x1, 24) | \
|
||||
AI_BUFFER_FMT_PACK(sign_bit_, 0x1, 23) | \
|
||||
AI_BUFFER_FMT_PACK(0, 0x3, 21) | \
|
||||
AI_BUFFER_FMT_PACK(type_id_, 0xF, 17) | \
|
||||
AI_BUFFER_FMT_PACK(0, 0x7, 14) | \
|
||||
AI_BUFFER_FMT_SET_BITS(bits_) | \
|
||||
AI_BUFFER_FMT_SET_FBITS(fbits_) \
|
||||
)
|
||||
|
||||
#define AI_BUFFER_FMT_SET_COMPLEX(type_id_, sign_bit_, bits_, fbits_) \
|
||||
AI_BUFFER_FMT_OBJ( \
|
||||
AI_BUFFER_FMT_PACK(1, 0x1, 24) | \
|
||||
AI_BUFFER_FMT_PACK(sign_bit_, 0x1, 23) | \
|
||||
AI_BUFFER_FMT_PACK(0, 0x3, 21) | \
|
||||
AI_BUFFER_FMT_PACK(type_id_, 0xF, 17) | \
|
||||
AI_BUFFER_FMT_PACK(0, 0x7, 14) | \
|
||||
AI_BUFFER_FMT_SET_BITS(bits_) | \
|
||||
AI_BUFFER_FMT_SET_FBITS(fbits_) \
|
||||
)
|
||||
|
||||
#define AI_BUFFER_FMT_SAME(fmt1_, fmt2_) \
|
||||
( AI_BUFFER_FMT_GET(fmt1_) == AI_BUFFER_FMT_GET(fmt2_) )
|
||||
|
||||
#define AI_BUFFER_FMT_GET(fmt_) \
|
||||
(AI_BUFFER_FMT_OBJ(fmt_) & AI_BUFFER_FMT_MASK)
|
||||
|
||||
#define AI_BUFFER_FORMAT(buf_) \
|
||||
AI_BUFFER_FMT_GET((buf_)->format)
|
||||
|
||||
|
||||
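/*
 * Editor's note - illustrative sketch, not part of the original header. The pack/unpack
 * macros above are complementary; for the plain signed 8-bit integer format, for example:
 *
 *   ai_buffer_format fmt = AI_BUFFER_FMT_SET(AI_BUFFER_FMT_TYPE_Q, 1, 0, 8, 0);
 *
 *   AI_BUFFER_FMT_GET_TYPE(fmt)   // -> AI_BUFFER_FMT_TYPE_Q
 *   AI_BUFFER_FMT_GET_SIGN(fmt)   // -> 1 (signed)
 *   AI_BUFFER_FMT_GET_BITS(fmt)   // -> 8
 *   AI_BUFFER_FMT_GET_FBITS(fmt)  // -> 0 (no fractional bits)
 */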
/*!
|
||||
* @define shape type index
|
||||
* @ingroup ai_platform
|
||||
* @brief positional ID for generic shapes C structs
|
||||
*/
|
||||
#define AI_SHAPE_EXTENSION (0x5)
|
||||
#define AI_SHAPE_DEPTH (0x4)
|
||||
#define AI_SHAPE_HEIGHT (0x3)
|
||||
#define AI_SHAPE_WIDTH (0x2)
|
||||
#define AI_SHAPE_CHANNEL (0x1)
|
||||
#define AI_SHAPE_IN_CHANNEL (0x0)
|
||||
#define AI_SHAPE_BATCH (0x0)
|
||||
#define AI_SHAPE_TIME (0x0)
|
||||
|
||||
|
||||
AI_DEPRECATED
|
||||
#define AI_BUFFER_WIDTH(buf_) \
|
||||
((buf_)->shape.data[AI_SHAPE_WIDTH])
|
||||
|
||||
AI_DEPRECATED
|
||||
#define AI_BUFFER_HEIGHT(buf_) \
|
||||
((buf_)->shape.data[AI_SHAPE_HEIGHT])
|
||||
|
||||
AI_DEPRECATED
|
||||
#define AI_BUFFER_CHANNELS(buf_) \
|
||||
((buf_)->shape.data[AI_SHAPE_CHANNEL])
|
||||
|
||||
AI_DEPRECATED
|
||||
#define AI_BUFFER_N_BATCHES(buf_) \
|
||||
((buf_)->shape.data[AI_SHAPE_BATCH])
|
||||
|
||||
#define AI_BUFFER_DATA(buf_, type_) \
|
||||
((type_*)((buf_)->data))
|
||||
|
||||
#define AI_BUFFER_META_INFO(buf_) \
|
||||
((buf_)->meta_info)
|
||||
|
||||
#define AI_BUFFER_META_INFO_INTQ(meta_) \
|
||||
((meta_) && ((meta_)->flags & AI_BUFFER_META_HAS_INTQ_INFO)) \
|
||||
? ((meta_)->intq_info) : NULL
|
||||
|
||||
#define AI_BUFFER_META_INFO_INTQ_GET_SIZE(meta_) \
|
||||
( (AI_BUFFER_META_INFO_INTQ(meta_)) \
|
||||
? AI_INTQ_INFO_LIST_SIZE(AI_BUFFER_META_INFO_INTQ(meta_)) \
|
||||
: 0 )
|
||||
|
||||
#define AI_BUFFER_META_INFO_INTQ_GET_SCALE(meta_, pos_) \
|
||||
( (AI_BUFFER_META_INFO_INTQ(meta_)) \
|
||||
? AI_INTQ_INFO_LIST_SCALE(AI_BUFFER_META_INFO_INTQ(meta_), ai_float, pos_) \
|
||||
: 0 )
|
||||
|
||||
#define AI_BUFFER_META_INFO_INTQ_GET_ZEROPOINT(meta_, pos_) \
|
||||
( (AI_BUFFER_META_INFO_INTQ(meta_)) \
|
||||
? ((AI_INTQ_INFO_LIST_FLAGS(AI_BUFFER_META_INFO_INTQ(meta_))&AI_BUFFER_META_FLAG_ZEROPOINT_U8) \
|
||||
? AI_INTQ_INFO_LIST_ZEROPOINT(AI_BUFFER_META_INFO_INTQ(meta_), ai_u8, pos_) \
|
||||
: AI_INTQ_INFO_LIST_ZEROPOINT(AI_BUFFER_META_INFO_INTQ(meta_), ai_i8, pos_) ) \
|
||||
: 0 )
|
||||
|
||||
#define AI_BUFFER_META_INFO_INIT(flags_, intq_info_) { \
|
||||
.flags = (flags_), \
|
||||
.intq_info = AI_PACK(intq_info_) \
|
||||
}
|
||||
|
||||
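/*
 * Editor's note - illustrative sketch, not part of the original header. Reading the
 * per-tensor quantization parameters of an 8-bit I/O buffer, assuming `buf` points to
 * a quantized ai_buffer exposed by the network:
 *
 *   const ai_buffer_meta_info* meta = AI_BUFFER_META_INFO(buf);
 *   const ai_float scale = AI_BUFFER_META_INFO_INTQ_GET_SCALE(meta, 0);
 *   const ai_i32   zp    = AI_BUFFER_META_INFO_INTQ_GET_ZEROPOINT(meta, 0);
 *
 *   // usual affine de-quantization: real_value = scale * (quantized_value - zp)
 */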
#define AI_BUFFER_SIZE(buf_) \
|
||||
ai_buffer_get_size(buf_, true)
|
||||
|
||||
#define AI_BUFFER_SIZE_UNPAD(buf_) \
|
||||
ai_buffer_get_size(buf_, false)
|
||||
|
||||
#define AI_BUFFER_BYTE_SIZE(count_, fmt_) \
|
||||
ai_buffer_get_byte_size(count_, fmt_)
|
||||
|
||||
#define AI_BUFFER_FLAGS(buf_) \
|
||||
((buf_) ? (buf_)->flags : 0x0)
|
||||
|
||||
#define AI_BUFFER_SHAPE_INIT(type_, size_, ...) \
|
||||
{ \
|
||||
.type = (type_), \
|
||||
.size = (size_), \
|
||||
.data = (ai_shape_dimension[]){ __VA_ARGS__ } \
|
||||
}
|
||||
|
||||
#define AI_BUFFER_SHAPE_INIT_FROM_ARRAY(type_, size_, array_ptr_) \
|
||||
{ \
|
||||
.type = (type_), \
|
||||
.size = (size_), \
|
||||
.data = (ai_shape_dimension*)(array_ptr_) \
|
||||
}
|
||||
|
||||
#define AI_BUFFER_SHAPE_SIZE(buf_) \
|
||||
((buf_) ? (buf_)->shape.size : 0)
|
||||
|
||||
#define AI_BUFFER_SHAPE_TYPE(buf_) \
|
||||
((buf_) ? (buf_)->shape.type : 0)
|
||||
|
||||
#if defined(HAS_AI_ASSERT) && defined(AI_ASSERT)
|
||||
|
||||
#define AI_BUFFER_SET_SHAPE_ELEM(buf_, pos_, value_) { \
|
||||
AI_ASSERT(buf_) \
|
||||
(buf_)->shape.data[pos_] = (value_); \
|
||||
}
|
||||
|
||||
#define AI_BUFFER_SHAPE_ELEM(buf_, pos_) \
|
||||
(((pos_)<AI_BUFFER_SHAPE_SIZE(buf_)) ? (buf_)->shape.data[pos_] : 0)
|
||||
|
||||
#else
|
||||
|
||||
#define AI_BUFFER_SET_SHAPE_ELEM(buf_, pos_, value_) { \
|
||||
(buf_)->shape.data[pos_] = (value_); \
|
||||
}
|
||||
|
||||
#define AI_BUFFER_SHAPE_ELEM(buf_, pos_) \
|
||||
(buf_)->shape.data[pos_]
|
||||
|
||||
#endif
|
||||
|
||||
|
||||
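/*
 * Editor's note - illustrative sketch, not part of the original header. Reading the
 * dimensions of a 4-D BCWH buffer through the accessors above, assuming `buf` is a
 * pointer to a valid ai_buffer:
 *
 *   const ai_shape_dimension batch = AI_BUFFER_SHAPE_ELEM(buf, AI_SHAPE_BATCH);
 *   const ai_shape_dimension ch    = AI_BUFFER_SHAPE_ELEM(buf, AI_SHAPE_CHANNEL);
 *   const ai_shape_dimension w     = AI_BUFFER_SHAPE_ELEM(buf, AI_SHAPE_WIDTH);
 *   const ai_shape_dimension h     = AI_BUFFER_SHAPE_ELEM(buf, AI_SHAPE_HEIGHT);
 */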
AI_DEPRECATED
|
||||
#define AI_BUFFER_OBJ_INIT(format_, h_, w_, ch_, n_batches_, data_) \
|
||||
{ .format = (ai_buffer_format)(format_), \
|
||||
.data = (ai_handle)(data_), \
|
||||
.meta_info = NULL, \
|
||||
.flags = AI_FLAG_NONE, \
|
||||
.size = (h_) * (w_) * AI_PAD_CHANNELS(format_, ch_), \
|
||||
.shape = AI_BUFFER_SHAPE_INIT(AI_SHAPE_BCWH, 4, (n_batches_), (ch_), (w_), (h_)), \
|
||||
}
|
||||
|
||||
/* 7.1 new macro API */
|
||||
#define AI_BUFFER_INIT(flags_, format_, shape_, size_, meta_info_, data_) \
|
||||
{ .format = (ai_buffer_format)(format_), \
|
||||
.data = (ai_handle)(data_), \
|
||||
.meta_info = (meta_info_), \
|
||||
.flags = (flags_), \
|
||||
.size = (size_), \
|
||||
.shape = AI_PACK(shape_) \
|
||||
}
|
||||
|
||||
/* 7.1 new macro API */
|
||||
#define AI_BUFFER_INIT_STATIC(type_, flags_, format_, shape_, size_, meta_info_, ...) \
|
||||
{ .format = (ai_buffer_format)(format_), \
|
||||
.data = (ai_handle)((type_[]){__VA_ARGS__}), \
|
||||
.meta_info = (meta_info_), \
|
||||
.flags = (flags_), \
|
||||
.size = (size_), \
|
||||
.shape = AI_PACK(shape_) \
|
||||
}
|
||||
|
||||
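/*
 * Editor's note - illustrative sketch, not part of the original header. Declaring a
 * float 1x224x224x3 input descriptor with the 7.1 macro API (BCWH ordering, as used by
 * the deprecated AI_BUFFER_OBJ_INIT above), assuming `in_data` is an application array:
 *
 *   ai_float in_data[224 * 224 * 3];
 *
 *   ai_buffer in = AI_BUFFER_INIT(
 *       AI_FLAG_NONE,                                             // flags
 *       AI_BUFFER_FORMAT_FLOAT,                                   // format (see enum below)
 *       AI_BUFFER_SHAPE_INIT(AI_SHAPE_BCWH, 4, 1, 3, 224, 224),   // batch, ch, w, h
 *       224 * 224 * 3,                                            // element count
 *       NULL,                                                     // no meta info
 *       in_data);                                                 // data pointer
 */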
/*****************************************************************************/
|
||||
#define AI_NETWORK_BUFFERS_FIELD_DECLARE \
|
||||
ai_signature map_signature; /*! structure signature (required!) */ \
|
||||
ai_buffer_array map_weights; /*! info about weights array buffers (required!) */ \
|
||||
ai_buffer_array map_activations; /*! info about activations array buffers (required!) */
|
||||
|
||||
#define AI_NETWORK_PARAMS_FIELDS_DECLARE \
|
||||
union { \
|
||||
struct { \
|
||||
ai_buffer params; /*! info about params buffer(required!) */ \
|
||||
ai_buffer activations; /*! info about activations buffer (required!) */ \
|
||||
}; \
|
||||
struct { \
|
||||
AI_NETWORK_BUFFERS_FIELD_DECLARE \
|
||||
}; \
|
||||
};
|
||||
|
||||
/*****************************************************************************/
|
||||
#define AI_BUFFER_ARRAY_OBJ_INIT(flags_, size_, buffer_array_) \
|
||||
{ \
|
||||
.flags = (ai_u16)(flags_), \
|
||||
.size = (ai_u16)(size_), \
|
||||
.buffer = (ai_buffer*)(buffer_array_) \
|
||||
}
|
||||
|
||||
#define AI_BUFFER_ARRAY_OBJ_INIT_STATIC(flags_, size_, ...) \
|
||||
{ \
|
||||
.flags = (ai_u16)(flags_), \
|
||||
.size = (ai_u16)(size_), \
|
||||
.buffer = (ai_buffer*)((ai_buffer[]){__VA_ARGS__}) \
|
||||
}
|
||||
|
||||
#define AI_BUFFER_ARRAY_SANE(buf_array_) \
|
||||
ai_buffer_array_sane(buf_array_)
|
||||
|
||||
#define AI_BUFFER_ARRAY_FLAGS(buf_array_) \
|
||||
((AI_BUFFER_ARRAY_SANE(buf_array_)) ? (buf_array_)->flags : AI_FLAG_NONE)
|
||||
|
||||
#define AI_BUFFER_ARRAY_SIZE(buf_array_) \
|
||||
((AI_BUFFER_ARRAY_SANE(buf_array_)) ? (buf_array_)->size : 0)
|
||||
|
||||
#define AI_BUFFER_ARRAY_ITEM(buf_array_, pos_) \
|
||||
((AI_BUFFER_ARRAY_SANE(buf_array_)) ? ((buf_array_)->buffer + (pos_)) : NULL)
|
||||
|
||||
#define AI_BUFFER_ARRAY_ITEM_SET_ADDRESS(buf_array_, pos_, address_) \
|
||||
ai_buffer_array_item_set_address(buf_array_, pos_, address_)
|
||||
|
||||
|
||||
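/*
 * Editor's note - illustrative sketch, not part of the original header. Building a
 * one-entry activations map and patching its address at run time; `ACT_SIZE` and
 * `act_mem` are application-defined placeholders:
 *
 *   ai_buffer_array acts = AI_BUFFER_ARRAY_OBJ_INIT_STATIC(
 *       AI_FLAG_NONE, 1,
 *       AI_BUFFER_INIT(AI_FLAG_NONE, AI_BUFFER_FORMAT_U8,
 *                      AI_BUFFER_SHAPE_INIT(AI_SHAPE_BCWH, 4, 1, ACT_SIZE, 1, 1),
 *                      ACT_SIZE, NULL, NULL));
 *
 *   AI_BUFFER_ARRAY_ITEM_SET_ADDRESS(&acts, 0, AI_HANDLE_PTR(act_mem));
 */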
/*!
|
||||
* @enum buffer formats enum list
|
||||
* @ingroup ai_platform
|
||||
*
|
||||
* List of supported ai_buffer format types.
|
||||
*/
|
||||
enum {
|
||||
AI_BUFFER_FORMAT_NONE = AI_BUFFER_FMT_SET(AI_BUFFER_FMT_TYPE_NONE, 0, 0, 0, 0),
|
||||
AI_BUFFER_FORMAT_FLOAT = AI_BUFFER_FMT_SET(AI_BUFFER_FMT_TYPE_FLOAT, 1, 0, 32, 0),
|
||||
|
||||
AI_BUFFER_FORMAT_U1 = AI_BUFFER_FMT_SET(AI_BUFFER_FMT_TYPE_Q, 0, 0, 1, 0),
|
||||
AI_BUFFER_FORMAT_U8 = AI_BUFFER_FMT_SET(AI_BUFFER_FMT_TYPE_Q, 0, 0, 8, 0),
|
||||
AI_BUFFER_FORMAT_U16 = AI_BUFFER_FMT_SET(AI_BUFFER_FMT_TYPE_Q, 0, 0, 16, 0),
|
||||
AI_BUFFER_FORMAT_U32 = AI_BUFFER_FMT_SET(AI_BUFFER_FMT_TYPE_Q, 0, 0, 32, 0),
|
||||
|
||||
AI_BUFFER_FORMAT_S1 = AI_BUFFER_FMT_SET(AI_BUFFER_FMT_TYPE_Q, 1, 0, 1, 0),
|
||||
AI_BUFFER_FORMAT_S8 = AI_BUFFER_FMT_SET(AI_BUFFER_FMT_TYPE_Q, 1, 0, 8, 0),
|
||||
AI_BUFFER_FORMAT_S16 = AI_BUFFER_FMT_SET(AI_BUFFER_FMT_TYPE_Q, 1, 0, 16, 0),
|
||||
AI_BUFFER_FORMAT_S32 = AI_BUFFER_FMT_SET(AI_BUFFER_FMT_TYPE_Q, 1, 0, 32, 0),
|
||||
|
||||
AI_BUFFER_FORMAT_Q = AI_BUFFER_FMT_SET(AI_BUFFER_FMT_TYPE_Q, 1, 0, 0, 0),
|
||||
AI_BUFFER_FORMAT_Q7 = AI_BUFFER_FMT_SET(AI_BUFFER_FMT_TYPE_Q, 1, 0, 8, 7),
|
||||
AI_BUFFER_FORMAT_Q15 = AI_BUFFER_FMT_SET(AI_BUFFER_FMT_TYPE_Q, 1, 0, 16, 15),
|
||||
|
||||
AI_BUFFER_FORMAT_UQ = AI_BUFFER_FMT_SET(AI_BUFFER_FMT_TYPE_Q, 0, 0, 0, 0),
|
||||
AI_BUFFER_FORMAT_UQ7 = AI_BUFFER_FMT_SET(AI_BUFFER_FMT_TYPE_Q, 0, 0, 8, 7),
|
||||
AI_BUFFER_FORMAT_UQ15 = AI_BUFFER_FMT_SET(AI_BUFFER_FMT_TYPE_Q, 0, 0, 16, 15),
|
||||
|
||||
AI_BUFFER_FORMAT_BOOL = AI_BUFFER_FMT_SET(AI_BUFFER_FMT_TYPE_BOOL, 0, 0, 8, 0),
|
||||
};
|
||||
|
||||
/*****************************************************************************/
|
||||
#define AI_ERROR_INIT(type_, code_) { \
|
||||
.type = AI_ERROR_##type_, \
|
||||
.code = AI_ERROR_CODE_##code_ \
|
||||
}
|
||||
|
||||
/* printf formats */
|
||||
#define SSIZET_FMT "%" PRIu32
|
||||
#define AII32_FMT "%" PRId32
|
||||
#define AIU32_FMT "%" PRIu32
|
||||
#define AII64_FMT "%" PRId64
|
||||
#define AIU64_FMT "%" PRIu64
|
||||
|
||||
|
||||
#define AI_VERSION(major_, minor_, micro_) \
|
||||
(((major_)<<24) | ((minor_)<<16) | ((micro_)<<8))
|
||||
|
||||
|
||||
typedef uint8_t ai_custom_type_signature;
|
||||
|
||||
typedef void* ai_handle;
|
||||
typedef const void* ai_handle_const;
|
||||
|
||||
typedef float ai_float;
|
||||
typedef double ai_double;
|
||||
|
||||
typedef bool ai_bool;
|
||||
|
||||
typedef char ai_char;
|
||||
|
||||
typedef uint32_t ai_size;
|
||||
typedef int16_t ai_short_size;
|
||||
|
||||
typedef uintptr_t ai_uptr;
|
||||
|
||||
typedef unsigned int ai_uint;
|
||||
typedef uint8_t ai_u8;
|
||||
typedef uint16_t ai_u16;
|
||||
typedef uint32_t ai_u32;
|
||||
typedef uint64_t ai_u64;
|
||||
|
||||
typedef int ai_int;
|
||||
typedef int8_t ai_i8;
|
||||
typedef int16_t ai_i16;
|
||||
typedef int32_t ai_i32;
|
||||
typedef int64_t ai_i64;
|
||||
|
||||
typedef uint64_t ai_macc;
|
||||
|
||||
typedef int32_t ai_pbits;
|
||||
|
||||
typedef uint32_t ai_signature;
|
||||
|
||||
typedef void (*ai_handle_func)(ai_handle);
|
||||
|
||||
/*****************************************************************************/
|
||||
/*!
|
||||
* @struct ai_error
|
||||
* @ingroup ai_platform
|
||||
* @brief Structure encoding details about the last error.
|
||||
*/
|
||||
typedef struct ai_error_ {
|
||||
ai_u32 type : 8; /*!< Error type represented by @ref ai_error_type */
|
||||
ai_u32 code : 24; /*!< Error code represented by @ref ai_error_code */
|
||||
} ai_error;
|
||||
|
||||
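/*
 * Editor's note - illustrative sketch, not part of the original header. The bit-field
 * layout above makes error handling a pair of comparisons, e.g. on an ai_error `err`
 * returned by a create/get_error call:
 *
 *   if (err.type != AI_ERROR_NONE) {
 *     printf("ai error: type=" AI_ERROR_FMT " code=" AI_ERROR_FMT "\n",
 *            (ai_u32)err.type, (ai_u32)err.code);
 *   }
 *
 * (AI_ERROR_NONE and the error codes are listed in the enums further below.)
 */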
/*****************************************************************************/
|
||||
/*!
|
||||
* @struct ai_intq_info
|
||||
* @ingroup ai_platform
|
||||
* @brief an element of the ai_intq_info_list entry. It reports an array for the
|
||||
* scale and zeropoint values for each buffer. Optional flags are also present
|
||||
*/
|
||||
typedef struct ai_intq_info_ {
|
||||
INTQ_CONST ai_float* scale;
|
||||
INTQ_CONST ai_handle zeropoint;
|
||||
} ai_intq_info;
|
||||
|
||||
/*!
|
||||
* @struct ai_intq_info_list
|
||||
* @ingroup ai_platform
|
||||
* @brief list reporting meta info for quantized networks integer support
|
||||
* when size > 1 it means a per-channel output quantization
|
||||
*/
|
||||
typedef struct ai_intq_info_list_ {
|
||||
ai_u16 flags; /*!< optional flags to store intq info attributes */
|
||||
ai_u16 size; /*!< number of elements in the intq_info list */
|
||||
INTQ_CONST ai_intq_info* info; /*!< pointer to an array of metainfo
|
||||
* associated with the intq_info list */
|
||||
} ai_intq_info_list;
|
||||
|
||||
/*****************************************************************************/
|
||||
/*!
|
||||
* @struct ai_buffer_meta_info
|
||||
* @ingroup ai_platform
|
||||
* @brief Optional meta attributes associated with the I/O buffer.
|
||||
* This datastruct is also used for network querying, where the data field
|
||||
* may be NULL.
|
||||
*/
|
||||
typedef struct ai_buffer_meta_info_ {
|
||||
ai_u32 flags; /*!< meta info flags */
|
||||
ai_intq_info_list* intq_info; /*!< meta info related to integer format */
|
||||
} ai_buffer_meta_info;
|
||||
|
||||
/*!
|
||||
* @struct ai_buffer_shape
|
||||
* @ingroup ai_platform
|
||||
* @brief Memory buffer shape datatype definition.
|
||||
*/
|
||||
typedef struct ai_buffer_shape_ {
|
||||
ai_u32 type : 8; /*!< shape type: reserved for compatibility */
|
||||
ai_u32 size : 24; /*!< size: shape cardinality */
|
||||
ai_shape_dimension* data; /*!< pointer to shape tuple array */
|
||||
} ai_buffer_shape;
|
||||
|
||||
/*!
|
||||
* @struct ai_buffer
|
||||
* @ingroup ai_platform
|
||||
* @brief Memory buffer storing data (optional) with a shape, size and type.
|
||||
* This datastruct is also used for network querying, where the data field
|
||||
* may be NULL.
|
||||
*/
|
||||
typedef struct ai_buffer_ {
|
||||
ai_buffer_format format; /*!< buffer format */
|
||||
ai_handle data; /*!< pointer to buffer data */
|
||||
ai_buffer_meta_info* meta_info; /*!< pointer to buffer metadata info */
|
||||
/* New 7.1 fields */
|
||||
ai_flags flags; /*!< shape optional flags */
|
||||
ai_size size; /*!< number of elements of the buffer (including optional padding) */
|
||||
ai_buffer_shape shape; /*!< n-dimensional shape info */
|
||||
} ai_buffer;
|
||||
|
||||
/*!
|
||||
* @struct ai_buffer_array
|
||||
* @ingroup ai_platform
|
||||
* @brief Array of @ref ai_buffer.
|
||||
*/
|
||||
typedef struct ai_buffer_array_ {
|
||||
ai_u16 flags; /*!< buffer array flags */
|
||||
ai_u16 size; /*!< buffer array size */
|
||||
ai_buffer* buffer; /*!< buffer array buffers pointer */
|
||||
} ai_buffer_array;
|
||||
|
||||
/* enums section */
|
||||
|
||||
/*!
|
||||
* @enum ai_error_type
|
||||
* @ingroup ai_platform
|
||||
*
|
||||
* Generic enum to list network error types.
|
||||
*/
|
||||
typedef enum {
|
||||
AI_ERROR_NONE = 0x00, /*!< No error */
|
||||
AI_ERROR_TOOL_PLATFORM_API_MISMATCH = 0x01,
|
||||
AI_ERROR_TYPES_MISMATCH = 0x02,
|
||||
AI_ERROR_INVALID_HANDLE = 0x10,
|
||||
AI_ERROR_INVALID_STATE = 0x11,
|
||||
AI_ERROR_INVALID_INPUT = 0x12,
|
||||
AI_ERROR_INVALID_OUTPUT = 0x13,
|
||||
AI_ERROR_INVALID_PARAM = 0x14,
|
||||
AI_ERROR_INVALID_SIGNATURE = 0x15,
|
||||
AI_ERROR_INVALID_SIZE = 0x16,
|
||||
AI_ERROR_INVALID_VALUE = 0x17,
|
||||
AI_ERROR_INIT_FAILED = 0x30,
|
||||
AI_ERROR_ALLOCATION_FAILED = 0x31,
|
||||
AI_ERROR_DEALLOCATION_FAILED = 0x32,
|
||||
AI_ERROR_CREATE_FAILED = 0x33,
|
||||
} ai_error_type;
|
||||
|
||||
/*!
|
||||
* @enum ai_error_code
|
||||
* @ingroup ai_platform
|
||||
*
|
||||
* Generic enum to list network error codes.
|
||||
*/
|
||||
typedef enum {
|
||||
AI_ERROR_CODE_NONE = 0x0000, /*!< No error */
|
||||
AI_ERROR_CODE_NETWORK = 0x0010,
|
||||
AI_ERROR_CODE_NETWORK_PARAMS = 0x0011,
|
||||
AI_ERROR_CODE_NETWORK_WEIGHTS = 0x0012,
|
||||
AI_ERROR_CODE_NETWORK_ACTIVATIONS = 0x0013,
|
||||
AI_ERROR_CODE_LAYER = 0x0014,
|
||||
AI_ERROR_CODE_TENSOR = 0x0015,
|
||||
AI_ERROR_CODE_ARRAY = 0x0016,
|
||||
AI_ERROR_CODE_INVALID_PTR = 0x0017,
|
||||
AI_ERROR_CODE_INVALID_SIZE = 0x0018,
|
||||
AI_ERROR_CODE_INVALID_FORMAT = 0x0019,
|
||||
AI_ERROR_CODE_OUT_OF_RANGE = 0x0020,
|
||||
AI_ERROR_CODE_INVALID_BATCH = 0x0021,
|
||||
AI_ERROR_CODE_MISSED_INIT = 0x0030,
|
||||
AI_ERROR_CODE_IN_USE = 0x0040,
|
||||
AI_ERROR_CODE_LOCK = 0x0041,
|
||||
} ai_error_code;
|
||||
|
||||
/*!
|
||||
* @struct ai_platform_version
|
||||
* @ingroup ai_platform
|
||||
* @brief Datastruct storing platform version info
|
||||
*/
|
||||
typedef struct ai_platform_version_ {
|
||||
ai_u8 major;
|
||||
ai_u8 minor;
|
||||
ai_u8 micro;
|
||||
ai_u8 reserved;
|
||||
} ai_platform_version;
|
||||
|
||||
/*!
|
||||
* @struct ai_network_params
|
||||
* @ingroup ai_platform
|
||||
*
|
||||
* Datastructure to pass parameters during network initialization.
|
||||
*/
|
||||
typedef struct ai_network_params_ {
|
||||
AI_NETWORK_PARAMS_FIELDS_DECLARE
|
||||
} ai_network_params;
|
||||
|
||||
/*!
|
||||
* @struct ai_network_buffers
|
||||
* @ingroup ai_platform
|
||||
*
|
||||
* Datastructure to pass network buffers during network initialization.
|
||||
*/
|
||||
typedef struct ai_network_buffers_ {
|
||||
AI_NETWORK_BUFFERS_FIELD_DECLARE
|
||||
} ai_network_buffers;
|
||||
|
||||
/*!
|
||||
* @struct ai_network_report
|
||||
* @ingroup ai_platform
|
||||
*
|
||||
* Datastructure to query a network report with the relevant network details.
|
||||
*/
|
||||
typedef struct ai_network_report_ {
|
||||
const char* model_name;
|
||||
const char* model_signature;
|
||||
const char* model_datetime;
|
||||
|
||||
const char* compile_datetime;
|
||||
|
||||
const char* runtime_revision;
|
||||
ai_platform_version runtime_version;
|
||||
|
||||
const char* tool_revision;
|
||||
ai_platform_version tool_version;
|
||||
ai_platform_version tool_api_version;
|
||||
|
||||
ai_platform_version api_version;
|
||||
ai_platform_version interface_api_version;
|
||||
|
||||
ai_macc n_macc;
|
||||
|
||||
ai_u16 n_inputs;
|
||||
ai_u16 n_outputs;
|
||||
ai_buffer* inputs;
|
||||
ai_buffer* outputs;
|
||||
|
||||
AI_NETWORK_PARAMS_FIELDS_DECLARE
|
||||
|
||||
ai_u32 n_nodes;
|
||||
|
||||
ai_signature signature;
|
||||
} ai_network_report;
|
||||
|
||||
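/*
 * Editor's note - illustrative sketch, not part of the original header. Once a report
 * has been filled in by the runtime (for instance through ai_rel_network_get_report()
 * declared in ai_reloc_network.h), its I/O descriptors can be walked like this:
 *
 *   for (ai_u16 i = 0; i < report.n_inputs; i++) {
 *     const ai_buffer* in  = &report.inputs[i];
 *     const ai_size n_elem = AI_BUFFER_SIZE(in);                          // padded elements
 *     const ai_u32  bits   = AI_BUFFER_FMT_GET_BITS(AI_BUFFER_FORMAT(in));
 *     // n_elem elements of `bits` bits each are expected at in->data
 *   }
 */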
/*!
|
||||
* @enum ai_scatter_nd_reduction
|
||||
* @ingroup ai_platform
|
||||
* @brief reduction operation in scatter_nd layer
|
||||
*/
|
||||
typedef enum {
|
||||
AI_SCATTER_ND_NONE = 0x0,
|
||||
AI_SCATTER_ND_OPERATION
|
||||
} ai_scatter_nd_reduction;
|
||||
|
||||
/*!
|
||||
* @enum ai_upsample_mode
|
||||
* @ingroup ai_platform
|
||||
* @brief allowed mode in upsample layer
|
||||
*/
|
||||
typedef enum {
|
||||
AI_UPSAMPLE_ZEROS = 0x0,
|
||||
AI_UPSAMPLE_NEAREST,
|
||||
AI_UPSAMPLE_BILINEAR,
|
||||
AI_UPSAMPLE_TRILINEAR
|
||||
} ai_upsample_mode;
|
||||
|
||||
/*!
|
||||
* @enum ai_resize_mode
|
||||
* @ingroup ai_platform
|
||||
* @brief allowed mode in resize layer
|
||||
*/
|
||||
typedef enum {
|
||||
AI_RESIZE_ZEROS = 0x0,
|
||||
AI_RESIZE_NEAREST,
|
||||
AI_RESIZE_LINEAR,
|
||||
AI_RESIZE_CUBIC
|
||||
} ai_resize_mode;
|
||||
|
||||
/*!
|
||||
* @enum ai_coord_transf_mode
|
||||
* @ingroup ai_platform
|
||||
* @brief coordinate_transformation_mode in resize layer
|
||||
*/
|
||||
typedef enum {
|
||||
AI_HALF_PIXEL = 0x0,
|
||||
AI_PYTORCH_HALF_PIXEL,
|
||||
AI_ALIGN_CORNERS,
|
||||
AI_ASYMMETRIC,
|
||||
AI_TF_HALF_PIXEL_FOR_NN,
|
||||
AI_TF_CROP_AND_RESIZE
|
||||
} ai_coord_transf_mode;
|
||||
|
||||
typedef enum {
|
||||
AI_ROUND_PREFER_FLOOR = 0x0,
|
||||
AI_ROUND_PREFER_CEIL,
|
||||
AI_ROUND_FLOOR,
|
||||
AI_ROUND_CEIL
|
||||
} ai_nearest_mode;
|
||||
|
||||
typedef enum {
|
||||
AI_PAD_CONSTANT = 0x0,
|
||||
AI_PAD_REFLECT,
|
||||
AI_PAD_EDGE,
|
||||
AI_PAD_8BIT_CH1ST_CONSTANT,
|
||||
} ai_pad_mode;
|
||||
|
||||
#define OUTPUT_PADDING_FLAG (1 << 0)
|
||||
#define CHANNEL_FIRST_FLAG (1 << 1)
|
||||
/* Padding pattern supported: */
|
||||
/* 0 = (1, 1, 1, 1), 1 = (0, 0, 2, 2) */
|
||||
#define CHANNEL_PADDING_PATTERN (1 << 2)
|
||||
/* Be careful when changing these definitions:
|
||||
bit0 shall always select output padding (Valid vs Same)
|
||||
bit1 shall always select channel-first / channel-last format
|
||||
bit2 shall always select padding pattern (1, 1, 1, 1) (stride1) or (0, 0, 2, 2) (stride2)
|
||||
*/
|
||||
typedef enum {
|
||||
AI_LAYER_FORMAT_CHANNEL_LAST_VALID = 0x0,
|
||||
AI_LAYER_FORMAT_CHANNEL_LAST_SAME = 0x1,
|
||||
AI_LAYER_FORMAT_CHANNEL_FIRST_VALID = 0x2,
|
||||
AI_LAYER_FORMAT_CHANNEL_FIRST_SAME = 0x3,
|
||||
AI_LAYER_FORMAT_CHANNEL_FIRST_SAME2 = 0x7,
|
||||
} ai_layer_format_type;
|
||||
|
||||
/*! ai_platform public APIs **************************************************/
|
||||
|
||||
/*!
|
||||
* @brief get the total number of elements of an ai_buffer.
|
||||
* @ingroup ai_platform
|
||||
* @param buffer a pointer to an @ref ai_buffer
|
||||
* @param with_padding when true it also counts padded elements
|
||||
* @return the number of elements of the buffer (with/without padded ones)
|
||||
*/
|
||||
AI_API_ENTRY
|
||||
ai_size ai_buffer_get_size(const ai_buffer* buffer, const ai_bool with_padding);
|
||||
|
||||
/*!
|
||||
* @brief get the size in bytes of an ai_buffer (given the number of elements and format).
|
||||
* @ingroup ai_platform
|
||||
* @param count the number of elements composing the buffer
|
||||
* @param fmt the format of the ai_buffer
|
||||
* @return the size in bytes of the buffer
|
||||
*/
|
||||
AI_API_ENTRY
|
||||
ai_size ai_buffer_get_byte_size(const ai_size count, const ai_buffer_format fmt);
|
||||
|
||||
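/*
 * Editor's note - illustrative sketch, not part of the original header. Combining the
 * two helpers above to size a host-side staging area for an I/O buffer `buf`:
 *
 *   const ai_size n_elem  = ai_buffer_get_size(buf, true);                 // padded count
 *   const ai_size n_bytes = ai_buffer_get_byte_size(n_elem, buf->format);
 *   void* staging = malloc(n_bytes);   // application-owned copy area
 */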
/*!
|
||||
* @brief check whether a buffer array is empty.
|
||||
* @ingroup ai_platform
|
||||
* @param barray a pointer to the buffer array
|
||||
* @return true if the buffer array is empty, false otherwise
|
||||
*/
|
||||
AI_API_ENTRY
|
||||
ai_bool ai_buffer_array_is_empty(const ai_buffer_array* barray);
|
||||
|
||||
/*!
|
||||
* @brief check whether a buffer array is valid.
|
||||
* @ingroup ai_platform
|
||||
* @param barray a pointer to the buffer array
|
||||
* @return true if the buffer array is valid, false otherwise
|
||||
*/
|
||||
AI_API_ENTRY
|
||||
ai_bool ai_buffer_array_is_valid(const ai_buffer_array* barray);
|
||||
|
||||
/*!
|
||||
* @brief check if a buffer array is valid - i.e. not empty.
|
||||
* @ingroup ai_platform
|
||||
* @param barray a pointer to the buffer array
|
||||
* @return true if the array is consistent and not empty, false otherwise
|
||||
*/
|
||||
AI_API_ENTRY
|
||||
ai_bool ai_buffer_array_sane(const ai_buffer_array* barray);
|
||||
|
||||
/*!
|
||||
* @brief get total size in bytes of a buffer array.
|
||||
* @ingroup ai_platform
|
||||
* @param barray a pointer to the buffer array
|
||||
* @return the total size in bytes of all the buffers in the array
|
||||
*/
|
||||
AI_API_ENTRY
|
||||
ai_size ai_buffer_array_get_byte_size(const ai_buffer_array* barray);
|
||||
|
||||
/*!
|
||||
* @brief set the address of the buffer array item at index pos
|
||||
* @ingroup ai_platform
|
||||
* @param barray a pointer to the buffer array
|
||||
* @param pos the index of the element in the array
|
||||
* @param address the address to set
|
||||
* @return true if successful, false otherwise
|
||||
*/
|
||||
AI_API_ENTRY
|
||||
ai_bool ai_buffer_array_item_set_address(
|
||||
ai_buffer_array* barray, const ai_u32 pos, ai_handle address);
|
||||
|
||||
AI_API_DECLARE_END
|
||||
|
||||
#endif /*AI_PLATFORM_H*/
|
||||
1144
lib/stai/libstai/include/ai_platform_interface.h
Normal file
File diff suppressed because it is too large
416
lib/stai/libstai/include/ai_reloc_network.h
Normal file
@ -0,0 +1,416 @@
|
||||
/**
|
||||
******************************************************************************
|
||||
* @file ai_reloc_network.h
|
||||
* @author MCD/AIS Team
|
||||
* @brief Relocatable network support
|
||||
******************************************************************************
|
||||
* @attention
|
||||
*
|
||||
* <h2><center>© Copyright (c) 2019,2021 STMicroelectronics.
|
||||
* All rights reserved.</center></h2>
|
||||
*
|
||||
* This software is licensed under terms that can be found in the LICENSE file in
|
||||
* the root directory of this software component.
|
||||
* If no LICENSE file comes with this software, it is provided AS-IS.
|
||||
*
|
||||
******************************************************************************
|
||||
*/
|
||||
|
||||
#ifndef __AI_RELOC_NETWORK_H__
|
||||
#define __AI_RELOC_NETWORK_H__
|
||||
|
||||
#include <ai_platform_interface.h>
|
||||
|
||||
/* -----------------------------------------------------------------------------
|
||||
* AI RELOC definition
|
||||
* -----------------------------------------------------------------------------
|
||||
*/
|
||||
|
||||
/*
|
||||
* v1.0 : initial version until v7.0 tools
|
||||
* v2.0 : update the init fct to support fragmented activations/weights buffer
|
||||
*/
|
||||
|
||||
/* version of the AI RELOC runtime (bootstrap) */
|
||||
#define AI_RELOC_RT_VERSION_MAJOR 2
|
||||
#define AI_RELOC_RT_VERSION_MINOR 0
|
||||
|
||||
/* AI RT executing mode definitions */
|
||||
#define AI_RELOC_RT_LOAD_MODE_XIP (1 << 0) /* (default) only the data/bss sections are
|
||||
copied in RAM, code is executed in-place */
|
||||
#define AI_RELOC_RT_LOAD_MODE_COPY (1 << 1) /* code and data sections are copied in RAM */
|
||||
|
||||
/* AI RT error definitions */
|
||||
#define AI_RELOC_RT_ERR_NONE (0)
|
||||
#define AI_RELOC_RT_ERR_INVALID_BIN (-1) /* Invalid binary object */
|
||||
#define AI_RELOC_RT_ERR_MEMORY (-2) /* RAM size is insufficient */
|
||||
#define AI_RELOC_RT_ERR_NOT_SUPPORTED (-3) /* feature/option not supported */
|
||||
#define AI_RELOC_RT_ERR_PARAM (-4) /* param not valid */
|
||||
|
||||
/*
|
||||
* AI RELOC flags (32b) - part of the binary header
|
||||
*
|
||||
* b31..b24 : 8b - RT version major.minor (4b+4b)
|
||||
*
|
||||
* b23..b20 : 4b - fields reserved for post-process script
|
||||
*
|
||||
* Variant fields
|
||||
*
|
||||
* b19..b12 : 8b - compilation options: ARM tool-chain, FPU, FLOAT-ABI
|
||||
* b19..b16: 4b - ARM tool-chain - 1000b: GNU Arm Embedded Tool-chain
|
||||
* b15: 1b - reserved
|
||||
* b14.b13: 2b - Floating-point ABI used - '00b':soft, '01b':softfp, '10b':hard
|
||||
* b12: 1b - FPU is used
|
||||
* b11..b0 : 12b - CPUID (Part Number fields of the @0xE000ED00 CPUID register)
|
||||
*/
|
||||
|
||||
/* CPUID / Cortex-Mx definitions */
|
||||
#define AI_RELOC_ARM_CORTEX_M0P (0xC60UL)
|
||||
#define AI_RELOC_ARM_CORTEX_M3 (0xC23UL)
|
||||
#define AI_RELOC_ARM_CORTEX_M4 (0xC24UL)
|
||||
#define AI_RELOC_ARM_CORTEX_M7 (0xC27UL)
|
||||
#define AI_RELOC_ARM_CORTEX_M33 (0xD21UL)
|
||||
#define AI_RELOC_ARM_CORTEX_M55 (0xD22UL)
|
||||
|
||||
/* Tool-chain definition (ONLY this tool-chain is currently supported)*/
|
||||
#define AI_RELOC_TOOLCHAIN_ARM_EMBEDDED (0x8UL)
|
||||
|
||||
/* Floating-point ABI definition (in relation with the tool-chain) */
|
||||
#define AI_RELOC_TOOLCHAIN_FP_ABI_SOFT (0x0UL)
|
||||
#define AI_RELOC_TOOLCHAIN_FP_ABI_SOFTFP (0x1UL)
|
||||
#define AI_RELOC_TOOLCHAIN_FP_ABI_HARD (0x2UL)
|
||||
|
||||
/* Getter/setter macros for the AI RELOC RT flags word */
|
||||
#define AI_RELOC_RT_SET_FLAGS(_var) (((AI_RELOC_RT_VERSION_MAJOR << 4 |\
|
||||
AI_RELOC_RT_VERSION_MINOR << 0) << 24) |\
|
||||
((_var) & 0xFFFFF) )
|
||||
|
||||
#define AI_RELOC_RT_GET_MAJOR(_flags) (int)(((_flags) >> 28) & 0xF)
|
||||
#define AI_RELOC_RT_GET_MINOR(_flags) (int)(((_flags) >> 24) & 0xF)
|
||||
|
||||
#define AI_RELOC_RT_GET_VARIANT(_flags) ((_flags) & 0xFFFFF)
|
||||
#define AI_RELOC_RT_GET_POST_OPTIONS(_flags) ((_flags >> 20) & 0xF)
|
||||
#define AI_RELOC_RT_GET_CPUID(_flags) ((_flags >> 0) & 0xFFF)
|
||||
#define AI_RELOC_RT_GET_COPTS(_flags) ((_flags >> 12) & 0xF)
|
||||
#define AI_RELOC_RT_FPU_USED(_flags) (((_flags) >> 12) & 1)
|
||||
|
||||
|
||||
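/*
 * Editor's note - worked example, not part of the original header. With the default
 * VARIANT defined further below (GNU Arm Embedded tool-chain, hard-float ABI, FPU used,
 * Cortex-M4), the packed flags word decodes as:
 *
 *   uint32_t flags = AI_RELOC_RT_SET_FLAGS(VARIANT);   // = 0x20085C24
 *
 *   AI_RELOC_RT_GET_MAJOR(flags)  // -> 2     (AI_RELOC_RT_VERSION_MAJOR)
 *   AI_RELOC_RT_GET_MINOR(flags)  // -> 0     (AI_RELOC_RT_VERSION_MINOR)
 *   AI_RELOC_RT_GET_CPUID(flags)  // -> 0xC24 (AI_RELOC_ARM_CORTEX_M4)
 *   AI_RELOC_RT_FPU_USED(flags)   // -> 1
 */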
/* AI RELOC RT context definition */
|
||||
struct ai_reloc_rt_ctx {
|
||||
volatile uint32_t state; /* current state */
|
||||
ai_handle network; /* real handle of the network instance */
|
||||
uint32_t ram_addr; /* loaded base address for the RAM sections */
|
||||
uint32_t rom_addr; /* loaded base address for the ROM sections */
|
||||
uint32_t ram_alloc_addr; /* base address of the allocated buffer (optional) */
|
||||
const char *c_name; /* c-name of model */
|
||||
const uint32_t act_size; /* requested RAM size for the activations */
|
||||
const uint32_t weights_size; /* size for the weights */
|
||||
ai_observer_exec_ctx obs_ctx; /* RT low-level context for the observer */
|
||||
};
|
||||
|
||||
#define AI_RELOC_RT_STATE_NOT_INITIALIZED (0)
|
||||
#define AI_RELOC_RT_STATE_INITIALIZED (1 << 0)
|
||||
#define AI_RELOC_RT_STATE_XIP_MODE (1 << 1)
|
||||
|
||||
|
||||
#if defined(AI_NETWORK_RELOC)
|
||||
|
||||
/* -----------------------------------------------------------------------------
|
||||
* This part is only used during the compilation of the network.c to
|
||||
* generate the entry points (see linker and relocatable_pp.py files).
|
||||
* -----------------------------------------------------------------------------
|
||||
*/
|
||||
|
||||
#if !defined(C_NAME)
|
||||
|
||||
#include <network_data.h>
|
||||
|
||||
#define C_NAME network
|
||||
#define C_NAME_UP NETWORK
|
||||
|
||||
#else
|
||||
|
||||
#if !defined(C_INC_DATA_FILE)
|
||||
#error C_INC_DATA_FILE should be defined
|
||||
#endif
|
||||
|
||||
#if !defined(C_NAME_UP)
|
||||
#error C_NAME_UP should be defined (=str.upper(C_NAME))
|
||||
#endif
|
||||
|
||||
#include C_INC_DATA_FILE
|
||||
|
||||
#endif
|
||||
|
||||
#if !defined(VARIANT)
|
||||
/* Default variant definition */
|
||||
#define VARIANT ( (AI_RELOC_TOOLCHAIN_ARM_EMBEDDED << 16) | (AI_RELOC_TOOLCHAIN_FP_ABI_HARD << 13) |\
|
||||
(1UL << 12) | AI_RELOC_ARM_CORTEX_M4)
|
||||
#endif
|
||||
|
||||
|
||||
#if !defined(__GNUC__)
|
||||
#error "AI_NETWORK_RELOC code generation is only supported with a GCC-based tool-chain"
|
||||
#endif
|
||||
|
||||
#define MAKE_FN_(_x, _e) ai_ ## _x ## _e
|
||||
#define MAKE_DEF_(_x, _e) AI_ ## _x ## _e
|
||||
|
||||
#define _DATA_WEIGHTS(name) MAKE_DEF_(name, _DATA_WEIGHTS)
|
||||
#define _DATA_ACTIVATIONS(name) MAKE_DEF_(name, _DATA_ACTIVATIONS)
|
||||
|
||||
#define _MODEL_NAME(name) MAKE_DEF_(name, _MODEL_NAME)
|
||||
#define _ACT_SIZE(name) MAKE_DEF_(name, _DATA_ACTIVATIONS_SIZE)
|
||||
#define _WEIGHTS_SIZE(name) MAKE_DEF_(name, _DATA_WEIGHTS_SIZE)
|
||||
|
||||
#define _CREATE(name) MAKE_FN_(name, _create)
|
||||
#define _INIT(name) MAKE_FN_(name, _init)
|
||||
#define _RUN(name) MAKE_FN_(name, _run)
|
||||
#define _REPORT(name) MAKE_FN_(name, _get_report)
|
||||
#define _ERROR(name) MAKE_FN_(name, _get_error)
|
||||
#define _DESTROY(name) MAKE_FN_(name, _destroy)
|
||||
#define _FORWARD(name) MAKE_FN_(name, _forward)
|
||||
#define _DATA_PARAMS_GET(name) MAKE_FN_(name, _data_params_get)
|
||||
|
||||
static ai_bool ai_network_init_v2(ai_handle hdl, const ai_handle *weights, const ai_handle *activations)
|
||||
{
|
||||
ai_network_params params;
|
||||
ai_bool (*fct_)(ai_network_params* params) = _DATA_PARAMS_GET(C_NAME);
|
||||
fct_(¶ms);
|
||||
|
||||
for (int idx=0; idx < params.map_activations.size; idx++)
|
||||
AI_BUFFER_ARRAY_ITEM_SET_ADDRESS(¶ms.map_activations, idx, activations[idx]);
|
||||
|
||||
for (int idx=0; idx < params.map_weights.size; idx++)
|
||||
AI_BUFFER_ARRAY_ITEM_SET_ADDRESS(¶ms.map_weights, idx, weights[idx]);
|
||||
|
||||
return _INIT(C_NAME)(hdl, ¶ms);
|
||||
}
|
||||
|
||||
/*
|
||||
* Entry table to handle the offset of network entry point and
|
||||
* the RT context.
|
||||
*/
|
||||
struct ai_reloc_network_entries {
|
||||
ai_error (*create)(ai_handle* network, const ai_buffer* network_config);
|
||||
ai_bool (*init)(ai_handle network, const ai_network_params* params);
|
||||
ai_bool (*init_v2)(ai_handle network, const ai_handle *weights, const ai_handle *act);
|
||||
ai_i32 (*run)(ai_handle network, const ai_buffer* input, ai_buffer* output);
|
||||
ai_bool (*report)(ai_handle network, ai_network_report* report);
|
||||
ai_error (*error)(ai_handle network);
|
||||
ai_handle (*destroy)(ai_handle network);
|
||||
ai_i32 (*forward)(ai_handle network, const ai_buffer* input);
|
||||
ai_bool (*plt_obs_register_s)(ai_handle network, ai_observer_exec_ctx *ctx);
|
||||
ai_bool (*plt_obs_unregister_s)(ai_handle network, ai_observer_exec_ctx *ctx);
|
||||
ai_bool (*plt_obs_node_info)(ai_handle network, ai_observer_node *node_info);
|
||||
struct ai_reloc_rt_ctx *rt_ctx;
|
||||
};
|
||||
|
||||
#define AI_RELOC_NETWORK()\
|
||||
static struct ai_reloc_rt_ctx __attribute__((used, section (".network_rt_ctx"), )) _network_rt_ctx = { 0, 0, 0, 0, 0, _MODEL_NAME(C_NAME_UP), _ACT_SIZE(C_NAME_UP), _WEIGHTS_SIZE(C_NAME_UP) }; \
|
||||
const struct ai_reloc_network_entries __attribute__((used, section (".network_rt_init"), visibility("default"))) _network_entries = { \
|
||||
.create = _CREATE(C_NAME), \
|
||||
.init = _INIT(C_NAME), \
|
||||
.init_v2 = ai_network_init_v2, \
|
||||
.run = _RUN(C_NAME), \
|
||||
.report = _REPORT(C_NAME), \
|
||||
.error = _ERROR(C_NAME), \
|
||||
.destroy = _DESTROY(C_NAME), \
|
||||
.forward = _FORWARD(C_NAME), \
|
||||
.plt_obs_register_s = ai_platform_observer_register_s, \
|
||||
.plt_obs_unregister_s = ai_platform_observer_unregister_s, \
|
||||
.plt_obs_node_info = ai_platform_observer_node_info, \
|
||||
.rt_ctx = &_network_rt_ctx,\
|
||||
}; \
|
||||
const uint32_t __attribute__((used, section (".network_rt_flags"), visibility("default"))) _network_flags = AI_RELOC_RT_SET_FLAGS(VARIANT);\
|
||||
|
||||
|
||||
#else
|
||||
|
||||
#define AI_RELOC_NETWORK()
|
||||
|
||||
#endif
|
||||
|
||||
|
||||
AI_API_DECLARE_BEGIN
|
||||
|
||||
typedef struct _ai_rel_network_info {
|
||||
const char* c_name; /* c-name of the model */
|
||||
ai_u32 variant; /* 32-b word to handle the reloc rt version,
|
||||
the used ARM Embedded compiler,
|
||||
Cortex-Mx (CPUID) and if the FPU is requested */
|
||||
ai_size code_sz; /* size of the code (header + txt + rodata + data + got + rel sections) */
|
||||
ai_handle weights; /* address of the weights (= @ of the object + offset) */
|
||||
ai_size weights_sz; /* size (in bytes) of the weights */
|
||||
ai_size acts_sz; /* minimum requested RAM size (in bytes) for the activations buffer */
|
||||
ai_size rt_ram_xip; /* minimum requested RAM size to install it, XIP mode */
|
||||
ai_size rt_ram_copy; /* minimum requested RAM size to install it, COPY mode */
|
||||
} ai_rel_network_info;
|
||||
|
||||
|
||||
/* -----------------------------------------------------------------------------
|
||||
* Public API declaration
|
||||
* -----------------------------------------------------------------------------
|
||||
*/
|
||||
|
||||
/*!
|
||||
* @brief utility function to retrieve the dimensioning information
|
||||
* to install the relocatable binary network
|
||||
*
|
||||
* @param obj address of the binary object
|
||||
* @param rt a pointer to the ai_rel_network_info struct where to
|
||||
* store info.
|
||||
*
|
||||
* @return an error type/code pair indicating both the error type and code
|
||||
* see @ref ai_error for struct definition
|
||||
*/
|
||||
AI_API_ENTRY
|
||||
ai_error ai_rel_network_rt_get_info(const void* obj, ai_rel_network_info* rt);
|
||||
|
||||
/*!
|
||||
* @brief install and create an instance of the network
|
||||
*
|
||||
* @param obj address of the binary object
|
||||
* @param ram_addr indicates the address of the RAM where to install
* the different sections according to the requested mode.
* @param ram_size indicates the size of the provided RAM
* @param mode indicates the expected execution mode
|
||||
* - AI_RELOC_RT_LOAD_MODE_XIP: code is executed in place
|
||||
* - AI_RELOC_RT_LOAD_MODE_COPY: code is copied into RAM before execution.
|
||||
* @param hdl a pointer to an ai_handle object to store the reference
|
||||
* (opaque object) of the instance.
|
||||
*
|
||||
* @return an error type/code pair indicating both the error type and code
|
||||
* see @ref ai_error for struct definition
|
||||
*
|
||||
* Note: if the ram_size or ram_addr parameters are null, the requested memory
* buffer is allocated with the AI_RELOC_MALLOC/AI_RELOC_FREE functions.
* _crc_cb can be a NULL pointer to get the default behavior.
|
||||
*/
|
||||
AI_API_ENTRY
|
||||
ai_error ai_rel_network_load_and_create(const void* obj, ai_handle ram_addr,
|
||||
ai_size ram_size, uint32_t mode,
|
||||
ai_handle* hdl);
|
||||
|
||||
/*!
|
||||
* @brief initialize the instance of the network
|
||||
*
|
||||
* @param hdl network handle (@ref ai_rel_network_load_and_create())
|
||||
* @param weights array of weights buffers
|
||||
* @param act array of activations buffers
|
||||
*
|
||||
* @return false if the handle, weights or act addresses are invalid,
* otherwise true and the network instance is fully initialized.
|
||||
* Note that ai_rel_network_get_error() can be used to have more details
|
||||
* about the error.
|
||||
*/
|
||||
AI_API_ENTRY
|
||||
ai_bool ai_rel_network_init(ai_handle hdl, const ai_handle *weights,
|
||||
const ai_handle *act);
|
||||
|
||||
/*!
|
||||
* @brief retrieve the network information
|
||||
*
|
||||
* @param hdl network handle (@ref ai_rel_network_load_and_create())
|
||||
* @param report reference report object to store the information
|
||||
*
|
||||
* @return false if the handle is invalid or report is NULL,
* otherwise true and the report is filled in.
|
||||
*
|
||||
* Note: in case of error the error type could be queried by
|
||||
* using @ref ai_rel_network_get_error
|
||||
*/
|
||||
AI_API_ENTRY
|
||||
ai_bool ai_rel_network_get_report(ai_handle hdl, ai_network_report* report);
|
||||
|
||||
/*!
|
||||
* @brief return the last error
|
||||
*
|
||||
* @param hdl network handle (@ref ai_rel_network_load_and_create())
|
||||
*
|
||||
* @return an error type/code pair indicating both the error type and code
|
||||
* see @ref ai_error for struct definition
|
||||
*/
|
||||
AI_API_ENTRY
|
||||
ai_error ai_rel_network_get_error(ai_handle hdl);
|
||||
|
||||
/*!
|
||||
* @brief run the network and return the predicted output
|
||||
*
|
||||
* @param hdl network handle (@ref ai_rel_network_load_and_create())
|
||||
* @param input pointer to the input buffer(s)
* @param output pointer to the output buffer(s) where the predictions are stored
|
||||
*
|
||||
* @return number of input batches processed (default 1) or <= 0 if it fails
|
||||
*
|
||||
* Note: in case of error the error type could be queried by
|
||||
* using @ref ai_rel_network_get_error
|
||||
*/
|
||||
AI_API_ENTRY
|
||||
ai_i32 ai_rel_network_run(ai_handle hdl, const ai_buffer* input, ai_buffer* output);
|
||||
|
||||
|
||||
/*!
|
||||
* @brief un-install and destroy the instantiated network
|
||||
*
|
||||
* @param hdl network handle (@ref ai_rel_network_load_and_create())
|
||||
*
|
||||
* @return AI_HANDLE_NULL if network was destroyed correctly
|
||||
*/
|
||||
AI_API_ENTRY
|
||||
ai_handle ai_rel_network_destroy(ai_handle hdl);
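/*
 * Illustrative usage sketch (not part of the API): typical lifecycle of a
 * relocatable network built on the functions declared above. The activations
 * placement, the single weights/activations buffer and the AI_ERROR_NONE
 * success check are assumptions for demonstration purposes only.
 */
static inline ai_error ai_rel_network_run_once_example(
  const void* blob, ai_handle exec_ram, ai_size exec_ram_size,
  ai_handle activations, const ai_buffer* in, ai_buffer* out)
{
  ai_rel_network_info info;
  ai_handle net = AI_HANDLE_NULL;

  /* 1 - retrieve the dimensioning info embedded in the binary object */
  ai_error err = ai_rel_network_rt_get_info(blob, &info);
  if (err.type != AI_ERROR_NONE)
    return err;

  /* 2 - install the code (XIP: executed in place) and create the instance */
  err = ai_rel_network_load_and_create(blob, exec_ram, exec_ram_size,
                                       AI_RELOC_RT_LOAD_MODE_XIP, &net);
  if (err.type != AI_ERROR_NONE)
    return err;

  /* 3 - bind the weights (taken from the blob itself) and the activations,
   *     then run one inference */
  const ai_handle weights[] = { info.weights };
  const ai_handle acts[] = { activations };
  if (!ai_rel_network_init(net, weights, acts) ||
      (ai_rel_network_run(net, in, out) <= 0))
    err = ai_rel_network_get_error(net);

  /* 4 - un-install and destroy the instance */
  ai_rel_network_destroy(net);
  return err;
}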
|
||||
|
||||
/*!
|
||||
* @brief register an observer context. Allows registering a client callback
* which will be called before and/or after the execution of a c-node with
* the references of the used tensors (see @ref ai_observer_node).
|
||||
*
|
||||
* @param hdl network handle (@ref ai_rel_network_load_and_create())
|
||||
* @param cb reference of the user callback function
|
||||
* @param cookie reference of a user object/ctx
|
||||
* @param flags indicate expected events (see AI_OBSERVER_XX_EVT flag definition)
|
||||
*
|
||||
* @return false if the registration has failed (network error is updated)
|
||||
* else true (network error is updated).
|
||||
*/
|
||||
AI_API_ENTRY
|
||||
ai_bool ai_rel_platform_observer_register(ai_handle hdl,
|
||||
ai_observer_node_cb cb, ai_handle cookie, ai_u32 flags);
|
||||
|
||||
/*!
|
||||
* @brief un-register the observer context.
|
||||
*
|
||||
* @param hdl network handle (@ref ai_rel_network_load_and_create())
|
||||
* @param cb reference of the user callback function
|
||||
* @param cookie reference of a user object/ctx
|
||||
*
|
||||
* @return false if the un-registration has failed (network error is updated)
|
||||
* else true
|
||||
*
|
||||
* Note: in case of error the error type could be queried by
|
||||
* using @ref ai_rel_network_get_error
|
||||
*/
|
||||
AI_API_ENTRY
|
||||
ai_bool ai_rel_platform_observer_unregister(ai_handle hdl,
|
||||
ai_observer_node_cb cb, ai_handle cookie);
|
||||
|
||||
|
||||
/*!
|
||||
* @brief return the info of a requested c-node (defined by the
|
||||
* c_idx field). Should be called after the initialization phase.
|
||||
*
|
||||
* @param hdl network handle (@ref ai_rel_network_load_and_create())
|
||||
* @param node_info a pointer to a reference of the node description
|
||||
*
|
||||
* @return true if the node_info->c_idx designates a valid index else
|
||||
* false
|
||||
*
|
||||
* Note: in case of error the error type could be queried by
|
||||
* using @ref ai_rel_network_get_error
|
||||
*/
|
||||
AI_API_ENTRY
|
||||
ai_bool ai_rel_platform_observer_node_info(ai_handle hdl,
|
||||
ai_observer_node *node_info);
|
||||
|
||||
AI_API_DECLARE_END
|
||||
|
||||
#endif
|
||||
37
lib/stai/libstai/include/core_assert.h
Normal file
@ -0,0 +1,37 @@
|
||||
/**
|
||||
******************************************************************************
|
||||
* @file core_assert.h
|
||||
* @author AST Embedded Analytics Research Platform
|
||||
* @brief header file of core assert routine
|
||||
******************************************************************************
|
||||
* @attention
|
||||
*
|
||||
* Copyright (c) 2023 STMicroelectronics.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This software is licensed under terms that can be found in the LICENSE file
|
||||
* in the root directory of this software component.
|
||||
* If no LICENSE file comes with this software, it is provided AS-IS.
|
||||
*
|
||||
******************************************************************************
|
||||
*/
|
||||
#ifndef CORE_ASSERT_H
|
||||
#define CORE_ASSERT_H
|
||||
|
||||
#ifdef HAS_AI_ASSERT
|
||||
// Override the __FILE__ macro so that asserts() do not embed full source paths.
// The -Wno-builtin-macro-redefined option also needs to be added to the build to avoid warnings.
|
||||
#undef __FILE__
|
||||
#define __FILE__ (__builtin_strrchr("/" __BASE_FILE__, '/') + 1)
|
||||
|
||||
#include <assert.h>
|
||||
#define CORE_ASSERT(expr) \
|
||||
assert(expr); /* CORE_ASSERT */
|
||||
|
||||
#else
|
||||
#define CORE_ASSERT(expr) \
|
||||
(void)0; /* CORE_ASSERT */
|
||||
|
||||
#endif /* HAS_AI_ASSERT */
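/*
 * Illustrative usage sketch (not part of this header): CORE_ASSERT() expands
 * to a standard assert() only when HAS_AI_ASSERT is defined at build time,
 * otherwise it compiles to a no-op; the macro already provides the trailing ';'.
 */
static inline int core_assert_example(const int* values, int count)
{
  CORE_ASSERT(values && (count > 0))  /* checked only when HAS_AI_ASSERT is defined */
  return values[0];
}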
|
||||
|
||||
#endif /* CORE_ASSERT_H */
|
||||
273
lib/stai/libstai/include/core_common.h
Normal file
@ -0,0 +1,273 @@
|
||||
#ifndef CORE_COMMON_H
|
||||
#define CORE_COMMON_H
|
||||
/**
|
||||
******************************************************************************
|
||||
* @file core_common.h
|
||||
* @author AST Embedded Analytics Research Platform
|
||||
* @brief header file of common core datatypes
|
||||
******************************************************************************
|
||||
* @attention
|
||||
*
|
||||
* Copyright (c) 2018 STMicroelectronics.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This software is licensed under terms that can be found in the LICENSE file
|
||||
* in the root directory of this software component.
|
||||
* If no LICENSE file comes with this software, it is provided AS-IS.
|
||||
*
|
||||
******************************************************************************
|
||||
*/
|
||||
#include "ai_platform.h"
|
||||
#include "ai_platform_interface.h"
|
||||
#include "core_datatypes.h"
|
||||
|
||||
/*!
|
||||
* @defgroup core_common Common Core Library Routines
|
||||
* @brief Common macros, datatypes and routines of core common module
|
||||
* @details This module contains the definitions and handling of the @ref ai_node
* datastructures. An ai_node is a generic abstraction for a network node that
* could be either a fixed-function layer or an operator. Ideally the platform
* interface defined in the api module should handle and process generic nodes in
* the network, without relying on the fact that they are layer or operator
* datastructs. Implementation-specific details should be kept inside the layers
* and operators modules. The core module additionally implements common routines
* used in the layers and operators modules.
|
||||
*/
|
||||
|
||||
/******************************************************************************/
|
||||
#ifdef HAS_AI_ASSERT
|
||||
#define ASSERT_ARRAY_SANITY(a_) \
|
||||
AI_ASSERT((a_) && (a_)->size>0)
|
||||
|
||||
#define ASSERT_ARRAY_DATA_SANITY(a_) \
|
||||
ASSERT_ARRAY_SANITY(a_) \
|
||||
AI_ASSERT((a_)->data && (a_)->data_start)
|
||||
|
||||
#define ASSERT_TENSOR_SANITY(t_) \
|
||||
AI_ASSERT((t_) && (t_)->data) \
|
||||
AI_ASSERT(CORE_TENSOR_GET_SHAPE_SIZE(t_)>0) \
|
||||
ASSERT_ARRAY_SANITY((t_)->data)
|
||||
|
||||
#define ASSERT_TENSOR_LIST_SANITY(tlist_) \
|
||||
AI_ASSERT((tlist_) && (GET_TENSOR_LIST_SIZE(tlist_)>0)) \
|
||||
|
||||
#define ASSERT_TENSOR_DATA_SANITY(t_) \
|
||||
ASSERT_TENSOR_SANITY(t_) \
|
||||
ASSERT_ARRAY_DATA_SANITY((t_)->data)
|
||||
|
||||
#define ASSERT_NODE_SANITY(node_) \
|
||||
do { \
|
||||
AI_ASSERT(AI_NODE_OBJ(node_)->tensors && AI_NODE_OBJ(node_)->tensors->chain) \
|
||||
ASSERT_TENSOR_SANITY(GET_TENSOR_IN(AI_NODE_OBJ(node_)->tensors, 0)) \
|
||||
ASSERT_TENSOR_SANITY(GET_TENSOR_OUT(AI_NODE_OBJ(node_)->tensors, 0)) \
|
||||
} while (0);
|
||||
#else
|
||||
#define ASSERT_ARRAY_SANITY(a_) /* ASSERT_ARRAY_SANITY */
|
||||
#define ASSERT_ARRAY_DATA_SANITY(a_) /* ASSERT_ARRAY_DATA_SANITY */
|
||||
#define ASSERT_TENSOR_SANITY(t_) /* ASSERT_TENSOR_SANITY */
|
||||
#define ASSERT_TENSOR_LIST_SANITY(tlist_) /* ASSERT_TENSOR_LIST_SANITY */
|
||||
#define ASSERT_TENSOR_DATA_SANITY(t_) /* ASSERT_TENSOR_DATA_SANITY */
|
||||
#define ASSERT_NODE_SANITY(node_) /* ASSERT_NODE_SANITY */
|
||||
#endif /*HAS_AI_ASSERT*/
|
||||
|
||||
|
||||
#if defined(__GNUC__) || defined(__clang__)
|
||||
/* Suppress unused function warnings */
|
||||
#define AI_UNUSED_FUNCTION __attribute__((unused))
|
||||
/* Manage false positives in address sanitizer */
|
||||
#define AI_NO_SANITIZE_ADDRESS __attribute__((no_sanitize_address))
|
||||
#else
|
||||
#define AI_UNUSED_FUNCTION /* AI_UNUSED_FUNCTION */
|
||||
#define AI_NO_SANITIZE_ADDRESS /* AI_NO_SANITIZE_ADDRESS */
|
||||
#endif
|
||||
|
||||
|
||||
/******************************************************************************/
|
||||
#define AI_NODE_TYPE(type_) \
|
||||
((ai_node_type)((ai_u32)(type_)&0xFFFF))
|
||||
|
||||
#define AI_NODE_OBJ(obj_) \
|
||||
((ai_node*)(obj_))
|
||||
|
||||
#define AI_NODE_FUNC(func_) \
|
||||
((node_func)(func_))
|
||||
|
||||
#define AI_NODE_COMMON_FIELDS_DECLARE \
|
||||
ai_node_type type; /*!< node type id (see @ref ai_node_type) */ \
|
||||
ai_id_obj id; /*!< node object instance id (see @ref ai_id_obj) */ \
|
||||
ai_flags flags; /*!< node object flags */ \
|
||||
ai_klass_obj klass; /*!< opaque handler to specific layer implementations */ \
|
||||
ai_network* network; /*!< handle to global network context */ \
|
||||
struct ai_node_s* next; /*!< the next node object in the sequence */ \
|
||||
node_func forward; /*!< forward function for the node */ \
|
||||
AI_CONST ai_tensor_chain* tensors; /*!< pointer to node tensor chain */
|
||||
|
||||
#define AI_NODE_STATEFUL_FIELDS_DECLARE \
|
||||
AI_NODE_COMMON_FIELDS_DECLARE \
|
||||
ai_handle state; \
|
||||
node_func init; \
|
||||
node_func update; \
|
||||
node_func destroy;
|
||||
|
||||
#define AI_NODE_COMMON_INIT(type_, id_, flags_, klass_, network_, next_, forward_) \
|
||||
.type = AI_NODE_TYPE(type_), \
|
||||
.id = AI_ID_OBJ(id_), \
|
||||
.flags = (flags_), \
|
||||
.klass = AI_KLASS_OBJ(klass_), \
|
||||
.network = AI_NETWORK_OBJ(network_), \
|
||||
.next = AI_NODE_OBJ(next_), \
|
||||
.forward = AI_NODE_FUNC(forward_)
|
||||
|
||||
/*****************************************************************************/
|
||||
/** Network Tensors Chains / Lists Handlers **/
|
||||
/*****************************************************************************/
|
||||
#define AI_FOR_EACH_TENSOR_CHAIN_DO(tlist_ptr_, chain_) \
|
||||
ai_tensor_list* tlist_ptr_ = (chain_)->chain; \
|
||||
for (; tlist_ptr_<(((chain_)->chain)+((chain_)->size)); tlist_ptr_++)
|
||||
|
||||
#define AI_FOR_EACH_TENSOR_LIST_DO(idx_, t_ptr_, tlist_ptr_) \
|
||||
ai_tensor* t_ptr_ = NULL; \
|
||||
for (ai_size idx_ = 0; (idx_ < GET_TENSOR_LIST_SIZE(tlist_ptr_)) && \
|
||||
((t_ptr_ = GET_TENSOR_LIST_ITEM(tlist_ptr_, idx_)) != NULL); ++idx_)
|
||||
|
||||
|
||||
#define GET_TENSOR_LIST_INFO(list_) \
|
||||
((list_)->info)
|
||||
|
||||
#define GET_TENSOR_LIST_META(list_, pos_) \
|
||||
(&(GET_TENSOR_LIST_INFO(list_)->meta[pos_]))
|
||||
|
||||
#define GET_TENSOR_LIST_STATE(list_, pos_) \
|
||||
(&(GET_TENSOR_LIST_INFO(list_)->state[pos_]))
|
||||
|
||||
#define GET_TENSOR_LIST_BUFFER(list_, pos_) \
|
||||
(&(GET_TENSOR_LIST_INFO(list_)->buffer[pos_]))
|
||||
|
||||
#define GET_TENSOR_LIST_ITEM(list_, pos_) \
|
||||
((NULL!=GET_TENSOR_LIST_ITEMS(list_)) \
|
||||
? GET_TENSOR_LIST_ITEMS(list_)[(pos_)] : NULL)
|
||||
|
||||
#define GET_TENSOR_LIST_ITEMS(list_) \
|
||||
((list_)->tensor)
|
||||
|
||||
#define GET_TENSOR_LIST_SIZE(list_) \
|
||||
((NULL!=(list_)) ? (list_)->size : 0)
|
||||
|
||||
#define GET_TENSOR_CHAIN_SIZE(chain_) \
|
||||
((NULL!=(chain_)) ? (chain_)->size : 0)
|
||||
|
||||
#define GET_TENSOR_LIST(chain_, type_) \
|
||||
((AI_CONCAT(AI_TENSOR_CHAIN_, type_)<(chain_)->size) \
|
||||
? &(chain_)->chain[AI_CONCAT(AI_TENSOR_CHAIN_, type_)] : NULL)
|
||||
|
||||
#define GET_TENSOR_LIST_IN(chain_) \
|
||||
(GET_TENSOR_LIST(chain_, INPUT))
|
||||
|
||||
#define GET_TENSOR_LIST_OUT(chain_) \
|
||||
(GET_TENSOR_LIST(chain_, OUTPUT))
|
||||
|
||||
#define GET_TENSOR_LIST_WEIGTHS(chain_) \
|
||||
(GET_TENSOR_LIST(chain_, WEIGHTS))
|
||||
|
||||
#define GET_TENSOR_LIST_SCRATCH(chain_) \
|
||||
(GET_TENSOR_LIST(chain_, SCRATCH))
|
||||
|
||||
#define GET_TENSOR_IN(chain_, pos_) \
|
||||
(GET_TENSOR_LIST_ITEM(GET_TENSOR_LIST_IN(chain_), (pos_)))
|
||||
|
||||
#define GET_TENSOR_OUT(chain_, pos_) \
|
||||
(GET_TENSOR_LIST_ITEM(GET_TENSOR_LIST_OUT(chain_), (pos_)))
|
||||
|
||||
#define GET_TENSOR_WEIGHTS(chain_, pos_) \
|
||||
(GET_TENSOR_LIST_ITEM(GET_TENSOR_LIST_WEIGTHS(chain_), (pos_)))
|
||||
|
||||
#define GET_TENSOR_SCRATCH(chain_, pos_) \
|
||||
(GET_TENSOR_LIST_ITEM(GET_TENSOR_LIST_SCRATCH(chain_), (pos_)))
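/*
 * Illustrative sketch (not part of this header): walk every tensor of every
 * list in a node's tensor chain using the iteration helpers above. The
 * ai_tensor / ai_tensor_list / ai_tensor_chain layouts come from the platform
 * interface headers; the loop body is a demonstration only.
 */
static inline ai_u32 core_count_chain_tensors_example(const ai_tensor_chain* chain)
{
  ai_u32 count = 0;
  AI_FOR_EACH_TENSOR_CHAIN_DO(tlist, chain) {
    AI_FOR_EACH_TENSOR_LIST_DO(idx, t, tlist) {
      (void)t;      /* t points to each valid tensor of the current list */
      count++;
    }
  }
  return count;
}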
|
||||
|
||||
/******************************************************************************/
|
||||
|
||||
AI_API_DECLARE_BEGIN
|
||||
|
||||
/*!
|
||||
* @struct ai_node_type
|
||||
* @ingroup core_common
|
||||
* @brief generic network node numeric type ID
|
||||
*
|
||||
*/
|
||||
typedef uint16_t ai_node_type;
|
||||
|
||||
/*!
|
||||
* @typedef void (*node_func)(struct ai_node_s* node)
|
||||
* @ingroup core_common
|
||||
* @brief Callback signatures for all forward functions
|
||||
*/
|
||||
typedef void (*node_func)(struct ai_node_s* node);
|
||||
|
||||
/*!
|
||||
* @typedef ai_float (*func_nl_el)(const ai_float x)
|
||||
* @ingroup core_common
|
||||
* @brief Function pointer for generic elementwise transforms
|
||||
*
|
||||
* This function pointer abstracts a generic nonlinear function applied to a
|
||||
* single element. See @ref ai_math_sqrt in @ref math_helpers as examples.
|
||||
*/
|
||||
typedef ai_float (*func_nl_el)(const ai_float x);
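/*
 * Illustrative sketch only: a trivial elementwise transform matching the
 * func_nl_el signature (a hand-written ReLU), as it could be applied element
 * by element by a generic nonlinearity kernel.
 */
static AI_UNUSED_FUNCTION ai_float core_nl_relu_example(const ai_float x)
{
  return (x > 0.0f) ? x : 0.0f;
}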
|
||||
|
||||
/*!
|
||||
* @struct ai_node
|
||||
* @ingroup core_common
|
||||
* @brief Structure encoding a generic node of the network
|
||||
*
|
||||
* The node struct includes information about the network it belongs to, the
|
||||
* next node in a sequential network and the forward function. The forward
|
||||
* functions are implemented in the @ref layers module.
|
||||
*/
|
||||
typedef AI_ALIGNED_TYPE(struct, 4) ai_node_s {
|
||||
AI_NODE_COMMON_FIELDS_DECLARE
|
||||
} ai_node;
|
||||
|
||||
/*!
|
||||
* @struct ai_node_stateful
|
||||
* @ingroup core_common
|
||||
* @brief Structure encoding a stateful node of the network
|
||||
*
|
||||
* The node struct includes information about the network it belongs to, the
|
||||
* next node in a sequential network and the init, update and forward functions.
|
||||
* The node functions are implemented in the @ref layers module.
|
||||
*/
|
||||
typedef AI_ALIGNED_TYPE(struct, 4) ai_node_stateful_s {
|
||||
AI_NODE_STATEFUL_FIELDS_DECLARE
|
||||
} ai_node_stateful;
|
||||
|
||||
/*!
|
||||
* @brief initialize core module
|
||||
* @ingroup core_common
|
||||
* @return false if initialization fails, true otherwise
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
ai_bool core_init(void);
|
||||
|
||||
/*!
|
||||
* @brief get 1st error raised during processing
|
||||
* @ingroup core_common
|
||||
* @param[out] error the @ref ai_error recorded during processing
|
||||
* @return the 1st error generated during processing. If no errors AI_ERROR_NONE
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
ai_error core_get_error(ai_error* error);
|
||||
|
||||
/*!
|
||||
* @brief set error recorded during processing
|
||||
* @ingroup core_common
|
||||
* @param[out] error the @ref ai_error to set
|
||||
* @param[in] type the specific error type to set
|
||||
* @param[in] code the specific error code to set
|
||||
* @return true if the error is set, false in case a previous error was already set
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
ai_bool core_set_error(
|
||||
ai_error* error, const ai_error_type type, const ai_error_code code);
|
||||
|
||||
AI_API_DECLARE_END
|
||||
|
||||
#endif /*CORE_COMMON_H*/
|
||||
159
lib/stai/libstai/include/core_convert.h
Normal file
@ -0,0 +1,159 @@
|
||||
/**
|
||||
******************************************************************************
|
||||
* @file core_convert.h
|
||||
* @author AST Embedded Analytics Research Platform
|
||||
* @brief header file of core utils routines
|
||||
******************************************************************************
|
||||
* @attention
|
||||
*
|
||||
* Copyright (c) 2018 STMicroelectronics.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This software is licensed under terms that can be found in the LICENSE file
|
||||
* in the root directory of this software component.
|
||||
* If no LICENSE file comes with this software, it is provided AS-IS.
|
||||
*
|
||||
******************************************************************************
|
||||
*/
|
||||
#ifndef CORE_CONVERT_H
|
||||
#define CORE_CONVERT_H
|
||||
|
||||
#include "ai_platform.h"
|
||||
#include "ai_platform_interface.h"
|
||||
|
||||
#include "core_common.h"
|
||||
|
||||
AI_API_DECLARE_BEGIN
|
||||
|
||||
/*!
|
||||
* @defgroup core_convert Core Convert Routines
|
||||
* @brief Implementation of core node format conversion routines
|
||||
* (Q7 to float, ... etc.)
|
||||
*/
|
||||
|
||||
|
||||
/*!
|
||||
* @brief Convert tensors from float to quantized or vice versa
|
||||
* @ingroup core_convert
|
||||
* @param[in] pNode in a handler to node (layer or operator)
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void node_convert(ai_node *pNode);
|
||||
|
||||
/*!
|
||||
* @brief Convert integer tensors between QM.N formats (8/16 bits)
|
||||
* @ingroup core_convert
|
||||
* @param[in] pNode in a handler to node (layer or operator)
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void node_convert_fixed(ai_node *pNode);
|
||||
|
||||
/*!
|
||||
* @brief Convert integer tensors between signed and unsigned (int8/uint8) formats
|
||||
* @ingroup core_convert
|
||||
* @param[in] pNode in a handler to node (layer or operator)
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void node_convert_integer(ai_node *pNode);
|
||||
|
||||
/*!
|
||||
* @brief Convert float tensor to binary
|
||||
* @ingroup core_convert
|
||||
* @param[in] pNode in a handler to node (layer or operator)
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void node_convert_if32os1(ai_node *pNode);
|
||||
|
||||
|
||||
/*!
|
||||
* @brief Convert binary tensor to float
|
||||
* @ingroup core_convert
|
||||
* @param[in] pNode in a handler to node (layer or operator)
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void node_convert_is8os1(ai_node *pNode);
|
||||
|
||||
|
||||
/*!
|
||||
* @brief Convert binary tensor to signed int 8 bit
|
||||
* @ingroup core_convert
|
||||
* @param[in] pNode in a handler to node (layer or operator)
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void node_convert_is1os8(ai_node *pNode);
|
||||
|
||||
|
||||
/*!
|
||||
* @brief Convert binary tensor to signed int 16 bit
|
||||
* @ingroup core_convert
|
||||
* @param[in] pNode in a handler to node (layer or operator)
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void node_convert_is1os16(ai_node *pNode);
|
||||
|
||||
|
||||
/*!
|
||||
* @brief Convert binary tensor to float
|
||||
* @ingroup core_convert
|
||||
* @param[in] pNode in a handler to node (layer or operator)
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void node_convert_is1of32(ai_node *pNode);
|
||||
|
||||
|
||||
/*!
|
||||
* @brief Convert signed int 16 bit tensor to float
|
||||
* @ingroup core_convert
|
||||
* @param[in] pNode in a handler to node (layer or operator)
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void node_convert_is16of32(ai_node *pNode);
|
||||
|
||||
|
||||
/*!
|
||||
* @brief Convert unsigned int 16 bit tensor to float
|
||||
* @ingroup core_convert
|
||||
* @param[in] pNode in a handler to node (layer or operator)
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void node_convert_iu16of32(ai_node *pNode);
|
||||
|
||||
|
||||
/*!
|
||||
* @brief Convert float tensor to signed int 16 bit
|
||||
* @ingroup core_convert
|
||||
* @param[in] pNode in a handler to node (layer or operator)
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void node_convert_if32os16(ai_node *pNode);
|
||||
|
||||
|
||||
/*!
|
||||
* @brief Convert float tensor to unsigned int 16 bit
|
||||
* @ingroup core_convert
|
||||
* @param[in] pNode in a handler to node (layer or operator)
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void node_convert_if32ou16(ai_node *pNode);
|
||||
|
||||
|
||||
/*!
|
||||
* @brief Convert signed int 16 bit tensor to unsigned int 16 bit
|
||||
* @ingroup core_convert
|
||||
* @param[in] pNode in a handler to node (layer or operator)
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void node_convert_is16ou16(ai_node *pNode);
|
||||
|
||||
|
||||
/*!
|
||||
* @brief Convert a shape struct into a stride struct
|
||||
* @ingroup core_convert
|
||||
* @param[out] out a pointer to the stride struct receiving the conversion
* @param[in] in a pointer to the shape to convert
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void core_shape_to_stride(ai_stride* out, const ai_shape* in);
|
||||
|
||||
|
||||
#endif /*CORE_CONVERT_H*/
|
||||
62
lib/stai/libstai/include/core_datatypes.h
Normal file
@ -0,0 +1,62 @@
|
||||
/**
|
||||
******************************************************************************
|
||||
* @file core_datatypes.h
|
||||
* @author AST Embedded Analytics Research Platform
|
||||
* @brief header file of core module private defines and datatypes
|
||||
* not exposed to the public API nor to the codegen tool
|
||||
******************************************************************************
|
||||
* @attention
|
||||
*
|
||||
* Copyright (c) 2018 STMicroelectronics.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This software is licensed under terms that can be found in the LICENSE file
|
||||
* in the root directory of this software component.
|
||||
* If no LICENSE file comes with this software, it is provided AS-IS.
|
||||
*
|
||||
******************************************************************************
|
||||
*/
|
||||
#ifndef AI_CORE_DATATYPES_H
|
||||
#define AI_CORE_DATATYPES_H
|
||||
|
||||
#include <stdint.h>
|
||||
|
||||
/*!
|
||||
* @defgroup Core Module Datatypes
|
||||
* @brief Data structures and defines used by core module
|
||||
*/
|
||||
|
||||
/*!
|
||||
* @brief platform runtime core library version
|
||||
*/
|
||||
#ifndef AI_PLATFORM_RUNTIME_MAJOR
|
||||
#define AI_PLATFORM_RUNTIME_MAJOR (10)
|
||||
#endif
|
||||
#ifndef AI_PLATFORM_RUNTIME_MINOR
|
||||
#define AI_PLATFORM_RUNTIME_MINOR (1)
|
||||
#endif
|
||||
#ifndef AI_PLATFORM_RUNTIME_MICRO
|
||||
#define AI_PLATFORM_RUNTIME_MICRO (0)
|
||||
#endif
|
||||
|
||||
|
||||
#define AI_ID_OBJ(id) \
|
||||
((ai_id_obj)(id))
|
||||
|
||||
#define AI_C_ARRAY_COUNT(array_) \
|
||||
( sizeof(array_) / sizeof((array_)[0]) )
|
||||
|
||||
#define AI_C_ARRAY_BYTE_SIZE(array_) \
|
||||
( sizeof(array_) )
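/*
 * Illustrative sketch (not part of this header): both helpers rely on sizeof()
 * and therefore only work on true C arrays visible at the point of use,
 * never on pointers.
 */
static inline uint32_t ai_c_array_count_example(void)
{
  const uint8_t lut[16] = { 0u };          /* a true C array */
  (void)lut;
  return (uint32_t)AI_C_ARRAY_COUNT(lut);  /* evaluates to 16 */
}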
|
||||
|
||||
|
||||
/*!
|
||||
* @typedef ai_id_obj
|
||||
* @ingroup core_datatypes
|
||||
* @brief numeric identifier for generic object instances (e.g. layers,
|
||||
* operators, etc.). It is used by the codegen tool to keep track of the
* specific instances created
|
||||
*/
|
||||
typedef uint16_t ai_id_obj;
|
||||
|
||||
#endif /*AI_CORE_DATATYPES_H*/
|
||||
130
lib/stai/libstai/include/core_log.h
Normal file
@ -0,0 +1,130 @@
|
||||
/**
|
||||
******************************************************************************
|
||||
* @file core_log.h
|
||||
* @author AST Embedded Analytics Research Platform
|
||||
* @brief header file of core log interfaces
|
||||
******************************************************************************
|
||||
* @attention
|
||||
*
|
||||
* Copyright (c) 2018 STMicroelectronics.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This software is licensed under terms that can be found in the LICENSE file
|
||||
* in the root directory of this software component.
|
||||
* If no LICENSE file comes with this software, it is provided AS-IS.
|
||||
*
|
||||
******************************************************************************
|
||||
*/
|
||||
#ifndef CORE_LOG_H
|
||||
#define CORE_LOG_H
|
||||
|
||||
#include "ai_platform.h"
|
||||
#include "ai_datatypes_defines.h"
|
||||
|
||||
/*!
|
||||
* @defgroup core_log Logger core routines wrapper interface
|
||||
* @brief Common macros, datatypes and routines of ai logger module
|
||||
* @details This header defines the wrapping macro interfaces that handle the
* global logger module. These macros are defined when the macro HAS_LOG is
* defined, otherwise they are all set to NOP routines and no logger code is
* compiled at all. When the macro HAS_LOG is defined, only the log messages
* whose level is enabled by the value of the macro are compiled. Thus, to
* include in the compilation only log messages up to the error level, the value
* of HAS_LOG must be equal to the enum value of the LOG_ERROR macro (i.e. 3);
* a value of 6 includes all log messages up to the lowest-priority LOG_TRACE level.
|
||||
*/
|
||||
|
||||
#if defined HAS_LOG && (HAS_LOG>=0)
|
||||
#include "ai_log.h"
|
||||
#define AI_LOG_SECTION(...) \
|
||||
{ __VA_ARGS__ }
|
||||
|
||||
#define AI_LOG_ACQUIRE() \
|
||||
ai_log_acquire()
|
||||
#define AI_LOG_SET_LEVEL(level_) \
|
||||
AI_WRAP_FUNC(ai_log_set_level(level_);)
|
||||
#define AI_LOG_SET_QUIET(onoff_) \
|
||||
AI_WRAP_FUNC(ai_log_set_quiet(onoff_);)
|
||||
#define AI_LOG_SET_LOCK_FN(fn_, udata_) \
|
||||
AI_WRAP_FUNC(ai_log_set_lock(fn_, udata_);)
|
||||
#define AI_LOG_CHANNEL_PUSH(level_, fn_, udata_) \
|
||||
AI_WRAP_FUNC(ai_log_channel_push(level_, fn_, udata_);)
|
||||
#define AI_LOG_CHANNEL_POP(fn_, udata_) \
|
||||
AI_WRAP_FUNC(ai_log_channel_pop(fn_, udata_);)
|
||||
#ifdef LOG_USE_FILE
|
||||
#define AI_LOG_SET_FILE_POINTER(fp_) \
|
||||
AI_WRAP_FUNC(ai_log_set_fp(fp_);)
|
||||
#else
|
||||
#define AI_LOG_SET_FILE_POINTER(fp_) \
|
||||
AI_WRAP_FUNC(/*AI_LOG_SET_FILE_POINTER()*/)
|
||||
#endif
|
||||
#else
|
||||
#define AI_LOG_SECTION(...) AI_WRAP_FUNC(/*AI_LOG_SECTION()*/)
|
||||
|
||||
#define AI_LOG_ACQUIRE() (NULL)
|
||||
#define AI_LOG_SET_LEVEL(level_) AI_WRAP_FUNC(/*AI_LOG_SET_LEVEL()*/)
|
||||
#define AI_LOG_SET_QUIET(onoff_) AI_WRAP_FUNC(/*AI_LOG_SET_QUIET()*/)
|
||||
#define AI_LOG_SET_LOCK_FN(fn_, udata_) AI_WRAP_FUNC(/*AI_LOG_SET_LOCK_FN()*/)
|
||||
#define AI_LOG_CHANNEL_PUSH(level_, fn_, udata_) AI_WRAP_FUNC(/*AI_LOG_CHANNEL_PUSH()*/)
|
||||
#define AI_LOG_CHANNEL_POP(fn_, udata_) AI_WRAP_FUNC(/*AI_LOG_CHANNEL_POP()*/)
|
||||
#define AI_LOG_SET_FILE_POINTER(fp_) AI_WRAP_FUNC(/*AI_LOG_SET_FILE_POINTER()*/)
|
||||
#endif
|
||||
|
||||
#if defined HAS_LOG
|
||||
#define AI_LOG_PRINT(level, fmt, ...) \
|
||||
AI_WRAP_FUNC(ai_log_print(level, fmt, ##__VA_ARGS__);)
|
||||
#else
|
||||
#define AI_LOG_PRINT(level, fmt, ...) \
|
||||
AI_WRAP_FUNC(/*AI_LOG_PRINT(...)*/)
|
||||
#endif
|
||||
|
||||
#if defined HAS_LOG && (HAS_LOG>=LOG_SUDO)
|
||||
#define AI_LOG_SUDO(fmt, ...) \
|
||||
AI_WRAP_FUNC(ai_log_log(LOG_SUDO, __FILE__, __LINE__, fmt LOG_CR, ##__VA_ARGS__);)
|
||||
#else
|
||||
#define AI_LOG_SUDO(fmt, ...) AI_WRAP_FUNC(/*AI_LOG_SUDO()*/)
|
||||
#endif
|
||||
|
||||
#if defined HAS_LOG && (HAS_LOG>=LOG_TRACE)
|
||||
#define AI_LOG_TRACE(fmt, ...) \
|
||||
AI_WRAP_FUNC(ai_log_log(LOG_TRACE, __FILE__, __LINE__, fmt LOG_CR, ##__VA_ARGS__);)
|
||||
#else
|
||||
#define AI_LOG_TRACE(fmt, ...) AI_WRAP_FUNC(/*AI_LOG_TRACE()*/)
|
||||
#endif
|
||||
|
||||
#if defined HAS_LOG && (HAS_LOG>=LOG_DEBUG)
|
||||
#define AI_LOG_DEBUG(fmt, ...) \
|
||||
AI_WRAP_FUNC(ai_log_log(LOG_DEBUG, __FILE__, __LINE__, fmt LOG_CR, ##__VA_ARGS__);)
|
||||
#else
|
||||
#define AI_LOG_DEBUG(fmt, ...) AI_WRAP_FUNC(/*AI_LOG_DEBUG()*/)
|
||||
#endif
|
||||
|
||||
#if defined HAS_LOG && (HAS_LOG>=LOG_INFO)
|
||||
#define AI_LOG_INFO(fmt, ...) \
|
||||
AI_WRAP_FUNC(ai_log_log(LOG_INFO, __FILE__, __LINE__, fmt LOG_CR, ##__VA_ARGS__);)
|
||||
#else
|
||||
#define AI_LOG_INFO(fmt, ...) AI_WRAP_FUNC(/*AI_LOG_INFO()*/)
|
||||
#endif
|
||||
|
||||
#if defined HAS_LOG && (HAS_LOG>=LOG_WARN)
|
||||
#define AI_LOG_WARN(fmt, ...) \
|
||||
AI_WRAP_FUNC(ai_log_log(LOG_WARN, __FILE__, __LINE__, fmt LOG_CR, ##__VA_ARGS__);)
|
||||
#else
|
||||
#define AI_LOG_WARN(fmt, ...) AI_WRAP_FUNC(/*AI_LOG_WARN()*/)
|
||||
#endif
|
||||
|
||||
#if defined HAS_LOG && (HAS_LOG>=LOG_ERROR)
|
||||
#define AI_LOG_ERROR(fmt, ...) \
|
||||
AI_WRAP_FUNC(ai_log_log(LOG_ERROR, __FILE__, __LINE__, fmt LOG_CR, ##__VA_ARGS__);)
|
||||
#else
|
||||
#define AI_LOG_ERROR(fmt, ...) AI_WRAP_FUNC(/*AI_LOG_ERROR()*/)
|
||||
#endif
|
||||
|
||||
#if defined HAS_LOG && (HAS_LOG>=LOG_FATAL)
|
||||
#define AI_LOG_FATAL(fmt, ...) \
|
||||
AI_WRAP_FUNC(ai_log_log(LOG_FATAL, __FILE__, __LINE__, fmt LOG_CR, ##__VA_ARGS__);)
|
||||
#else
|
||||
#define AI_LOG_FATAL(fmt, ...) AI_WRAP_FUNC(/*AI_LOG_FATAL()*/)
|
||||
#endif
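/*
 * Illustrative usage sketch (not part of this header): with e.g. -DHAS_LOG=3,
 * i.e. the LOG_ERROR level mentioned above, error-level and more critical
 * messages are compiled in while AI_LOG_DEBUG() expands to a no-op. The
 * function and messages below are examples only.
 */
static inline void core_log_usage_example(int err_code)
{
  AI_LOG_DEBUG("entering recovery path (code %d)", err_code);
  AI_LOG_ERROR("irrecoverable runtime error (code %d)", err_code);
}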
|
||||
|
||||
#endif /*CORE_LOG_H*/
|
||||
92
lib/stai/libstai/include/core_net_inspect.h
Normal file
@ -0,0 +1,92 @@
|
||||
/**
|
||||
******************************************************************************
|
||||
* @file core_net_inspect.h
|
||||
* @author AST Embedded Analytics Research Platform
|
||||
* @brief header file of core network inspection APIs
|
||||
******************************************************************************
|
||||
* @attention
|
||||
*
|
||||
* Copyright (c) 2018 STMicroelectronics.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This software is licensed under terms that can be found in the LICENSE file
|
||||
* in the root directory of this software component.
|
||||
* If no LICENSE file comes with this software, it is provided AS-IS.
|
||||
*
|
||||
******************************************************************************
|
||||
*/
|
||||
#ifndef CORE_NET_INSPECT_H
|
||||
#define CORE_NET_INSPECT_H
|
||||
|
||||
#include "core_net_inspect_interface.h"
|
||||
|
||||
#include "core_common.h"
|
||||
#include "layers_common.h"
|
||||
|
||||
/*!
|
||||
* @defgroup core_net_inspect Core Network Inspection routines
|
||||
* @brief Implementation of core network inspection routines that allow
* inspecting a generated network model on a per-node basis
* @details A network context @ref ai_network basically contains a chained list
* of nodes @ref ai_node that have an associated forward function.
* Each ai_network context and ai_node datastruct has, as a required member
* field, an opaque handler (i.e. a void pointer) to a klass object.
* This handler is intended to be used as a platform-specific node context
* that implements specific target platform routines.
* The inspector module basically acts as a plugin that exploits these features
* by temporarily creating a hidden inspection context (see
* @ref ai_core_inspect_net_klass) associated to the network and
* linking it by re-routing the klass field to this inspection context. The
* inspection context saves as part of its state (by a stack push operation) the
* internal state of the network (all node / network klass pointers and actual
* forward functions).
* Thus, for each node it re-routes all the node's forward functions to a dedicated
* inspection forward function (see the @ref _forward_inspect_validate() routine).
* This routine is the core of the mechanism and it allows inspecting a network
* node by node. Some additional inspection can thus be done inside the
* _forward_inspect_validate() routine before and after the actual node
* forward function is called.
|
||||
*
|
||||
*/
|
||||
|
||||
AI_API_DECLARE_BEGIN
|
||||
|
||||
/*!
|
||||
* @defgroup core_net_inspect Network Inspection Core
|
||||
* @brief Implementation of the validation network routines
|
||||
*/
|
||||
|
||||
/*!
|
||||
* @brief Initialize the network inspection context on a given network
|
||||
* @ingroup core net inspect
|
||||
* @param network opaque handler to the network instance
|
||||
* @param cfg a pointer to the inspector configuration we want to use
|
||||
* @return true if execution of the API is fine, false otherwise
|
||||
*/
|
||||
AI_API_ENTRY
|
||||
ai_bool ai_network_inspect_init(
|
||||
ai_handle network, const ai_inspect_config* cfg);
|
||||
|
||||
/*!
|
||||
* @brief Get a summary report from the inspected network
|
||||
* @ingroup core net inspect
|
||||
* @param network opaque handler to the network instance
|
||||
* @param report a pointer to the report provided back by the inspection
|
||||
* @return true if execution of the API is fine, false otherwise
|
||||
*/
|
||||
AI_API_ENTRY
|
||||
ai_bool ai_network_inspect_get_report(
|
||||
ai_handle network, ai_inspect_net_report* report);
|
||||
|
||||
/*!
|
||||
* @brief Destroy the network inspection context on a given network
|
||||
* @ingroup core net inspect
|
||||
* @param network opaque handler to the network instance
|
||||
* @return true if execution of the API is fine, false otherwise
|
||||
*/
|
||||
AI_API_ENTRY
|
||||
ai_bool ai_network_inspect_destroy(ai_handle network);
|
||||
|
||||
AI_API_DECLARE_END
|
||||
|
||||
#endif /* CORE_NET_INSPECT_H */
|
||||
117
lib/stai/libstai/include/core_net_inspect_interface.h
Normal file
@ -0,0 +1,117 @@
|
||||
/**
|
||||
******************************************************************************
|
||||
* @file core_net_inspect_interface.h
|
||||
* @author AST Embedded Analytics Research Platform
|
||||
* @brief header file of core network inspection interface APIs
|
||||
******************************************************************************
|
||||
* @attention
|
||||
*
|
||||
* Copyright (c) 2018 STMicroelectronics.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This software is licensed under terms that can be found in the LICENSE file
|
||||
* in the root directory of this software component.
|
||||
* If no LICENSE file comes with this software, it is provided AS-IS.
|
||||
*
|
||||
******************************************************************************
|
||||
*/
|
||||
#ifndef CORE_NET_INSPECT_INTERFACE_H
|
||||
#define CORE_NET_INSPECT_INTERFACE_H
|
||||
|
||||
#include "ai_platform.h"
|
||||
|
||||
AI_API_DECLARE_BEGIN
|
||||
|
||||
/*!
|
||||
* @defgroup core_validation Validation Core
|
||||
* @brief Implementation of the validation network interface headers
|
||||
*/
|
||||
|
||||
|
||||
/*!
|
||||
* @struct ai_inspect_node_info
|
||||
* @brief network node inspection context: there is one of these datastructs
|
||||
* for each node of the network
|
||||
*/
|
||||
typedef struct ai_inspect_node_info_s {
|
||||
ai_u16 type; /*!< node type info @see ai_node datastruct */
|
||||
ai_u16 id; /*!< node id assigned by codegen tool to identify
|
||||
the specific node instance */
|
||||
ai_u16 batch_id; /*!< current node batch processed */
|
||||
ai_u16 n_batches; /*!< total number of node batches to process */
|
||||
ai_float elapsed_ms; /*!< node performance analysis: time in
|
||||
milliseconds to execute the node forward
|
||||
function */
|
||||
ai_u16 in_size; /*!< number of node's input activation buffers */
|
||||
ai_u16 out_size; /*!< number of node's output activation buffers */
|
||||
ai_buffer* in; /*!< input node activation buffer see @ref ai_buffer */
|
||||
ai_buffer* out; /*!< output node activation buffer see @ref ai_buffer */
|
||||
} ai_inspect_node_info;
|
||||
|
||||
/*!
|
||||
* @struct ai_inspect_net_report
|
||||
* @brief network inspection report context
|
||||
*/
|
||||
typedef struct ai_inspect_net_report_s {
|
||||
ai_u32 id; /*!< id of the report */
|
||||
ai_signature signature; /*!< network identification checksum */
|
||||
ai_u32 num_inferences; /*!< total number of inferences processed
|
||||
during the inspection */
|
||||
ai_u32 n_nodes; /*!< number of nodes in the network */
|
||||
ai_float elapsed_ms; /*!< network total time (in ms) for processing
|
||||
num_inferences inferences */
|
||||
ai_inspect_node_info* node; /*!< pointer to the array of size n_nodes where
|
||||
a single node report is reported. see @ref
|
||||
ai_inspect_node_info datastruct */
|
||||
} ai_inspect_net_report;
|
||||
|
||||
/*!
|
||||
* @enum net inspector inspection mode
|
||||
* @brief configuration flags to set net inspection mode
|
||||
*/
|
||||
typedef enum {
|
||||
VALIDATION_INSPECT = (0x1<<0), /**< Network validation inspection mode */
|
||||
STORE_ALL_IO_ACTIVATIONS = (0x1<<7), /**< Store all I/O activations on snapshot datastruct */
|
||||
} ai_inspect_mode;
|
||||
|
||||
typedef enum {
|
||||
AI_NODE_EXEC_PRE_FORWARD_STAGE = 0x0,
|
||||
AI_NODE_EXEC_POST_FORWARD_STAGE = 0x1,
|
||||
} ai_node_exec_stage;
|
||||
|
||||
/*!
|
||||
* @brief function pointer to callback report
|
||||
*/
|
||||
typedef void (*ai_inspect_report_cb_func)(
|
||||
const ai_handle cookie,
|
||||
const ai_inspect_net_report* report);
|
||||
|
||||
/*!
|
||||
* @brief function pointer to node execute
|
||||
*/
|
||||
typedef void (*ai_inspect_exec_node_cb_func)(
|
||||
const ai_handle cookie,
|
||||
const ai_inspect_node_info* node_info,
|
||||
const ai_node_exec_stage stage);
|
||||
|
||||
/*!
|
||||
* @struct ai_inspect_config
|
||||
* @brief inspection config datastruct
|
||||
*/
|
||||
typedef struct ai_inspect_config_s {
|
||||
ai_u8 validation_mode; /*!< validation mode flags
|
||||
see @ref ai_inspect_mode */
|
||||
ai_u8 log_level; /*!< log class level see @ref LOG_SUDO */
|
||||
ai_bool log_quiet; /*!< log class quiet mode */
|
||||
ai_inspect_report_cb_func on_report_destroy; /*!< callback function
|
||||
called when a report datastruct
|
||||
is released from memory */
|
||||
ai_inspect_exec_node_cb_func on_exec_node; /*!< callback function
|
||||
called when a node is executed (pre & post) */
|
||||
ai_handle cookie;
|
||||
} ai_inspect_config;
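/*
 * Illustrative sketch (not part of the interface): a minimal on_exec_node
 * callback that accumulates per-node execution time into an accumulator passed
 * through the cookie field. Such a callback would be stored in
 * ai_inspect_config.on_exec_node (with e.g. validation_mode = VALIDATION_INSPECT)
 * before calling ai_network_inspect_init() declared in core_net_inspect.h.
 */
static inline void ai_inspect_node_time_cb_example(
  const ai_handle cookie,
  const ai_inspect_node_info* node_info,
  const ai_node_exec_stage stage)
{
  ai_float* total_ms = (ai_float*)cookie;  /* accumulator provided via cfg.cookie */
  if ((total_ms != NULL) && (stage == AI_NODE_EXEC_POST_FORWARD_STAGE))
    *total_ms += node_info->elapsed_ms;
}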
|
||||
|
||||
|
||||
AI_API_DECLARE_END
|
||||
|
||||
#endif /* CORE_NET_INSPECT_INTERFACE_H */
|
||||
365
lib/stai/libstai/include/core_private.h
Normal file
@ -0,0 +1,365 @@
|
||||
/**
|
||||
******************************************************************************
|
||||
* @file core_private.h
|
||||
* @author AST Embedded Analytics Research Platform
|
||||
* @brief private header file of common core module private defines
|
||||
******************************************************************************
|
||||
* @attention
|
||||
*
|
||||
* Copyright (c) 2019 STMicroelectronics.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This software is licensed under terms that can be found in the LICENSE file
|
||||
* in the root directory of this software component.
|
||||
* If no LICENSE file comes with this software, it is provided AS-IS.
|
||||
*
|
||||
******************************************************************************
|
||||
*/
|
||||
#ifndef CORE_PRIVATE_H
|
||||
#define CORE_PRIVATE_H
|
||||
|
||||
#include "ai_datatypes_format.h"
|
||||
#include "ai_datatypes_internal.h"
|
||||
#include "ai_math_helpers.h"
|
||||
|
||||
#include "core_assert.h"
|
||||
#include "core_log.h"
|
||||
|
||||
/*!
|
||||
* @defgroup core_private Core Library Private macros and datatypes
|
||||
* @brief Common macros, datatypes and routines for core private routines
* @details This module contains the definitions and implementations of some
* internal routines and datatypes that are not supposed to be exposed as
* public headers. Usually this file should be included only in .c files or in
* headers that are private as well.
|
||||
*/
|
||||
|
||||
/*** Foreground Colors ****************************************************/
|
||||
#define CORE_COLOR_BLACK "\x1b[30m"
|
||||
#define CORE_COLOR_RED "\x1b[31m"
|
||||
#define CORE_COLOR_GREEN "\x1b[32m"
|
||||
#define CORE_COLOR_YELLOW "\x1b[33m"
|
||||
#define CORE_COLOR_BLUE "\x1b[94m"
|
||||
#define CORE_COLOR_MAGENTA "\x1b[35m"
|
||||
#define CORE_COLOR_CYAN "\x1b[36m"
|
||||
#define CORE_COLOR_WHYTE "\x1b[37m"
|
||||
#define CORE_COLOR_DEFAULT "\x1b[39m"
|
||||
#define CORE_COLOR_LGRAY "\x1b[90m"
|
||||
#define CORE_COLOR_LRED "\x1b[91m"
|
||||
#define CORE_COLOR_LGREEN "\x1b[92m"
|
||||
#define CORE_COLOR_LYELLOW "\x1b[93m"
|
||||
#define CORE_COLOR_LBLUE "\x1b[94m"
|
||||
#define CORE_COLOR_LMAGENTA "\x1b[95m"
|
||||
#define CORE_COLOR_LCYAN "\x1b[96m"
|
||||
#define CORE_COLOR_LWHITE "\x1b[97m"
|
||||
|
||||
/*** Text Attributes Colors *********************************************/
|
||||
#define CORE_COLOR_OFF "\x1b[0m"
|
||||
#define CORE_COLOR_BOLD "\x1b[1m"
|
||||
#define CORE_COLOR_UNDERLINE "\x1b[4m"
|
||||
#define CORE_COLOR_BLINK "\x1b[5m"
|
||||
#define CORE_COLOR_BOLD_OFF "\x1b[21m"
|
||||
#define CORE_COLOR_UNDERLINE_OFF "\x1b[24m"
|
||||
#define CORE_COLOR_BLINK_OFF "\x1b[25m"
|
||||
|
||||
/*** Background Colors ****************************************************/
|
||||
#define CORE_COLOR_BG_BLACK "\x1b[40m"
|
||||
#define CORE_COLOR_BG_RED "\x1b[41m"
|
||||
#define CORE_COLOR_BG_GREEN "\x1b[42m"
|
||||
#define CORE_COLOR_BG_YELLOW "\x1b[43m"
|
||||
#define CORE_COLOR_BG_BLUE "\x1b[44m"
|
||||
#define CORE_COLOR_BG_MAGENTA "\x1b[45m"
|
||||
#define CORE_COLOR_BG_CYAN "\x1b[46m"
|
||||
#define CORE_COLOR_BG_WHITE "\x1b[47m"
|
||||
#define CORE_COLOR_BG_DEFAULT "\x1b[49m"
|
||||
#define CORE_COLOR_BG_LGRAY "\x1b[100m"
|
||||
#define CORE_COLOR_BG_LRED "\x1b[101m"
|
||||
#define CORE_COLOR_BG_LGREEN "\x1b[102m"
|
||||
#define CORE_COLOR_BG_LYELLOW "\x1b[103m"
|
||||
#define CORE_COLOR_BG_LBLUE "\x1b[104m"
|
||||
#define CORE_COLOR_BG_LMAGENTA "\x1b[105m"
|
||||
#define CORE_COLOR_BG_LCYAN "\x1b[106m"
|
||||
#define CORE_COLOR_BG_LWHITE "\x1b[107m"
|
||||
|
||||
|
||||
/*****************************************************************************/
|
||||
#define CORE_ADDRESS_RANGE_INIT(start_, end_) \
|
||||
core_address_range_init(start_, end_)
|
||||
|
||||
#define CORE_GET_BUFFER_META_INFO(meta_info_, tensor_ptr_) \
|
||||
core_get_buffer_meta_info(meta_info_, tensor_ptr_)
|
||||
|
||||
#define CORE_ADDRESS_RANGE_END(range_) \
|
||||
( (ai_ptr)(((range_)->start)+((range_)->size)) )
|
||||
|
||||
#define CORE_ADDRESS_RANGE_OVERLAP(overlap_) \
|
||||
( ((overlap_)->start) && (((overlap_)->size)>0) )
|
||||
|
||||
#define CORE_ADDRESS_RANGE_OVERLAP_PARTIAL(overlap_, ref_) \
|
||||
( ((overlap_)->start) && (((overlap_)->size)<((ref_)->size)) )
|
||||
|
||||
#define CORE_MEMORY_OVERLAP_INIT(partial_, range_, chain_id_, tensor_id_) { \
|
||||
.partial = (partial_), .range = AI_PACK(range_), \
|
||||
.chain_id = (chain_id_), .tensor_id = (tensor_id_) \
|
||||
}
|
||||
|
||||
#define CORE_OFFSET(offset_, max_) \
|
||||
((ai_i32)(((offset_)<0) ? AI_MAX((max_) - (offset_), 0) : AI_MIN(offset_, max_)))
|
||||
|
||||
/*****************************************************************************/
|
||||
/** Network Context Handlers **/
|
||||
/*****************************************************************************/
|
||||
|
||||
/*****************************************************************************/
|
||||
/** Network Tensors Handlers **/
|
||||
/*****************************************************************************/
|
||||
#define AI_TENSOR_HAS_INTQ_INFO \
|
||||
AI_BUFFER_META_HAS_INTQ_INFO
|
||||
|
||||
#define CORE_TENSOR_GET_SHAPE_SIZE(tensor_) \
|
||||
ai_shape_get_size(AI_TENSOR_SHAPE(tensor_))
|
||||
|
||||
|
||||
#define CORE_ASSERT_SHAPE_MATCH(x, y) \
|
||||
do { \
|
||||
AI_ASSERT(AI_SHAPE_H(y) == 1 || AI_SHAPE_H(x)==1 || AI_SHAPE_H(y)==AI_SHAPE_H(x)) \
|
||||
AI_ASSERT(AI_SHAPE_W(y) == 1 || AI_SHAPE_W(x)==1 || AI_SHAPE_W(y)==AI_SHAPE_W(x)) \
|
||||
AI_ASSERT(AI_SHAPE_D(y) == 1 || AI_SHAPE_D(x)==1 || AI_SHAPE_D(y)==AI_SHAPE_D(x)) \
|
||||
AI_ASSERT(AI_SHAPE_E(y) == 1 || AI_SHAPE_E(x)==1 || AI_SHAPE_E(y)==AI_SHAPE_E(x)) \
|
||||
AI_ASSERT(AI_SHAPE_CH(y) == 1 || AI_SHAPE_CH(x)==1|| AI_SHAPE_CH(y)==AI_SHAPE_CH(x)) \
|
||||
AI_ASSERT(AI_SHAPE_IN_CH(y) == 1 || AI_SHAPE_IN_CH(x)==1|| AI_SHAPE_IN_CH(y)==AI_SHAPE_IN_CH(x)) \
|
||||
} while(0);
|
||||
|
||||
|
||||
#define AI_TENSOR_ARRAY_BYTE_SIZE(t_) \
|
||||
AI_ARRAY_OBJ_BYTE_SIZE(AI_ARRAY_OBJ(t_->data))
|
||||
|
||||
#define AI_TENSOR_ARRAY_GET_DATA_ADDR(t_) \
|
||||
AI_HANDLE_PTR(AI_ARRAY_OBJ_DATA_START(t_->data, void))
|
||||
|
||||
#define AI_TENSOR_ARRAY_UPDATE_DATA_ADDR(t_, addr_) \
|
||||
{ ai_array *arr_ = AI_ARRAY_OBJ(t_->data); \
|
||||
const uintptr_t off_ = (uintptr_t)arr_->data - (uintptr_t)arr_->data_start; \
|
||||
arr_->data_start = AI_PTR(addr_); \
|
||||
arr_->data = AI_PTR((uintptr_t)addr_ + off_); \
|
||||
}
|
||||
|
||||
#define AI_TENSOR_INTEGER_GET_SIZE(t_) \
|
||||
((t_->klass) ? (AI_KLASS_GET_INTQ_INFO_LIST(t_))->size : 0)
|
||||
|
||||
#define AI_TENSOR_INTEGER_GET_SCALE(t_, idx_) \
|
||||
AI_INTQ_INFO_LIST_SCALE(AI_KLASS_GET_INTQ_INFO_LIST(t_), ai_float, idx_)
|
||||
|
||||
#define AI_TENSOR_INTEGER_GET_ZEROPOINT_I8(t_, idx_) \
|
||||
AI_INTQ_INFO_LIST_ZEROPOINT(AI_KLASS_GET_INTQ_INFO_LIST(t_), ai_i8, idx_)
|
||||
|
||||
#define AI_TENSOR_INTEGER_GET_ZEROPOINT_U8(t_, idx_) \
|
||||
AI_INTQ_INFO_LIST_ZEROPOINT(AI_KLASS_GET_INTQ_INFO_LIST(t_), ai_u8, idx_)
|
||||
|
||||
#define AI_TENSOR_FMT_GET_SIGN(t_) \
|
||||
AI_BUFFER_FMT_GET_SIGN(AI_ARRAY_OBJ(t_->data)->format)
|
||||
|
||||
#define AI_TENSOR_FMT_GET_BITS(t_) \
|
||||
AI_BUFFER_FMT_GET_BITS(AI_ARRAY_OBJ(t_->data)->format)
|
||||
|
||||
#define AI_TENSOR_FMT_GET_FBITS(t_) \
|
||||
AI_BUFFER_FMT_GET_FBITS(AI_ARRAY_OBJ(t_->data)->format)
|
||||
|
||||
#define AI_TENSOR_FMT_GET_TYPE(t_) \
|
||||
AI_BUFFER_FMT_GET_TYPE(AI_ARRAY_OBJ(t_->data)->format)
|
||||
|
||||
#define AI_TENSOR_GET_FMT(t_) \
|
||||
AI_FMT_OBJ(AI_ARRAY_OBJ(t_->data)->format)
|
||||
|
||||
|
||||
/*****************************************************************************/
|
||||
/** Network Buffers Handlers **/
|
||||
/*****************************************************************************/
|
||||
#define AI_FOR_EACH_BUFFER_ARRAY_ITEM(buffer_ptr_, buffer_array_ptr_, start_pos_, end_pos_) \
|
||||
ai_buffer* buffer_ptr_ = AI_BUFFER_ARRAY_ITEM(buffer_array_ptr_, \
|
||||
CORE_OFFSET(end_pos_, AI_BUFFER_ARRAY_SIZE(buffer_array_ptr_))); \
|
||||
for ( ; buffer_ptr_ && AI_BUFFER_ARRAY_SIZE(buffer_array_ptr_) && \
|
||||
(buffer_ptr_>=AI_BUFFER_ARRAY_ITEM(buffer_array_ptr_, \
|
||||
CORE_OFFSET(start_pos_, AI_BUFFER_ARRAY_SIZE(buffer_array_ptr_)))); buffer_ptr_--)
|
||||
|
||||
/*****************************************************************************/
|
||||
/** Network Arrays Handlers **/
|
||||
/*****************************************************************************/
|
||||
#define AI_ARRAY_OBJ_FMT(array_) \
|
||||
AI_FMT_OBJ(AI_ARRAY_OBJ(array_)->format)
|
||||
|
||||
#define AI_ARRAY_OBJ_FMT_GET(array_) \
|
||||
AI_FMT_GET(AI_ARRAY_OBJ_FMT(array_))
|
||||
|
||||
#define AI_ARRAY_OBJ_SIZE(array_) \
|
||||
(AI_ARRAY_OBJ(array_)->size)
|
||||
|
||||
#define AI_ARRAY_OBJ_BYTE_SIZE(array_) \
|
||||
AI_SIZE(AI_ARRAY_GET_BYTE_SIZE(AI_ARRAY_OBJ_FMT(array_), \
|
||||
AI_ARRAY_OBJ_SIZE(array_)))
|
||||
|
||||
#define AI_ARRAY_OBJ_DATA_SIZE(array_) \
|
||||
AI_ARRAY_GET_DATA_BYTE_SIZE(AI_ARRAY_OBJ_FMT(array_), \
|
||||
AI_ARRAY_OBJ_SIZE(array_))
|
||||
|
||||
#define AI_ARRAY_OBJ_DATA(array_, type_) \
|
||||
AI_CAST(type_*, AI_ARRAY_OBJ(array_)->data)
|
||||
|
||||
#define AI_ARRAY_OBJ_DATA_START(array_, type_) \
|
||||
AI_CAST(type_*, AI_ARRAY_OBJ(array_)->data_start)
|
||||
|
||||
#define AI_ARRAY_OBJ_ELEM(array_, type_, pos_) \
|
||||
AI_ARRAY_OBJ_DATA(array_, type_)[(pos_)]
|
||||
|
||||
|
||||
/*****************************************************************************/
|
||||
/** Network Tensors Chains / Lists Handlers **/
|
||||
/*****************************************************************************/
|
||||
#define SET_TENSOR_IN(chain_, pos_) \
|
||||
(GET_TENSOR_LIST_IN(chain_)->tensor[(pos_)])
|
||||
|
||||
#define SET_TENSOR_OUT(chain_, pos_) \
|
||||
(GET_TENSOR_LIST_OUT(chain_)->tensor[(pos_)])
|
||||
|
||||
#define AI_NODE_IO_GET(node_, in_, out_) \
|
||||
ASSERT_NODE_SANITY(node_) \
|
||||
ai_tensor* in_ = GET_TENSOR_IN((node_)->tensors, 0); \
|
||||
ai_tensor* out_ = GET_TENSOR_OUT((node_)->tensors, 0); \
|
||||
ASSERT_TENSOR_SANITY(in_) \
|
||||
ASSERT_TENSOR_SANITY(out_)
|
||||
|
||||
|
||||
/*****************************************************************************/
|
||||
#define AI_BITS_TO_BYTES(bits_) \
|
||||
(((bits_)+0x7) >> 3)
|
||||
|
||||
#define AI_BYTES_TO_BITS(bytes_) \
|
||||
((bytes_) << 3)
|
||||
|
||||
|
||||
/*****************************************************************************/
|
||||
/** Network Nodes Handlers **/
|
||||
/*****************************************************************************/
|
||||
#define AI_NODE_IS_FIRST(node) \
|
||||
(AI_NODE_OBJ(node)==AI_NODE_OBJ(AI_NODE_OBJ(node)->network->input_node))
|
||||
|
||||
#define AI_NODE_IS_LAST(node_) \
|
||||
((AI_NODE_OBJ(node_)==AI_NODE_OBJ(node_)->next) || \
|
||||
(AI_NODE_OBJ(node_)->next==NULL))
|
||||
|
||||
#define AI_FOR_EACH_NODE_DO(node_, nodes_) \
|
||||
for (ai_node* node_ = AI_NODE_OBJ(nodes_); (node_); \
|
||||
node_ = ((AI_NODE_IS_LAST(node_)) ? NULL : (node_)->next))
|
||||
|
||||
/*****************************************************************************/
|
||||
typedef struct {
|
||||
ai_ptr start;
|
||||
ai_size size;
|
||||
} ai_address_range;
|
||||
|
||||
typedef struct {
|
||||
ai_address_range range;
|
||||
ai_u16 chain_id;
|
||||
ai_u16 tensor_id;
|
||||
ai_bool partial;
|
||||
} ai_memory_overlap;
|
||||
|
||||
/*****************************************************************************/
|
||||
AI_DECLARE_STATIC
|
||||
ai_address_range core_address_range_init(
|
||||
const ai_handle start, const ai_handle end)
|
||||
{
|
||||
ai_address_range r;
|
||||
|
||||
r.start = (ai_ptr)((start<end) ? start : end);
|
||||
r.size = (ai_size) ((start<end)
|
||||
? ((ai_uptr)end-(ai_uptr)start) : ((ai_uptr)start-(ai_uptr)end));
|
||||
return r;
|
||||
}
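/*
 * Illustrative sketch (not part of this header): core_address_range_init()
 * normalizes its two bounds, so callers need not order them;
 * CORE_ADDRESS_RANGE_END() then yields the exclusive upper address. The
 * overlap test below is an assumption for demonstration purposes.
 */
AI_DECLARE_STATIC
ai_bool core_address_range_intersects_example(
  const ai_handle a_start, const ai_handle a_end,
  const ai_handle b_start, const ai_handle b_end)
{
  const ai_address_range a = CORE_ADDRESS_RANGE_INIT(a_start, a_end);
  const ai_address_range b = CORE_ADDRESS_RANGE_INIT(b_start, b_end);
  return (a.start < CORE_ADDRESS_RANGE_END(&b)) &&
         (b.start < CORE_ADDRESS_RANGE_END(&a));
}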
|
||||
|
||||
AI_DECLARE_STATIC
|
||||
ai_buffer_meta_info* core_get_buffer_meta_info(
|
||||
ai_buffer_meta_info* meta,
|
||||
const ai_tensor* t)
|
||||
{
|
||||
if (!meta) return NULL;
|
||||
AI_ASSERT(t && t->data)
|
||||
ai_bool ok;
|
||||
|
||||
meta->flags = 0x0;
|
||||
meta->intq_info = AI_KLASS_GET_INTQ_INFO_LIST(t);
|
||||
ok = (meta->intq_info && (meta->intq_info->size>0));
|
||||
meta->flags |= (ok) ? AI_BUFFER_META_HAS_INTQ_INFO : 0x0;
|
||||
return (ok) ? meta : NULL;
|
||||
}
|
||||
|
||||
|
||||
#if 0
|
||||
#include <stdio.h>
|
||||
#include <stdarg.h>
|
||||
|
||||
AI_DECLARE_STATIC
|
||||
void _dump_file_print(
|
||||
const char* fname, const char* fmt, ...)
|
||||
{
|
||||
static FILE* fp = NULL;
|
||||
if (fname) {
|
||||
if (!fp) {
|
||||
fp = fopen(fname, "a");
|
||||
}
|
||||
}
|
||||
|
||||
if (fp) {
|
||||
va_list args;
|
||||
va_start(args, fmt);
|
||||
vfprintf(fp, fmt, args);
|
||||
va_end(args);
|
||||
fflush(fp);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
AI_DECLARE_STATIC
|
||||
void _dump_bytearray(
|
||||
const char* fname,
|
||||
const ai_handle src, const ai_size src_size, const ai_u8 src_id,
|
||||
const char* name)
|
||||
{
|
||||
static FILE* fp = NULL;
|
||||
if (fname && src && (src_size>0)) {
|
||||
if (!fp) {
|
||||
fp = fopen(fname, "a");
|
||||
}
|
||||
}
|
||||
|
||||
if (fp) {
|
||||
switch (src_id) {
|
||||
case 1:
|
||||
{
|
||||
const ai_float* src_value = (const ai_float*)src;
|
||||
fprintf(fp, "ai_float %s[%u] = {%f", name, src_size, src_value[0]);
|
||||
for (ai_size i=1; i<src_size; i++) { fprintf(fp, ", %f", src_value[i]); }
|
||||
} break;
|
||||
case 2:
|
||||
{
|
||||
const ai_i8* src_value = (const ai_i8*)src;
|
||||
fprintf(fp, "ai_i8 %s[%u] = {%d", name, src_size, src_value[0]);
|
||||
for (ai_size i=1; i<src_size; i++) { fprintf(fp, ", %d", src_value[i]); }
|
||||
} break;
|
||||
case 3:
|
||||
{
|
||||
const ai_u8* src_value = (const ai_u8*)src;
|
||||
fprintf(fp, "ai_u8 %s[%u] = {%u", name, src_size, src_value[0]);
|
||||
for (ai_size i=1; i<src_size; i++) { fprintf(fp, ", %u", src_value[i]); }
|
||||
} break;
|
||||
default:
|
||||
fprintf(fp, "format not supported: %u {", src_id);
|
||||
break;
|
||||
}
|
||||
fprintf(fp, "};\n");
|
||||
fflush(fp);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /* CORE_PRIVATE_H */
|
||||
53
lib/stai/libstai/include/datatypes_network.h
Normal file
@ -0,0 +1,53 @@
|
||||
/**
|
||||
******************************************************************************
|
||||
* @file datatypes_network.h
|
||||
* @author AST Embedded Analytics Research Platform
|
||||
* @brief Definitions of code generated network types
|
||||
******************************************************************************
|
||||
* @attention
|
||||
*
|
||||
* Copyright (c) 2017 STMicroelectronics.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This software is licensed under terms that can be found in the LICENSE file
|
||||
* in the root directory of this software component.
|
||||
* If no LICENSE file comes with this software, it is provided AS-IS.
|
||||
*
|
||||
******************************************************************************
|
||||
*/
|
||||
#ifndef DATATYPES_NETWORK_H
|
||||
#define DATATYPES_NETWORK_H
|
||||
|
||||
/*
|
||||
* Header to be overridden by the generated version.
* By including it with <>, the include directories are searched in the order
* specified to the compiler.
* To enable the override, put the generated path before the API path.
|
||||
*/
|
||||
|
||||
#include "ai_platform.h"
|
||||
|
||||
AI_API_DECLARE_BEGIN
|
||||
|
||||
#ifdef AI_OVERRIDE_CUSTOM_TYPES
|
||||
#warning "Warning: Custom Types have been already defined!\n"
|
||||
#endif
|
||||
|
||||
#define AI_CUSTOM_TYPES_COUNT (3)
|
||||
|
||||
#define AI_CUSTOM_TYPES_SIGNATURE_DECLARE(name) \
|
||||
const ai_custom_type_signature name[AI_CUSTOM_TYPES_COUNT+1] = { \
|
||||
AI_CUSTOM_TYPES_COUNT, \
|
||||
AI_CUSTOM_SIZE(ai_shape_dimension), \
|
||||
AI_CUSTOM_SIZE(ai_stride_dimension), \
|
||||
AI_CUSTOM_SIZE(ai_array_size), \
|
||||
};
|
||||
|
||||
|
||||
typedef ai_i32 ai_stride_dimension;
|
||||
typedef ai_u32 ai_array_size;
|
||||
|
||||
|
||||
AI_API_DECLARE_END
|
||||
|
||||
#endif /* DATATYPES_NETWORK_H */
|
||||
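A hedged sketch of how the signature macro above is meant to be consumed; the variable name is an illustrative assumption, and ai_check_custom_types() is the checker declared later in layers_common.h:

#include "datatypes_network.h"
#include "layers_common.h"   /* declares ai_check_custom_types() */

/* Declares const ai_custom_type_signature g_net_type_signature[4]:
 * the count (3) followed by the sizes of the three custom types. */
AI_CUSTOM_TYPES_SIGNATURE_DECLARE(g_net_type_signature)

static ai_bool net_types_match_runtime(void)
{
    /* false means the generated header and the compiled runtime disagree
     * on at least one of the three type sizes. */
    return ai_check_custom_types(g_net_type_signature);
}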
40
lib/stai/libstai/include/ec.h
Normal file
@ -0,0 +1,40 @@
|
||||
/**
|
||||
******************************************************************************
|
||||
* @file ec.h
|
||||
* @author SRA Artificial Intelligence & Embedded Architectures
|
||||
* @brief Header file of Epoch Controller Blobs.
|
||||
******************************************************************************
|
||||
* @attention
|
||||
*
|
||||
* Copyright (c) 2024 STMicroelectronics.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This software is licensed under terms that can be found in the LICENSE file
|
||||
* in the root directory of this software component.
|
||||
* If no LICENSE file comes with this software, it is provided AS-IS.
|
||||
*
|
||||
******************************************************************************
|
||||
*/
|
||||
|
||||
#ifndef __EC_H
|
||||
#define __EC_H
|
||||
|
||||
#include <inttypes.h>
|
||||
|
||||
/** Magic number of the Epoch Controller binary file. */
|
||||
#define ECASM_BINARY_MAGIC 0xECBF0020
|
||||
|
||||
/** Magic number of the Epoch Controller program. */
|
||||
#define ECASM_PROGRAM_MAGIC 0xCA057A7A
|
||||
|
||||
/** Type containing an Epoch Controller instruction. */
|
||||
typedef uint32_t ECInstr;
|
||||
|
||||
/** Type containing an address of an Epoch Controller instruction. */
|
||||
typedef uint32_t ECAddr;
|
||||
|
||||
/** Type used for each entry of the Epoch Controller binary file: magic number, number of elements, file and instruction
|
||||
* offsets. */
|
||||
typedef uint32_t ECFileEntry;
|
||||
|
||||
#endif // #ifndef __EC_H
|
||||
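A hedged sanity-check sketch for an Epoch Controller blob already resident in memory; the assumption that the magic word is stored in the first ECFileEntry of the blob is ours, the header above only defines the constants and types:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include "ec.h"

static bool ec_blob_looks_valid(const uint8_t *blob, size_t size)
{
    ECFileEntry magic;
    if ((blob == NULL) || (size < sizeof(ECFileEntry))) {
        return false;
    }
    memcpy(&magic, blob, sizeof(magic));   /* tolerate unaligned blobs */
    return (magic == ECASM_BINARY_MAGIC);
}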
69
lib/stai/libstai/include/ecloader.h
Normal file
@ -0,0 +1,69 @@
|
||||
/**
|
||||
******************************************************************************
|
||||
* @file ecloader.h
|
||||
* @author SRA Artificial Intelligence & Embedded Architectures
|
||||
* @brief Header file of Epoch Controller Blobs Loader.
|
||||
******************************************************************************
|
||||
* @attention
|
||||
*
|
||||
* Copyright (c) 2024 STMicroelectronics.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This software is licensed under terms that can be found in the LICENSE file
|
||||
* in the root directory of this software component.
|
||||
* If no LICENSE file comes with this software, it is provided AS-IS.
|
||||
*
|
||||
******************************************************************************
|
||||
*/
|
||||
|
||||
#ifndef __ECLOADER_H
|
||||
#define __ECLOADER_H
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C"
|
||||
{
|
||||
#endif
|
||||
|
||||
#include <inttypes.h>
|
||||
#include <stdbool.h>
|
||||
|
||||
#include "ec.h"
|
||||
|
||||
#ifdef USE_FILES
|
||||
|
||||
// return the size of a file.
|
||||
extern long ec_file_size(const char *path);
|
||||
|
||||
// copy a file to memory
|
||||
extern bool ec_copy_file(const char *path, uint8_t *ptr);
|
||||
|
||||
#endif /* #ifdef USE_FILES */
|
||||
|
||||
// copy to memory the Epoch Controller program contained in an Epoch Controller binary
|
||||
extern bool ec_copy_program(const uint8_t *file_ptr, ECInstr *program, unsigned int *program_size);
|
||||
|
||||
// copy to memory the relocation table contained in an Epoch Controller binary
|
||||
extern bool ec_copy_reloc_table(const uint8_t *file_ptr, ECFileEntry *reloc_table, unsigned int *reloc_table_size);
|
||||
|
||||
// get the pointer to the relocation table contained in an Epoch Controller binary
|
||||
extern const ECFileEntry *ec_get_reloc_table_ptr(const uint8_t *file_ptr);
|
||||
|
||||
// return the number of different relocations contained in an Epoch Controller binary
|
||||
extern unsigned int ec_get_num_relocs(const ECFileEntry *reloc_table_ptr);
|
||||
|
||||
// return the identifier of a relocation contained in an Epoch Controller binary
|
||||
extern const char *ec_get_reloc_id(const ECFileEntry *reloc_table_ptr, unsigned int idx);
|
||||
|
||||
// relocate all the values associated with a relocation specified by using an index
|
||||
extern bool ec_reloc(const ECFileEntry *reloc_table_ptr, ECInstr *program, unsigned int idx, ECAddr base,
|
||||
ECAddr *prev_base);
|
||||
|
||||
// relocate all the values associated with a relocation specified by using an identifier
|
||||
extern bool ec_reloc_by_id(const ECFileEntry *reloc_table_ptr, ECInstr *program, const char *id, ECAddr base,
|
||||
ECAddr *prev_base);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif // #ifndef __ECLOADER_H
|
||||
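A hedged sketch of the load-and-relocate sequence these declarations suggest. The program buffer size, the "activations" relocation identifier, and the in/out meaning of program_size are assumptions made for illustration only:

#include <stdbool.h>
#include <stdint.h>
#include "ecloader.h"

#define EC_MAX_PROGRAM_WORDS  (2048u)            /* hypothetical upper bound */
static ECInstr ec_program[EC_MAX_PROGRAM_WORDS];

static bool load_ec_blob(const uint8_t *blob, ECAddr activations_base)
{
    unsigned int program_size = EC_MAX_PROGRAM_WORDS;  /* assumed in/out capacity */
    if (!ec_copy_program(blob, ec_program, &program_size)) {
        return false;
    }
    const ECFileEntry *reloc = ec_get_reloc_table_ptr(blob);
    if (reloc == NULL) {
        return false;
    }
    /* Patch every instruction that references the activation buffer. */
    ECAddr prev_base = 0;
    return ec_reloc_by_id(reloc, ec_program, "activations",
                          activations_base, &prev_base);
}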
94
lib/stai/libstai/include/formats_list.h
Normal file
@ -0,0 +1,94 @@
|
||||
/**
|
||||
******************************************************************************
|
||||
* @file formats_list.h
|
||||
* @author AST Embedded Analytics Research Platform
|
||||
* @brief Definitions of AI platform public APIs types
|
||||
******************************************************************************
|
||||
* @attention
|
||||
*
|
||||
* Copyright (c) 2019 STMicroelectronics.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This software is licensed under terms that can be found in the LICENSE file
|
||||
* in the root directory of this software component.
|
||||
* If no LICENSE file comes with this software, it is provided AS-IS.
|
||||
*
|
||||
******************************************************************************
|
||||
*/
|
||||
|
||||
/* FMT_ENTRY( exp_(0/1 only), name_, type_id_,
|
||||
* sign_bit_, complex_bit_, pmask_, bits_, fbits_, ldiv_bits_)
|
||||
* Specifications (in order of the bit fields, little endian):
|
||||
- name_ : it is the enum used to define both the ai_array_format and
|
||||
ai_buffer_format.
|
||||
- exp_ (1bit) : it is a boolean flag (0 or 1) indicating whether the format
|
||||
is available as a public API ai_buffer format. In this case the field
|
||||
exp_name_ indicates the enum name of the ai_buffer format
|
||||
- (7 bits): reserved for flags
|
||||
- sign_bit_ (1bit) : codes whether or not the format is of a signed type
|
||||
- complex_bit_ (1bit) : codes if the format is of a complex type
|
||||
- ldiv_bits (2 bits) : right shift value for computing the byte size of the
|
||||
format
|
||||
- type_id_ (4bits) : it is used to define the "family" of the format:
|
||||
see @ref AI_FMT_Q as an example. Currently supported types are:
|
||||
AI_FMT_Q (fixed point types), AI_FMT_FLOAT (floating point values),
|
||||
AI_FMT_LUT_FLOAT or AI_FMT_LUT_Q (compressed formats), AI_FMT_BOOL (boolean)
|
||||
- pmask_ (3bits) : padding mask bits for the format
|
||||
- bits_ (7bits) : size in bits of the format (NB: integer+fractional bits)
|
||||
- fbits_ (7bits) : number of fractional bits for the format
|
||||
(for AI_FMT_Q only)
|
||||
|
||||
*/
|
||||
|
||||
/* Format none entry */
|
||||
FMT_ENTRY(1, NONE, AI_FMT_NONE, 0, 0, 0x0, 0, 0, 0)
|
||||
|
||||
/* Floating point formats */
|
||||
FMT_ENTRY(1, FLOAT, AI_FMT_FLOAT, 1, 0, 0x0, 32, 0, 0)
|
||||
FMT_ENTRY(0, FLOAT64, AI_FMT_FLOAT, 1, 0, 0x0, 64, 0, 0)
|
||||
FMT_ENTRY(0, FLOAT16, AI_FMT_FLOAT, 1, 0, 0x0, 16, 0, 0)
|
||||
|
||||
/* Integer formats (i.e. fractional bits = 0!) */
|
||||
FMT_ENTRY(1, U8, AI_FMT_Q, 0, 0, 0x0, 8, 0, 0)
|
||||
FMT_ENTRY(1, U16, AI_FMT_Q, 0, 0, 0x0, 16, 0, 0)
|
||||
FMT_ENTRY(1, U32, AI_FMT_Q, 0, 0, 0x0, 32, 0, 0)
|
||||
FMT_ENTRY(0, U64, AI_FMT_Q, 0, 0, 0x0, 64, 0, 0)
|
||||
FMT_ENTRY(1, U1, AI_FMT_Q, 0, 0, 0x0, 1, 0, 0)
|
||||
FMT_ENTRY(0, U4, AI_FMT_Q, 0, 0, 0x0, 4, 0, 0)
|
||||
|
||||
FMT_ENTRY(1, S8, AI_FMT_Q, 1, 0, 0x0, 8, 0, 0)
|
||||
FMT_ENTRY(1, S16, AI_FMT_Q, 1, 0, 0x0, 16, 0, 0)
|
||||
FMT_ENTRY(1, S32, AI_FMT_Q, 1, 0, 0x0, 32, 0, 0)
|
||||
FMT_ENTRY(0, S64, AI_FMT_Q, 1, 0, 0x0, 64, 0, 0)
|
||||
FMT_ENTRY(1, S1, AI_FMT_Q, 1, 0, 0x0, 1, 0, 0)
|
||||
FMT_ENTRY(0, S4, AI_FMT_Q, 1, 0, 0x0, 4, 0, 0)
|
||||
|
||||
/* Fixed-point formats including ARM CMSIS Q7, Q15, Q31 ones */
|
||||
FMT_ENTRY(1, Q, AI_FMT_Q, 1, 0, 0x0, 0, 0, 0)
|
||||
FMT_ENTRY(1, Q7, AI_FMT_Q, 1, 0, 0x0, 8, 7, 0)
|
||||
FMT_ENTRY(1, Q15, AI_FMT_Q, 1, 0, 0x0, 16, 15, 0)
|
||||
FMT_ENTRY(0, Q31, AI_FMT_Q, 1, 0, 0x0, 32, 31, 0)
|
||||
|
||||
FMT_ENTRY(1, UQ, AI_FMT_Q, 0, 0, 0x0, 0, 0, 0)
|
||||
FMT_ENTRY(1, UQ7, AI_FMT_Q, 0, 0, 0x0, 8, 7, 0)
|
||||
FMT_ENTRY(1, UQ15, AI_FMT_Q, 0, 0, 0x0, 16, 15, 0)
|
||||
FMT_ENTRY(0, UQ31, AI_FMT_Q, 0, 0, 0x0, 32, 31, 0)
|
||||
|
||||
/* Compressed formats */
|
||||
FMT_ENTRY(0, LUT4_FLOAT, AI_FMT_LUT_FLOAT, 1, 0, 0x0, 32, 0, 3)
|
||||
FMT_ENTRY(0, LUT8_FLOAT, AI_FMT_LUT_FLOAT, 1, 0, 0x0, 32, 0, 2)
|
||||
FMT_ENTRY(0, LUT4_Q15, AI_FMT_LUT_Q, 1, 0, 0x0, 16, 15, 2)
|
||||
FMT_ENTRY(0, LUT8_Q15, AI_FMT_LUT_Q, 1, 0, 0x0, 16, 15, 1)
|
||||
FMT_ENTRY(0, LUT4_UQ15, AI_FMT_LUT_Q, 0, 0, 0x0, 16, 15, 2)
|
||||
FMT_ENTRY(0, LUT8_UQ15, AI_FMT_LUT_Q, 0, 0, 0x0, 16, 15, 1)
|
||||
|
||||
/* Boolean format */
|
||||
FMT_ENTRY(1, BOOL, AI_FMT_BOOL, 0, 0, 0x0, 8, 0, 0)
|
||||
|
||||
/* Complex formats */
|
||||
FMT_ENTRY(0, COMPLEX_FLOAT64, AI_FMT_FLOAT, 1, 1, 0x0, 64, 0, 0)
|
||||
FMT_ENTRY(0, COMPLEX_S64, AI_FMT_Q, 1, 1, 0x0, 64, 0, 0)
|
||||
FMT_ENTRY(0, COMPLEX_S32, AI_FMT_Q, 1, 1, 0x0, 32, 0, 0)
|
||||
FMT_ENTRY(0, COMPLEX_S16, AI_FMT_Q, 1, 1, 0x0, 16, 0, 0)
|
||||
|
||||
#undef FMT_ENTRY
|
||||
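The list above is an X-macro table: the includer defines FMT_ENTRY, includes the file, and the trailing #undef cleans up, in the same way layers_common.h consumes LAYER_ENTRY/layers_list.h. A hedged sketch with an illustrative enum prefix:

/* Generates one enumerator per format; the FMT_ prefix and FMT_DEMO_COUNT are
 * ours, not the naming the runtime uses for ai_array_format / ai_buffer_format. */
enum demo_format_name {
#define FMT_ENTRY(exp_, name_, type_id_, sign_bit_, complex_bit_, \
                  pmask_, bits_, fbits_, ldiv_bits_)              \
    FMT_ ## name_,
#include "formats_list.h"
    FMT_DEMO_COUNT
};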
76
lib/stai/libstai/include/layers.h
Normal file
@ -0,0 +1,76 @@
|
||||
/**
|
||||
******************************************************************************
|
||||
* @file layers.h
|
||||
* @author STMicroelectronics
|
||||
* @brief header file of AI platform layers datatypes
|
||||
******************************************************************************
|
||||
* @attention
|
||||
*
|
||||
* Copyright (c) 2017 STMicroelectronics.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This software is licensed under terms that can be found in the LICENSE file
|
||||
* in the root directory of this software component.
|
||||
* If no LICENSE file comes with this software, it is provided AS-IS.
|
||||
*
|
||||
******************************************************************************
|
||||
*/
|
||||
|
||||
#ifndef LAYERS_H
|
||||
#define LAYERS_H
|
||||
|
||||
#include "layers_common.h"
|
||||
#include "layers_conv2d.h"
|
||||
#include "layers_custom.h"
|
||||
#include "layers_dense.h"
|
||||
#include "layers_formats_converters.h"
|
||||
#include "layers_generic.h"
|
||||
#include "layers_lite_graph.h"
|
||||
#include "layers_nl.h"
|
||||
#include "layers_norm.h"
|
||||
#include "layers_pad_dqnn.h"
|
||||
#include "layers_pad_generic.h"
|
||||
#include "layers_pool.h"
|
||||
#include "layers_rnn.h"
|
||||
#include "layers_sm.h"
|
||||
#include "layers_ml.h"
|
||||
#include "layers_ml_iforest.h"
|
||||
#include "layers_ml_svc.h"
|
||||
#include "layers_ml.h"
|
||||
#include "layers_ml_linearclassifier.h"
|
||||
#include "layers_ml_treeensembleclassifier.h"
|
||||
#include "layers_ml_treeensembleregressor.h"
|
||||
#include "layers_ml_svmregressor.h"
|
||||
|
||||
#include "layers_conv2d_dqnn.h"
|
||||
#include "layers_dense_dqnn.h"
|
||||
#include "layers_pool_dqnn.h"
|
||||
#include "layers_generic_dqnn.h"
|
||||
#include "layers_upsample_generic.h"
|
||||
#include "layers_upsample.h"
|
||||
#include "layers_resize.h"
|
||||
#include "layers_argminmax.h"
|
||||
#include "layers_wrappers.h"
|
||||
#include "ai_math_helpers.h"
|
||||
|
||||
|
||||
AI_API_DECLARE_BEGIN
|
||||
|
||||
/*!
|
||||
* @struct ai_any_layer_ptr
|
||||
* @ingroup layers
|
||||
* @brief Generic union for typed layers pointers
|
||||
*/
|
||||
typedef struct {
|
||||
ai_layer_type type; /*!< layer type id (see @ref ai_layer_type) */
|
||||
union {
|
||||
#define LAYER_ENTRY(type_, id_, struct_, forward_func_, init_func_, destroy_func_) \
|
||||
AI_CONCAT(ai_layer_, struct_)* struct_;
|
||||
#include "layers_list.h"
|
||||
};
|
||||
} ai_any_layer_ptr;
|
||||
|
||||
|
||||
AI_API_DECLARE_END
|
||||
|
||||
#endif /*LAYERS_H*/
|
||||
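A hedged sketch of how ai_any_layer_ptr lets a node be viewed through its concrete layer type; the conv2d union member, the AI_LAYER_CONV2D_TYPE enumerator, and the groups field are assumed to be generated by layers_list.h and layers_conv2d.h:

#include "layers.h"

static ai_u32 layer_groups_or_zero(const ai_any_layer_ptr any)
{
    if (any.type == AI_LAYER_CONV2D_TYPE) {
        return any.conv2d->groups;   /* field from AI_LAYER_CONV2D_FIELDS_DECLARE */
    }
    return 0;
}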
49
lib/stai/libstai/include/layers_argminmax.h
Normal file
@ -0,0 +1,49 @@
|
||||
|
||||
/**
|
||||
******************************************************************************
|
||||
* @file layers_argminmax.h
|
||||
* @author AIS
|
||||
* @brief header file of AI platform generic layers datatypes
|
||||
******************************************************************************
|
||||
* @attention
|
||||
*
|
||||
* Copyright (c) 2023 STMicroelectronics.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This software is licensed under terms that can be found in the LICENSE file
|
||||
* in the root directory of this software component.
|
||||
* If no LICENSE file comes with this software, it is provided AS-IS.
|
||||
*
|
||||
******************************************************************************
|
||||
*/
|
||||
#ifndef LAYERS_ARGMINMAX_H
|
||||
#define LAYERS_ARGMINMAX_H
|
||||
|
||||
#include "layers_generic.h"
|
||||
|
||||
|
||||
AI_API_DECLARE_BEGIN
|
||||
|
||||
/******************************************************************************/
|
||||
/* Forward Functions Section */
|
||||
/******************************************************************************/
|
||||
|
||||
/*!
|
||||
* @brief Compute the indices of the max elements of the input tensor's elements along the provided axis.
|
||||
* @ingroup layers_generic
|
||||
* @param layer argminmax layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_argmax_is8(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Compute the indices of the min elements of the input tensor's elements along the provided axis.
|
||||
* @ingroup layers_generic
|
||||
* @param layer argminmax layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_argmin_is8(ai_layer* layer);
|
||||
|
||||
AI_API_DECLARE_END
|
||||
|
||||
#endif /*LAYERS_ARGMINMAX_H*/
|
||||
290
lib/stai/libstai/include/layers_common.h
Normal file
@ -0,0 +1,290 @@
|
||||
/**
|
||||
******************************************************************************
|
||||
* @file layers_common.h
|
||||
* @author AST Embedded Analytics Research Platform
|
||||
* @brief header file of AI platform layers datatypes
|
||||
******************************************************************************
|
||||
* @attention
|
||||
*
|
||||
* Copyright (c) 2017 STMicroelectronics.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This software is licensed under terms that can be found in the LICENSE file
|
||||
* in the root directory of this software component.
|
||||
* If no LICENSE file comes with this software, it is provided AS-IS.
|
||||
*
|
||||
******************************************************************************
|
||||
*/
|
||||
#ifndef LAYERS_COMMON_H
|
||||
#define LAYERS_COMMON_H
|
||||
|
||||
#ifdef USE_CYCLE_MEASUREMENTS
|
||||
#include "layers_cycles_estimation.h"
|
||||
#endif
|
||||
#include "ai_platform.h"
|
||||
#include "ai_common_config.h"
|
||||
|
||||
#include "core_common.h"
|
||||
|
||||
/* optimizations */
|
||||
#define AI_OPTIM_FUNC_MP_ARRAY_F32 (0)
|
||||
|
||||
|
||||
#define AI_LAYER_OBJ(obj_) \
|
||||
((ai_layer_base*)(obj_))
|
||||
|
||||
#define AI_LAYER_FUNC(func_) \
|
||||
((layer_func)(func_))
|
||||
|
||||
#define AI_LAYER_TYPE(type_) \
|
||||
( (ai_layer_type)((ai_u32)(type_)&0xFFFF) )
|
||||
|
||||
#define AI_LAYER_TYPE_ENTRY(type_) \
|
||||
AI_CONCAT(AI_CONCAT(AI_LAYER_, type_), _TYPE)
|
||||
|
||||
#define AI_LAYER_TYPE_NAME(type_) \
|
||||
ai_layer_type_name(AI_LAYER_TYPE(type_))
|
||||
|
||||
|
||||
#if (AI_TOOLS_API_VERSION <= AI_TOOLS_API_VERSION_1_3)
|
||||
#pragma message ("Including deprecated AI_LAYER_OBJ_INIT, AI_LAYER_OBJ_DECLARE")
|
||||
|
||||
AI_DEPRECATED
|
||||
#define AI_LAYER_OBJ_INIT(type_, id_, network_, \
|
||||
next_, forward_, ...) \
|
||||
{ \
|
||||
AI_NODE_COMMON_INIT(AI_CONCAT(AI_LAYER_, type_), id_, 0x0, \
|
||||
NULL, network_, next_, forward_), \
|
||||
## __VA_ARGS__ \
|
||||
}
|
||||
|
||||
AI_DEPRECATED
|
||||
#define AI_LAYER_OBJ_DECLARE(varname_, id_, type_, struct_, forward_func_, \
|
||||
network_, next_, attr_, ...) \
|
||||
AI_ALIGNED(4) \
|
||||
attr_ AI_CONCAT(ai_layer_, struct_) varname_ = \
|
||||
AI_LAYER_OBJ_INIT(type_, id_, network_, \
|
||||
next_, forward_func_, \
|
||||
## __VA_ARGS__);
|
||||
|
||||
#else
|
||||
|
||||
#define AI_LAYER_OBJ_INIT(type_, id_, flags_, klass_, network_, \
|
||||
next_, forward_, tensors_, ...) \
|
||||
{ \
|
||||
AI_NODE_COMMON_INIT(AI_CONCAT(AI_LAYER_, type_), id_, flags_, \
|
||||
klass_, network_, next_, forward_), \
|
||||
.tensors = (tensors_), \
|
||||
## __VA_ARGS__ \
|
||||
}
|
||||
|
||||
#define AI_LAYER_OBJ_DECLARE( \
|
||||
varname_, id_, \
|
||||
type_, flags_, klass_obj_, \
|
||||
struct_, forward_func_, \
|
||||
tensors_chain_, \
|
||||
network_, next_, attr_, ...) \
|
||||
AI_ALIGNED(4) \
|
||||
attr_ AI_CONCAT(ai_layer_, struct_) varname_ = \
|
||||
AI_LAYER_OBJ_INIT(type_, id_, flags_, klass_obj_, network_, \
|
||||
next_, forward_func_, tensors_chain_, ## __VA_ARGS__);
|
||||
|
||||
#endif /* AI_TOOLS_API_VERSION_1_3 */
|
||||
|
||||
#ifdef HAS_AI_ASSERT
|
||||
#define AI_LAYER_IO_GET(layer_, in_, out_) \
|
||||
ASSERT_LAYER_SANITY(layer_) \
|
||||
const ai_tensor* in_ = GET_TENSOR_IN((layer_)->tensors, 0); \
|
||||
ai_tensor* out_ = GET_TENSOR_OUT((layer_)->tensors, 0); \
|
||||
ASSERT_TENSOR_DATA_SANITY(in_) \
|
||||
ASSERT_TENSOR_DATA_SANITY(out_)
|
||||
|
||||
#define AI_LAYER_TENSOR_LIST_IO_GET(layer_, tlist_in_, tlist_out_) \
|
||||
ASSERT_LAYER_SANITY(layer_) \
|
||||
const ai_tensor_list* tlist_in_ = GET_TENSOR_LIST_IN((layer_)->tensors); \
|
||||
ai_tensor_list* tlist_out_ = GET_TENSOR_LIST_OUT((layer_)->tensors); \
|
||||
ASSERT_TENSOR_LIST_SANITY(tlist_in_) \
|
||||
ASSERT_TENSOR_LIST_SANITY(tlist_out_)
|
||||
|
||||
#define AI_LAYER_WEIGHTS_GET(layer_, weights_, bias_) \
|
||||
const ai_tensor* weights_ = GET_TENSOR_WEIGHTS((layer_)->tensors, 0); \
|
||||
const ai_tensor* bias_ = (GET_TENSOR_LIST_SIZE(GET_TENSOR_LIST_WEIGTHS((layer_)->tensors))>1) \
|
||||
? GET_TENSOR_WEIGHTS((layer_)->tensors, 1) \
|
||||
: NULL; \
|
||||
ASSERT_TENSOR_DATA_SANITY(weights_) \
|
||||
if (bias_) { ASSERT_TENSOR_DATA_SANITY(bias_) }
|
||||
#else
|
||||
#define AI_LAYER_IO_GET(layer_, in_, out_) \
|
||||
const ai_tensor* in_ = GET_TENSOR_IN((layer_)->tensors, 0); \
|
||||
ai_tensor* out_ = GET_TENSOR_OUT((layer_)->tensors, 0);
|
||||
|
||||
#define AI_LAYER_TENSOR_LIST_IO_GET(layer_, tlist_in_, tlist_out_) \
|
||||
const ai_tensor_list* tlist_in_ = GET_TENSOR_LIST_IN((layer_)->tensors); \
|
||||
ai_tensor_list* tlist_out_ = GET_TENSOR_LIST_OUT((layer_)->tensors);
|
||||
|
||||
#define AI_LAYER_WEIGHTS_GET(layer_, weights_, bias_) \
|
||||
const ai_tensor* weights_ = GET_TENSOR_WEIGHTS((layer_)->tensors, 0); \
|
||||
const ai_tensor* bias_ = (GET_TENSOR_LIST_SIZE(GET_TENSOR_LIST_WEIGTHS((layer_)->tensors))>1) \
|
||||
? GET_TENSOR_WEIGHTS((layer_)->tensors, 1) \
|
||||
: NULL; \
|
||||
|
||||
#endif /*HAS_AI_ASSERT*/
|
||||
|
||||
|
||||
AI_API_DECLARE_BEGIN
|
||||
|
||||
/*!
|
||||
* @defgroup layers_common Layers Common
|
||||
* @brief Implementation of the common layers datastructures
|
||||
* This header enumerates the layers specific definition implemented in the
|
||||
* library together with the macros and datatypes used to manipulate them.
|
||||
*/
|
||||
|
||||
/*!
|
||||
* @typedef (*func_copy_tensor)
|
||||
* @ingroup layers_common
|
||||
* @brief Function pointer for generic tensor copy routines
|
||||
* this function pointer abstracts a generic tensor copy routine.
|
||||
*/
|
||||
typedef ai_bool (*func_copy_tensor)(ai_tensor* dst, const ai_tensor* src);
|
||||
|
||||
/*!
|
||||
* @enum ai_layer_type
|
||||
* @ingroup layers_common
|
||||
* @brief ai_tools supported layers type id
|
||||
*/
|
||||
typedef enum {
|
||||
#define LAYER_ENTRY(type_, id_, struct_, forward_func_, init_func_, destroy_func_) \
|
||||
AI_LAYER_TYPE_ENTRY(type_) = id_,
|
||||
#include "layers_list.h"
|
||||
} ai_layer_type;
|
||||
|
||||
#define AI_LAYER_COMMON_FIELDS_DECLARE \
|
||||
AI_NODE_COMMON_FIELDS_DECLARE
|
||||
|
||||
#define AI_LAYER_STATEFUL_FIELDS_DECLARE \
|
||||
AI_NODE_STATEFUL_FIELDS_DECLARE
|
||||
|
||||
|
||||
/*!
|
||||
* @typedef void (*layer_func)(struct ai_layer_* layer)
|
||||
* @ingroup layers_common
|
||||
* @brief Callback signatures for all layers forward functions
|
||||
*/
|
||||
typedef node_func layer_func;
|
||||
|
||||
/*!
|
||||
* @struct ai_layer_base
|
||||
* @ingroup layers_common
|
||||
* @brief Structure encoding a base layer in the network
|
||||
*
|
||||
*/
|
||||
typedef ai_node ai_layer_base;
|
||||
|
||||
/*!
|
||||
* @struct ai_layer_stateful
|
||||
* @ingroup layers_common
|
||||
* @brief Structure encoding a stateful layer in the network
|
||||
*
|
||||
*/
|
||||
typedef ai_node_stateful ai_layer_stateful;
|
||||
|
||||
/*!
|
||||
* @brief Check the custom network types against the internally compiled ones
|
||||
* Helper function to check if the private APIs were compiled with a different
|
||||
* `datatypes_network.h` than the one provided to the caller.
|
||||
* @ingroup layers_common
|
||||
* @param signatures list of type sizes signatures (first element is the number of types)
|
||||
* @return false if there is a type size mismatch
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
ai_bool ai_check_custom_types(const ai_custom_type_signature* signatures);
|
||||
|
||||
/*!
|
||||
* @brief Helper API to retrieve a human readable layer type from enum
|
||||
* @ingroup layers_common
|
||||
* @param type in type of layer
|
||||
* @return string defining the type of the layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
const char* ai_layer_type_name(const ai_layer_type type);
|
||||
|
||||
/*!
|
||||
* @brief Helper API to check if a node is a valid layer type
|
||||
* @ingroup layers_common
|
||||
* @param type in type of layer
|
||||
* @return true if the layer is one of the ones listed in the enum,
|
||||
* false otherwise
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
ai_bool ai_layer_type_is_valid(const ai_layer_type type);
|
||||
|
||||
|
||||
/*!
|
||||
* @brief check scratch size computed with actual scratch buffer size
|
||||
* @ingroup layers
|
||||
* @param layer_type the layer type
|
||||
* @param fmt buffers format
|
||||
* @param filt_width filter width (when relevant)
|
||||
* @param filt_height filter height (when relevant)
|
||||
* @param n_channel_in the number of channels in
|
||||
* @param n_channel_out the number of channels out
|
||||
* @param is_pointwise is pointwise convolution (conv2d)
|
||||
* @param is_rgb is rgb convolution (conv2d)
|
||||
* @param is_depthwise is depthwise convolution (conv2d)
|
||||
* @param is_ch_wise has weights per channel
|
||||
* @param is_sssa is signed
|
||||
* @param p_tensor_scratch the scratch tensor
|
||||
* @param p_function_name the name of the function
|
||||
* @param line_nb the line number of the function
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void ai_layer_check_scratch_size( ai_layer_type layer_type, ai_array_format fmt,
|
||||
ai_size filt_width, ai_size filt_height,
|
||||
ai_u16 n_channel_in, ai_u16 n_channel_out,
|
||||
ai_bool is_pointwise, ai_bool is_rgb,
|
||||
ai_bool is_depthwise, ai_bool is_ch1st, ai_bool is_ch_wise,
|
||||
ai_bool is_sssa, ai_u32 tensor_scratch_size_bytes,
|
||||
const char *p_function_name, const int line_nb);
|
||||
|
||||
#ifdef HAS_AI_ASSERT
|
||||
#define CHECK_SCRATCH_BUFFER_SIZE( layer_type, fmt, \
|
||||
filt_width, filt_height, \
|
||||
n_channel_in, n_channel_out, \
|
||||
is_pointwise, is_rgb, \
|
||||
is_depthwise, is_ch1st, is_ch_wise, \
|
||||
is_sssa_ch, tensor_scratch_size_bytes) \
|
||||
ai_layer_check_scratch_size( layer_type, fmt, \
|
||||
filt_width, filt_height, \
|
||||
n_channel_in, n_channel_out, \
|
||||
is_pointwise, is_rgb, \
|
||||
is_depthwise, is_ch1st, is_ch_wise, \
|
||||
is_sssa_ch, tensor_scratch_size_bytes, \
|
||||
__FUNCTION__, __LINE__);
|
||||
#else
|
||||
#define CHECK_SCRATCH_BUFFER_SIZE( layer_type, fmt, \
|
||||
filt_width, filt_height, \
|
||||
n_channel_in, n_channel_out, \
|
||||
is_pointwise, is_rgb, \
|
||||
is_depthwise, is_ch1st, is_ch_wise, \
|
||||
is_sssa_ch, tensor_scratch_size_bytes) \
|
||||
AI_WRAP_FUNC(/*NULL*/)
|
||||
#endif
|
||||
#define IS_PW 1
|
||||
#define IS_RGB 1
|
||||
#define IS_DW 1
|
||||
#define IS_CH1ST 1
|
||||
#define IS_CH_WISE 1
|
||||
#define IS_SSSA_CH 1
|
||||
|
||||
#define NOT_PW 0
|
||||
#define NOT_RGB 0
|
||||
#define NOT_DW 0
|
||||
#define NOT_CH1ST 0
|
||||
#define NOT_CH_WISE 0
|
||||
#define NOT_SSSA_CH 0
|
||||
|
||||
|
||||
AI_API_DECLARE_END
|
||||
|
||||
#endif /*LAYERS_COMMON_H*/
|
||||
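A hedged skeleton showing how the accessor macros above are typically used at the top of a forward function; the macros already declare the locals and terminate their own statements, so they are invoked without a trailing semicolon. The body is a placeholder, not a real kernel:

static void forward_identity_demo(ai_layer *pLayer)
{
    AI_LAYER_IO_GET(pLayer, t_in, t_out)            /* in/out tensors of the node */
    AI_LAYER_WEIGHTS_GET(pLayer, t_weights, t_bias) /* t_bias may be NULL         */

    /* ... a real kernel would read t_in / t_weights / t_bias and write t_out ... */
    (void)t_in; (void)t_out; (void)t_weights; (void)t_bias;
}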
741
lib/stai/libstai/include/layers_conv2d.h
Normal file
@ -0,0 +1,741 @@
|
||||
/**
|
||||
******************************************************************************
|
||||
* @file layers_conv2d.h
|
||||
* @author AST Embedded Analytics Research Platform
|
||||
* @brief header file of AI platform conv2d layers datatypes
|
||||
******************************************************************************
|
||||
* @attention
|
||||
*
|
||||
* Copyright (c) 2018 STMicroelectronics.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This software is licensed under terms that can be found in the LICENSE file
|
||||
* in the root directory of this software component.
|
||||
* If no LICENSE file comes with this software, it is provided AS-IS.
|
||||
*
|
||||
******************************************************************************
|
||||
*/
|
||||
#ifndef LAYERS_CONV2D_H
|
||||
#define LAYERS_CONV2D_H
|
||||
|
||||
#include "layers_nl.h"
|
||||
#include "layers_pool.h"
|
||||
|
||||
|
||||
|
||||
|
||||
#define AI_LAYER_CONV2D_FIELDS_DECLARE \
|
||||
AI_LAYER_COMMON_FIELDS_DECLARE \
|
||||
ai_u32 groups; /*!< groups for separable convolution */ \
|
||||
AI_CONST ai_array* nl_params; /*!< array pointer to non linear parameters */ \
|
||||
ai_handle nl_func; /*!< function pointer to non linear transform */ \
|
||||
ai_shape_2d filter_stride; /*!< filter stride, how much the filter moves */ \
|
||||
ai_shape_2d dilation; /*!< dilation value along axis of the filter */ \
|
||||
ai_shape filter_pad; /*!< filter pad 4d */ \
|
||||
ai_layer_format_type in_ch_format; /*!< Input format (Channel 1st vs Channel last) */ \
|
||||
ai_layer_format_type out_ch_format; /*!< Output format (Channel 1st vs Channel last) */
|
||||
|
||||
|
||||
|
||||
/*!
|
||||
* @defgroup layers_conv2d Convolutive Layers Definitions
|
||||
* @brief definition
|
||||
*
|
||||
*/
|
||||
|
||||
AI_API_DECLARE_BEGIN
|
||||
|
||||
/*!
|
||||
* @struct ai_layer_dense
|
||||
* @ingroup layers_conv2d
|
||||
* @brief Dense (fully connected) layer
|
||||
*/
|
||||
typedef ai_layer_base ai_layer_dense;
|
||||
|
||||
/*!
|
||||
* @struct ai_layer_gemm
|
||||
* @ingroup layers_conv2d
|
||||
* @brief layer for General Matrix Multiplication
|
||||
*
|
||||
* Layer for General Matrix Multiplication (GEMM):
|
||||
* \f{equation}{ Y = \alpha A \cdot B + \beta C \f}
|
||||
* \f$\alpha\f$ and \f$\beta\f$ are parameters, A and B are matrices,
|
||||
* C is a matrix or an array. Size checks for A, B, C, and Y are performed and
|
||||
* broadcast is applied on C if necessary.
|
||||
* This is a sequential layer (see @ref ai_layer).
|
||||
*/
|
||||
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_gemm_ {
|
||||
AI_LAYER_COMMON_FIELDS_DECLARE
|
||||
ai_float alpha; /*!< alpha coefficient */
|
||||
ai_float beta; /*!< beta coefficient */
|
||||
ai_u8 tA; /*!< transpose A flag */
|
||||
ai_u8 tB; /*!< transpose B flag */
|
||||
} ai_layer_gemm;
|
||||
|
||||
/*!
|
||||
* @struct ai_layer_matmul
|
||||
* @ingroup layers_conv2d
|
||||
* @brief layer for General Matrix Multiplication
|
||||
*
|
||||
*/
|
||||
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_matmul_ {
|
||||
AI_LAYER_COMMON_FIELDS_DECLARE
|
||||
ai_float alpha; /*!< alpha coefficient */
|
||||
ai_float beta; /*!< beta coefficient */
|
||||
ai_u8 tA; /*!< transpose A flag */
|
||||
ai_u8 tB; /*!< transpose B flag */
|
||||
} ai_layer_matmul;
|
||||
|
||||
/*!
|
||||
* @struct ai_layer_conv2d
|
||||
* @ingroup layers_conv2d
|
||||
* @brief 2D convolutional layer with strides and pads
|
||||
*/
|
||||
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_conv2d_ {
|
||||
AI_LAYER_CONV2D_FIELDS_DECLARE
|
||||
} ai_layer_conv2d;
|
||||
|
||||
/*!
|
||||
* @struct ai_layer_conv2d_nl_pool
|
||||
* @ingroup layers_conv2d
|
||||
* @brief 2D convolutional layer + nl + pooling with strides and pads
|
||||
*/
|
||||
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_conv2d_nl_pool_ {
|
||||
AI_LAYER_CONV2D_FIELDS_DECLARE
|
||||
|
||||
ai_shape_2d pool_size; /*!< pooling size */
|
||||
ai_shape_2d pool_stride; /*!< pooling stride */
|
||||
ai_shape pool_pad; /*!< pooling pad */
|
||||
|
||||
ai_handle pool_func; /*!< function pointer to pooling transform */
|
||||
} ai_layer_conv2d_nl_pool;
|
||||
|
||||
|
||||
/*
|
||||
AI_INTERNAL_API
|
||||
void ai_dict8_dot_array_f32(ai_handle out, ai_ptr_const data0, ai_ptr_const lut,
|
||||
const ai_float* data1, const ai_size data_size);
|
||||
|
||||
AI_INTERNAL_API
|
||||
void ai_dict4_dot_array_f32(ai_handle out, ai_ptr_const data0, ai_ptr_const lut,
|
||||
const ai_float* data1, const ai_size data_size);
|
||||
*/
|
||||
|
||||
|
||||
/******************************************************************************/
|
||||
/* Forward Functions Section */
|
||||
/******************************************************************************/
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a floating point 32 2D convolutional layer.
|
||||
* @ingroup layers_conv2d
|
||||
* @param layer the convolutional (conv) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_conv2d_if32of32wf32(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a floating point 32 2D dw layer.
|
||||
* @ingroup layers_conv2d
|
||||
* @param layer the convolutional (conv) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_dw_if32of32wf32(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a floating point 32 2D convolutional group layer.
|
||||
* @ingroup layers_conv2d
|
||||
* @param layer the convolutional (conv) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_conv2d_if32of32wf32_group(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a 2D floating point 32 pool fused convolutional layer.
|
||||
* @ingroup layers_conv2d
|
||||
* @param layer the convolutional (conv) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_conv2d_if32of32wf32_nl_pool(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a 2D floating point 32 pool fused dw layer.
|
||||
* @ingroup layers_conv2d
|
||||
* @param layer the convolutional (conv) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_dw_if32of32wf32_nl_pool(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a 2D floating point 32 pool fused convolutional group layer.
|
||||
* @ingroup layers_conv2d
|
||||
* @param layer the convolutional (conv) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_conv2d_if32of32wf32_group_nl_pool(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a GEMM layer.
|
||||
* @ingroup layers
|
||||
* @param layer the layer including output and input tensors
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_gemm(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Computes matmul layer, intended as numpy.matmul(A,B).
|
||||
* @ingroup layers
|
||||
* @param layer the layer including output and input tensors
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_matmul(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a dense (fully connected) layer.
|
||||
* @ingroup layers_conv2d
|
||||
* @param layer the dense layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_dense(ai_layer* layer);
|
||||
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a fixed point 2D convolutional layer.
|
||||
* @ingroup layers_conv2d
|
||||
* @param layer the convolutional (conv) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_conv2d_fixed(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a fixed point @ref ai_layer_conv2d_nl_pool
|
||||
* layer.
|
||||
* The @ref ai_layer_conv2d_nl_pool is a fused conv2D + optional nonlinear
|
||||
* layer + optional pooling / nonlinearity (average, max)
|
||||
* @ingroup layers_conv2d
|
||||
* @param layer see @ai_layer_conv2d_nl_pool
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_conv2d_nl_pool_fixed(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a integer quantized 2D convolutional layer.
|
||||
* @ingroup layers_conv2d
|
||||
* @param layer the convolutional (conv) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_conv2d_integer(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a integer quantized 2D convolutional layer
|
||||
* for SSSA per layer quantized scheme
|
||||
* @ingroup layers_conv2d
|
||||
* @param layer the convolutional (conv) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_conv2d_integer_SSSA(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a integer quantized 2D convolutional layer
|
||||
* for SSSA per channel quantized scheme
|
||||
* @ingroup layers_conv2d
|
||||
* @param layer the convolutional (conv) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_conv2d_is8os8ws8_sssa_ch(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a int8 quantized DW layer
|
||||
* for SSSA per channel quantized scheme
|
||||
* @ingroup layers_conv2d
|
||||
* @param layer the convolutional (conv) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_dw_sssa8_ch(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a int8 quantized DW layer
|
||||
* for SSSA per channel quantized scheme Optimized for HSP
|
||||
* @ingroup layers_conv2d
|
||||
* @param layer the convolutional (conv) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_dw_hsp_1step_sssa8_ch(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a int8 quantized DW layer
|
||||
* for SSSA per channel quantized scheme Optimized for HSP
|
||||
* @ingroup layers_conv2d
|
||||
* @param layer the convolutional (conv) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_dw_hsp_sssa8_ch(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a int8 quantized DW layer
|
||||
* for SSSA per channel quantized scheme Optimized for HSP
|
||||
* @ingroup layers_conv2d
|
||||
* @param layer the convolutional (conv) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_dw_hsp_3step_sssa8_ch(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a int8 quantized DW layer
|
||||
* for SSSA per channel quantized scheme, with 3x3 kernels
|
||||
* @ingroup layers_conv2d
|
||||
* @param layer the convolutional (conv) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_dw_3x3_sssa8_ch(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a int8 quantized DW layer
|
||||
* for SSSA per channel quantized scheme, with 1xN kernels
|
||||
* @ingroup layers_conv2d
|
||||
* @param layer the convolutional (conv) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_dw_1xN_sssa8_ch(ai_layer *pLayer);
|
||||
|
||||
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a int8 quantized DW layer
|
||||
* for SSSA per channel quantized scheme, with 3x3 kernels and input are
|
||||
* channel first
|
||||
* @ingroup layers_conv2d
|
||||
* @param layer the convolutional (conv) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_dw_3x3_ch1st_sssa8_ch(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a int8 quantized DW layer
|
||||
* for SSSA per channel quantized scheme with depth multiplier > 1
|
||||
* @ingroup layers_conv2d
|
||||
* @param layer the convolutional (conv) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_dw_dm_sssa8_ch(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of int8 quantized DW layers.
|
||||
* @ingroup layers_conv2d
|
||||
* @param layer the convolutional (conv) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_dw_all_sssa8_ch(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a int8 quantized PW layer
|
||||
* for SSSA per channel quantized scheme
|
||||
* @ingroup layers_conv2d
|
||||
* @param layer the convolutional (conv) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_pw_sssa8_ch(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a int8 quantized PW layer
|
||||
* for SSSA per channel quantized scheme. Optimized for HSP
|
||||
* @ingroup layers_conv2d
|
||||
* @param layer the convolutional (conv) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_pw_hsp_sssa8_ch(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a int8 quantized PW layer
|
||||
* for SSSA per channel quantized scheme. Optimized for HSP
|
||||
* 1Step version (nb input channel <= 4)
|
||||
* @ingroup layers_conv2d
|
||||
* @param layer the convolutional (conv) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_pw_hsp_1step_sssa8_ch(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a int8 quantized PW layer
|
||||
* for SSSA per channel quantized scheme. Optimized for HSP
|
||||
* 3 Step variant
|
||||
* @ingroup layers_conv2d
|
||||
* @param layer the convolutional (conv) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_pw_hsp_3step_sssa8_ch(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a int8 quantized dilated Conv2d layer
|
||||
* for SSSA per channel quantized scheme (valid padding)
|
||||
* @ingroup layers_conv2d
|
||||
* @param layer the convolutional (conv) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_conv2d_dilated_sssa8_ch(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a int8 non dilated Conv2d layer
|
||||
* for SSSA per channel quantized scheme (valid padding)
|
||||
* @ingroup layers_conv2d
|
||||
* @param layer the convolutional (conv) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_conv2d_deep_sssa8_ch(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a int8 non dilated Conv2d layer
|
||||
* for SSSA per channel quantized scheme (valid padding)
|
||||
* number of output channels is greater than 8
|
||||
* Kernels shall be 3x3 and stride is (1,1)
|
||||
* @ingroup layers_conv2d
|
||||
* @param layer the convolutional (conv) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_conv2d_deep_3x3_sssa8_ch(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a int8 non dilated Conv2d layer
|
||||
* for SSSA per channel quantized scheme (valid or same padding)
|
||||
* @ingroup layers_conv2d
|
||||
* @param layer the convolutional (conv) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_conv2d_sssa8_ch(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a int8 non dilated Conv2d layer
|
||||
* for SSSA per channel quantized scheme (valid or same padding)
|
||||
* Used for configuration supported by HSP and if HSP is available
|
||||
* @ingroup layers_conv2d
|
||||
* @param layer the convolutional (conv) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_conv2d_hsp_1step_sssa8_ch(ai_layer *pLayer);
|
||||
|
||||
AI_INTERNAL_API
|
||||
void forward_conv2d_hsp_sssa8_ch(ai_layer *pLayer);
|
||||
|
||||
AI_INTERNAL_API
|
||||
void forward_conv2d_hsp_3step_sssa8_ch(ai_layer *pLayer);
|
||||
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a int8 quantized Conv2d layer
|
||||
* @ingroup layers_conv2d
|
||||
* @param layer the convolutional (conv) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_conv2d_all_sssa8_ch(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a int8 quantized RGB Conv2d layer
|
||||
* for SSSA per channel quantized scheme
|
||||
* @ingroup layers_conv2d
|
||||
* @param layer the convolutional (conv) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_conv2d_rgb_sssa8_ch(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a int8 quantized DW layer
|
||||
* for SSSA per channel quantized scheme with pooling fused
|
||||
* @ingroup layers_conv2d
|
||||
* @param layer the convolutional (conv) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_dw_sssa8_ch_nl_pool(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a int8 quantized DW layer
|
||||
* for SSSA per channel quantized scheme, with 3x3 kernels,
|
||||
* with pooling fused
|
||||
* @ingroup layers_conv2d
|
||||
* @param layer the convolutional (conv) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_dw_3x3_sssa8_ch_nl_pool(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a int8 quantized DW layer
|
||||
* for SSSA per channel quantized scheme, with 3x3 kernels,
|
||||
* with pooling fused
|
||||
* @ingroup layers_conv2d
|
||||
* @param layer the convolutional (conv) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_dw_3x3_ch1st_sssa8_ch_nl_pool(ai_layer *pLayer);
|
||||
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a int8 quantized DW layer
|
||||
* for SSSA per channel quantized scheme with depth multiplier > 1
|
||||
* with pooling fused
|
||||
* @ingroup layers_conv2d
|
||||
* @param layer the convolutional (conv) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_dw_dm_sssa8_ch_nl_pool(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of int8 quantized DW layers, with pooling fused
|
||||
* @ingroup layers_conv2d
|
||||
* @param layer the convolutional (conv) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_dw_all_sssa8_ch_nl_pool(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a int8 quantized PW layer,
|
||||
* with pooling fused
|
||||
* @ingroup layers_conv2d
|
||||
* @param layer the convolutional (conv) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_pw_sssa8_ch_nl_pool(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a int8 quantized dilated Conv2d layer
|
||||
* for SSSA per channel quantized scheme (valid padding) and pooling fused
|
||||
* @ingroup layers_conv2d
|
||||
* @param layer the convolutional (conv) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_conv2d_dilated_sssa8_ch_nl_pool(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a int8 quantized non dilated Conv2d layer
|
||||
* for SSSA per channel quantized scheme (valid padding) and pooling fused
|
||||
* @ingroup layers_conv2d
|
||||
* @param layer the convolutional (conv) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_conv2d_deep_sssa8_ch_nl_pool(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a int8 non dilated Conv2d layer
|
||||
* for SSSA per channel quantized scheme (valid padding) and pooling fused
|
||||
* number of output channels is greater than 8
|
||||
* Kernels shall be 3x3 and stride is (1,1)
|
||||
* @ingroup layers_conv2d
|
||||
* @param layer the convolutional (conv) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_conv2d_deep_3x3_sssa8_ch_nl_pool(ai_layer *pLayer);
|
||||
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a int8 quantized non dilated Conv2d layer
|
||||
* for SSSA per channel quantized scheme (valid or same padding) and pooling fused
|
||||
* @ingroup layers_conv2d
|
||||
* @param layer the convolutional (conv) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_conv2d_sssa8_ch_nl_pool(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a int8 quantized Conv2d layer and pooling fused
|
||||
* @ingroup layers_conv2d
|
||||
* @param layer the convolutional (conv) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_conv2d_all_sssa8_ch_nl_pool(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a integer quantized 2D convolutional layer
|
||||
* for SSUA per layer quantized scheme
|
||||
* @ingroup layers_conv2d
|
||||
* @param layer the convolutional (conv) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_conv2d_integer_SSUA(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a integer quantized 2D convolutional layer
|
||||
* for SSUA per channel quantized scheme
|
||||
* @ingroup layers_conv2d
|
||||
* @param layer the convolutional (conv) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_conv2d_integer_SSUA_ch(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a integer quantized 2D convolutional layer
|
||||
* for UAUA per layer quantized scheme
|
||||
* @ingroup layers_conv2d
|
||||
* @param layer the convolutional (conv) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_conv2d_integer_UAUA(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a integer quantized 2D convolutional layer
|
||||
* for UAUA per channel quantized scheme
|
||||
* @ingroup layers_conv2d
|
||||
* @param layer the convolutional (conv) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_conv2d_integer_UAUA_ch(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a integer @ref ai_layer_conv2d_nl_pool layer.
|
||||
* The @ref ai_layer_conv2d_nl_pool is a fused conv2D + optional nonlinear
|
||||
* layer + optional pooling / nonlinearity (average, max)
|
||||
* @ingroup layers_conv2d
|
||||
* @param layer see @ai_layer_conv2d_nl_pool
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_conv2d_nl_pool_integer(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a integer @ref ai_layer_conv2d_nl_pool layer
|
||||
* for SSSA per layer quantized scheme
|
||||
* The @ref ai_layer_conv2d_nl_pool is a fused conv2D + optional nonlinear
|
||||
* layer + optional pooling / nonlinearity (average, max)
|
||||
* @ingroup layers_conv2d
|
||||
* @param layer see @ai_layer_conv2d_nl_pool
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_conv2d_nl_pool_integer_SSSA(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a integer @ref ai_layer_conv2d_nl_pool layer
|
||||
* for SSSA per channel quantized scheme
|
||||
* The @ref ai_layer_conv2d_nl_pool is a fused conv2D + optional nonlinear
|
||||
* layer + optional pooling / nonlinearity (average, max)
|
||||
* @ingroup layers_conv2d
|
||||
* @param layer see @ai_layer_conv2d_nl_pool
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_conv2d_nl_pool_integer_SSSA_ch(ai_layer *pLayer);
|
||||
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a integer @ref ai_layer_conv2d_nl_pool layer
|
||||
* for SSUA per layer quantized scheme
|
||||
* The @ref ai_layer_conv2d_nl_pool is a fused conv2D + optional nonlinear
|
||||
* layer + optional pooling / nonlinearity (average, max)
|
||||
* @ingroup layers_conv2d
|
||||
* @param layer see @ai_layer_conv2d_nl_pool
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_conv2d_nl_pool_integer_SSUA(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a integer @ref ai_layer_conv2d_nl_pool layer
|
||||
* for SSUA per channel quantized scheme
|
||||
* The @ref ai_layer_conv2d_nl_pool is a fused conv2D + optional nonlinear
|
||||
* layer + optional pooling / nonlinearity (average, max)
|
||||
* @ingroup layers_conv2d
|
||||
* @param layer see @ai_layer_conv2d_nl_pool
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_conv2d_nl_pool_integer_SSUA_ch(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a integer @ref ai_layer_conv2d_nl_pool layer
|
||||
* for UAUA per layer quantized scheme
|
||||
* The @ref ai_layer_conv2d_nl_pool is a fused conv2D + optional nonlinear
|
||||
* layer + optional pooling / nonlinearity (average, max)
|
||||
* @ingroup layers_conv2d
|
||||
* @param layer see @ai_layer_conv2d_nl_pool
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_conv2d_nl_pool_integer_UAUA(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a integer @ref ai_layer_conv2d_nl_pool layer
|
||||
* for UAUA per channel quantized scheme
|
||||
* The @ref ai_layer_conv2d_nl_pool is a fused conv2D + optional nonlinear
|
||||
* layer + optional pooling / nonlinearity (average, max)
|
||||
* @ingroup layers_conv2d
|
||||
* @param layer see @ai_layer_conv2d_nl_pool
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_conv2d_nl_pool_integer_UAUA_ch(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a integer dense (fully connected) layer.
|
||||
* @ingroup layers_dense
|
||||
* @param layer the dense layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_dense_integer(ai_layer *pLayer);
|
||||
|
||||
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a integer dense (fully connected) layer
|
||||
* for SSSA per layer quantized scheme
|
||||
* @ingroup layers_dense
|
||||
* @param layer the dense layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_dense_integer_SSSA(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a integer dense (fully connected) layer
|
||||
* for SSSA per layer quantized scheme Optimized for HSP
|
||||
* @ingroup layers_dense
|
||||
* @param layer the dense layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_dense_hsp_sssa8(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a integer dense (fully connected) layer
|
||||
* for SSSA per layer quantized scheme Optimized for HSP, 3Step loop (out_ch)
|
||||
* @ingroup layers_dense
|
||||
* @param layer the dense layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_dense_hsp_3step_sssa8(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a integer dense (fully connected) layer
|
||||
* for SSSA per channel quantized scheme: HSP variant
|
||||
* @ingroup layers_dense
|
||||
* @param layer the dense layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_dense_integer_SSSA_ch(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a integer dense (fully connected) layer
|
||||
* for SSUA per layer quantized scheme
|
||||
* @ingroup layers_dense
|
||||
* @param layer the dense layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_dense_integer_SSUA(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a integer dense (fully connected) layer
|
||||
* for SSUA per channel quantized scheme
|
||||
* @ingroup layers_dense
|
||||
* @param layer the dense layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_dense_integer_SSUA_ch(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a integer dense (fully connected) layer
|
||||
* for UAUA per layer quantized scheme
|
||||
* @ingroup layers_dense
|
||||
* @param layer the dense layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_dense_integer_UAUA(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a integer dense (fully connected) layer
|
||||
* for UAUA per channel quantized scheme
|
||||
* @ingroup layers_dense
|
||||
* @param layer the dense layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_dense_integer_UAUA_ch(ai_layer *pLayer);
|
||||
|
||||
AI_API_DECLARE_END
|
||||
|
||||
#endif /*LAYERS_CONV2D_H*/
|
||||
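For reference, the GEMM formula documented above, Y = alpha * A x B + beta * C, written out as a plain row-major C sketch; this only illustrates the math, it is not the library's optimized forward_gemm() kernel:

/* Reference-only GEMM: Y[MxN] = alpha * A[MxK] * B[KxN] + beta * C[MxN]. */
static void gemm_reference(float *Y, const float *A, const float *B,
                           const float *C, int M, int N, int K,
                           float alpha, float beta)
{
    for (int m = 0; m < M; m++) {
        for (int n = 0; n < N; n++) {
            float acc = 0.0f;
            for (int k = 0; k < K; k++) {
                acc += A[m * K + k] * B[k * N + n];
            }
            Y[m * N + n] = alpha * acc + beta * C[m * N + n];
        }
    }
}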
488
lib/stai/libstai/include/layers_conv2d_dqnn.h
Normal file
@ -0,0 +1,488 @@
|
||||
/**
|
||||
******************************************************************************
|
||||
* @file layers_conv2d_dqnn.h
|
||||
* @author AIS
|
||||
* @brief header file of AI platform DQNN conv datatypes
|
||||
******************************************************************************
|
||||
* @attention
|
||||
*
|
||||
* Copyright (c) 2021 STMicroelectronics.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This software is licensed under terms that can be found in the LICENSE file
|
||||
* in the root directory of this software component.
|
||||
* If no LICENSE file comes with this software, it is provided AS-IS.
|
||||
*
|
||||
******************************************************************************
|
||||
*/
|
||||
#ifndef LAYERS_CONV2D_DQNN_H
|
||||
#define LAYERS_CONV2D_DQNN_H
|
||||
|
||||
#include "layers_common.h"
|
||||
#include "layers_conv2d.h"
|
||||
|
||||
/*!
|
||||
* @defgroup layers_conv2d_dqnn Layers Definitions
|
||||
* @brief definition
|
||||
*
|
||||
*/
|
||||
|
||||
AI_API_DECLARE_BEGIN
|
||||
|
||||
|
||||
#define AI_DQNN_PAD_1_KEY (1)
|
||||
#define AI_DQNN_PAD_M1_KEY (-1)
|
||||
#define AI_DQNN_PAD_0_KEY (0)
|
||||
#define AI_DQNN_PAD_1_VALUE (0x0)
|
||||
#define AI_DQNN_PAD_M1_VALUE (0xFFFFFFFF)
|
||||
#define AI_DQNN_PAD_0_VALUE (0x2)
|
||||
|
||||
|
||||
/*!
|
||||
* @struct ai_layer_conv2d_dqnn
|
||||
* @ingroup layers_conv2d_dqnn
|
||||
* @brief conv2d_dqnn layer
|
||||
*
|
||||
* @ref forward_conv2d_is1os1ws1
|
||||
*/
|
||||
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_conv2d_dqnn_ {
|
||||
AI_LAYER_CONV2D_FIELDS_DECLARE
|
||||
ai_i32 pad_value;
|
||||
} ai_layer_conv2d_dqnn;
|
||||
|
||||
|
||||
/******************************************************************************/
|
||||
/* Forward Functions Section */
|
||||
/******************************************************************************/
|
||||
|
||||
/*!
|
||||
* @brief Handles point wise convolution with binary input, binary output and
|
||||
* binary weights
|
||||
* @ingroup layers_conv2d_dqnn
|
||||
* @param layer conv2d_dqnn layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_pw_is1os1ws1_bn(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Handles point wise convolution with binary input, binary output and
|
||||
* binary weights - Optimized thanks to Optim2 assumptions
|
||||
* @ingroup layers_conv2d_dqnn
|
||||
* @param layer conv2d_dqnn layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_pw_is1os1ws1_bn_optim2(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Handles point wise convolution with binary input, 8-bits output and
|
||||
* binary weights
|
||||
* @ingroup layers_conv2d_dqnn
|
||||
* @param layer conv2d_dqnn layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_pw_is1os8ws1_bn(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Handles point wise convolution with binary input, 8-bits output and
|
||||
* binary weights - Optimized thanks to Optim1 assumptions
|
||||
* @ingroup layers_conv2d_dqnn
|
||||
* @param layer conv2d_dqnn layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_pw_is1os8ws1_bn_optim1(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Handles point-wise convolution with binary input, float32 output
|
||||
* and binary weights
|
||||
* @ingroup layers_conv2d_dqnn
|
||||
* @param layer conv2d_dqnn layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_pw_is1of32ws1_bn(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Handles point-wise convolution with binary input, float32 output
|
||||
* and binary weights - Optimized thanks to Optim1 assumptions
|
||||
* @ingroup layers_conv2d_dqnn
|
||||
* @param layer conv2d_dqnn layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_pw_is1of32ws1_bn_optim1(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Handles 2D convolution with binary input, binary output and
|
||||
* binary weights
|
||||
* @ingroup layers_conv2d_dqnn
|
||||
* @param layer conv2d_dqnn layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_conv2d_is1os1ws1_bn(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Handles 2D convolution with binary input, binary output and
|
||||
* binary weights - Optimized thanks to Optim2 assumptions
|
||||
* @ingroup layers_conv2d_dqnn
|
||||
* @param layer conv2d_dqnn layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_conv2d_is1os1ws1_bn_optim2(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Handles 2D convolution with binary input, 8-bits output and
|
||||
* binary weights
|
||||
* @ingroup layers_conv2d_dqnn
|
||||
* @param layer conv2d_dqnn layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_conv2d_is1os8ws1_bn(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Handles 2D convolution with binary input, 8-bits output and
|
||||
* binary weights - Optimized thanks to Optim1 assumptions
|
||||
* @ingroup layers_conv2d_dqnn
|
||||
* @param layer conv2d_dqnn layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_conv2d_is1os8ws1_bn_optim1(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Handles 2D convolution with binary input, binary output and
|
||||
* binary weights - with 0 padding (QKeras like)
|
||||
* @ingroup layers_conv2d_dqnn
|
||||
* @param layer conv2d_dqnn layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_conv2d_is1os1ws1_bn_pad0(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Handles 2D convolution with binary input, binary output and
|
||||
* binary weights - with 0 padding (QKeras like) - Optimized thanks to
|
||||
* Optim0 assumptions
|
||||
* @ingroup layers_conv2d_dqnn
|
||||
* @param layer conv2d_dqnn layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_conv2d_is1os1ws1_bn_pad0_optim0(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Handles 2D convolution with binary input, 8-bits output and
|
||||
* binary weights - with 0 padding (QKeras like)
|
||||
* @ingroup layers_conv2d_dqnn
|
||||
* @param layer conv2d_dqnn layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_conv2d_is1os8ws1_bn_pad0(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Handles 2D convolution with binary input, binary output and
|
||||
* binary weights - with +1/-1 padding (Larq like)
|
||||
* @ingroup layers_conv2d_dqnn
|
||||
* @param layer conv2d_dqnn layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_conv2d_is1os1ws1_bn_pad1(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Handles 2D convolution with binary input, binary output and
|
||||
* binary weights - with +1/-1 padding (Larq like) - Optimized thanks
|
||||
* to Optim2 assumptions
|
||||
* @ingroup layers_conv2d_dqnn
|
||||
* @param layer conv2d_dqnn layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_conv2d_is1os1ws1_bn_pad1_optim2(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Handles 2D convolution with binary input, 8-bits output and
|
||||
* binary weights - with +1/-1 padding (Larq like)
|
||||
* @ingroup layers_conv2d_dqnn
|
||||
* @param layer conv2d_dqnn layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_conv2d_is1os8ws1_bn_pad1(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Handles 2D convolution with binary input, 8-bits output and
|
||||
* binary weights - with +1/-1 padding (Larq like) - Optimized thanks
|
||||
* to Optim1 assumptions
|
||||
* @ingroup layers_conv2d_dqnn
|
||||
* @param layer conv2d_dqnn layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_conv2d_is1os8ws1_bn_pad1_optim1(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Handles 2D convolution with 8-bits quantized Input and weights and
|
||||
* binary output
|
||||
* @ingroup layers_conv2d_dqnn
|
||||
* @param layer conv2d_dqnn layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_conv2d_is8os1ws8(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Handles 2D convolution with 8-bits quantized Input and weights and
|
||||
* binary output - Optimized thanks to Optim2 assumptions
|
||||
* @ingroup layers_conv2d_dqnn
|
||||
* @param layer conv2d_dqnn layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_conv2d_is8os1ws8_optim2(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Handles 2D convolution with 8-bits quantized Input and weights and
|
||||
* binary output - quantized with DoReFa SotA quantizer
|
||||
* @ingroup layers_conv2d_dqnn
|
||||
* @param layer conv2d_dqnn layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_conv2d_dorefa_is8os1ws8(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
 * @brief Handles 2D convolution with 16-bits quantized input, binary weights
 *        and binary output
|
||||
* @ingroup layers_conv2d_dqnn
|
||||
* @param layer conv2d_dqnn layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_conv2d_is16os1ws1_bn_fxp(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
 * @brief Handles 2D convolution with 16-bits quantized input, binary weights
 *        and 16-bits quantized output
|
||||
* @ingroup layers_conv2d_dqnn
|
||||
* @param layer conv2d_dqnn layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_conv2d_is16os16ws1_fxp(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Handles depth-wise convolution with binary input, binary output and
|
||||
* binary weights
|
||||
* @ingroup layers_conv2d_dqnn
|
||||
* @param layer conv2d_dqnn layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_dw_is1os1ws1_bn(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Handles depth-wise convolution with binary input, binary output and
|
||||
* binary weights - Optimized thanks to Optim3 assumptions
|
||||
* @ingroup layers_conv2d_dqnn
|
||||
* @param layer conv2d_dqnn layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_dw_is1os1ws1_bn_optim3(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Handles depth-wise convolution with binary input, binary output and
|
||||
* binary weights - with 0 padding (QKeras like)
|
||||
* @ingroup layers_conv2d_dqnn
|
||||
* @param layer conv2d_dqnn layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_dw_is1os1ws1_bn_pad0(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Handles depth-wise convolution with binary input, binary output and
|
||||
* binary weights - with 0 padding (QKeras like) - Optimized thanks to
|
||||
* Optim3 assumptions
|
||||
* @ingroup layers_conv2d_dqnn
|
||||
* @param layer conv2d_dqnn layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_dw_is1os1ws1_bn_pad0_optim3(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Handles depth-wise convolution with binary input, binary output and
|
||||
* binary weights - with +1/-1 padding (Larq like)
|
||||
* @ingroup layers_conv2d_dqnn
|
||||
* @param layer conv2d_dqnn layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_dw_is1os1ws1_bn_pad1(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Handles depth-wise convolution with binary input, binary output and
|
||||
* binary weights - with +1/-1 padding (Larq like) - Optimized thanks to
|
||||
* Optim3 assumptions
|
||||
* @ingroup layers_conv2d_dqnn
|
||||
* @param layer conv2d_dqnn layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_dw_is1os1ws1_bn_pad1_optim3(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Handles 2D convolution with 8-bits quantized Input and output and
|
||||
* binary weights
|
||||
* @ingroup layers_conv2d_dqnn
|
||||
* @param layer conv2d_dqnn layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_conv2d_is8os8ws1(ai_layer *pLayer);
|
||||
|
||||
/**
|
||||
* @brief Handles 2D convolution with binary input, fixed point 16-bits output and
|
||||
* binary weights - with 0 padding (QKeras like) - Lite I/F
|
||||
* @ingroup layers_conv2d
|
||||
* @param layer the convolutional (conv) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_conv2d_is1os16ws1_bn_pad0_fxp(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Handles 2D convolution with binary input, fixed point 16-bits output and
|
||||
* binary weights - with +1/-1 padding (Larq like) - Lite I/F
|
||||
* @ingroup layers_conv2d
|
||||
* @param layer the convolutional (conv) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_conv2d_is1os16ws1_bn_pad1_fxp(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Handles 2D convolution with binary input, fixed point 16-bits output and
|
||||
* binary weights - with +1/-1 padding (Larq like) - Lite I/F
|
||||
* - Optimized thanks to Optim1 assumptions
|
||||
* @ingroup layers_conv2d
|
||||
* @param layer the convolutional (conv) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_conv2d_is1os16ws1_bn_pad1_optim1_fxp(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Handles 2D convolution with binary input, fixed point 16-bits unsigned output and
|
||||
* binary weights - with 0 padding (QKeras like) - Lite I/F
|
||||
* @ingroup layers_conv2d
|
||||
* @param layer the convolutional (conv) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_conv2d_is1ou16ws1_bn_pad0_fxp(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Handles 2D convolution with binary input, fixed point 16-bits unsigned output and
|
||||
* binary weights - with +1/-1 padding (Larq like) - Lite I/F
|
||||
* @ingroup lite_conv2d_dqnn
|
||||
* @param layer the convolutional (conv) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_conv2d_is1ou16ws1_bn_pad1_fxp(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
 * @brief Handles 2D convolution with binary input, fixed point 16-bits unsigned output and
|
||||
* binary weights - with +1/-1 padding (Larq like) - Lite I/F
|
||||
* - Optimized thanks to Optim1 assumptions
|
||||
* @ingroup lite_conv2d_dqnn
|
||||
* @param layer the convolutional (conv) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_conv2d_is1ou16ws1_bn_pad1_optim1_fxp(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
 * @brief Computes the activations of an integer quantized 2D convolutional layer
|
||||
* for SSSA per channel quantized RGB scheme using n_channel_in = 3
|
||||
* @ingroup layers_conv2d
|
||||
* @param layer the convolutional (conv) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_conv2d_is8os8ws8_sssa_ch_rgb(const ai_i8 *pData_in,
                                          ai_i8 *pData_out,
                                          const ai_i8 *pWeights,
                                          const ai_i32 *pBias,
                                          ai_u16 *pBuffer_a,
                                          const ai_size width_in,
                                          const ai_size height_in,
                                          const ai_size width_out,
                                          const ai_size height_out,
                                          const ai_u16 n_channel_in,
                                          const ai_u16 n_channel_out,
                                          const ai_size filt_width,
                                          const ai_size filt_height,
                                          const ai_u16 filt_pad_x,
                                          const ai_u16 filt_pad_y,
                                          const ai_u16 filt_stride_x,
                                          const ai_u16 filt_stride_y,
                                          const ai_float in_scale,
                                          const ai_float out_scale,
                                          const ai_float *pWt_scale,
                                          const ai_i8 in_zeropoint,
                                          const ai_i8 out_zeropoint,
                                          const ai_bool out_ch_format,
                                          ai_i16 *p_out_r_shift,
                                          ai_i32 *p_out_factor);
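/* Editor's note: a hypothetical call sketch (all buffers, shapes, scales and
 * zero-points below are made up for illustration; the generated network code
 * provides the real values). It only shows how the arguments line up with the
 * declaration above. */
#if 0
  forward_conv2d_is8os8ws8_sssa_ch_rgb(
      in_data, out_data, weights, bias, scratch_u16,
      /* width_in,  height_in   */ 128, 128,
      /* width_out, height_out  */ 64, 64,
      /* n_channel_in/out       */ 3, 16,
      /* filt_width/height      */ 3, 3,
      /* filt_pad_x/y           */ 1, 1,
      /* filt_stride_x/y        */ 2, 2,
      /* in_scale, out_scale    */ 0.0039f, 0.0625f,
      /* per-channel wt scales  */ wt_scales,
      /* in/out zero-points     */ -128, -128,
      /* out_ch_format          */ 1,
      &out_r_shift, &out_factor);
#endif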
|
||||
|
||||
/*!
|
||||
 * @brief Computes the activations of a point-wise integer quantized convolution
 *        for SSSA per channel quantized scheme
|
||||
* @ingroup layers_conv2d
|
||||
* @param layer the convolutional (conv) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_pw_is8os8ws8_sssa_ch(const ai_i8 *pData_in,
                                  ai_i8 *pData_out,
                                  const ai_i8 *pWeights,
                                  const ai_i32 *pBias,
                                  ai_u16 *pBuffer_a,
                                  const ai_size width_in,
                                  const ai_size height_in,
                                  const ai_size width_out,
                                  const ai_size height_out,
                                  const ai_u16 n_channel_in,
                                  const ai_u16 n_channel_out,
                                  const ai_size filt_width,
                                  const ai_size filt_height,
                                  const ai_u16 filt_pad_x,
                                  const ai_u16 filt_pad_y,
                                  const ai_u16 filt_stride_x,
                                  const ai_u16 filt_stride_y,
                                  const ai_u16 dilation_x,
                                  const ai_u16 dilation_y,
                                  const ai_float in_scale,
                                  const ai_float out_scale,
                                  const ai_float *pWt_scale,
                                  const ai_i8 in_zeropoint,
                                  const ai_i8 out_zeropoint,
                                  ai_i16 *p_out_r_shift,
                                  ai_i32 *p_out_factor,
                                  ai_i32 AI_PWOverlay,
                                  ai_i16 *bufferA,
                                  ai_i32 scratch_size);
|
||||
// st_nn_context_t context);
|
||||
|
||||
/*!
|
||||
 * @brief Computes the activations of a depth-wise integer quantized convolution
 *        for SSSA per channel quantized scheme
|
||||
* @ingroup layers_conv2d
|
||||
* @param layer the convolutional (conv) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_dw_is8os8ws8_sssa_ch(const ai_i8 *pData_in,
                                  ai_i8 *pData_out,
                                  const ai_i8 *pWeights,
                                  const ai_i32 *pBias,
                                  ai_u16 *pBuffer_a,
                                  const ai_size width_in,
                                  const ai_size height_in,
                                  const ai_size width_out,
                                  const ai_size height_out,
                                  const ai_u16 n_channel_in,
                                  const ai_u16 n_channel_out,
                                  const ai_size filt_width,
                                  const ai_size filt_height,
                                  const ai_u16 filt_pad_x,
                                  const ai_u16 filt_pad_y,
                                  const ai_u16 filt_stride_x,
                                  const ai_u16 filt_stride_y,
                                  const ai_u16 dilation_x,
                                  const ai_u16 dilation_y,
                                  const ai_float in_scale,
                                  const ai_float out_scale,
                                  const ai_float *pWt_scale,
                                  const ai_i8 in_zeropoint,
                                  const ai_i8 out_zeropoint,
                                  ai_i16 *p_out_r_shift,
                                  ai_i32 *p_out_factor);
|
||||
|
||||
|
||||
AI_API_DECLARE_END
|
||||
|
||||
#endif /*LAYERS_CONV2D_DQNN_H*/
|
||||
41
lib/stai/libstai/include/layers_custom.h
Normal file
@ -0,0 +1,41 @@
|
||||
#ifndef LAYERS_CUSTOM_H
#define LAYERS_CUSTOM_H
/**
  ******************************************************************************
  * @file    layers_custom.h
  * @author  STMicroelectronics
  * @brief   header file of AI platform custom layers datatypes
  ******************************************************************************
  * @attention
  *
  * Copyright (c) 2020 STMicroelectronics.
  * All rights reserved.
  *
  * This software is licensed under terms that can be found in the LICENSE file
  * in the root directory of this software component.
  * If no LICENSE file comes with this software, it is provided AS-IS.
  *
  ******************************************************************************
  */
#include "layers_common.h"

/*!
 * @defgroup layers_custom Custom layer definitions
 * @brief Definition of custom layer structures
 */

AI_API_DECLARE_BEGIN

/*!
 * @struct ai_layer_custom
 * @ingroup layers_custom
 * @brief Custom layer wrapper
 *
 * The custom layer wrapper
 */
typedef ai_layer_stateful ai_layer_custom;


AI_API_DECLARE_END

#endif /* LAYERS_CUSTOM_H */
|
||||
105
lib/stai/libstai/include/layers_dense.h
Normal file
@ -0,0 +1,105 @@
|
||||
#ifndef LAYERS_DENSE_H
|
||||
#define LAYERS_DENSE_H
|
||||
/**
|
||||
******************************************************************************
|
||||
* @file layers_dense.h
|
||||
* @author AST Embedded Analytics Research Platform
|
||||
* @brief header file of AI platform dense layers datatypes
|
||||
******************************************************************************
|
||||
* @attention
|
||||
*
|
||||
* Copyright (c) 2018 STMicroelectronics.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This software is licensed under terms that can be found in the LICENSE file
|
||||
* in the root directory of this software component.
|
||||
* If no LICENSE file comes with this software, it is provided AS-IS.
|
||||
*
|
||||
******************************************************************************
|
||||
*/
|
||||
|
||||
#include "layers_common.h"
|
||||
|
||||
|
||||
/*!
|
||||
 * @defgroup layers_dense Dense Layers Definitions
 * @brief Definitions of dense (fully connected) layers and their forward functions
|
||||
*
|
||||
*/
|
||||
|
||||
AI_API_DECLARE_BEGIN
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a fixed point dense (fully connected) layer.
|
||||
* @ingroup layers_dense
|
||||
* @param layer the dense layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_dense_fixed(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
 * @brief Computes the activations of an integer dense (fully connected) layer.
|
||||
* @ingroup layers_dense
|
||||
* @param layer the dense layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_dense_integer(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
 * @brief Computes the activations of an integer dense (fully connected) layer
|
||||
* for SSSA per layer quantized scheme
|
||||
* @ingroup layers_dense
|
||||
* @param layer the dense layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_dense_integer_SSSA(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
 * @brief Computes the activations of an integer dense (fully connected) layer
|
||||
* for SSSA per channel quantized scheme
|
||||
* @ingroup layers_dense
|
||||
* @param layer the dense layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_dense_integer_SSSA_ch(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
 * @brief Computes the activations of an integer dense (fully connected) layer
|
||||
* for SSUA per layer quantized scheme
|
||||
* @ingroup layers_dense
|
||||
* @param layer the dense layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_dense_integer_SSUA(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
 * @brief Computes the activations of an integer dense (fully connected) layer
|
||||
* for SSUA per channel quantized scheme
|
||||
* @ingroup layers_dense
|
||||
* @param layer the dense layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_dense_integer_SSUA_ch(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
 * @brief Computes the activations of an integer dense (fully connected) layer
|
||||
* for UAUA per layer quantized scheme
|
||||
* @ingroup layers_dense
|
||||
* @param layer the dense layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_dense_integer_UAUA(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
 * @brief Computes the activations of an integer dense (fully connected) layer
|
||||
* for UAUA per channel quantized scheme
|
||||
* @ingroup layers_dense
|
||||
* @param layer the dense layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_dense_integer_UAUA_ch(ai_layer *pLayer);
|
||||
|
||||
AI_API_DECLARE_END
|
||||
|
||||
#endif /*LAYERS_DENSE_H*/
|
||||
|
||||
394
lib/stai/libstai/include/layers_dense_dqnn.h
Normal file
@ -0,0 +1,394 @@
|
||||
/**
|
||||
******************************************************************************
|
||||
* @file layers_dense_dqnn.h
|
||||
* @author AST Embedded Analytics Research Platform
|
||||
* @brief header file of deeply quantized dense layers.
|
||||
******************************************************************************
|
||||
* @attention
|
||||
*
|
||||
* Copyright (c) 2021 STMicroelectronics.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This software is licensed under terms that can be found in the LICENSE file
|
||||
* in the root directory of this software component.
|
||||
* If no LICENSE file comes with this software, it is provided AS-IS.
|
||||
*
|
||||
******************************************************************************
|
||||
*/
|
||||
#ifndef LAYERS_DENSE_DQNN_H
|
||||
#define LAYERS_DENSE_DQNN_H
|
||||
|
||||
#include "layers_common.h"
|
||||
|
||||
/*!
|
||||
* @defgroup layers_dense_dqnn Quantized Dense Layers definition.
|
||||
 * @brief Kernels and forward functions implementing
|
||||
* dense layers with quantized inputs, weights, or outputs.
|
||||
*/
|
||||
|
||||
AI_API_DECLARE_BEGIN
|
||||
|
||||
/*!
|
||||
* @struct ai_layer_dense_dqnn
|
||||
* @ingroup layers_dense_dqnn
|
||||
* @brief Specific instance of deeply quantized dense layers.
|
||||
*/
|
||||
typedef ai_layer_base ai_layer_dense_dqnn;
|
||||
|
||||
/*****************************************************************************/
|
||||
/* Forward Functions Section */
|
||||
/*****************************************************************************/
|
||||
|
||||
/*!
|
||||
* @brief Forward function for a dense layer with signed binary input,
|
||||
* signed binary output, and signed binary weights.
|
||||
* @ingroup layers_dense_dqnn
|
||||
* @param layer template layer as an opaque pointer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_dense_is1os1ws1(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Forward function for a dense layer with signed binary input,
|
||||
* signed binary output, and signed binary weights.
|
||||
* The BN is fused, i.e., the layer requires weights, scale, and offset, where
|
||||
* weights are those of the dense layer, scale is that of the BN, and the offset
|
||||
* corresponds to dense bias * bn scale + bn offset. If the parameters do not
|
||||
* agree with such convention, the behavior is undefined.
|
||||
* @ingroup layers_dense_dqnn
|
||||
* @param layer template layer as an opaque pointer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_dense_is1os1ws1_bn(ai_layer* layer);
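/* Editor's note: a minimal sketch (not part of the library) of the fusion
 * convention described above; names are hypothetical. Per output channel the
 * stored offset is expected to be dense_bias * bn_scale + bn_offset. */
#if 0
static void fuse_dense_bn_offsets(const ai_float *dense_bias,
                                  const ai_float *bn_scale,
                                  const ai_float *bn_offset,
                                  ai_float *fused_offset,
                                  const ai_i32 n_channels)
{
  for (ai_i32 ch = 0; ch < n_channels; ch++) {
    /* offset passed to the *_bn kernels = dense bias * bn scale + bn offset */
    fused_offset[ch] = dense_bias[ch] * bn_scale[ch] + bn_offset[ch];
  }
}
#endif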
|
||||
|
||||
/*!
|
||||
* @brief Forward function for a dense layer with signed binary input,
|
||||
* 8-bit signed output, and signed binary weights.
|
||||
* @ingroup layers_dense_dqnn
|
||||
* @param layer template layer as an opaque pointer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_dense_is1os8ws1(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Forward function for a dense layer with signed binary input,
|
||||
 * 16-bit signed output, and signed binary weights.
|
||||
* @ingroup layers_dense_dqnn
|
||||
* @param layer template layer as an opaque pointer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_dense_is1os16ws1(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Forward function for a dense layer with signed binary input,
|
||||
* 32-bit floating point output, and signed binary weights.
|
||||
* @ingroup layers_dense_dqnn
|
||||
* @param layer template layer as an opaque pointer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_dense_is1of32ws1(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Forward function for a dense layer with signed binary input,
|
||||
* 32-bit floating point output, and signed binary weights.
|
||||
* The BN is fused, i.e., the layer requires weights, scale, and offset, where
|
||||
* weights are those of the dense layer, scale is that of the BN, and the offset
|
||||
* corresponds to dense bias * bn scale + bn offset. If the parameters do not
|
||||
* agree with such convention, the behavior is undefined.
|
||||
* @ingroup layers_dense_dqnn
|
||||
* @param layer template layer as an opaque pointer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_dense_is1of32ws1_bn(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Forward function for a dense layer with signed binary input,
|
||||
* 32-bit floating point output, and 32-bit floating point weights.
|
||||
* @ingroup layers_dense_dqnn
|
||||
* @param layer template layer as an opaque pointer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_dense_is1of32wf32(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Forward function for a dense layer with signed binary input,
|
||||
* 32-bit floating point output, and 32-bit floating point weights.
|
||||
* The BN is fused, i.e., the layer requires weights, scale, and offset, where
|
||||
* weights are those of the dense layer, scale is that of the BN, and the offset
|
||||
* corresponds to dense bias * bn scale + bn offset. If the parameters do not
|
||||
* agree with such convention, the behavior is undefined.
|
||||
* @ingroup layers_dense_dqnn
|
||||
* @param layer template layer as an opaque pointer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_dense_is1of32wf32_bn(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Forward function for a dense layer with signed binary input,
|
||||
* 32-bit floating point output, and 8-bit signed weights.
|
||||
* @ingroup layers_dense_dqnn
|
||||
* @param layer template layer as an opaque pointer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_dense_is1of32ws8(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Forward function for a dense layer with signed binary input,
|
||||
* 32-bit floating point output, and 8-bit signed weights.
|
||||
* The BN is fused, i.e., the layer requires weights, scale, and offset, where
|
||||
* weights are those of the dense layer, scale is that of the BN, and the offset
|
||||
* corresponds to dense bias * bn scale + bn offset. If the parameters do not
|
||||
* agree with such convention, the behavior is undefined.
|
||||
* @ingroup layers_dense_dqnn
|
||||
* @param layer template layer as an opaque pointer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_dense_is1of32ws8_bn(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Forward function for a dense layer with signed binary input,
|
||||
* binary output, and 8-bit signed weights.
|
||||
* @ingroup layers_dense_dqnn
|
||||
* @param layer template layer as an opaque pointer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_dense_is1os1ws8(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Forward function for a dense layer with signed binary input,
|
||||
* binary output, and 8-bit signed weights.
|
||||
* The BN is fused, i.e., the layer requires weights, scale, and offset, where
|
||||
* weights are those of the dense layer, scale is that of the BN, and the offset
|
||||
* corresponds to dense bias * bn scale + bn offset. If the parameters do not
|
||||
* agree with such convention, the behavior is undefined.
|
||||
* @ingroup layers_dense_dqnn
|
||||
* @param layer template layer as an opaque pointer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_dense_is1os1ws8_bn(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Forward function for a dense layer with signed binary input,
|
||||
* 8-bit signed output, and 8-bit signed weights.
|
||||
* @ingroup layers_dense_dqnn
|
||||
* @param layer template layer as an opaque pointer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_dense_is1os8ws8(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Forward function for a dense layer with signed binary input,
|
||||
* 16-bit signed output, and 8-bit signed weights.
|
||||
* @ingroup layers_dense_dqnn
|
||||
* @param layer template layer as an opaque pointer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_dense_is1os16ws8(ai_layer* layer);
|
||||
|
||||
|
||||
/*!
|
||||
* @brief Forward function for a dense layer with signed 8-bit input,
|
||||
* float output, and binary weights.
|
||||
* @ingroup layers_dense_dqnn
|
||||
* @param layer template layer as an opaque pointer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_dense_is8of32ws1(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Forward function for a dense layer with signed 8-bit input,
|
||||
* float output, and binary weights.
|
||||
* The BN is fused, i.e., the layer requires weights, scale, and offset, where
|
||||
* weights are those of the dense layer, scale is that of the BN, and the offset
|
||||
* corresponds to dense bias * bn scale + bn offset. If the parameters do not
|
||||
* agree with such convention, the behavior is undefined.
|
||||
* @ingroup layers_dense_dqnn
|
||||
* @param layer template layer as an opaque pointer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_dense_is8of32ws1_bn(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Forward function for a dense layer with signed 8-bit input,
|
||||
* 1-bit signed output, and binary weights.
|
||||
* @ingroup layers_dense_dqnn
|
||||
* @param layer template layer as an opaque pointer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_dense_is8os1ws1(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Forward function for a dense layer with signed 8-bit input,
|
||||
* 1-bit signed output, and binary weights.
|
||||
* The BN is fused, i.e., the layer requires weights, scale, and offset, where
|
||||
* weights are those of the dense layer, scale is that of the BN, and the offset
|
||||
* corresponds to dense bias * bn scale + bn offset. If the parameters do not
|
||||
* agree with such convention, the behavior is undefined.
|
||||
* @ingroup layers_dense_dqnn
|
||||
* @param layer template layer as an opaque pointer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_dense_is8os1ws1_bn(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Forward function for a dense layer with signed 8-bit input,
|
||||
* binary weights and binary output.
|
||||
* The BN is fused, i.e., the layer requires weights, scale, and offset, where
|
||||
* weights are those of the dense layer, scale is that of the BN, and the offset
|
||||
* corresponds to dense bias * bn scale + bn offset. If the parameters do not
|
||||
* agree with such convention, the behavior is undefined.
|
||||
* @ingroup layers_dense_dqnn
|
||||
* @param layer template layer as an opaque pointer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_dense_is8os1ws1_bn_fxp(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Forward function for a dense layer with signed 8-bit input,
|
||||
* 8-bit signed output, and binary weights.
|
||||
* @ingroup layers_dense_dqnn
|
||||
* @param layer template layer as an opaque pointer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_dense_is8os8ws1(ai_layer* layer);
|
||||
|
||||
|
||||
/*!
|
||||
* @brief Forward function for a dense layer with signed 8-bit input,
|
||||
* 16-bit signed output, and binary weights.
|
||||
* @ingroup layers_dense_dqnn
|
||||
* @param layer template layer as an opaque pointer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_dense_is8os16ws1(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Forward function for a dense layer with signed 16-bit input,
|
||||
* 1-bit signed output, and binary weights.
|
||||
* @ingroup layers_dense_dqnn
|
||||
* @param layer template layer as an opaque pointer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_dense_is16os1ws1(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Forward function for a dense layer with signed 16-bit input,
|
||||
* 1-bit signed output, and binary weights.
|
||||
* The BN is fused, i.e., the layer requires weights, scale, and offset, where
|
||||
* weights are those of the dense layer, scale is that of the BN, and the offset
|
||||
* corresponds to dense bias * bn scale + bn offset. If the parameters do not
|
||||
* agree with such convention, the behavior is undefined.
|
||||
* @ingroup layers_dense_dqnn
|
||||
* @param layer template layer as an opaque pointer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_dense_is16os1ws1_bn(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Forward function for a dense layer with signed 16-bit input,
|
||||
* 8-bit signed output, and binary weights.
|
||||
* @ingroup layers_dense_dqnn
|
||||
* @param layer template layer as an opaque pointer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_dense_is16os8ws1(ai_layer* layer);
|
||||
|
||||
|
||||
/*!
|
||||
* @brief Forward function for a dense layer with signed 16-bit input,
|
||||
* 16-bit signed output, and binary weights.
|
||||
* @ingroup layers_dense_dqnn
|
||||
* @param layer template layer as an opaque pointer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_dense_is16os16ws1(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Forward function for a dense layer with signed 16-bit input,
|
||||
* f32 output, and binary weights.
|
||||
* @ingroup layers_dense_dqnn
|
||||
* @param layer template layer as an opaque pointer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_dense_is16of32ws1(ai_layer* layer);
|
||||
|
||||
|
||||
/*!
|
||||
* @brief Forward function for a dense layer with signed 16-bit input,
|
||||
* f32 output, and binary weights.
|
||||
* The BN is fused, i.e., the layer requires weights, scale, and offset, where
|
||||
* weights are those of the dense layer, scale is that of the BN, and the offset
|
||||
* corresponds to dense bias * bn scale + bn offset. If the parameters do not
|
||||
* agree with such convention, the behavior is undefined.
|
||||
* @ingroup layers_dense_dqnn
|
||||
* @param layer template layer as an opaque pointer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_dense_is16of32ws1_bn(ai_layer* layer);
|
||||
|
||||
|
||||
/*!
|
||||
* @brief Forward function for a dense layer with signed f32 input,
|
||||
* 1-bit signed output, and binary weights.
|
||||
* @ingroup layers_dense_dqnn
|
||||
* @param layer template layer as an opaque pointer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_dense_if32os1ws1(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Forward function for a dense layer with signed f32 input,
|
||||
* 1-bit signed output, and binary weights.
|
||||
* The BN is fused, i.e., the layer requires weights, scale, and offset, where
|
||||
* weights are those of the dense layer, scale is that of the BN, and the offset
|
||||
* corresponds to dense bias * bn scale + bn offset. If the parameters do not
|
||||
* agree with such convention, the behavior is undefined.
|
||||
* @ingroup layers_dense_dqnn
|
||||
* @param layer template layer as an opaque pointer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_dense_if32os1ws1_bn(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Forward function for a dense layer with signed f32 input,
|
||||
* 8-bit signed output, and binary weights.
|
||||
* @ingroup layers_dense_dqnn
|
||||
* @param layer template layer as an opaque pointer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_dense_if32os8ws1(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Forward function for a dense layer with signed f32 input,
|
||||
* 16-bit signed output, and binary weights.
|
||||
* @ingroup layers_dense_dqnn
|
||||
* @param layer template layer as an opaque pointer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_dense_if32os16ws1(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Forward function for a dense layer with signed f32 input,
|
||||
* f32 output, and binary weights.
|
||||
* @ingroup layers_dense_dqnn
|
||||
* @param layer template layer as an opaque pointer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_dense_if32of32ws1(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Forward function for a dense layer with signed f32 input,
|
||||
* f32 output, and binary weights.
|
||||
* The BN is fused, i.e., the layer requires weights, scale, and offset, where
|
||||
* weights are those of the dense layer, scale is that of the BN, and the offset
|
||||
* corresponds to dense bias * bn scale + bn offset. If the parameters do not
|
||||
* agree with such convention, the behavior is undefined.
|
||||
* @ingroup layers_dense_dqnn
|
||||
* @param layer template layer as an opaque pointer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_dense_if32of32ws1_bn(ai_layer* layer);
|
||||
|
||||
AI_API_DECLARE_END
|
||||
|
||||
#endif /*LAYERS_DENSE_DQNN_H*/
|
||||
57
lib/stai/libstai/include/layers_formats_converters.h
Normal file
@ -0,0 +1,57 @@
|
||||
/**
|
||||
******************************************************************************
|
||||
* @file layers_formats_converters.h
|
||||
* @author AST Embedded Analytics Research Platform
|
||||
* @brief header file of formats converters layers
|
||||
******************************************************************************
|
||||
* @attention
|
||||
*
|
||||
* Copyright (c) 2021 STMicroelectronics.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This software is licensed under terms that can be found in the LICENSE file
|
||||
* in the root directory of this software component.
|
||||
* If no LICENSE file comes with this software, it is provided AS-IS.
|
||||
*
|
||||
******************************************************************************
|
||||
*/
|
||||
#ifndef LAYERS_FORMATS_CONVERTERS_H
|
||||
#define LAYERS_FORMATS_CONVERTERS_H
|
||||
|
||||
#include "layers_common.h"
|
||||
|
||||
/*!
|
||||
* @defgroup layers_formats_converters Formats Converters Layers Definition
|
||||
* @brief this group implements formats converter layers (cast, etc.)
|
||||
*
|
||||
*/
|
||||
|
||||
AI_API_DECLARE_BEGIN
|
||||
|
||||
/*!
|
||||
* @struct ai_layer_cast
|
||||
* @ingroup layers_formats_converters
|
||||
* @brief C Implementation of cast layer
|
||||
*/
|
||||
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_cast_ {
|
||||
AI_LAYER_COMMON_FIELDS_DECLARE
|
||||
ai_array_format to_format; /*!< cast output format */
|
||||
} ai_layer_cast;
|
||||
|
||||
|
||||
/*****************************************************************************/
|
||||
/* Forward Functions Section */
|
||||
/*****************************************************************************/
|
||||
|
||||
/*!
|
||||
 * @brief forward function for cast layer.
 * @ingroup layers_formats_converters
|
||||
* @param layer template layer as an opaque pointer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_cast(ai_layer* layer);
|
||||
|
||||
|
||||
AI_API_DECLARE_END
|
||||
|
||||
#endif /*LAYERS_FORMATS_CONVERTERS_H*/
|
||||
809
lib/stai/libstai/include/layers_generic.h
Normal file
@ -0,0 +1,809 @@
|
||||
|
||||
/**
|
||||
******************************************************************************
|
||||
* @file layers_generic.h
|
||||
* @author AST Embedded Analytics Research Platform
|
||||
* @brief header file of AI platform generic layers datatypes
|
||||
******************************************************************************
|
||||
* @attention
|
||||
*
|
||||
* Copyright (c) 2018 STMicroelectronics.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This software is licensed under terms that can be found in the LICENSE file
|
||||
* in the root directory of this software component.
|
||||
* If no LICENSE file comes with this software, it is provided AS-IS.
|
||||
*
|
||||
******************************************************************************
|
||||
*/
|
||||
#ifndef LAYERS_GENERIC_H
|
||||
#define LAYERS_GENERIC_H
|
||||
|
||||
#include "layers_common.h"
|
||||
|
||||
typedef enum {
|
||||
KTfLiteNone = 0,
|
||||
KTfLiteActRelu,
|
||||
KTfLiteActRelu1,
|
||||
KTfLiteActRelu6,
|
||||
KTfLiteActTanh,
|
||||
KTfLiteActSignBit,
|
||||
KTfLiteActSigmoid
|
||||
} ai_tflitefused_activation;
|
||||
|
||||
/*!
|
||||
* @defgroup layers_generic Generic Layers Definitions
|
||||
 * @brief Definitions of generic layers and their forward functions
|
||||
*
|
||||
*/
|
||||
|
||||
AI_API_DECLARE_BEGIN
|
||||
|
||||
/*!
|
||||
* @struct ai_layer_time_delay
|
||||
* @ingroup layers_generic
|
||||
* @brief TimeDelay layer with sparse kernel
|
||||
*/
|
||||
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_time_delay_ {
|
||||
AI_LAYER_COMMON_FIELDS_DECLARE
|
||||
AI_CONST ai_array* mask; /*!< sparse filter mask */
|
||||
} ai_layer_time_delay;
|
||||
|
||||
/*!
|
||||
* @struct ai_layer_split
|
||||
* @ingroup layers_generic
|
||||
* @brief Split layer definition
|
||||
*
|
||||
* This layer defines the params of a splitting layer. It is intended to be used
|
||||
 * by its associated forward function @ref forward_split
|
||||
*/
|
||||
|
||||
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_split_ {
|
||||
AI_LAYER_COMMON_FIELDS_DECLARE
|
||||
const ai_i32 outer_elems;
|
||||
const ai_i32 outer_elems_stride;
|
||||
} ai_layer_split;
|
||||
|
||||
|
||||
/*!
|
||||
* @struct ai_layer_topK
|
||||
* @ingroup layers_generic
|
||||
* @brief topK layer definition
|
||||
*/
|
||||
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_topK_{
|
||||
AI_LAYER_COMMON_FIELDS_DECLARE
|
||||
ai_i16 axis;
|
||||
ai_i16 largest;
|
||||
} ai_layer_topK;
|
||||
|
||||
typedef AI_ALIGNED_TYPE(struct,4)ai_layer_svdf_{
|
||||
AI_LAYER_COMMON_FIELDS_DECLARE
|
||||
ai_size rank;
|
||||
ai_tflitefused_activation activation;
|
||||
|
||||
} ai_layer_svdf;
|
||||
|
||||
|
||||
/*!
|
||||
* @struct ai_layer_slice
|
||||
* @ingroup layers_generic
|
||||
* @brief Slice layer definition
|
||||
*
|
||||
* This layer defines the params of a slicing layer. It is intended to be used
|
||||
 * by its associated forward function @ref forward_slice
|
||||
*/
|
||||
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_slice_ {
|
||||
AI_LAYER_COMMON_FIELDS_DECLARE
|
||||
  AI_CONST ai_array* axes;   /*!< Axes that 'starts' and 'ends' apply to. It's optional */
  AI_CONST ai_array* starts; /*!< Starting indices of corresponding axis in axes */
  AI_CONST ai_array* ends;   /*!< Ending indices (exclusive) of corresponding axis in axes */
|
||||
} ai_layer_slice;
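/* Editor's note (illustrative): with axes = {1}, starts = {2} and ends = {5},
 * the layer keeps elements 2..4 along axis 1, i.e. the usual slice semantics
 * with an exclusive end index, as stated in the field comments above. */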
|
||||
|
||||
/*!
|
||||
* @struct ai_layer_gather
|
||||
* @ingroup layers_generic
|
||||
* @brief Gather layer definition
|
||||
*
|
||||
* This layer defines the params of a gathering layer. It is intended to be used
|
||||
 * by its associated forward function @ref forward_gather
|
||||
*/
|
||||
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_gather_ {
|
||||
AI_LAYER_COMMON_FIELDS_DECLARE
|
||||
  ai_i16 axis;        /*!< Which axis to gather on. It's optional */
  ai_tensor* indices; /*!< Indices of corresponding axis in axes */
|
||||
} ai_layer_gather;
|
||||
|
||||
/*!
|
||||
* @struct ai_layer_gather_nd
|
||||
* @ingroup layers_generic
|
||||
* @brief GatherND layer definition
|
||||
*
|
||||
* This layer defines the params of a gathering layer (ND). It is intended to be used
|
||||
 * by its associated forward function @ref forward_gather_nd
|
||||
*/
|
||||
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_gather_nd_ {
|
||||
AI_LAYER_COMMON_FIELDS_DECLARE
|
||||
  ai_tensor* indices; /*!< Indices of corresponding slices of inputs */
|
||||
} ai_layer_gather_nd;
|
||||
|
||||
/*!
|
||||
* @struct ai_layer_tile
|
||||
* @ingroup layers generic
|
||||
* @brief Tile layer definition
|
||||
*
|
||||
 * This layer defines the param of a tile layer. It constructs a tensor by tiling a
 * given tensor. It is intended to be used by its associated forward function
 * @ref forward_tile
|
||||
*/
|
||||
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_tile_{
|
||||
AI_LAYER_COMMON_FIELDS_DECLARE
|
||||
AI_CONST ai_array* repeats; /*!< numbers of repeated copies along each dimension */
|
||||
} ai_layer_tile;
|
||||
|
||||
/*!
|
||||
* @struct ai_layer_shape
|
||||
* @ingroup layers generic
|
||||
* @brief Shape layer definition
|
||||
*
|
||||
* This layer defines the param of a shape layer. It returns the shape of the
|
||||
* input tensor. It is intended to be used by its associated forward function
|
||||
* @ref forward_shape
|
||||
*/
|
||||
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_shape_{
|
||||
AI_LAYER_COMMON_FIELDS_DECLARE
|
||||
} ai_layer_shape;
|
||||
|
||||
|
||||
/*!
|
||||
* @struct ai_layer_upsample
|
||||
* @ingroup layers generic
|
||||
* @brief Upsample layer definition
|
||||
*
|
||||
* This layer defines the param of an upsampling layer. It overloads its params
|
||||
 * to allow zeros upsampling, helpful for transpose convolutions, for instance.
|
||||
* It is intended to be used by its associated forward function @ref forward_upsample
|
||||
*/
|
||||
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_upsample_{
|
||||
AI_LAYER_COMMON_FIELDS_DECLARE
|
||||
ai_upsample_mode mode; /*!< upsample mode */
|
||||
ai_bool center; /*!< center pixels */
|
||||
AI_CONST ai_array* scales; /*!< scale array along each dimension */
|
||||
ai_nearest_mode nearest_mode; /*!< used in nearest mode */
|
||||
} ai_layer_upsample;
|
||||
|
||||
/*!
|
||||
* @struct ai_layer_resize
|
||||
* @ingroup layers generic
|
||||
* @brief Resize layer definition
|
||||
*
|
||||
* This layer defines the param of a resize layer.
|
||||
* It is intended to be used by its associated forward function @ref forward_resize
|
||||
*/
|
||||
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_resize_{
|
||||
AI_LAYER_COMMON_FIELDS_DECLARE
|
||||
|
||||
  ai_coord_transf_mode coord_transf_mode; /*!< coordinate transformation mode */
  ai_float cubic_coeff_a;                 /*!< the coefficient 'a' used in cubic interpolation */
  ai_bool exclude_outside;                /*!< exclude outside pixels flag */
  ai_float extrapol_val;                  /*!< used in the tf_crop_and_resize case */
|
||||
ai_resize_mode mode; /*!< resize mode */
|
||||
ai_nearest_mode nearest_mode; /*!< used in nearest mode */
|
||||
AI_CONST ai_array* scales; /*!< scale array along each dimension */
|
||||
AI_CONST ai_array* roi; /*!< roi array, used in tf_crop_and_resize case */
|
||||
} ai_layer_resize;
|
||||
|
||||
/*!
|
||||
* @struct ai_layer_instanceNormalization
|
||||
* @ingroup layers generic
|
||||
* @brief instance normalization layer definition
|
||||
*
|
||||
* This layer defines the params of an instance normalization layer.
|
||||
* It is intended to be used by its associated forward function @ref forward_instanceNormalization
|
||||
*/
|
||||
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_instanceNormaization_{
|
||||
AI_LAYER_COMMON_FIELDS_DECLARE
|
||||
  ai_float eps; /*!< epsilon value, to avoid division by zero */
|
||||
} ai_layer_instanceNormalization;
|
||||
|
||||
/*!
|
||||
 * @struct ai_layer_pad
|
||||
* @ingroup layers generic
|
||||
* @brief Pad layer definition
|
||||
*
|
||||
 * This layer defines the param of a pad layer. It pads a tensor.
|
||||
* It is intended to be used by its associated forward function @ref forward_pad
|
||||
*/
|
||||
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_pad_{
|
||||
AI_LAYER_COMMON_FIELDS_DECLARE
|
||||
ai_pad_mode mode; /*!< pad mode */
|
||||
ai_shape pads; /*!< Number of padding to add or remove at the beginning and end of each axis */
|
||||
const ai_array* value; /*!< Indicates the value to be filled */
|
||||
} ai_layer_pad;
|
||||
|
||||
/*!
|
||||
 * @struct ai_layer_constantofshape
|
||||
* @ingroup layers generic
|
||||
* @brief ConstantOfShape layer definition
|
||||
*
|
||||
 * This layer defines the param of a constantofshape layer. It fills a tensor of a given shape with a constant value.
|
||||
* It is intended to be used by its associated forward function @ref forward_constantofshape
|
||||
*/
|
||||
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_constantofshape_{
|
||||
AI_LAYER_COMMON_FIELDS_DECLARE
|
||||
const ai_array* value; /*!< Indicates the value to be filled */
|
||||
} ai_layer_constantofshape;
|
||||
/*!
|
||||
* @struct ai_layer_add
|
||||
* @ingroup layers_generic
|
||||
* @brief Add layer definition
|
||||
*
|
||||
* This layer defines the params of an add layer.
|
||||
*/
|
||||
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_add_ {
|
||||
AI_LAYER_COMMON_FIELDS_DECLARE
|
||||
ai_u16 in_layers_count; /*!< number of input layers to concat */
|
||||
ai_u16 in_layer_curr; /*!< current layer to concat */
|
||||
ai_tensor** in_tensors; /*!< input tensors list (if NULL==no copy) */
|
||||
ai_tensor* out_tensor; /*!< output tensor (if NULL==no copy) */
|
||||
func_copy_tensor copy_to_out_tensor; /*!< pointer to copy tensor func
|
||||
(NULL = no copy) */
|
||||
ai_layer_base* split_layer; /*!< pointer to associated split layer */
|
||||
ai_layer_base* next_layer; /*!< pointer to next layer to process */
|
||||
} ai_layer_add;
|
||||
|
||||
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_argminmax_ {
|
||||
AI_LAYER_COMMON_FIELDS_DECLARE
|
||||
ai_i16 axis;
|
||||
ai_i16 select_last_index;
|
||||
} ai_layer_argminmax;
|
||||
|
||||
/*!
|
||||
* @struct ai_layer_transpose
|
||||
* @ingroup layers_generic
|
||||
* @brief Transpose layer datastruct declaration. This defines the params of a
|
||||
 * transpose layer. It is intended to be used by its associated forward function
|
||||
* @ref forward_transpose
|
||||
*/
|
||||
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_transpose_ {
|
||||
AI_LAYER_COMMON_FIELDS_DECLARE
|
||||
  ai_shape out_mapping; /*!< transpose output mapping order, i.e. it is a
                             permutation of the input tensor shape */
|
||||
} ai_layer_transpose;
|
||||
|
||||
/*!
|
||||
* @struct ai_layer_transpose_batch
|
||||
* @ingroup layers_generic
|
||||
* @brief Transpose batch layer datastruct declaration. This defines the params of a
|
||||
 * transpose layer. It is intended to be used by its associated forward function
|
||||
* @ref forward_transpose_batch
|
||||
*/
|
||||
typedef ai_layer_base ai_layer_transpose_batch;
|
||||
|
||||
|
||||
#define AI_TIME_DISTRIBUTED_AXIS (AI_SHAPE_HEIGHT)
|
||||
|
||||
/*!
|
||||
* @struct ai_layer_time_distributed
|
||||
* @ingroup layers_generic
|
||||
* @brief Time distributed layer datastruct declaration. This defines the params
|
||||
 * of a time distributed layer. It is intended to be used by its associated
|
||||
* forward function @ref forward_time_distributed
|
||||
*/
|
||||
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_time_distributed_ {
|
||||
AI_LAYER_COMMON_FIELDS_DECLARE
|
||||
ai_layer_base* inner_layer; /*!< inner layer to process */
|
||||
} ai_layer_time_distributed;
|
||||
|
||||
/*!
|
||||
* @struct ai_layer_concat
|
||||
* @ingroup layers_generic
|
||||
* @brief Concatenation layer
|
||||
*
|
||||
* Concat Layer.
|
||||
* It is a sequential layer. see @ref ai_layer_sequential
|
||||
*/
|
||||
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_concat_ {
|
||||
AI_LAYER_COMMON_FIELDS_DECLARE
|
||||
ai_shape_dimension axis; /*!< which axis to concatenate on */
|
||||
} ai_layer_concat;
|
||||
|
||||
/*!
|
||||
* @struct ai_layer_pack
|
||||
* @ingroup layers_generic
|
||||
* @brief pack layer
|
||||
*
|
||||
* Pack Layer.
|
||||
* It is a sequential layer. see @ref ai_layer_sequential
|
||||
*/
|
||||
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_pack_ {
|
||||
AI_LAYER_COMMON_FIELDS_DECLARE
|
||||
ai_shape_dimension axis; /*!< which axis to concatenate on */
|
||||
} ai_layer_pack;
|
||||
|
||||
/*!
|
||||
* @struct ai_layer_unpack
|
||||
* @ingroup layers_generic
|
||||
* @brief unpack layer
|
||||
*
|
||||
* Unpack Layer.
|
||||
* It is a sequential layer. see @ref ai_layer_sequential
|
||||
*/
|
||||
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_unpack_ {
|
||||
AI_LAYER_COMMON_FIELDS_DECLARE
|
||||
ai_shape_dimension axis; /*!< which axis to concatenate on */
|
||||
} ai_layer_unpack;
|
||||
|
||||
typedef void (*func_binary)(ai_handle out, const ai_handle a, const ai_handle b);
typedef void (*func_buffer_binary)(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop);
typedef void (*func_buffer_binary_integer)(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop,
                                           const ai_handle scale1, const ai_handle zp1, const ai_handle scale2, const ai_handle zp2,
                                           const ai_handle scaleout, const ai_handle zpout, const ai_i32 scalar_op);
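/* Editor's note: an illustrative callback sketch (not part of the library)
 * matching the func_binary signature above; it assumes ai_handle behaves as
 * an opaque pointer and that the eltwise layer hands in one float operand
 * pair per call. */
#if 0
static void eltwise_add_f32(ai_handle out, const ai_handle a, const ai_handle b)
{
  /* elementwise addition of the two operands selected by the caller */
  *(ai_float *)out = *(const ai_float *)a + *(const ai_float *)b;
}
#endif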
|
||||
|
||||
/*!
|
||||
* @struct ai_layer_eltwise
|
||||
* @ingroup layers_generic
|
||||
* @brief General element-wise transformation layer
|
||||
*
|
||||
* Elementwise Layer.
|
||||
* It is a sequential layer. see @ref ai_layer_sequential
|
||||
*/
|
||||
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_eltwise_ {
|
||||
AI_LAYER_COMMON_FIELDS_DECLARE
|
||||
func_binary operation; /*!< operation to apply elementwise */
|
||||
func_buffer_binary buffer_operation; /*!< operation to apply elementwise */
|
||||
} ai_layer_eltwise;
|
||||
|
||||
/*!
|
||||
* @struct ai_layer_eltwise_integer
|
||||
* @ingroup layers_generic
|
||||
* @brief General element-wise transformation layer for integer data
|
||||
*
|
||||
* Elementwise Layer.
|
||||
* It is a sequential layer. see @ref ai_layer_sequential
|
||||
*/
|
||||
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_eltwise_integer_ {
|
||||
AI_LAYER_COMMON_FIELDS_DECLARE
|
||||
func_binary operation; /*!< operation to apply elementwise */
|
||||
func_buffer_binary_integer buffer_operation; /*!< operation to apply elementwise */
|
||||
} ai_layer_eltwise_integer;
|
||||
|
||||
/*!
|
||||
* @struct ai_layer_scatter_nd
|
||||
* @ingroup layers_generic
|
||||
* @brief ScatterND layer definition
|
||||
*
|
||||
* This layer defines the params of a scattering layer (ND). It is intended to be used
|
||||
 * by its associated forward function @ref forward_scatter_nd
|
||||
*/
|
||||
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_scatter_nd_ {
|
||||
AI_LAYER_COMMON_FIELDS_DECLARE
|
||||
  ai_tensor* indices; /*!< Indices of corresponding slices of inputs */
  ai_tensor* updates; /*!< Updates of corresponding slices of inputs */
|
||||
func_binary operation; /*!< operation to apply elementwise */
|
||||
ai_scatter_nd_reduction reduction; /*!< Reduction operation in ScatterND layer*/
|
||||
} ai_layer_scatter_nd;
|
||||
|
||||
/*!
|
||||
* @struct ai_layer_reduce
|
||||
* @ingroup layers_generic
|
||||
* @brief General dimension reduction layer
|
||||
*
|
||||
* reduction Layer.
|
||||
* It is a sequential layer. see @ref ai_layer_sequential
|
||||
*/
|
||||
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_reduce_ {
|
||||
AI_LAYER_COMMON_FIELDS_DECLARE
|
||||
const ai_array* neutral_value; /*!< Initialization value for operation */
|
||||
func_binary operation; /*!< operation to apply elementwise */
|
||||
} ai_layer_reduce;
|
||||
|
||||
/*!
|
||||
* @struct ai_layer_reduce_log_sum_exp
|
||||
* @ingroup layers_generic
|
||||
* @brief General dimension reduction layer
|
||||
*
|
||||
* reduction Layer.
|
||||
* It is a sequential layer. see @ref ai_layer_sequential
|
||||
*/
|
||||
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_reduce_log_sum_exp_ {
|
||||
AI_LAYER_COMMON_FIELDS_DECLARE
|
||||
ai_shape_dimension axis;
|
||||
} ai_layer_reduce_log_sum_exp;
|
||||
|
||||
/*!
|
||||
 * @struct ai_layer_reduce_l1
|
||||
* @ingroup layers_generic
|
||||
* @brief General dimension reduction layer
|
||||
*
|
||||
* reduction Layer.
|
||||
* It is a sequential layer. see @ref ai_layer_sequential
|
||||
*/
|
||||
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_reduce_l1_ {
|
||||
AI_LAYER_COMMON_FIELDS_DECLARE
|
||||
AI_CONST ai_array* axes;
|
||||
} ai_layer_reduce_l1;
|
||||
|
||||
|
||||
/*!
|
||||
 * @struct ai_layer_reduce_l2
|
||||
* @ingroup layers_generic
|
||||
* @brief General dimension reduction layer
|
||||
*
|
||||
* reduction Layer.
|
||||
* It is a sequential layer. see @ref ai_layer_sequential
|
||||
*/
|
||||
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_reduce_l2_ {
|
||||
AI_LAYER_COMMON_FIELDS_DECLARE
|
||||
AI_CONST ai_array* axes;
|
||||
} ai_layer_reduce_l2;
|
||||
|
||||
|
||||
/*!
|
||||
* @struct ai_layer_where
|
||||
* @ingroup layers generic
|
||||
* @brief Where layer definition
|
||||
*
|
||||
* This layer operates on 3 input tensors: condition, X and Y.
|
||||
 * It returns elements, either from X or Y, depending on condition
|
||||
* (with Numpy-style broadcasting support).
|
||||
* @ref forward_where
|
||||
*/
|
||||
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_where_ {
|
||||
AI_LAYER_COMMON_FIELDS_DECLARE
|
||||
const ai_array *shapes_len;
|
||||
ai_bool channel_first;
|
||||
} ai_layer_where;
|
||||
|
||||
|
||||
/*!
|
||||
* @struct ai_layer_reverse
|
||||
* @ingroup layers_reverse
|
||||
* @brief Reverse layer
|
||||
*
|
||||
* The type of reverse function is handled by the specific forward function
|
||||
* @ref forward_svm_regressor
|
||||
*/
|
||||
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_reverse_ {
|
||||
AI_LAYER_COMMON_FIELDS_DECLARE
|
||||
ai_i32 axis; /*!< selected axis to perform the operation */
|
||||
} ai_layer_reverse;
|
||||
|
||||
|
||||
/******************************************************************************/
|
||||
/* Forward Functions Section */
|
||||
/******************************************************************************/
|
||||
|
||||
/*!
|
||||
* @brief Dummy forward routine with no processing.
|
||||
* @ingroup layers_generic
|
||||
* @param generic layer handle
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_nop(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a TimeDelay layer.
|
||||
* @ingroup layers_generic
|
||||
* @param layer the time delay layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_time_delay(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Split network computation in N parallel branches.
|
||||
* @ingroup layers_generic
|
||||
* @param layer the split layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_split(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Add network computation from N parallel branches.
|
||||
* @ingroup layers_generic
|
||||
* @param layer the add layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_add(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
 * @brief Compute the indices of the max elements of the input tensor along the provided axis.
|
||||
* @ingroup layers_generic
|
||||
* @param layer argminmax layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_argmax(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
 * @brief Compute the indices of the min elements of the input tensor along the provided axis.
|
||||
* @ingroup layers_generic
|
||||
* @param layer argminmax layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_argmin(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Svdf layer.
|
||||
* @ingroup layers_generic
|
||||
* @param layer svdf layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_svdf(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Transpose a tensor along a pivot and save transposed values into an output
|
||||
* tensor
|
||||
* @ingroup layers_generic
|
||||
* @param layer the transpose layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_transpose(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Transpose batch and save transposed values of a given batch into an output
* tensor
|
||||
* @ingroup layers_generic
|
||||
* @param layer the transpose batch layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_transpose_batch(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief TimeDistributed forward layer function. This forward function
* implements the TimeDistributed layer.
|
||||
* @ingroup layers_generic
|
||||
* @param layer the time distributed layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_time_distributed(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Packing a list of tensors into a single tensor
* @ingroup layers_generic
|
||||
* @param layer the packing layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_pack(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Unpacking a single tensor into a list of tensors
* @ingroup layers_generic
|
||||
* @param layer the unpacking layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_unpack(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Concatenates a list of tensors into a single tensor.
|
||||
* @ingroup layers_generic
|
||||
* @param layer the concatenation layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_concat(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Gather an input tensor
|
||||
* @ingroup layers_generic
|
||||
* @param layer the gathered layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_gather(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief GatherND an input tensor
|
||||
* @ingroup layers_generic
|
||||
* @param layer the gathered layer (ND)
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_gather_nd(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief GatherND channel first an input tensor
|
||||
* @ingroup layers_generic
|
||||
* @param layer the gathered layer (ND)
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_gather_nd_channel_first(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief ScatterND an input tensor
|
||||
* @ingroup layers_generic
|
||||
* @param layer the scattered layer (ND)
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_scatter_nd(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Slice an input tensor
|
||||
* @ingroup layers_generic
|
||||
* @param layer the sliced layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_slice(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Tile an input tensor
|
||||
* @ingroup layers_generic
|
||||
* @param layer the tiled layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_tile(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Returns the shape of an input tensor
|
||||
* @ingroup layers_generic
|
||||
* @param layer the Shape layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_shape(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief TopK an input tensor
* @ingroup layers_generic
* @param layer the TopK layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_topK(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Pad an input tensor
|
||||
* @ingroup layers_generic
|
||||
* @param layer the pad layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_pad(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief ConstantOfShape an input tensor
|
||||
* @ingroup layers_generic
|
||||
* @param layer the constantofshape layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_constantofshape(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Upsample an input tensor
|
||||
* @ingroup layers_generic
|
||||
* @param layer the upsampled layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_upsample(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Resize an input tensor
|
||||
* @ingroup layers_generic
|
||||
* @param layer the resized layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_resize(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Instance Normalization on an input tensor
|
||||
* @ingroup layers_generic
|
||||
* @param layer the instance normalization layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_instanceNormalization(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Apply an elementwise transformation to the input tensors
|
||||
* @ingroup layers_generic
|
||||
* @param layer the elementwise layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_eltwise(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Apply an elementwise transformation to the integer input tensors
|
||||
* @ingroup layers_generic
|
||||
* @param layer the elementwise layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_eltwise_integer(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Apply an elementwise transformation to the signed integer input tensors
|
||||
* @ingroup layers_generic
|
||||
* @param layer the elementwise layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_eltwise_integer_INT8(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Apply an elementwise transformation to the unsigned integer input tensors
|
||||
* @ingroup layers_generic
|
||||
* @param layer the elementwise layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_eltwise_integer_UINT8(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Apply a reduce transformation to the input tensors
|
||||
* @ingroup layers_generic
|
||||
* @param layer the reduce layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_reduce(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Apply a reduce transformation to the input tensors
|
||||
* @ingroup layers_generic
|
||||
* @param layer the reduce layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_reduce_log_sum_exp(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Apply a reduce transformation to the input tensors
|
||||
* @ingroup layers_generic
|
||||
* @param layer the reduce layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_reduce_l1(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Apply a reduce transformation to the input tensors
|
||||
* @ingroup layers_generic
|
||||
* @param layer the reduce layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_reduce_l2(ai_layer* layer);
|
||||
|
||||
|
||||
/*!
|
||||
* @brief Behaves like numpy.where with Numpy-style broadcasting support
|
||||
* @ingroup layers_generic
|
||||
* @param layer the where layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_where(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Apply an elementwise addition to the input tensors
|
||||
* @ingroup layers_generic
|
||||
* @param layer the elementwise layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_add_integer(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Apply an elementwise addition to the input tensors
|
||||
* with int8 I/O
|
||||
* @ingroup layers_generic
|
||||
* @param layer the elementwise layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_add_integer_INT8(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Apply an elementwise addition to the input tensors
|
||||
* with uint8 I/O
|
||||
* @ingroup layers_generic
|
||||
* @param layer the elementwise layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_add_integer_UINT8(ai_layer* layer);
|
||||
|
||||
|
||||
/*!
|
||||
* @brief Reverse layer.
|
||||
* @ingroup layers_generic
|
||||
* @param layer reverse layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_reverse(ai_layer *pLayer);
|
||||
|
||||
|
||||
/*!
|
||||
* @brief Upsample an input tensor with unsigned 8-bit integer input.
* It can also be used for other formats, since the function only
* performs a memory copy.
|
||||
* @ingroup layers_generic
|
||||
* @param layer the upsampled layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_upsample_generic(ai_layer* layer);
|
||||
|
||||
|
||||
AI_API_DECLARE_END
|
||||
|
||||
#endif /*LAYERS_GENERIC_H*/
|
||||
51
lib/stai/libstai/include/layers_generic_dqnn.h
Normal file
@ -0,0 +1,51 @@
|
||||
/**
|
||||
******************************************************************************
|
||||
* @file layers_generic_dqnn.h
|
||||
* @author AIS
|
||||
* @brief header file of AI platform DQNN generic datatypes
|
||||
******************************************************************************
|
||||
* @attention
|
||||
*
|
||||
* Copyright (c) 2021 STMicroelectronics.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This software is licensed under terms that can be found in the LICENSE file
|
||||
* in the root directory of this software component.
|
||||
* If no LICENSE file comes with this software, it is provided AS-IS.
|
||||
*
|
||||
******************************************************************************
|
||||
*/
|
||||
#ifndef LAYERS_GENERIC_DQNN_H
|
||||
#define LAYERS_GENERIC_DQNN_H
|
||||
|
||||
#include "layers_common.h"
|
||||
#include "layers_generic.h"
|
||||
|
||||
/*!
|
||||
* @defgroup layers_generic_dqnn Layers Definitions
|
||||
* @brief definition
|
||||
*
|
||||
*/
|
||||
|
||||
AI_API_DECLARE_BEGIN
|
||||
|
||||
|
||||
|
||||
/******************************************************************************/
|
||||
/* Forward Functions Section */
|
||||
/******************************************************************************/
|
||||
|
||||
/*!
|
||||
* @brief Handles concat with binary input, binary output and
|
||||
* binary weights
|
||||
* @ingroup layers_generic_dqnn
|
||||
* @param layer concat layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_concat_is1os1(ai_layer *pLayer);
|
||||
|
||||
|
||||
|
||||
AI_API_DECLARE_END
|
||||
|
||||
#endif /*LAYERS_GENERIC_DQNN_H*/
|
||||
168
lib/stai/libstai/include/layers_list.h
Normal file
@ -0,0 +1,168 @@
|
||||
/**
|
||||
******************************************************************************
|
||||
* @file layers_list.h
|
||||
* @author AST Embedded Analytics Research Platform
|
||||
* @brief header file of AI platform layers datatypes
|
||||
******************************************************************************
|
||||
* @attention
|
||||
*
|
||||
* Copyright (c) 2018-2024 STMicroelectronics.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This software is licensed under terms that can be found in the LICENSE file
|
||||
* in the root directory of this software component.
|
||||
* If no LICENSE file comes with this software, it is provided AS-IS.
|
||||
*
|
||||
******************************************************************************
|
||||
*/
|
||||
|
||||
|
||||
/* No sentry. This is deliberate!! */
|
||||
/* Template: LAYER_ENTRY(type_, id_, struct_, forward_func_, init_func_, destroy_func_)
|
||||
* Where:
|
||||
* - type_ is the (enum) type name of the layer. To have the complete enum
|
||||
* value you should use the macro @ref AI_LAYER_TYPE_ENTRY(type_) that adds
|
||||
* the specific prefix and postfix tokens to the type_
|
||||
* - id_ is the numeric id of the layer
|
||||
* - struct_ is the name of the datastruct of the layer without the ai_layer_
|
||||
* prefix
|
||||
* - forward_func_ is the forward function name of the routine implementing
|
||||
* actual layer processing
|
||||
* - init_func_ is the init function name of the routine implementing
|
||||
* actual layer initialization
|
||||
* - destroy_func_ is the destroy function name of the routine implementing
|
||||
* actual layer de-initialization
|
||||
*/
|
||||
|
||||
/* Layer IDs for stateless layers (bit 8 set) */
|
||||
#define LAYER_ID(id_) \
|
||||
(0x100 + (id_))
|
||||
/* Layer IDs for stateful layers (bits 7 and 8 set) */
|
||||
#define LAYER_STATEFUL_ID(id_) \
|
||||
(0x180 + (id_))
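/* Illustrative sketch only (not part of this header): a typical way a client
* consumes this X-macro list is to define LAYER_ENTRY before including the
* file, here to build a table of layer ids and names. The table layout and
* the stringified name are assumptions for illustration.
*
*   #define LAYER_ENTRY(type_, id_, struct_, fwd_, init_, destroy_) \
*     { id_, #type_ },
*   static const struct { int id; const char *name; } layer_table[] = {
*   #include "layers_list.h"
*   };
*
* The #undef of LAYER_ENTRY at the bottom of this file allows it to be
* re-included with a different expansion.
*/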
|
||||
|
||||
/*!< Base layer */
|
||||
LAYER_ENTRY(BASE, LAYER_ID(0), base, NULL, NULL, NULL)
|
||||
/*!< Elementwise addition layer */
|
||||
LAYER_ENTRY(ADD, LAYER_ID(1), add, forward_add, NULL, NULL)
|
||||
/*!< Batch normalization layer */
|
||||
LAYER_ENTRY(BN, LAYER_ID(2), bn, forward_bn, NULL, NULL)
|
||||
/*!< 2D Convolutional layer */
|
||||
LAYER_ENTRY(CONV2D, LAYER_ID(3), conv2d, forward_conv2d, NULL, NULL)
|
||||
/*!< Dense layer */
|
||||
LAYER_ENTRY(DENSE, LAYER_ID(4), dense, forward_dense, NULL, NULL)
|
||||
/*!< Local Response Normalization layer */
|
||||
LAYER_ENTRY(LRN, LAYER_ID(6), lrn, forward_lrn, NULL, NULL)
|
||||
/*!< Nonlinearity layer */
|
||||
LAYER_ENTRY(NL, LAYER_ID(7), nl, NULL, NULL, NULL)
|
||||
/*!< Normalization layer */
|
||||
LAYER_ENTRY(NORM, LAYER_ID(8), norm, forward_norm, NULL, NULL)
|
||||
/*!< Merged Conv2d / Pool layer */
|
||||
LAYER_ENTRY(OPTIMIZED_CONV2D, LAYER_ID(9), conv2d_nl_pool, forward_conv2d_nl_pool, NULL, NULL)
|
||||
/*!< Transpose Tensor layer */
|
||||
LAYER_ENTRY(TRANSPOSE, LAYER_ID(10), transpose, forward_transpose, NULL, NULL)
|
||||
/*!< Pooling layer */
|
||||
LAYER_ENTRY(POOL, LAYER_ID(11), pool, forward_pool, NULL, NULL)
|
||||
/*!< Softmax layer */
|
||||
LAYER_ENTRY(SM, LAYER_ID(12), sm, forward_sm, NULL, NULL)
|
||||
/*!< Split layer */
|
||||
LAYER_ENTRY(SPLIT, LAYER_ID(13), split, forward_split, NULL, NULL)
|
||||
/*!< TimeDelay layer */
|
||||
LAYER_ENTRY(TIME_DELAY, LAYER_ID(14), time_delay, forward_time_delay, NULL, NULL)
|
||||
/*!< TimeDistributed layer */
|
||||
LAYER_ENTRY(TIME_DISTRIBUTED, LAYER_ID(15), time_distributed, forward_time_distributed, NULL, NULL)
|
||||
/*!< Concat Tensor layer */
|
||||
LAYER_ENTRY(CONCAT, LAYER_ID(16), concat, forward_concat, NULL, NULL)
|
||||
/*!< GEMM layer */
|
||||
LAYER_ENTRY(GEMM, LAYER_ID(17), gemm, forward_gemm, NULL, NULL)
|
||||
/*!< Upsample layer */
|
||||
LAYER_ENTRY(UPSAMPLE, LAYER_ID(18), upsample, forward_upsample, NULL, NULL)
|
||||
/*!< Container layer for eltwise operations */
|
||||
LAYER_ENTRY(ELTWISE, LAYER_ID(19), eltwise, forward_eltwise, NULL, NULL)
|
||||
/*!< Container layer for eltwise integer operations */
|
||||
LAYER_ENTRY(ELTWISE_INTEGER, LAYER_ID(20), eltwise_integer, NULL, NULL, NULL)
|
||||
/*!< InstanceNormalization layer */
|
||||
LAYER_ENTRY(INSTANCENORMALIZATION, LAYER_ID(21), instanceNormalization, forward_instanceNormalization, NULL, NULL)
|
||||
/*!< Pad layer */
|
||||
LAYER_ENTRY(PAD, LAYER_ID(22), pad, forward_pad, NULL, NULL)
|
||||
/*!< Slice layer */
|
||||
LAYER_ENTRY(SLICE, LAYER_ID(23), slice, forward_slice, NULL, NULL)
|
||||
/*!< Tile layer */
|
||||
LAYER_ENTRY(TILE, LAYER_ID(24), tile, forward_tile, NULL, NULL)
|
||||
/*!< Container layer for reduce operations */
|
||||
LAYER_ENTRY(REDUCE, LAYER_ID(25), reduce, forward_reduce, NULL, NULL)
|
||||
/*!< Recurrent Neural Network layer */
|
||||
LAYER_ENTRY(RNN, LAYER_ID(26), rnn, forward_rnn, NULL, NULL)
|
||||
/*!< Resize layer */
|
||||
LAYER_ENTRY(RESIZE, LAYER_ID(27), resize, forward_resize, NULL, NULL)
|
||||
/*!< Gather layer */
|
||||
LAYER_ENTRY(GATHER, LAYER_ID(28), gather, forward_gather, NULL, NULL)
|
||||
/*!< Pack layer */
|
||||
LAYER_ENTRY(PACK, LAYER_ID(29), pack, forward_pack, NULL, NULL)
|
||||
/*!< Unpack layer */
|
||||
LAYER_ENTRY(UNPACK, LAYER_ID(30), unpack, forward_unpack, NULL, NULL)
|
||||
/*!< ArgMin & ArgMax layers */
|
||||
LAYER_ENTRY(ARGMINMAX, LAYER_ID(31), argminmax, NULL, NULL, NULL)
|
||||
/*!< Cast Neural Network Layer */
|
||||
LAYER_ENTRY(CAST, LAYER_ID(33), cast, forward_cast, NULL, NULL)
|
||||
/*!< iForest layer */
|
||||
LAYER_ENTRY(IFOREST, LAYER_ID(34), iforest, forward_iforest, NULL, NULL)
|
||||
/*!< SVM Regressor layer */
|
||||
LAYER_ENTRY(SVMREG, LAYER_ID(35), svmreg, forward_svm_regressor, NULL, NULL)
|
||||
/*!< ArrayFeatureExtractor layer */
|
||||
LAYER_ENTRY(ARRAYFEATUREEXTRACTOR, LAYER_ID(36), arrayfeatureextractor, forward_arrayfeatureextractor, NULL, NULL)
|
||||
/*!< SVM Classifier (SVC) layer */
|
||||
LAYER_ENTRY(SVC, LAYER_ID(37), svc, forward_svc, NULL, NULL)
|
||||
/*!< ZipMap layer */
|
||||
LAYER_ENTRY(ZIPMAP, LAYER_ID(38), zipmap, forward_zipmap, NULL, NULL)
|
||||
/*!< Where layer */
|
||||
LAYER_ENTRY(WHERE, LAYER_ID(39), where, forward_where, NULL, NULL)
|
||||
/*!< LinearClassifier layer */
|
||||
LAYER_ENTRY(LINEARCLASSIFIER, LAYER_ID(40), linearclassifier, forward_linearclassifier, NULL, NULL)
|
||||
/*!< TreeEnsembleClassifier layer */
|
||||
LAYER_ENTRY(TREE_ENSEMBLE_CLASSIFIER, LAYER_ID(41), tree_ensemble_classifier, forward_tree_ensemble_classifier, NULL, NULL)
|
||||
/*!< TopK layer */
|
||||
LAYER_ENTRY(TOPK, LAYER_ID(42), topK, forward_topK, NULL, NULL)
|
||||
/*!< ReduceLogSumExp layer */
|
||||
LAYER_ENTRY(REDUCE_LOG_SUM_EXP, LAYER_ID(43), reduce_log_sum_exp, forward_reduce_log_sum_exp, NULL, NULL)
|
||||
/*!< ReduceL1 layer */
|
||||
LAYER_ENTRY(REDUCE_L1, LAYER_ID(44), reduce_l1, forward_reduce_l1, NULL, NULL)
|
||||
/*!< Runtime Lite Graph Wrapper layer */
|
||||
LAYER_ENTRY(LITE_GRAPH, LAYER_ID(45), lite_graph, NULL, NULL, NULL)
|
||||
/*!< TreeEnsembleRegressor layer */
|
||||
LAYER_ENTRY(TREE_ENSEMBLE_REGRESSOR, LAYER_ID(46), tree_ensemble_regressor, forward_tree_ensemble_regressor, NULL, NULL)
|
||||
/*!< GatherND layer */
|
||||
LAYER_ENTRY(GATHER_ND, LAYER_ID(67), gather_nd, forward_gather_nd, NULL, NULL)
|
||||
/*!< MATMUL layer */
|
||||
LAYER_ENTRY(MATMUL, LAYER_ID(68), matmul, forward_dmatmul, NULL, NULL)
|
||||
/*!< Deeply Quantized Dense Layers */
|
||||
LAYER_ENTRY(CONV2D_DQNN, LAYER_ID(48), conv2d_dqnn, forward_pw_is1os1ws1_bn, NULL, NULL)
|
||||
LAYER_ENTRY(POOL_DQNN, LAYER_ID(49), pool_dqnn, forward_maxpool_is1os1, NULL, NULL)
|
||||
|
||||
LAYER_ENTRY(DENSE_DQNN, LAYER_ID(50), dense_dqnn, forward_dense_is1os1ws1, NULL, NULL)
|
||||
/*!< Reverse layer */
|
||||
LAYER_ENTRY(REVERSE, LAYER_ID(51), reverse, forward_reverse, NULL, NULL)
|
||||
/*!< ScatterND layer */
|
||||
LAYER_ENTRY(SCATTER_ND, LAYER_ID(69), scatter_nd, forward_scatter_nd, NULL, NULL)
|
||||
|
||||
/*!< TFLite wrapper */
|
||||
LAYER_ENTRY(TFLITE_WRAPPER, LAYER_ID(52), tflite_wrapper, NULL, NULL, NULL)
|
||||
|
||||
/*****************************************************************************/
|
||||
/*!< Base Stateful Layer type */
|
||||
LAYER_ENTRY(STATEFUL, LAYER_STATEFUL_ID(0), stateful, NULL, NULL, NULL)
|
||||
/*!< Long Short Time Memory layer */
|
||||
LAYER_ENTRY(LSTM, LAYER_STATEFUL_ID(1), lstm, forward_lstm, init_lstm, destroy_lstm)
|
||||
/*!< Custom layer */
|
||||
LAYER_ENTRY(CUSTOM, LAYER_STATEFUL_ID(2), custom, NULL, NULL, NULL)
|
||||
/*!< Gated Recurrent Unit layer */
|
||||
LAYER_ENTRY(GRU, LAYER_STATEFUL_ID(3), gru, forward_gru, init_gru, destroy_gru)
|
||||
|
||||
/*!< Stateless Template layer declaration */
|
||||
/* LAYER_ENTRY(TEMPLATE, LAYER_ID(XX), template, forward_template, NULL, NULL) */
|
||||
/*!< Stateful Template layer declaration */
|
||||
/* LAYER_ENTRY(TEMPLATE, LAYER_STATEFUL_ID(XX), template, forward_template, init_template, destroy_template) */
|
||||
|
||||
#undef LAYER_ENTRY
|
||||
#undef LAYER_ID
|
||||
#undef LAYER_STATEFUL_ID
|
||||
48
lib/stai/libstai/include/layers_lite_graph.h
Normal file
@ -0,0 +1,48 @@
|
||||
/**
|
||||
******************************************************************************
|
||||
* @file layers_lite_graph.h
|
||||
* @author AST Embedded Analytics Research Platform
|
||||
* @brief header file of AI platform lite graph layers wrapper interface
|
||||
******************************************************************************
|
||||
* @attention
|
||||
*
|
||||
* Copyright (c) 2021 STMicroelectronics.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This software is licensed under terms that can be found in the LICENSE file
|
||||
* in the root directory of this software component.
|
||||
* If no LICENSE file comes with this software, it is provided AS-IS.
|
||||
*
|
||||
******************************************************************************
|
||||
*/
|
||||
|
||||
#ifndef LAYERS_LITE_GRAPH_H
|
||||
#define LAYERS_LITE_GRAPH_H
|
||||
|
||||
#include "core_common.h"
|
||||
|
||||
/*!
|
||||
* @defgroup layers_lite_graph Lite Graph Wrapper Definitions
|
||||
* @brief definition
|
||||
*
|
||||
*/
|
||||
|
||||
AI_API_DECLARE_BEGIN
|
||||
|
||||
/*!
|
||||
* @struct ai_layer_lite_graph
|
||||
* @ingroup layers_lite_graph
|
||||
* @brief Generic Lite Graph Layer Wrapper
|
||||
*
|
||||
* The type of lite graph is handled by the specific forward lite graph function.
|
||||
*/
|
||||
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_lite_graph_ {
|
||||
AI_NODE_COMMON_FIELDS_DECLARE
|
||||
ai_handle* activations_map; /*!< array of pointers to shared activations memory pools */
|
||||
ai_handle* weights_map; /*!< array of pointers to shared weights memory pools */
|
||||
} ai_layer_lite_graph;
|
||||
|
||||
|
||||
AI_API_DECLARE_END
|
||||
|
||||
#endif /*LAYERS_LITE_GRAPH_H*/
|
||||
89
lib/stai/libstai/include/layers_ml.h
Normal file
@ -0,0 +1,89 @@
|
||||
/**
|
||||
******************************************************************************
|
||||
* @file layers_ml.h
|
||||
* @author AST Embedded Analytics Research Platform
|
||||
* @brief header file of AI platform ml layers datatypes
|
||||
******************************************************************************
|
||||
* @attention
|
||||
*
|
||||
* Copyright (c) 2021 STMicroelectronics.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This software is licensed under terms that can be found in the LICENSE file
|
||||
* in the root directory of this software component.
|
||||
* If no LICENSE file comes with this software, it is provided AS-IS.
|
||||
*
|
||||
******************************************************************************
|
||||
*/
|
||||
#ifndef LAYERS_ML_H
|
||||
#define LAYERS_ML_H
|
||||
|
||||
#include "layers_common.h"
|
||||
|
||||
/*!
|
||||
* @defgroup layers_ml ML Layers Definitions
|
||||
* @brief definition
|
||||
*
|
||||
*/
|
||||
|
||||
AI_API_DECLARE_BEGIN
|
||||
|
||||
/*!
|
||||
* @struct ai_layer_ArrayFeatureExtractor
|
||||
* @ingroup layers_ml
|
||||
* @brief ai_layer_ArrayFeatureExtractor layer definition
|
||||
*
|
||||
* This layer selects elements of the input tensor based on the indices passed. It is intended to be used
* by its associated forward function @ref forward_arrayfeatureextractor
|
||||
*/
|
||||
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_arrayfeatureextractor_ {
|
||||
AI_LAYER_COMMON_FIELDS_DECLARE
|
||||
ai_tensor* indices; /*!< Indices of the corresponding axis in axes */
|
||||
} ai_layer_arrayfeatureextractor;
|
||||
|
||||
|
||||
/*!
|
||||
* @struct ai_layer_ZipMap
|
||||
* @ingroup layers_ml
|
||||
* @brief ai_layer_ZipMap layer definition
|
||||
*
|
||||
* This layer creates a map from the input and the attributes.
|
||||
* The values are provided by the input tensor, while the keys are specified by the attributes.
|
||||
* The user must provide keys in either classlabels_strings or classlabels_int64s (but not both).
|
||||
* The columns of the tensor correspond one-by-one to the keys specified by the attributes.
|
||||
* There must be as many columns as keys.
|
||||
* It is intended to be used by its associated forward function @ref forward_zipmap.
|
||||
*/
|
||||
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_zipmap_ {
|
||||
AI_LAYER_COMMON_FIELDS_DECLARE
|
||||
ai_bool has_classlabels_int;
|
||||
} ai_layer_zipmap;
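/* Illustrative sketch only of the ZipMap semantics described above: each row
* of the input becomes a (key, value) map, with keys taken from the class
* labels and values from the corresponding columns. The pair type and helper
* below are hypothetical, not libstai types. */
#include <stddef.h>

typedef struct { long key; float value; } zipmap_pair_t;

static void zipmap_row(const float *row, const long *classlabels_int64s,
                       size_t n_classes, zipmap_pair_t *out)
{
  for (size_t c = 0; c < n_classes; c++) {
    out[c].key   = classlabels_int64s[c];  /* key from the attributes */
    out[c].value = row[c];                 /* value from the input column */
  }
}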
|
||||
|
||||
|
||||
/******************************************************************************/
|
||||
/* Forward Functions Section */
|
||||
/******************************************************************************/
|
||||
|
||||
|
||||
/*!
|
||||
* @brief Selects elements of the input tensor based on the indices passed.
* @ingroup layers_ml
* @param layer array feature extractor layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_arrayfeatureextractor(ai_layer* layer);
|
||||
|
||||
|
||||
/*!
|
||||
* @brief Creates a map from the inputs and the attributes
|
||||
* @ingroup layers_ml
|
||||
* @param layer zipmap
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_zipmap(ai_layer* layer);
|
||||
|
||||
|
||||
|
||||
AI_API_DECLARE_END
|
||||
|
||||
#endif /*LAYERS_ML_H*/
|
||||
75
lib/stai/libstai/include/layers_ml_iforest.h
Normal file
@ -0,0 +1,75 @@
|
||||
/**
|
||||
******************************************************************************
|
||||
* @file layers_iforest.h
|
||||
* @author AIS
|
||||
* @brief header file of AI platform iForest layers datatypes
|
||||
******************************************************************************
|
||||
* @attention
|
||||
*
|
||||
* Copyright (c) 2021 STMicroelectronics.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This software is licensed under terms that can be found in the LICENSE file
|
||||
* in the root directory of this software component.
|
||||
* If no LICENSE file comes with this software, it is provided AS-IS.
|
||||
*
|
||||
******************************************************************************
|
||||
*/
|
||||
|
||||
#ifndef LAYERS_IFOREST_H
|
||||
#define LAYERS_IFOREST_H
|
||||
|
||||
#include "layers_common.h"
|
||||
|
||||
/*!
|
||||
* @defgroup layers_ml Layers Definitions
|
||||
* @brief definition
|
||||
*
|
||||
*/
|
||||
|
||||
AI_API_DECLARE_BEGIN
|
||||
|
||||
|
||||
/* Allowed tests branch in the iTrees */
|
||||
typedef enum
|
||||
{
|
||||
AI_IFOREST_BRANCH_LT_IDX = 0,
|
||||
AI_IFOREST_BRANCH_LEQ_IDX,
|
||||
AI_IFOREST_BRANCH_EQ_IDX,
|
||||
AI_IFOREST_BRANCH_END,
|
||||
} ai_iforest_branch_e;
|
||||
|
||||
|
||||
/*!
|
||||
* @struct ai_layer_iforest
|
||||
* @ingroup layers_iforest
|
||||
* @brief iForest layer
|
||||
*
|
||||
* The type of iforest function is handled by the specific forward function
|
||||
* @ref forward_iforest
|
||||
*/
|
||||
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_iforest_ {
|
||||
AI_LAYER_COMMON_FIELDS_DECLARE
|
||||
ai_float global_average_path_length; /*!< global average path length used to normalize the average path length */
|
||||
ai_float score_threshold; /*!< score threshold used to center the score around 0 */
|
||||
} ai_layer_iforest;
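/* Illustrative sketch only, not the libstai implementation: the standard
* Isolation Forest score uses the average path length E[h(x)] over the trees,
* normalized by the global average path length c(n); centering with the score
* threshold is an assumption based on the field comments above. */
#include <math.h>

static float iforest_score(float avg_path_length,
                           float global_average_path_length,
                           float score_threshold)
{
  /* s(x) = 2^(-E[h(x)] / c(n)), then shifted so anomalies sit around 0 */
  const float s = powf(2.0f, -avg_path_length / global_average_path_length);
  return s - score_threshold;
}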
|
||||
|
||||
|
||||
|
||||
/******************************************************************************/
|
||||
/* Forward Functions Section */
|
||||
/******************************************************************************/
|
||||
|
||||
/*!
|
||||
* @brief Decodes the iforest ML algorithm.
|
||||
* @ingroup layers_iforest
|
||||
* @param layer iforest layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_iforest(ai_layer *pLayer);
|
||||
|
||||
|
||||
|
||||
AI_API_DECLARE_END
|
||||
|
||||
#endif /*LAYERS_IFOREST_H*/
|
||||
66
lib/stai/libstai/include/layers_ml_linearclassifier.h
Normal file
@ -0,0 +1,66 @@
|
||||
/**
|
||||
******************************************************************************
|
||||
* @file layers_ml_linearclassifier.h
|
||||
* @author SRA
|
||||
* @brief header file of AI platform LinearClassifier datatypes
|
||||
******************************************************************************
|
||||
* @attention
|
||||
*
|
||||
* Copyright (c) 2021 STMicroelectronics.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This software is licensed under terms that can be found in the LICENSE file
|
||||
* in the root directory of this software component.
|
||||
* If no LICENSE file comes with this software, it is provided AS-IS.
|
||||
*
|
||||
******************************************************************************
|
||||
*/
|
||||
#ifndef LAYERS_LINEARCLASSIFIER_H
|
||||
#define LAYERS_LINEARCLASSIFIER_H
|
||||
|
||||
#include "layers_common.h"
|
||||
#include "layers_nl.h"
|
||||
|
||||
/*!
|
||||
* @defgroup layers_linearclassifier Layers Definitions
|
||||
* @brief definition
|
||||
*
|
||||
*/
|
||||
|
||||
AI_API_DECLARE_BEGIN
|
||||
|
||||
|
||||
/*!
|
||||
* @struct ai_layer_linearclassifier
|
||||
* @ingroup layers_linearclassifier
|
||||
* @brief Linearclassifier layer
|
||||
*
|
||||
* The type of linearclassifier function is handled by the specific forward function
|
||||
* @ref forward_linearclassifier
|
||||
*/
|
||||
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_linearclassifier_ {
|
||||
AI_LAYER_COMMON_FIELDS_DECLARE
|
||||
func_nl nl_func; /*!< function pointer to non-linear transform */
|
||||
ai_bool multi_class; /*!< Indicates whether to do OvR or multinomial */
|
||||
ai_bool has_classlabels_int; /*!< if True, LinearClassifier returns classlabels int, else classlabels string */
|
||||
|
||||
} ai_layer_linearclassifier;
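/* Illustrative sketch only: the score computation a linear classifier performs
* before the non-linear transform selected by nl_func is applied. The array
* layout and names (coefficients, intercepts) are assumptions, not the libstai
* implementation. */
static void linearclassifier_scores(const float *coefficients, /* [n_classes * n_features] */
                                    const float *intercepts,   /* [n_classes] */
                                    const float *x, int n_features,
                                    int n_classes, float *scores)
{
  for (int c = 0; c < n_classes; c++) {
    float acc = intercepts[c];
    for (int f = 0; f < n_features; f++)
      acc += coefficients[c * n_features + f] * x[f];
    scores[c] = acc;  /* nl_func (e.g. softmax or sigmoid) is applied afterwards */
  }
}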
|
||||
|
||||
|
||||
/******************************************************************************/
|
||||
/* Forward Functions Section */
|
||||
/******************************************************************************/
|
||||
|
||||
/*!
|
||||
* @brief Decodes the LinearClassifier ML operator.
|
||||
* @ingroup layers_linearclassifier
|
||||
* @param layer linear classifier layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_linearclassifier(ai_layer *pLayer);
|
||||
|
||||
|
||||
|
||||
AI_API_DECLARE_END
|
||||
|
||||
#endif /*LAYERS_LINEARCLASSIFIER_H*/
|
||||
77
lib/stai/libstai/include/layers_ml_svc.h
Normal file
@ -0,0 +1,77 @@
|
||||
/**
|
||||
******************************************************************************
|
||||
* @file layers_svc.h
|
||||
* @author AST Embedded Analytics Research Platform
|
||||
* @brief header file of AI platform SVM Classifier (SVC) datatypes
|
||||
******************************************************************************
|
||||
* @attention
|
||||
*
|
||||
* Copyright (c) 2021 STMicroelectronics.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This software is licensed under terms that can be found in the LICENSE file
|
||||
* in the root directory of this software component.
|
||||
* If no LICENSE file comes with this software, it is provided AS-IS.
|
||||
*
|
||||
******************************************************************************
|
||||
*/
|
||||
|
||||
#ifndef LAYERS_SVC_H
|
||||
#define LAYERS_SVC_H
|
||||
|
||||
#include "layers_common.h"
|
||||
|
||||
/*!
|
||||
* @defgroup layers_svc Layers Definitions
|
||||
* @brief definition
|
||||
*
|
||||
*/
|
||||
|
||||
AI_API_DECLARE_BEGIN
|
||||
|
||||
|
||||
/* SVM classifier (SVC) kernel types */
|
||||
typedef enum ai_svc_kernel_e_ {
|
||||
AI_SVC_KERNEL_LINEAR = 0,
|
||||
AI_SVC_KERNEL_POLYNOMIAL,
|
||||
AI_SVC_KERNEL_RBF,
|
||||
AI_SVC_KERNEL_SIGMOID,
|
||||
AI_SVC_KERNEL_UNSUPPORTED
|
||||
} ai_svc_kernel_e;
|
||||
|
||||
|
||||
/*!
|
||||
* @struct ai_layer_svc
|
||||
* @ingroup layers_svc
|
||||
* @brief SVM Classifier (SVC) layer
|
||||
*
|
||||
* The type of svc function is handled by the specific forward function
|
||||
* @ref forward_svc
|
||||
*/
|
||||
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_svc_ {
|
||||
AI_LAYER_COMMON_FIELDS_DECLARE
|
||||
ai_float gamma; /*!< kernel coefficient for rbf, polynomial and sigmoid functions */
|
||||
ai_float coef0; /*!< term in polynomial and sigmoid functions */
|
||||
ai_u32 degree; /*!< polynomial function degree */
|
||||
ai_svc_kernel_e kernel_type; /*!< kernel type: see ai_svc_kernel_e */
|
||||
ai_bool proba_support; /*!< whether or not use the parameters learned in Platt scaling */
|
||||
ai_bool has_classlabels_int; /*!< if True, SVC returns classlabels int, else classlabels string */
|
||||
} ai_layer_svc;
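/* Illustrative sketch only: the standard SVM kernel functions that the gamma,
* coef0, degree and kernel_type fields above parameterize. svc_kernel() is a
* hypothetical helper, not the libstai implementation. */
#include <math.h>

static float svc_kernel(ai_svc_kernel_e kernel_type,
                        const float *x, const float *sv, int n,
                        float gamma, float coef0, unsigned degree)
{
  float dot = 0.0f, dist2 = 0.0f;
  for (int i = 0; i < n; i++) {
    dot   += x[i] * sv[i];
    dist2 += (x[i] - sv[i]) * (x[i] - sv[i]);
  }
  switch (kernel_type) {
    case AI_SVC_KERNEL_LINEAR:     return dot;
    case AI_SVC_KERNEL_POLYNOMIAL: return powf(gamma * dot + coef0, (float)degree);
    case AI_SVC_KERNEL_RBF:        return expf(-gamma * dist2);
    case AI_SVC_KERNEL_SIGMOID:    return tanhf(gamma * dot + coef0);
    default:                       return 0.0f;
  }
}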
|
||||
|
||||
|
||||
/******************************************************************************/
|
||||
/* Forward Functions Section */
|
||||
/******************************************************************************/
|
||||
|
||||
/*!
|
||||
* @brief Decodes the SVM Classifier ML operator.
|
||||
* @ingroup layers_svc
|
||||
* @param layer svm classifier layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_svc(ai_layer *pLayer);
|
||||
|
||||
|
||||
AI_API_DECLARE_END
|
||||
|
||||
#endif /*LAYERS_SVC_H*/
|
||||
78
lib/stai/libstai/include/layers_ml_svmregressor.h
Normal file
@ -0,0 +1,78 @@
|
||||
/**
|
||||
******************************************************************************
|
||||
* @file layers_svmregressor.h
|
||||
* @author AIS
|
||||
* @brief header file of AI platform SVM Regressor datatypes
|
||||
******************************************************************************
|
||||
* @attention
|
||||
*
|
||||
* Copyright (c) 2021 STMicroelectronics.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This software is licensed under terms that can be found in the LICENSE file
|
||||
* in the root directory of this software component.
|
||||
* If no LICENSE file comes with this software, it is provided AS-IS.
|
||||
*
|
||||
******************************************************************************
|
||||
*/
|
||||
|
||||
#ifndef LAYERS_SVMREGRESSOR_H
|
||||
#define LAYERS_SVMREGRESSOR_H
|
||||
|
||||
#include "layers_common.h"
|
||||
|
||||
/*!
|
||||
* @defgroup layers_svmreg Layers Definitions
|
||||
* @brief definition
|
||||
*
|
||||
*/
|
||||
|
||||
AI_API_DECLARE_BEGIN
|
||||
|
||||
|
||||
/* SVM regressor kernel types */
|
||||
typedef enum ai_svm_kernel_e_ {
|
||||
AI_SVMREG_KERNEL_LINEAR = 0,
|
||||
AI_SVMREG_KERNEL_POLYNOMIAL,
|
||||
AI_SVMREG_KERNEL_RBF,
|
||||
AI_SVMREG_KERNEL_SIGMOID,
|
||||
AI_SVMREG_KERNEL_UNSUPPORTED,
|
||||
} ai_svm_kernel_e;
|
||||
|
||||
|
||||
/*!
|
||||
* @struct ai_layer_svmreg
|
||||
* @ingroup layers_svmreg
|
||||
* @brief SVM Regressor layer
|
||||
*
|
||||
* The type of svmreg function is handled by the specific forward function
|
||||
* @ref forward_svm_regressor
|
||||
*/
|
||||
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_svmreg_ {
|
||||
AI_LAYER_COMMON_FIELDS_DECLARE
|
||||
ai_float intercept; /*!< constant used in the decision function */
|
||||
ai_float gamma; /*!< kernel coefficient for rbf, polynomial and sigmoid functions */
|
||||
ai_float coef0; /*!< term in polynomial and sigmoid functions */
|
||||
ai_u32 degree; /*!< polynomial function degree */
|
||||
ai_svm_kernel_e kernel_type; /*!< kernel type : see ai_svm_kernel_e */
|
||||
} ai_layer_svmreg;
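/* Illustrative sketch only: the usual SVM regression decision function that
* the intercept and kernel parameters above feed into. dual_coef, svs and the
* svm_kernel callback are hypothetical names, not the libstai implementation. */
static float svmreg_decision(const float *dual_coef, const float *const *svs,
                             int n_sv, const float *x, int n_features,
                             float intercept,
                             float (*svm_kernel)(const float *, const float *, int))
{
  /* f(x) = sum_i dual_coef_i * K(x, sv_i) + intercept */
  float acc = intercept;
  for (int i = 0; i < n_sv; i++)
    acc += dual_coef[i] * svm_kernel(x, svs[i], n_features);
  return acc;
}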
|
||||
|
||||
|
||||
|
||||
/******************************************************************************/
|
||||
/* Forward Functions Section */
|
||||
/******************************************************************************/
|
||||
|
||||
/*!
|
||||
* @brief Decodes the SVM Regressor ML operator.
|
||||
* @ingroup layers_svmreg
|
||||
* @param layer svm regressor layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_svm_regressor(ai_layer *pLayer);
|
||||
|
||||
|
||||
|
||||
AI_API_DECLARE_END
|
||||
|
||||
#endif /*LAYERS_SVMREGRESSOR_H*/
|
||||
108
lib/stai/libstai/include/layers_ml_treeensembleclassifier.h
Normal file
@ -0,0 +1,108 @@
|
||||
/**
|
||||
******************************************************************************
|
||||
* @file layers_ml_treeensembleclassifier.h
|
||||
* @author AIS
|
||||
* @brief header file of AI platform TreeEnsembleClassifier datatypes
|
||||
******************************************************************************
|
||||
* @attention
|
||||
*
|
||||
* Copyright (c) 2021-2022 STMicroelectronics.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This software is licensed under terms that can be found in the LICENSE file
|
||||
* in the root directory of this software component.
|
||||
* If no LICENSE file comes with this software, it is provided AS-IS.
|
||||
*
|
||||
******************************************************************************
|
||||
*/
|
||||
|
||||
#ifndef LAYERS_TREE_ENSEMBLE_CLASSIFIER_H
|
||||
#define LAYERS_TREE_ENSEMBLE_CLASSIFIER_H
|
||||
|
||||
#include "layers_common.h"
|
||||
#include "layers_nl.h"
|
||||
|
||||
|
||||
|
||||
/*!
|
||||
* @defgroup layers_ml_treensembleclassifier Layers Definitions
|
||||
* @brief definition
|
||||
*
|
||||
*/
|
||||
|
||||
AI_API_DECLARE_BEGIN
|
||||
|
||||
/* Error return codes */
|
||||
#define AI_TREE_ENSEMBLE_CLASSIFIER_ERROR_NO 0
|
||||
#define AI_TREE_ENSEMBLE_CLASSIFIER_ERROR_WRONG_IDX_FMT -1
|
||||
#define AI_TREE_ENSEMBLE_CLASSIFIER_ERROR_UNFOUND_LEAF -2
|
||||
#define AI_TREE_ENSEMBLE_CLASSIFIER_ERROR_UNSUPPORTED_BRANCH -3
|
||||
#define AI_TREE_ENSEMBLE_CLASSIFIER_ERROR_UNSUPPORTED_FEATURE -4
|
||||
|
||||
#define AI_TREE_ENSEMBLE_CLASSIFIER_DEPTH_MAX 10000
|
||||
|
||||
|
||||
/* Type of condition in the TreeEnsembleClassifier*/
|
||||
typedef enum
|
||||
{
|
||||
AI_TREE_ENSEMBLE_CLASSIFIER_BRANCH_LT_IDX = 0,
|
||||
AI_TREE_ENSEMBLE_CLASSIFIER_BRANCH_LEQ_IDX,
|
||||
AI_TREE_ENSEMBLE_CLASSIFIER_BRANCH_EQ_IDX,
|
||||
AI_TREE_ENSEMBLE_CLASSIFIER_BRANCH_END,
|
||||
} ai_tree_ensenble_classifier_branch_e;
|
||||
|
||||
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_tree_ensemble_classifier_ {
|
||||
AI_LAYER_COMMON_FIELDS_DECLARE
|
||||
func_nl nl_func;
|
||||
uint8_t all_weights_are_positive;
|
||||
ai_float nodes_values_scale;
|
||||
ai_float nodes_values_offset;
|
||||
ai_float class_weights_scale;
|
||||
ai_float class_weights_offset;
|
||||
} ai_layer_tree_ensemble_classifier;
|
||||
|
||||
|
||||
|
||||
/******************************************************************************/
|
||||
/* Forward Functions Section */
|
||||
/******************************************************************************/
|
||||
|
||||
/*!
|
||||
* @brief Decodes the TreeEnsembleClassifier ML operator.
|
||||
* @ingroup layers_svmreg
|
||||
* @param layer tree ensemble classifier layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_tree_ensemble_classifier(ai_layer *pLayer);
|
||||
|
||||
AI_INTERNAL_API
|
||||
ai_i32 decodeEstimator_LEQ_8Bits(const ai_float *pDataIn,
|
||||
ai_float *pOutDataScores,
|
||||
const ai_u8 *pFeatureIdxForEstimator,
|
||||
const ai_float *pValuesForEstimator,
|
||||
const ai_u8 *pTrueIdxForEstimator,
|
||||
const ai_u8 *pFalseIdxForEstimator,
|
||||
const ai_handle pClassWeightsForEstimator,
|
||||
const ai_array_format classWeightsFormat,
|
||||
const ai_u8 *pClassNodeIdsForEstimator,
|
||||
const ai_u16 nbClassWithCurrentEstimator,
|
||||
const ai_u8 *pClassIdsForEstimator);
|
||||
|
||||
AI_INTERNAL_API
|
||||
ai_i32 decodeEstimator_LEQ_16Bits(const ai_float *pDataIn,
|
||||
ai_float *pOutDataScores,
|
||||
const ai_u8 *pFeatureIdxForEstimator,
|
||||
const ai_float *pValuesForEstimator,
|
||||
const ai_u16 *pTrueIdxForEstimator,
|
||||
const ai_u16 *pFalseIdxForEstimator,
|
||||
ai_handle pClassWeightsForEstimator,
|
||||
const ai_array_format classWeightsFormat,
|
||||
const ai_u16 *pClassNodeIdsForEstimator,
|
||||
const ai_u16 nbClassWithCurrentEstimator,
|
||||
const ai_u16 *pClassIdsForEstimator);
|
||||
|
||||
|
||||
|
||||
AI_API_DECLARE_END
|
||||
|
||||
#endif /*LAYERS_TREE_ENSEMBLE_CLASSIFIER_H*/
|
||||
59
lib/stai/libstai/include/layers_ml_treeensembleregressor.h
Normal file
@ -0,0 +1,59 @@
|
||||
/**
|
||||
******************************************************************************
|
||||
* @file layers_ml_treeensembleregressor.h
|
||||
* @author AIS
|
||||
* @brief header file of AI platform TreeEnsembleRegressor datatypes
|
||||
******************************************************************************
|
||||
* @attention
|
||||
*
|
||||
* Copyright (c) 2021 STMicroelectronics.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This software is licensed under terms that can be found in the LICENSE file
|
||||
* in the root directory of this software component.
|
||||
* If no LICENSE file comes with this software, it is provided AS-IS.
|
||||
*
|
||||
******************************************************************************
|
||||
*/
|
||||
|
||||
#ifndef LAYERS_TREE_ENSEMBLE_REGRESSOR_H
|
||||
#define LAYERS_TREE_ENSEMBLE_REGRESSOR_H
|
||||
|
||||
#include "layers_common.h"
|
||||
#include "layers_ml_treeensembleclassifier.h"
|
||||
#include "layers_nl.h"
|
||||
|
||||
/*!
|
||||
* @defgroup layers_svmreg Layers Definitions
|
||||
* @brief definition
|
||||
*
|
||||
*/
|
||||
|
||||
AI_API_DECLARE_BEGIN
|
||||
|
||||
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_tree_ensemble_regressor_ {
|
||||
AI_LAYER_COMMON_FIELDS_DECLARE
|
||||
func_nl nl_func;
|
||||
uint8_t all_weights_are_positive;
|
||||
ai_float nodes_values_offset;
|
||||
ai_float nodes_values_scale;
|
||||
ai_float target_weights_offset;
|
||||
ai_float target_weights_scale;
|
||||
} ai_layer_tree_ensemble_regressor;
|
||||
|
||||
/******************************************************************************/
|
||||
/* Forward Functions Section */
|
||||
/******************************************************************************/
|
||||
|
||||
/*!
|
||||
* @brief Decodes the TreeEnsembleRegressor ML operator.
|
||||
* @ingroup layers_svmreg
|
||||
* @param layer tree ensemble regressor layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_tree_ensemble_regressor(ai_layer *pLayer);
|
||||
|
||||
|
||||
AI_API_DECLARE_END
|
||||
|
||||
#endif /*LAYERS_TREE_ENSEMBLE_REGRESSOR_H*/
|
||||
1130
lib/stai/libstai/include/layers_nl.h
Normal file
File diff suppressed because it is too large
205
lib/stai/libstai/include/layers_norm.h
Normal file
@ -0,0 +1,205 @@
|
||||
/**
|
||||
******************************************************************************
|
||||
* @file layers_norm.h
|
||||
* @author AST Embedded Analytics Research Platform
|
||||
* @brief header file of AI platform normalization layers datatypes
|
||||
******************************************************************************
|
||||
* @attention
|
||||
*
|
||||
* Copyright (c) 2018 STMicroelectronics.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This software is licensed under terms that can be found in the LICENSE file
|
||||
* in the root directory of this software component.
|
||||
* If no LICENSE file comes with this software, it is provided AS-IS.
|
||||
*
|
||||
******************************************************************************
|
||||
*/
|
||||
#ifndef LAYERS_NORM_H
|
||||
#define LAYERS_NORM_H
|
||||
|
||||
#include "layers_common.h"
|
||||
|
||||
/*!
|
||||
* @defgroup layers_norm Normalization Layers Definitions
|
||||
* @brief definition
|
||||
*
|
||||
*/
|
||||
|
||||
AI_API_DECLARE_BEGIN
|
||||
|
||||
/*!
|
||||
* @struct ai_layer_bn
|
||||
* @ingroup layers_norm
|
||||
* @brief Batch normalization (scale with bias) layer
|
||||
*/
|
||||
typedef ai_layer_base ai_layer_bn;
|
||||
|
||||
/*!
|
||||
* @struct ai_layer_lrn
|
||||
* @ingroup layers_norm
|
||||
* @brief Local Response Normalization layer
|
||||
*
|
||||
* Divides each element by a scale factor computed over a local neighborhood of the input
|
||||
*/
|
||||
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_lrn_ {
|
||||
AI_LAYER_COMMON_FIELDS_DECLARE
|
||||
ai_u32 local_size; /*!< size of the normalization window */
|
||||
ai_float k; /*!< bias term */
|
||||
ai_float alpha; /*!< input scale */
|
||||
ai_float beta; /*!< scale exponent */
|
||||
} ai_layer_lrn;
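/* Illustrative sketch only (not the libstai kernel): the usual LRN formula
* that k, alpha, beta and local_size parameterize. sum_sq stands for the sum
* of squared inputs over the local_size window; whether alpha is pre-divided
* by the window size varies by framework and is left to the caller here. */
#include <math.h>

static float lrn_element(float x_c, float sum_sq,
                         float k, float alpha, float beta)
{
  /* out_c = x_c / (k + alpha * sum_sq)^beta */
  return x_c / powf(k + alpha * sum_sq, beta);
}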
|
||||
|
||||
/*!
|
||||
* @enum ai_norm_type_e
|
||||
* @ingroup layers_norm
|
||||
* @brief store the type of normalization algorithm to apply
|
||||
*/
|
||||
typedef enum ai_norm_type_ {
|
||||
NONE = 0,
|
||||
L1 = 1,
|
||||
L2 = 2,
|
||||
MAX = 3,
|
||||
} ai_norm_type_e;
|
||||
|
||||
/*!
|
||||
* @struct ai_layer_norm
|
||||
* @ingroup layers_norm
|
||||
* @brief Lp Normalization layer
|
||||
*
|
||||
* Normalizes the tensor along the 'axis' direction using the Lp norm.
|
||||
* Optionally divides the result by the number of the elements.
|
||||
*/
|
||||
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_norm_ {
|
||||
AI_LAYER_COMMON_FIELDS_DECLARE
|
||||
ai_shape_idx axis; /*!< normalization axis */
|
||||
ai_float exponent; /*!< normalization exponent p */
|
||||
ai_bool scale; /*!< multiplies by the pth root of the number of elements */
|
||||
ai_norm_type_e norm_type;
|
||||
} ai_layer_norm;
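/* Illustrative sketch only: Lp normalization of one slice along the
* normalization axis, as described above. norm_lp_slice() is a hypothetical
* helper; the optional scaling by the element count is omitted. */
#include <math.h>
#include <stddef.h>

static void norm_lp_slice(float *x, size_t axis_size, size_t stride, float p)
{
  /* compute the Lp norm of the slice, then divide each element by it */
  float acc = 0.0f;
  for (size_t i = 0; i < axis_size; i++)
    acc += powf(fabsf(x[i * stride]), p);
  const float lp = powf(acc, 1.0f / p);
  for (size_t i = 0; i < axis_size; i++)
    x[i * stride] = (lp > 0.0f) ? x[i * stride] / lp : 0.0f;
}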
|
||||
|
||||
|
||||
/*!
|
||||
* @brief Local response normalization computed on a float array
|
||||
* @ingroup layers_norm
|
||||
* @param out opaque handler to float output channel
|
||||
* @param in opaque handler to float input channel
|
||||
* @param pad amount of padding for the channels
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void func_lrn_array_f32(ai_handle out, const ai_handle in,
|
||||
const ai_size in_size, const ai_size channel_size,
|
||||
const ai_i32 pad, const ai_float k,
|
||||
const ai_float alpha, const ai_float beta);
|
||||
|
||||
/*!
|
||||
* @brief Lp normalization computed on a float array
|
||||
* @ingroup layers_norm
|
||||
* @param out opaque handler to float output channel
|
||||
* @param in opaque handler to float input channel
|
||||
* @param exponent p exponent for the Lp normalization
|
||||
* @param axis_stride stride (in array elements) of the normalization axis
|
||||
* @param axis_size size of the normalization axis
|
||||
* @param outer_size number of tensor slices (including the normalization axis)
|
||||
* on which compute the normalization
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void func_norm_array_f32(ai_handle out, const ai_handle in,
|
||||
const ai_float exponent,
|
||||
const ai_float norm,
|
||||
const ai_size axis_stride,
|
||||
const ai_size axis_size,
|
||||
const ai_size outer_size);
|
||||
|
||||
/*!
|
||||
* @brief Max normalization computed on float array
|
||||
* @ingroup layers_norm
|
||||
* @param out opaque handler to float output channel
|
||||
* @param in opaque handler to float input channel
|
||||
* @param axis_stride stride (in array elements) of the normalization axis
|
||||
* @param axis_size size of the normalization axis
|
||||
* @param outer_size number of tensor slices (including the normalization axis)
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void func_norm_max_array_f32(ai_handle out, const ai_handle in,
|
||||
const ai_float norm,
|
||||
const ai_size axis_size,
|
||||
const ai_size n_el);
|
||||
|
||||
/*!
|
||||
* @brief Fast L2 normalization computed on a float array
|
||||
* @ingroup layers_norm
|
||||
* @param out opaque handler to float output channel
|
||||
* @param in opaque handler to float input channel
|
||||
* @param axis_size size of the normalization axis
|
||||
* @param n_el total number of elements in the tensor
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void func_norm_l2_fast_array_f32(ai_handle out, const ai_handle in,
|
||||
const ai_float norm,
|
||||
const ai_size axis_size,
|
||||
const ai_size outer_size);
|
||||
|
||||
/*!
|
||||
* @brief Fast L1 normalization computed on a float array
|
||||
* @ingroup layers_norm
|
||||
* @param out opaque handler to float output channel
|
||||
* @param in opaque handler to float input channel
|
||||
* @param axis_size size of the normalization axis
|
||||
* @param n_el total number of elements in the tensor
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void func_norm_l1_fast_array_f32(ai_handle out, const ai_handle in,
|
||||
const ai_float norm,
|
||||
const ai_size axis_size,
|
||||
const ai_size n_el);
|
||||
|
||||
|
||||
/******************************************************************************/
|
||||
/* Forward Functions Section */
|
||||
/******************************************************************************/
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a batchnorm (scale + bias) layer.
|
||||
* @ingroup layers_norm
|
||||
* @param layer the batch normalization (bn) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_bn(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a batchnorm (scale + bias) layer with
|
||||
* integer format
|
||||
* @ingroup layers_norm
|
||||
* @param layer the batch normalization (bn) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_bn_integer(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a Local Response Normalization Layer.
|
||||
* @ingroup layers_norm
|
||||
* @param layer the local response normalization (lrn) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_lrn(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a normalization layer.
|
||||
* @ingroup layers_norm
|
||||
* @param layer the normalization (norm) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_norm(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Batch Normalization with 16-bit input, 16-bit threshold and binary output.
|
||||
* It is implemented using a threshold, and this is possible because the output is binary.
|
||||
* @param layer the batch normalization layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_bn_is16os1ws16(ai_layer *pLayer);
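/* Illustrative sketch only of the threshold trick mentioned above: when the
* output is binary, scale+bias followed by a sign can be folded into a single
* per-channel threshold comparison. bn_binarize() and the sign handling are
* assumptions, not the libstai implementation. */
#include <stdint.h>

static uint8_t bn_binarize(int16_t x, int16_t threshold, int scale_positive)
{
  /* for a positive scale the comparison direction is kept, otherwise flipped */
  return scale_positive ? (uint8_t)(x >= threshold) : (uint8_t)(x <= threshold);
}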
|
||||
|
||||
AI_API_DECLARE_END
|
||||
|
||||
#endif /*LAYERS_NORM_H*/
|
||||
49
lib/stai/libstai/include/layers_pad_dqnn.h
Normal file
@ -0,0 +1,49 @@
|
||||
/**
|
||||
******************************************************************************
|
||||
* @file layers_pad_dqnn.h
|
||||
* @author AIS
|
||||
* @brief header file of AI platform DQNN padding datatypes
|
||||
******************************************************************************
|
||||
* @attention
|
||||
*
|
||||
* Copyright (c) 2021 STMicroelectronics.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This software is licensed under terms that can be found in the LICENSE file
|
||||
* in the root directory of this software component.
|
||||
* If no LICENSE file comes with this software, it is provided AS-IS.
|
||||
*
|
||||
******************************************************************************
|
||||
*/
|
||||
#ifndef LAYERS_PADDING_DQNN_H
|
||||
#define LAYERS_PADDING_DQNN_H
|
||||
|
||||
#include "layers_common.h"
|
||||
#include "layers_generic.h"
|
||||
|
||||
/*!
|
||||
* @defgroup layers_generic_dqnn Layers Definitions
|
||||
* @brief definition
|
||||
*
|
||||
*/
|
||||
|
||||
AI_API_DECLARE_BEGIN
|
||||
|
||||
|
||||
/******************************************************************************/
|
||||
/* Forward Functions Section */
|
||||
/******************************************************************************/
|
||||
|
||||
|
||||
/*!
|
||||
* @brief Handles padding with binary input and binary output
|
||||
* @ingroup layers_generic_dqnn
|
||||
* @param layer pad layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_pad_is1os1(ai_layer *pLayer);
|
||||
|
||||
|
||||
AI_API_DECLARE_END
|
||||
|
||||
#endif /*LAYERS_PADDING_DQNN_H*/
|
||||
71
lib/stai/libstai/include/layers_pad_generic.h
Normal file
@ -0,0 +1,71 @@
|
||||
/**
|
||||
******************************************************************************
|
||||
* @file layers_pad_generic.h
|
||||
* @author AIS
|
||||
* @brief header file of AI platform padding generic datatypes
|
||||
******************************************************************************
|
||||
* @attention
|
||||
*
|
||||
* Copyright (c) 2022 STMicroelectronics.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This software is licensed under terms that can be found in the LICENSE file
|
||||
* in the root directory of this software component.
|
||||
* If no LICENSE file comes with this software, it is provided AS-IS.
|
||||
*
|
||||
******************************************************************************
|
||||
*/
|
||||
#ifndef LAYERS_PAD_GENERIC_H
|
||||
#define LAYERS_PAD_GENERIC_H
|
||||
|
||||
#include "layers_generic.h"
|
||||
|
||||
/*!
|
||||
* @defgroup layers_pad_generic Layers Definitions
|
||||
* @brief definition
|
||||
*
|
||||
*/
|
||||
AI_API_DECLARE_BEGIN
|
||||
|
||||
|
||||
/******************************************************************************/
|
||||
/* Forward Functions Section */
|
||||
/******************************************************************************/
|
||||
|
||||
/*!
|
||||
* @brief Handles generic padding in constant mode
|
||||
* @ingroup layers_generic_dqnn
|
||||
* @param layer pad layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_pad_constant(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Handles generic padding in edge mode
|
||||
* @ingroup layers_generic_dqnn
|
||||
* @param layer pad layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_pad_edge(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Handles generic padding in reflect mode
|
||||
* @ingroup layers_generic_dqnn
|
||||
* @param layer pad layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_pad_reflect(ai_layer *pLayer);
|
||||
|
||||
|
||||
/*!
|
||||
* @brief Handles generic padding in constant mode Channel 1st 8bit
|
||||
* @ingroup layers_generic_dqnn
|
||||
* @param layer pad layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_pad_8bit_ch1st_3x3_constant(ai_layer* pLayer);
|
||||
|
||||
|
||||
AI_API_DECLARE_END
|
||||
|
||||
#endif /*LAYERS_PAD_GENERIC_H*/
|
||||
413
lib/stai/libstai/include/layers_pool.h
Normal file
@ -0,0 +1,413 @@
|
||||
/**
|
||||
******************************************************************************
|
||||
* @file layers_pool.h
|
||||
* @author AST Embedded Analytics Research Platform
|
||||
* @brief header file of AI platform pooling layers datatypes
|
||||
******************************************************************************
|
||||
* @attention
|
||||
*
|
||||
* Copyright (c) 2018 STMicroelectronics.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This software is licensed under terms that can be found in the LICENSE file
|
||||
* in the root directory of this software component.
|
||||
* If no LICENSE file comes with this software, it is provided AS-IS.
|
||||
*
|
||||
******************************************************************************
|
||||
*/
|
||||
#ifndef LAYERS_POOL_H
|
||||
#define LAYERS_POOL_H
|
||||
|
||||
#include "layers_common.h"
|
||||
#include "lite_maxpool_dqnn.h"
|
||||
#include "lite_pool_f32.h"
|
||||
|
||||
/*!
|
||||
* @defgroup layers_pool Pooling Layers Definitions
|
||||
* @brief definition
|
||||
*
|
||||
*/
|
||||
|
||||
AI_API_DECLARE_BEGIN
|
||||
|
||||
/*!
|
||||
* @struct ai_layer_pool
|
||||
* @ingroup layers_pool
|
||||
* @brief Pooling layer
|
||||
*
|
||||
* The type of pooling function is handled by the specific forward function
|
||||
* @ref forward_pool
|
||||
*/
|
||||
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_pool_ {
|
||||
AI_LAYER_COMMON_FIELDS_DECLARE
|
||||
ai_shape_2d pool_size; /*!< pooling size */
|
||||
ai_shape_2d pool_stride; /*!< pooling stride */
|
||||
ai_shape pool_pad; /*!< pooling pad, y,x border sizes */
|
||||
ai_u8 count_include_pad; /*!< include pad flag */
|
||||
} ai_layer_pool;
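/* Illustrative sketch only: the usual output-size arithmetic that pool_size,
* pool_stride and pool_pad imply for one spatial dimension (floor mode).
* pool_out_dim() is a hypothetical helper, not a libstai function. */
static int pool_out_dim(int in_dim, int kernel, int pad_begin, int pad_end,
                        int stride)
{
  /* out = floor((in + pads - kernel) / stride) + 1 */
  return (in_dim + pad_begin + pad_end - kernel) / stride + 1;
}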
|
||||
|
||||
|
||||
|
||||
/*!
|
||||
* @brief Max Pooling on a 8/16 bits fixed point data array
|
||||
* @ingroup layers_pool
|
||||
* @param in opaque handler to input data to process
|
||||
* @param dim_im_in_x input feature map width
|
||||
* @param dim_im_in_y input feature map height
|
||||
* @param ch_im_in number of input channels
|
||||
* @param dim_kernel_x kernel width
|
||||
* @param dim_kernel_y kernel height
|
||||
* @param padding_x right padding value
|
||||
* @param padding_y top padding value
|
||||
* @param stride_x stride value on x dimension
|
||||
* @param stride_y stride value on y dimension
|
||||
* @param dim_im_out_x output feature map width
|
||||
* @param dim_im_out_y output feature map height
|
||||
* @param out opaque handler to output data
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void pool_func_mp_array_fixed(ai_handle in,
|
||||
const ai_u16 dim_im_in_x, const ai_u16 dim_im_in_y,
|
||||
const ai_u16 ch_im_in,
|
||||
const ai_u16 dim_kernel_x, const ai_u16 dim_kernel_y,
|
||||
const ai_u16 padding_x, const ai_u16 padding_y,
|
||||
const ai_u16 stride_x, const ai_u16 stride_y,
|
||||
const ai_u16 dim_im_out_x, const ai_u16 dim_im_out_y,
|
||||
ai_handle out);
|
||||
|
||||
/*!
|
||||
* @brief Max Pooling on a 8-bits integer quantized data array
|
||||
* @ingroup layers_pool
|
||||
* @param in opaque handler to input data to process
|
||||
* @param dim_im_in_x input feature map width
|
||||
* @param dim_im_in_y input feature map height
|
||||
* @param ch_im_in number of input channels
|
||||
* @param dim_kernel_x kernel width
|
||||
* @param dim_kernel_y kernel height
|
||||
* @param padding_x right padding value
|
||||
* @param padding_y top padding value
|
||||
* @param stride_x stride value on x dimension
|
||||
* @param stride_y stride value on y dimension
|
||||
* @param dim_im_out_x output feature map width
|
||||
* @param dim_im_out_y output feature map height
|
||||
* @param out opaque handler to output data
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void pool_func_mp_array_integer(ai_handle in,
|
||||
const ai_u16 dim_im_in_x, const ai_u16 dim_im_in_y,
|
||||
const ai_u16 ch_im_in,
|
||||
const ai_u16 dim_kernel_x, const ai_u16 dim_kernel_y,
|
||||
const ai_u16 padding_x, const ai_u16 padding_y,
|
||||
const ai_u16 stride_x, const ai_u16 stride_y,
|
||||
const ai_u16 dim_im_out_x, const ai_u16 dim_im_out_y,
|
||||
ai_handle out);
|
||||
|
||||
/*!
|
||||
 * @brief Max Pooling on a signed 8-bit integer quantized data array
|
||||
* @ingroup layers_pool
|
||||
* @param in opaque handler to input data to process
|
||||
* @param dim_im_in_x input feature map width
|
||||
* @param dim_im_in_y input feature map height
|
||||
* @param ch_im_in number of input channels
|
||||
* @param dim_kernel_x kernel width
|
||||
* @param dim_kernel_y kernel height
|
||||
* @param padding_x right padding value
|
||||
* @param padding_y top padding value
|
||||
* @param stride_x stride value on x dimension
|
||||
* @param stride_y stride value on y dimension
|
||||
* @param dim_im_out_x output feature map width
|
||||
* @param dim_im_out_y output feature map height
|
||||
* @param out opaque handler to output data
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void pool_func_mp_array_integer_INT8(ai_handle in,
|
||||
const ai_u16 dim_im_in_x, const ai_u16 dim_im_in_y,
|
||||
const ai_u16 ch_im_in,
|
||||
const ai_u16 dim_kernel_x, const ai_u16 dim_kernel_y,
|
||||
const ai_u16 padding_x, const ai_u16 padding_y,
|
||||
const ai_u16 stride_x, const ai_u16 stride_y,
|
||||
const ai_u16 dim_im_out_x, const ai_u16 dim_im_out_y,
|
||||
ai_handle out);
|
||||
|
||||
/*!
|
||||
 * @brief Max Pooling on an unsigned 8-bit integer quantized data array
|
||||
* @ingroup layers_pool
|
||||
* @param in opaque handler to input data to process
|
||||
* @param dim_im_in_x input feature map width
|
||||
* @param dim_im_in_y input feature map height
|
||||
* @param ch_im_in number of input channels
|
||||
* @param dim_kernel_x kernel width
|
||||
* @param dim_kernel_y kernel height
|
||||
* @param padding_x right padding value
|
||||
* @param padding_y top padding value
|
||||
* @param stride_x stride value on x dimension
|
||||
* @param stride_y stride value on y dimension
|
||||
* @param dim_im_out_x output feature map width
|
||||
* @param dim_im_out_y output feature map height
|
||||
* @param out opaque handler to output data
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void pool_func_mp_array_integer_UINT8(ai_handle in,
|
||||
const ai_u16 dim_im_in_x, const ai_u16 dim_im_in_y,
|
||||
const ai_u16 ch_im_in,
|
||||
const ai_u16 dim_kernel_x, const ai_u16 dim_kernel_y,
|
||||
const ai_u16 padding_x, const ai_u16 padding_y,
|
||||
const ai_u16 stride_x, const ai_u16 stride_y,
|
||||
const ai_u16 dim_im_out_x, const ai_u16 dim_im_out_y,
|
||||
ai_handle out);
|
||||
|
||||
/*!
|
||||
 * @brief Average Pooling on an 8/16-bit fixed-point data array
|
||||
* @ingroup layers_pool
|
||||
* @param in opaque handler to input data to process
|
||||
* @param dim_im_in_x input feature map width
|
||||
* @param dim_im_in_y input feature map height
|
||||
* @param ch_im_in number of input channels
|
||||
* @param dim_kernel_x kernel width
|
||||
* @param dim_kernel_y kernel height
|
||||
* @param padding_x right padding value
|
||||
* @param padding_y top padding value
|
||||
* @param stride_x stride value on x dimension
|
||||
* @param stride_y stride value on y dimension
|
||||
* @param dim_im_out_x output feature map width
|
||||
* @param dim_im_out_y output feature map height
|
||||
* @param out opaque handler to scratch memory
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void pool_func_ap_array_fixed(ai_handle in,
|
||||
const ai_u16 dim_im_in_x, const ai_u16 dim_im_in_y,
|
||||
const ai_u16 ch_im_in,
|
||||
const ai_u16 dim_kernel_x, const ai_u16 dim_kernel_y,
|
||||
const ai_u16 padding_x, const ai_u16 padding_y,
|
||||
const ai_u16 stride_x, const ai_u16 stride_y,
|
||||
const ai_u16 dim_im_out_x, const ai_u16 dim_im_out_y,
|
||||
ai_handle out);
|
||||
|
||||
/*!
|
||||
 * @brief Average Pooling on an 8-bit integer quantized data array
|
||||
* @ingroup layers_pool
|
||||
* @param in opaque handler to input data to process
|
||||
* @param dim_im_in_x input feature map width
|
||||
* @param dim_im_in_y input feature map height
|
||||
* @param ch_im_in number of input channels
|
||||
* @param dim_kernel_x kernel width
|
||||
* @param dim_kernel_y kernel height
|
||||
* @param padding_x right padding value
|
||||
* @param padding_y top padding value
|
||||
* @param stride_x stride value on x dimension
|
||||
* @param stride_y stride value on y dimension
|
||||
* @param dim_im_out_x output feature map width
|
||||
* @param dim_im_out_y output feature map height
|
||||
* @param out opaque handler to scratch memory
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void pool_func_ap_array_integer(ai_handle in,
|
||||
const ai_u16 dim_im_in_x, const ai_u16 dim_im_in_y,
|
||||
const ai_u16 ch_im_in,
|
||||
const ai_u16 dim_kernel_x, const ai_u16 dim_kernel_y,
|
||||
const ai_u16 padding_x, const ai_u16 padding_y,
|
||||
const ai_u16 stride_x, const ai_u16 stride_y,
|
||||
const ai_u16 dim_im_out_x, const ai_u16 dim_im_out_y,
|
||||
ai_handle out);
|
||||
|
||||
/*!
|
||||
 * @brief Average Pooling on a signed 8-bit integer quantized data array
|
||||
* @ingroup layers_pool
|
||||
* @param in opaque handler to input data to process
|
||||
* @param dim_im_in_x input feature map width
|
||||
* @param dim_im_in_y input feature map height
|
||||
* @param ch_im_in number of input channels
|
||||
* @param dim_kernel_x kernel width
|
||||
* @param dim_kernel_y kernel height
|
||||
* @param padding_x right padding value
|
||||
* @param padding_y top padding value
|
||||
* @param stride_x stride value on x dimension
|
||||
* @param stride_y stride value on y dimension
|
||||
* @param dim_im_out_x output feature map width
|
||||
* @param dim_im_out_y output feature map height
|
||||
* @param out opaque handler to scratch memory
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void pool_func_ap_array_integer_INT8(ai_handle in,
|
||||
const ai_u16 dim_im_in_x, const ai_u16 dim_im_in_y,
|
||||
const ai_u16 ch_im_in,
|
||||
const ai_u16 dim_kernel_x, const ai_u16 dim_kernel_y,
|
||||
const ai_u16 padding_x, const ai_u16 padding_y,
|
||||
const ai_u16 stride_x, const ai_u16 stride_y,
|
||||
const ai_u16 dim_im_out_x, const ai_u16 dim_im_out_y,
|
||||
ai_handle out);
|
||||
|
||||
/*!
|
||||
 * @brief Average Pooling on an unsigned 8-bit integer quantized data array
|
||||
* @ingroup layers_pool
|
||||
* @param in opaque handler to input data to process
|
||||
* @param dim_im_in_x input feature map width
|
||||
* @param dim_im_in_y input feature map height
|
||||
* @param ch_im_in number of input channels
|
||||
* @param dim_kernel_x kernel width
|
||||
* @param dim_kernel_y kernel height
|
||||
* @param padding_x right padding value
|
||||
* @param padding_y top padding value
|
||||
* @param stride_x stride value on x dimension
|
||||
* @param stride_y stride value on y dimension
|
||||
* @param dim_im_out_x output feature map width
|
||||
* @param dim_im_out_y output feature map height
|
||||
* @param out opaque handler to scratch memory
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void pool_func_ap_array_integer_UINT8(ai_handle in,
|
||||
const ai_u16 dim_im_in_x, const ai_u16 dim_im_in_y,
|
||||
const ai_u16 ch_im_in,
|
||||
const ai_u16 dim_kernel_x, const ai_u16 dim_kernel_y,
|
||||
const ai_u16 padding_x, const ai_u16 padding_y,
|
||||
const ai_u16 stride_x, const ai_u16 stride_y,
|
||||
const ai_u16 dim_im_out_x, const ai_u16 dim_im_out_y,
|
||||
ai_handle out);
|
||||
|
||||
/******************************************************************************/
|
||||
/* Forward Functions Section */
|
||||
/******************************************************************************/
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a max pooling layer.
|
||||
* @ingroup layers_pool
|
||||
* @param layer the pooling (pool) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_mp(ai_layer* layer);
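/*
 * Illustrative sketch (not part of this header): a scalar float max pooling
 * loop over one HxW channel, showing how kernel size, stride and padding are
 * usually interpreted. Plain C types replace the ai_* typedefs; layout and
 * padding policy are assumptions, not the library's actual implementation.
 */
#include <float.h>

static void maxpool2d_f32_ref(const float *in, float *out,
                              int h_in, int w_in, int h_out, int w_out,
                              int kh, int kw, int sh, int sw,
                              int pad_y, int pad_x)
{
    for (int oy = 0; oy < h_out; oy++) {
        for (int ox = 0; ox < w_out; ox++) {
            float m = -FLT_MAX;
            for (int ky = 0; ky < kh; ky++) {
                for (int kx = 0; kx < kw; kx++) {
                    int iy = oy * sh - pad_y + ky;
                    int ix = ox * sw - pad_x + kx;
                    if (iy >= 0 && iy < h_in && ix >= 0 && ix < w_in) {
                        float v = in[iy * w_in + ix];
                        if (v > m) m = v;   /* padded positions are skipped */
                    }
                }
            }
            out[oy * w_out + ox] = m;
        }
    }
}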
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a fixed point max pooling layer.
|
||||
* @ingroup layers_pool
|
||||
* @param layer the pooling (pool) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_mp_fixed(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of an integer-quantized max pooling layer.
|
||||
* @ingroup layers_pool
|
||||
* @param layer the pooling (pool) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_mp_integer(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of an integer-quantized max pooling layer
|
||||
* with int8 I/O
|
||||
* @ingroup layers_pool
|
||||
* @param layer the pooling (pool) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_mp_integer_INT8(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of an integer-quantized max pooling layer
|
||||
* with int8 I/O. Optimized for HSP
|
||||
* @ingroup layers_pool
|
||||
* @param layer the pooling (pool) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_mp_hsp_INT8(ai_layer *pLayer);
|
||||
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of an integer-quantized max pooling layer
|
||||
 * with int8 I/O. Optimized for HSP: two-step variant for larger tensors
|
||||
* @ingroup layers_pool
|
||||
* @param layer the pooling (pool) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_mp_hsp_2step_INT8(ai_layer *pLayer);
|
||||
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of an integer-quantized max pooling layer
|
||||
* with uint8 I/O
|
||||
* @ingroup layers_pool
|
||||
* @param layer the pooling (pool) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_mp_integer_UINT8(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of an integer-quantized max pooling layer
|
||||
* with int16 I/O
|
||||
* @ingroup layers_pool
|
||||
* @param layer the pooling (pool) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_mp_integer_INT16(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of an integer-quantized max pooling layer
|
||||
* with uint16 I/O
|
||||
* @ingroup layers_pool
|
||||
* @param layer the pooling (pool) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_mp_integer_UINT16(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of an average pooling layer.
|
||||
* @ingroup layers_pool
|
||||
* @param layer the pooling (pool) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_ap(ai_layer* layer);
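/*
 * Illustrative sketch (not part of this header): average pooling over one
 * channel, showing the usual effect of a count_include_pad flag (divide by
 * the full window size vs. by the number of in-bounds samples). Plain C
 * types replace the ai_* typedefs; the flag semantics shown here are an
 * assumption, not the library's documented contract.
 */
static void avgpool2d_f32_ref(const float *in, float *out,
                              int h_in, int w_in, int h_out, int w_out,
                              int kh, int kw, int sh, int sw,
                              int pad_y, int pad_x, int count_include_pad)
{
    for (int oy = 0; oy < h_out; oy++) {
        for (int ox = 0; ox < w_out; ox++) {
            float sum = 0.0f;
            int valid = 0;
            for (int ky = 0; ky < kh; ky++) {
                for (int kx = 0; kx < kw; kx++) {
                    int iy = oy * sh - pad_y + ky;
                    int ix = ox * sw - pad_x + kx;
                    if (iy >= 0 && iy < h_in && ix >= 0 && ix < w_in) {
                        sum += in[iy * w_in + ix];
                        valid++;
                    }
                }
            }
            int div = count_include_pad ? (kh * kw) : valid;
            out[oy * w_out + ox] = (div > 0) ? (sum / (float)div) : 0.0f;
        }
    }
}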
|
||||
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a fixed point average pooling layer.
|
||||
* @ingroup layers_pool
|
||||
* @param layer the pooling (pool) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_ap_fixed(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of an integer-quantized average pooling layer.
|
||||
* @ingroup layers_pool
|
||||
* @param layer the pooling (pool) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_ap_integer(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of an average pooling layer. Optimized for HSP
|
||||
* @ingroup layers_pool
|
||||
* @param layer the pooling (pool) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_ap_hsp_INT8(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of an average pooling layer. Optimized for HSP
|
||||
* Variant for larger tensors
|
||||
* @ingroup layers_pool
|
||||
 * @param layer the pooling (pool) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_ap_hsp_2step_INT8(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of an integer-quantized average pooling layer
|
||||
* with int8 I/O
|
||||
* @ingroup layers_pool
|
||||
* @param layer the pooling (pool) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_ap_integer_INT8(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of an integer-quantized average pooling layer
|
||||
* with uint8 I/O
|
||||
* @ingroup layers_pool
|
||||
* @param layer the pooling (pool) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_ap_integer_UINT8(ai_layer *pLayer);
|
||||
|
||||
AI_API_DECLARE_END
|
||||
|
||||
#endif /*LAYERS_POOL_H*/
68  lib/stai/libstai/include/layers_pool_dqnn.h  Normal file
@@ -0,0 +1,68 @@
/**
|
||||
******************************************************************************
|
||||
 * @file layers_pool_dqnn.h
|
||||
* @author AIS
|
||||
* @brief header file of AI platform DQNN pool datatypes
|
||||
******************************************************************************
|
||||
* @attention
|
||||
*
|
||||
* Copyright (c) 2021 STMicroelectronics.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This software is licensed under terms that can be found in the LICENSE file
|
||||
* in the root directory of this software component.
|
||||
* If no LICENSE file comes with this software, it is provided AS-IS.
|
||||
*
|
||||
******************************************************************************
|
||||
*/
|
||||
|
||||
#ifndef LAYERS_POOL_DQNN_H
|
||||
#define LAYERS_POOL_DQNN_H
|
||||
|
||||
#include "layers_common.h"
|
||||
#include "layers_pool.h"
|
||||
|
||||
/*!
|
||||
* @defgroup layers_pool_dqnn Layers Definitions
|
||||
* @brief definition
|
||||
*
|
||||
*/
|
||||
|
||||
AI_API_DECLARE_BEGIN
|
||||
|
||||
|
||||
|
||||
/*!
|
||||
* @struct ai_layer_pool_dqnn
|
||||
* @ingroup layers_pool_dqnn
|
||||
* @brief pool_dqnn layer
|
||||
*
|
||||
* @ref forward_maxpool_is1os1
|
||||
*/
|
||||
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_pool_dqnn_ {
|
||||
AI_LAYER_COMMON_FIELDS_DECLARE
|
||||
ai_shape_2d pool_size; /*!< pooling size */
|
||||
ai_shape_2d pool_stride; /*!< pooling stride */
|
||||
ai_shape pool_pad; /*!< pooling pad, y,x border sizes */
|
||||
// ai_u32 pad_value; /*!< pooling pad value */
|
||||
} ai_layer_pool_dqnn;
|
||||
|
||||
|
||||
|
||||
/******************************************************************************/
|
||||
/* Forward Functions Section */
|
||||
/******************************************************************************/
|
||||
|
||||
/*!
|
||||
* @brief Handles max pooling with binary input and binary output
|
||||
* @ingroup layers_pool_dqnn
|
||||
 * @param layer the pooling (pool) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_maxpool_is1os1(ai_layer *pLayer);
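/*
 * Illustrative sketch (not part of this header): with 1-bit activations
 * packed into 32-bit words, max pooling reduces to a bitwise OR over the
 * pooling window, since max of {0,1} values is 1 iff any bit is set. The
 * 32-channels-per-word packing is an assumption for illustration only.
 */
#include <stdint.h>

static uint32_t maxpool_binary_window(const uint32_t *win_words, int n_words)
{
    uint32_t acc = 0u;
    for (int i = 0; i < n_words; i++) {
        acc |= win_words[i];   /* OR == max for binary data */
    }
    return acc;
}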
|
||||
|
||||
|
||||
|
||||
AI_API_DECLARE_END
|
||||
|
||||
#endif /*LAYERS_POOL_DQNN_H*/
94  lib/stai/libstai/include/layers_resize.h  Normal file
@@ -0,0 +1,94 @@
/**
|
||||
******************************************************************************
|
||||
* @file layers_resize.h
|
||||
* @author STMicroelectronics
|
||||
 * @brief header file of AI platform resize layers datatypes
|
||||
******************************************************************************
|
||||
* @attention
|
||||
*
|
||||
* Copyright (c) 2023 STMicroelectronics.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This software is licensed under terms that can be found in the LICENSE file
|
||||
* in the root directory of this software component.
|
||||
* If no LICENSE file comes with this software, it is provided AS-IS.
|
||||
*
|
||||
******************************************************************************
|
||||
*/
|
||||
#ifndef LAYERS_RESIZE_H
|
||||
#define LAYERS_RESIZE_H
|
||||
|
||||
#include "layers_generic.h"
|
||||
|
||||
/*!
|
||||
 * @defgroup layers_resize Resize Layers Definitions
|
||||
* @brief definition
|
||||
*
|
||||
*/
|
||||
AI_API_DECLARE_BEGIN
|
||||
|
||||
|
||||
/******************************************************************************/
|
||||
/* Forward Functions Section */
|
||||
/******************************************************************************/
|
||||
|
||||
/*!
|
||||
* @brief Handles generic resizing in nearest mode
|
||||
* @ingroup layers_generic
|
||||
* @param layer resize layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_resize_if32of32(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Handles generic resizing in bilinear mode
|
||||
* @ingroup layers_generic
|
||||
* @param layer resize layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_resize_bilinear_if32of32(ai_layer *pLayer);
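/*
 * Illustrative sketch (not part of this header): the bilinear blend used by
 * resize layers, interpolating between the four neighbours of a fractional,
 * non-negative source coordinate. The coordinate-mapping convention
 * (half-pixel offsets, align-corners, ...) is deliberately left out; only
 * the blend itself is shown.
 */
static float bilinear_sample(const float *img, int h, int w, float fy, float fx)
{
    int y0 = (int)fy, x0 = (int)fx;
    int y1 = (y0 + 1 < h) ? y0 + 1 : y0;
    int x1 = (x0 + 1 < w) ? x0 + 1 : x0;
    float dy = fy - (float)y0, dx = fx - (float)x0;
    float top = img[y0 * w + x0] * (1.0f - dx) + img[y0 * w + x1] * dx;
    float bot = img[y1 * w + x0] * (1.0f - dx) + img[y1 * w + x1] * dx;
    return top * (1.0f - dy) + bot * dy;
}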
|
||||
|
||||
/*!
|
||||
 * @brief Handles generic resizing in nearest mode
|
||||
* @ingroup layers_generic
|
||||
* @param layer resize layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_resize_nearest_if32of32(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Handles generic resizing in bilinear mode
|
||||
* @ingroup layers_generic
|
||||
* @param layer resize layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_resize_bilinear_is16os16(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
 * @brief Handles generic resizing in nearest mode
|
||||
* @ingroup layers_generic
|
||||
* @param layer resize layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_resize_nearest_is16os16(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Handles generic resizing in bilinear mode
|
||||
* @ingroup layers_generic
|
||||
* @param layer resize layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_resize_bilinear_is8os8(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
 * @brief Handles generic resizing in nearest mode
|
||||
* @ingroup layers_generic
|
||||
* @param layer resize layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_resize_nearest_is8os8(ai_layer *pLayer);
|
||||
|
||||
|
||||
AI_API_DECLARE_END
|
||||
|
||||
#endif /*LAYERS_RESIZE_H*/
201  lib/stai/libstai/include/layers_rnn.h  Normal file
@@ -0,0 +1,201 @@
/**
|
||||
******************************************************************************
|
||||
* @file layers_rnn.h
|
||||
* @author AST Embedded Analytics Research Platform
|
||||
* @brief header file of RNN layers
|
||||
******************************************************************************
|
||||
* @attention
|
||||
*
|
||||
* Copyright (c) 2018 STMicroelectronics.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This software is licensed under terms that can be found in the LICENSE file
|
||||
* in the root directory of this software component.
|
||||
* If no LICENSE file comes with this software, it is provided AS-IS.
|
||||
*
|
||||
******************************************************************************
|
||||
*/
|
||||
#ifndef LAYERS_RNN_H
|
||||
#define LAYERS_RNN_H
|
||||
|
||||
#include "layers_common.h"
|
||||
#include "layers_nl.h"
|
||||
|
||||
AI_API_DECLARE_BEGIN
|
||||
|
||||
/*!
|
||||
* @struct ai_layer_lstm
|
||||
* @ingroup layers
|
||||
* @brief LSTM layer with generic nonlinearities and peephole connections
|
||||
*/
|
||||
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_lstm_ {
|
||||
AI_LAYER_STATEFUL_FIELDS_DECLARE
|
||||
ai_size n_units; /**< size of the hidden RNN state */
|
||||
func_nl activation_nl; /**< activation nonlinearity (input to cell) */
|
||||
func_nl recurrent_nl; /**< recurrent nonlinearity (hidden to cell) */
|
||||
func_nl out_nl; /**< output nonlinearity (cell to hidden) */
|
||||
ai_bool go_backwards; /**< process reversed input */
|
||||
ai_bool return_state; /**< return state */
|
||||
ai_bool reverse_seq; /**< reverse output sequence */
|
||||
ai_float cell_clip; /**< cell clip value */
|
||||
} ai_layer_lstm;
|
||||
|
||||
/*!
|
||||
* @struct ai_layer_gru
|
||||
* @ingroup layers
|
||||
* @brief Gated Recurrent Unit (GRU) layer with generic nonlinearities
|
||||
*/
|
||||
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_gru_ {
|
||||
AI_LAYER_STATEFUL_FIELDS_DECLARE
|
||||
ai_size n_units; /**< size of the hidden RNN state */
|
||||
func_nl activation_nl; /**< activation nonlinearity (input to cell) */
|
||||
func_nl recurrent_nl; /**< recurrent nonlinearity (hidden to cell) */
|
||||
ai_bool reset_after;
|
||||
ai_bool return_state;
|
||||
ai_bool go_backwards; /**< process reversed input */
|
||||
ai_bool reverse_seq; /**< reverse output sequence */
|
||||
} ai_layer_gru;
|
||||
|
||||
/*!
|
||||
* @struct ai_layer_rnn
|
||||
* @ingroup layers
|
||||
* @brief Simple Recurrent Neural Network (RNN) layer
|
||||
*/
|
||||
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_rnn_ {
|
||||
AI_LAYER_COMMON_FIELDS_DECLARE
|
||||
ai_size n_units; /**< size of the hidden RNN state */
|
||||
func_nl activation_nl; /**< activation nonlinearity (input to hidden) */
|
||||
ai_bool go_backwards; /**< process reversed input */
|
||||
ai_bool reverse_seq; /**< reverse output sequence */
|
||||
ai_bool return_state;
|
||||
} ai_layer_rnn;
|
||||
|
||||
|
||||
/*!
|
||||
* @brief Allocate states for a stateful network.
|
||||
* @ingroup layers
|
||||
*
|
||||
* Function used to allocate states of a stateful network.
|
||||
*/
|
||||
void _allocate_states(ai_float **states, ai_u32 size_in_bytes);
|
||||
|
||||
|
||||
/*!
|
||||
* @brief Deallocate states for a stateful network.
|
||||
* @ingroup layers
|
||||
*
|
||||
* Function used to deallocate states of a stateful network.
|
||||
*/
|
||||
void _deallocate_states(ai_float **states);
|
||||
|
||||
|
||||
/*!
|
||||
 * @brief Initialize a Long Short-Term Memory (LSTM) layer.
|
||||
* @ingroup layers
|
||||
*
|
||||
* Function used to initialize lstm internal state
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void init_lstm(ai_layer * layer);
|
||||
|
||||
|
||||
/*!
|
||||
 * @brief Destroy a Long Short-Term Memory (LSTM) layer state.
|
||||
* @ingroup layers
|
||||
*
|
||||
* Function used to destroy lstm internal state
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void destroy_lstm(ai_layer * layer);
|
||||
|
||||
|
||||
/*!
 * @brief Computes the activations of a Long Short-Term Memory (LSTM) layer.
 * @ingroup layers
 *
 * Implements a Long Short-Term Memory (LSTM) layer with peephole connections:
 * \f{eqnarray*}{
 *    i_t &=& \sigma_a(x_t W_{xi} + h_{t-1} W_{hi}
 *            + w_{ci} \odot c_{t-1} + b_i)\\
 *    f_t &=& \sigma_a(x_t W_{xf} + h_{t-1} W_{hf}
 *            + w_{cf} \odot c_{t-1} + b_f)\\
 *    c_t &=& f_t \odot c_{t - 1}
 *            + i_t \odot \sigma_r(x_t W_{xc} + h_{t-1} W_{hc} + b_c)\\
 *    o_t &=& \sigma_a(x_t W_{xo} + h_{t-1} W_{ho} + w_{co} \odot c_t + b_o)\\
 *    h_t &=& o_t \odot \sigma_o(c_t)
 * \f}
 * where \f$\sigma_a\f$ is the activation nonlinearity, \f$\sigma_r\f$ is the
 * recurrent nonlinearity and \f$\sigma_o\f$ is the out nonlinearity. The
 * \f$W_x\f$, \f$W_h\f$ and \f$W_c\f$ weights are sliced from the kernel,
 * recurrent and peephole weights.
 *
 * @param layer the LSTM layer
 */
|
||||
AI_INTERNAL_API
|
||||
void forward_lstm(ai_layer * layer);
|
||||
|
||||
AI_INTERNAL_API
|
||||
void forward_lstm_is8os8ws8(ai_layer * layer);
|
||||
|
||||
|
||||
/*!
|
||||
* @brief Initialize a Gated Recurrent Unit (GRU) layer.
|
||||
* @ingroup layers
|
||||
*
|
||||
* Function used to initialize gru internal state
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void init_gru(ai_layer * layer);
|
||||
|
||||
|
||||
/*!
|
||||
* @brief Destroy a Gated Recurrent Unit (GRU) layer state.
|
||||
* @ingroup layers
|
||||
*
|
||||
* Function used to destroy gru internal state
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void destroy_gru(ai_layer * layer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a Gated Recurrent Unit (GRU) layer.
|
||||
* @ingroup layers
|
||||
*
|
||||
* Implements a Gated Recurrent Unit with the formula:
|
||||
* \f{eqnarray*}{
|
||||
* r_t &=& \sigma_a(x_t W_{xr} + h_{t - 1} W_{hr} + b_r) \\
|
||||
* z_t &=& \sigma_a(x_t W_{xz} + h_{t - 1} W_{hz} + b_z) \\
|
||||
* c_t &=& \sigma_r(x_t W_{xc} + r_t \odot (h_{t - 1} W_{hc} + b_{hc}) + b_c)
|
||||
* \qquad \textnormal{when reset after is true} \\
|
||||
* c_t &=& \sigma_r(x_t W_{xc} + (r_t \odot h_{t - 1}) W_{hc} + b_{hc} + b_c)
|
||||
* \qquad \textnormal{when reset after is false (default)} \\
|
||||
* h_t &=& (1 - z_t) \odot h_{t - 1} + z_t \odot c_t
|
||||
* \f}
|
||||
* where \f$\sigma_a\f$ is the activation nonlinearity and \f$\sigma_r\f$ is
|
||||
* the recurrent nonlinearity. The weights are sliced from the kernel and
|
||||
* recurrent weights.
|
||||
*
|
||||
* @param layer the GRU layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_gru(ai_layer * layer);
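/*
 * Illustrative sketch (not part of this header): one GRU step for a single
 * time sample, written directly from the equations documented above, using
 * the reset_after = false (default) form. Plain float arrays and generic
 * sigmoid/tanh stand in for the library's sliced weights and func_nl
 * handles; bc below stands for the folded b_hc + b_c bias.
 */
#include <math.h>

static float sigm(float v) { return 1.0f / (1.0f + expf(-v)); }

/* y[j] = act( sum_i x[i]*Wx[i*n+j] + sum_i h[i]*Wh[i*n+j] + b[j] ) */
static void gru_gate(const float *x, int nx, const float *h, int n,
                     const float *Wx, const float *Wh, const float *b,
                     float (*act)(float), float *y)
{
    for (int j = 0; j < n; j++) {
        float a = b[j];
        for (int i = 0; i < nx; i++) a += x[i] * Wx[i * n + j];
        for (int i = 0; i < n;  i++) a += h[i] * Wh[i * n + j];
        y[j] = act(a);
    }
}

static void gru_step(const float *x, int nx, float *h, int n,
                     const float *Wxr, const float *Whr, const float *br,
                     const float *Wxz, const float *Whz, const float *bz,
                     const float *Wxc, const float *Whc, const float *bc,
                     float *r, float *z, float *c, float *rh /* scratch, n each */)
{
    gru_gate(x, nx, h, n, Wxr, Whr, br, sigm, r);        /* reset gate   */
    gru_gate(x, nx, h, n, Wxz, Whz, bz, sigm, z);        /* update gate  */
    for (int j = 0; j < n; j++) rh[j] = r[j] * h[j];     /* r_t (.) h_{t-1} */
    gru_gate(x, nx, rh, n, Wxc, Whc, bc, tanhf, c);      /* candidate    */
    for (int j = 0; j < n; j++)
        h[j] = (1.0f - z[j]) * h[j] + z[j] * c[j];       /* new hidden state */
}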
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a Recurrent Neural Network (RNN) layer.
|
||||
* @ingroup layers
|
||||
*
|
||||
* Implements a recurrent layer with the formula:
|
||||
* \f{eqnarray*}{
|
||||
* h_t &=& \sigma_a(x_t W_{xr} + h_{t - 1} W_{hr} + b_r)
|
||||
* \f}
|
||||
* where \f$\sigma_a\f$ is the activation nonlinearity. The weights are sliced
|
||||
* from the kernel and recurrent weights.
|
||||
*
|
||||
* @param layer the RNN layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_rnn(ai_layer * layer);
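/*
 * Illustrative sketch (not part of this header): one step of the simple RNN
 * recurrence documented above, h_t = act(x_t W_xr + h_{t-1} W_hr + b_r),
 * using plain float arrays in place of the library's tensors. Weight layout
 * (Wx is nx-by-n, Wh is n-by-n, row-major) is an assumption.
 */
static void rnn_step(const float *x, int nx, float *h, int n,
                     const float *Wx, const float *Wh, const float *b,
                     float (*act)(float), float *h_new /* scratch, n */)
{
    for (int j = 0; j < n; j++) {
        float a = b[j];
        for (int i = 0; i < nx; i++) a += x[i] * Wx[i * n + j];
        for (int i = 0; i < n;  i++) a += h[i] * Wh[i * n + j];
        h_new[j] = act(a);
    }
    for (int j = 0; j < n; j++) h[j] = h_new[j];   /* update hidden state */
}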
|
||||
|
||||
AI_API_DECLARE_END
|
||||
|
||||
#endif /* LAYERS_RNN_H */
60  lib/stai/libstai/include/layers_sm.h  Normal file
@@ -0,0 +1,60 @@
/**
|
||||
******************************************************************************
|
||||
* @file layers_sm.h
|
||||
* @author STMicroelectronics
|
||||
 * @brief header file of AI platform softmax layer datatypes
|
||||
******************************************************************************
|
||||
* @attention
|
||||
*
|
||||
* Copyright (c) 2018 STMicroelectronics.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This software is licensed under terms that can be found in the LICENSE file
|
||||
* in the root directory of this software component.
|
||||
* If no LICENSE file comes with this software, it is provided AS-IS.
|
||||
*
|
||||
******************************************************************************
|
||||
*/
|
||||
|
||||
#ifndef LAYERS_SM_H
|
||||
#define LAYERS_SM_H
|
||||
|
||||
#include "layers_common.h"
|
||||
|
||||
/*!
|
||||
 * @defgroup layers_sm SoftMax Layer Definitions
|
||||
* @brief definition
|
||||
*
|
||||
*/
|
||||
|
||||
AI_API_DECLARE_BEGIN
|
||||
|
||||
/*!
|
||||
* @brief Softmax normalization computed on an array of fixed point channels
|
||||
* @ingroup layers_sm
|
||||
* @param out opaque handler to output channel array
|
||||
* @param in opaque handler to input channel array
|
||||
* @param in_size total size (number of elements) to process on the input
|
||||
* @param channel_size number of elements of the input channel
|
||||
* @param in_channel_step number of elements to move to next input element
|
||||
* @param out_channel_step number of elements to move to next output element
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void sm_func_sm_array_fixed(ai_handle out, const ai_handle in,
|
||||
const ai_size in_size,
|
||||
const ai_size channel_size,
|
||||
const ai_size in_channel_step,
|
||||
const ai_size out_channel_step);
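/*
 * Illustrative sketch (not part of this header): a float softmax over one
 * channel, showing how channel_size and the input/output channel steps are
 * typically used to walk strided data. The fixed-point arithmetic of the
 * actual kernel is intentionally not reproduced.
 */
#include <math.h>

static void softmax_channel_f32(float *out, const float *in,
                                int channel_size, int in_step, int out_step)
{
    float max_v = in[0];
    for (int i = 1; i < channel_size; i++)
        if (in[i * in_step] > max_v) max_v = in[i * in_step];

    float sum = 0.0f;
    for (int i = 0; i < channel_size; i++)
        sum += expf(in[i * in_step] - max_v);      /* subtract max for stability */

    for (int i = 0; i < channel_size; i++)
        out[i * out_step] = expf(in[i * in_step] - max_v) / sum;
}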
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a fixed point softmax nonlinear layer.
|
||||
* @ingroup layers_sm
|
||||
* @param layer the softmax (sm) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_sm_fixed(ai_layer *pLayer);
|
||||
|
||||
AI_API_DECLARE_END
|
||||
|
||||
#endif /*LAYERS_SM_H*/
69  lib/stai/libstai/include/layers_upsample.h  Normal file
@@ -0,0 +1,69 @@
/**
|
||||
******************************************************************************
|
||||
 * @file layers_upsample.h
|
||||
* @author STMicroelectronics
|
||||
 * @brief header file of AI platform upsample layers datatypes
|
||||
******************************************************************************
|
||||
* @attention
|
||||
*
|
||||
* Copyright (c) 2022 STMicroelectronics.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This software is licensed under terms that can be found in the LICENSE file
|
||||
* in the root directory of this software component.
|
||||
* If no LICENSE file comes with this software, it is provided AS-IS.
|
||||
*
|
||||
******************************************************************************
|
||||
*/
|
||||
#ifndef LAYERS_UPSAMPLE_H
|
||||
#define LAYERS_UPSAMPLE_H
|
||||
|
||||
#include "layers_generic.h"
|
||||
|
||||
/*!
|
||||
 * @defgroup layers_upsample Upsample Layers Definitions
|
||||
* @brief definition
|
||||
*
|
||||
*/
|
||||
AI_API_DECLARE_BEGIN
|
||||
|
||||
|
||||
/******************************************************************************/
|
||||
/* Forward Functions Section */
|
||||
/******************************************************************************/
|
||||
|
||||
|
||||
/*!
|
||||
* @brief Handles upsampling in zeros mode
|
||||
* @ingroup layers_upsample
|
||||
* @param layer upsample layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_upsample_zeros_is8os8(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Handles upsampling in bilinear mode
|
||||
* @ingroup layers_upsample
|
||||
* @param layer upsample layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_upsample_bilinear_is8os8(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Handles upsampling in zeros mode
|
||||
* @ingroup layers_upsample
|
||||
* @param layer upsample layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_upsample_zeros_is16os16(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Handles upsampling in bilinear mode
|
||||
* @ingroup layers_upsample
|
||||
* @param layer upsample layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_upsample_bilinear_is16os16(ai_layer* layer);
|
||||
AI_API_DECLARE_END
|
||||
|
||||
#endif /*LAYERS_UPSAMPLE_H*/
61  lib/stai/libstai/include/layers_upsample_generic.h  Normal file
@@ -0,0 +1,61 @@
/**
|
||||
******************************************************************************
|
||||
* @file layers_upsample_generic.h
|
||||
* @author STMicroelectronics
|
||||
 * @brief header file of AI platform generic upsample layers datatypes
|
||||
******************************************************************************
|
||||
* @attention
|
||||
*
|
||||
* Copyright (c) 2022 STMicroelectronics.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This software is licensed under terms that can be found in the LICENSE file
|
||||
* in the root directory of this software component.
|
||||
* If no LICENSE file comes with this software, it is provided AS-IS.
|
||||
*
|
||||
******************************************************************************
|
||||
*/
|
||||
#ifndef LAYERS_UPSAMPLE_GENERIC_H
|
||||
#define LAYERS_UPSAMPLE_GENERIC_H
|
||||
|
||||
#include "layers_generic.h"
|
||||
|
||||
/*!
|
||||
 * @defgroup layers_upsample_generic Generic Upsample Layers Definitions
|
||||
* @brief definition
|
||||
*
|
||||
*/
|
||||
AI_API_DECLARE_BEGIN
|
||||
|
||||
|
||||
/******************************************************************************/
|
||||
/* Forward Functions Section */
|
||||
/******************************************************************************/
|
||||
|
||||
/*!
|
||||
* @brief Handles generic upsampling in nearest mode
|
||||
* @ingroup layers_generic
|
||||
* @param layer upsample layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_upsample_nearest(ai_layer *pLayer);
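/*
 * Illustrative sketch (not part of this header): nearest-neighbour
 * upsampling of one channel by integer scale factors, where each output
 * pixel copies input[oy/scale_y][ox/scale_x]. Fractional scales and the
 * zeros/bilinear modes declared here behave differently.
 */
static void upsample_nearest_f32(const float *in, float *out,
                                 int h_in, int w_in,
                                 int scale_y, int scale_x)
{
    int h_out = h_in * scale_y, w_out = w_in * scale_x;
    for (int oy = 0; oy < h_out; oy++) {
        for (int ox = 0; ox < w_out; ox++) {
            out[oy * w_out + ox] = in[(oy / scale_y) * w_in + (ox / scale_x)];
        }
    }
}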
|
||||
|
||||
/*!
|
||||
* @brief Handles generic upsampling in zeros mode
|
||||
* @ingroup layers_generic
|
||||
* @param layer upsample layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_upsample_zeros(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Handles generic upsampling in bilinear mode
|
||||
* @ingroup layers_generic
|
||||
* @param layer upsample layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_upsample_bilinear(ai_layer *pLayer);
|
||||
|
||||
AI_API_DECLARE_END
|
||||
|
||||
#endif /*LAYERS_UPSAMPLE_GENERIC_H*/
45  lib/stai/libstai/include/layers_wrappers.h  Normal file
@@ -0,0 +1,45 @@
/**
|
||||
******************************************************************************
|
||||
* @file layers_wrappers.h
|
||||
* @author AST Embedded Analytics Research Platform
|
||||
* @brief header file of AI platform generic layers datatypes
|
||||
******************************************************************************
|
||||
* @attention
|
||||
*
|
||||
* Copyright (c) 2024 STMicroelectronics.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This software is licensed under terms that can be found in the LICENSE file
|
||||
* in the root directory of this software component.
|
||||
* If no LICENSE file comes with this software, it is provided AS-IS.
|
||||
*
|
||||
******************************************************************************
|
||||
*/
|
||||
#ifndef _LAYERS_WRAPPERS_H
|
||||
#define _LAYERS_WRAPPERS_H
|
||||
|
||||
#include "layers_common.h"
|
||||
|
||||
/*!
|
||||
* @defgroup layers_wrappers Runtime Wrapper Layers Definitions
|
||||
* @brief definition
|
||||
*
|
||||
*/
|
||||
|
||||
AI_API_DECLARE_BEGIN
|
||||
|
||||
/*!
|
||||
* @struct ai_layer_tflite_wrapper
|
||||
* @ingroup layers_generic
|
||||
 * @brief TFLite runtime wrapper layer
|
||||
*/
|
||||
|
||||
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_tflite_wrapper_ {
|
||||
AI_NODE_COMMON_FIELDS_DECLARE
|
||||
const ai_array* init_data;
|
||||
} ai_layer_tflite_wrapper;
|
||||
|
||||
AI_API_DECLARE_END
|
||||
|
||||
#endif /* _LAYERS_WRAPPERS_H */
61  lib/stai/libstai/include/lite_argminmax.h  Normal file
@@ -0,0 +1,61 @@
/**
|
||||
******************************************************************************
|
||||
* @file lite_argminmax.h
|
||||
* @author AIS
|
||||
 * @brief header file of AI platform lite argmin/argmax functions
|
||||
******************************************************************************
|
||||
* @attention
|
||||
*
|
||||
* Copyright (c) 2023 STMicroelectronics.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This software is licensed under terms that can be found in the LICENSE file
|
||||
* in the root directory of this software component.
|
||||
* If no LICENSE file comes with this software, it is provided AS-IS.
|
||||
*
|
||||
******************************************************************************
|
||||
*/
|
||||
#ifndef LITE_ARGMINMAX_H
|
||||
#define LITE_ARGMINMAX_H
|
||||
|
||||
#include "ai_lite_interface.h"
|
||||
|
||||
|
||||
void forward_lite_argmax_if32( const ai_float* in_data,
|
||||
ai_u32* dst_out,
|
||||
const ai_size width_in,
|
||||
const ai_size height_in,
|
||||
const ai_size n_channel_in,
|
||||
const ai_i16 axis,
|
||||
const ai_i16 select_last_index);
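/*
 * Illustrative sketch (not part of this header): argmax over a contiguous
 * run of values, showing the usual meaning of select_last_index (on ties,
 * keep the last matching index instead of the first). The axis handling of
 * the declared kernels is more general than this 1-D walk, and the tie
 * semantics shown here are an assumption.
 */
#include <stdint.h>

static uint32_t argmax_f32(const float *v, int n, int select_last_index)
{
    uint32_t best = 0u;
    for (int i = 1; i < n; i++) {
        if (v[i] > v[best] || (select_last_index && v[i] == v[best]))
            best = (uint32_t)i;
    }
    return best;
}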
|
||||
void forward_lite_argmin_if32( const ai_float* in_data,
|
||||
ai_u32* dst_out,
|
||||
const ai_size width_in,
|
||||
const ai_size height_in,
|
||||
const ai_size n_channel_in,
|
||||
const ai_i16 axis,
|
||||
const ai_i16 select_last_index);
|
||||
void forward_lite_argmax_is8( const ai_i8* in_data,
|
||||
ai_u32* dst_out,
|
||||
const ai_size width_in,
|
||||
const ai_size height_in,
|
||||
const ai_size n_channel_in,
|
||||
const ai_i16 axis,
|
||||
const ai_i16 select_last_index);
|
||||
void forward_lite_argmax_iu8( const ai_u8* in_data,
|
||||
ai_u32* dst_out,
|
||||
const ai_size width_in,
|
||||
const ai_size height_in,
|
||||
const ai_size n_channel_in,
|
||||
const ai_i16 axis,
|
||||
const ai_i16 select_last_index);
|
||||
void forward_lite_argmin_is8( const ai_i8* in_data,
|
||||
ai_u32* dst_out,
|
||||
const ai_size width_in,
|
||||
const ai_size height_in,
|
||||
const ai_size n_channel_in,
|
||||
const ai_i16 axis,
|
||||
const ai_i16 select_last_index);
|
||||
|
||||
|
||||
#endif /*LITE_ARGMINMAX_H*/
42  lib/stai/libstai/include/lite_bn_f32.h  Normal file
@@ -0,0 +1,42 @@
/**
|
||||
******************************************************************************
|
||||
 * @file lite_bn_f32.h
|
||||
* @author AIS
|
||||
* @brief header file of AI platform lite batch normalization functions
|
||||
******************************************************************************
|
||||
* @attention
|
||||
*
|
||||
* Copyright (c) 2023 STMicroelectronics.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This software is licensed under terms that can be found in the LICENSE file
|
||||
* in the root directory of this software component.
|
||||
* If no LICENSE file comes with this software, it is provided AS-IS.
|
||||
*
|
||||
******************************************************************************
|
||||
*/
|
||||
#ifndef LITE_BN_F32_H
|
||||
#define LITE_BN_F32_H
|
||||
|
||||
|
||||
#include "ai_lite_interface.h"
|
||||
|
||||
/*!
|
||||
* @brief Forward function for a batch normalization (BN) layer with
|
||||
* signed float input, signed float output, and float parameters.
|
||||
* @ingroup lite_bn_f32
|
||||
* @param output The pointer to output buffer.
|
||||
* @param input The pointer to input buffer.
|
||||
* @param scale The pointer to BN scale param.
|
||||
* @param bias The pointer to bias.
|
||||
* @param n_elements The number of elements in the input tensor.
|
||||
 * @param n_channel_in The number of channels in the input tensor.
|
||||
*/
|
||||
LITE_API_ENTRY
|
||||
void forward_lite_bn_if32of32wf32(
|
||||
ai_float* output, const ai_float* input,
|
||||
const ai_float* scale, const ai_float* bias,
|
||||
const ai_u32 n_elements, const ai_u32 n_channel_in);
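/*
 * Illustrative sketch (not part of this header): applying a folded batch
 * normalization as y = x * scale[c] + bias[c] per channel, assuming a
 * layout where the channel index cycles fastest. The layout is an
 * assumption for illustration, not the library's contract.
 */
static void bn_f32_apply(float *out, const float *in,
                         const float *scale, const float *bias,
                         int n_elements, int n_channels)
{
    for (int i = 0; i < n_elements; i++) {
        int c = i % n_channels;               /* channel of this element */
        out[i] = in[i] * scale[c] + bias[c];
    }
}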
|
||||
|
||||
|
||||
#endif /* LITE_BN_F32_H */
216  lib/stai/libstai/include/lite_bn_integer.h  Normal file
@@ -0,0 +1,216 @@
/**
|
||||
******************************************************************************
|
||||
* @file lite_bn_integer.h
|
||||
* @author AIS
|
||||
 * @brief header file of AI platform lite integer batch normalization
 *        functions
|
||||
******************************************************************************
|
||||
* @attention
|
||||
*
|
||||
* Copyright (c) 2023 STMicroelectronics.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This software is licensed under terms that can be found in the LICENSE file
|
||||
* in the root directory of this software component.
|
||||
* If no LICENSE file comes with this software, it is provided AS-IS.
|
||||
*
|
||||
******************************************************************************
|
||||
*/
|
||||
#ifndef LITE_BN_INTEGER_H
|
||||
#define LITE_BN_INTEGER_H
|
||||
|
||||
|
||||
#include "ai_lite_interface.h"
|
||||
|
||||
/**
|
||||
* @brief Batch Normalization with 16-bit input, 16-bit threshold and binary output.
|
||||
* It is implemented using a threshold, and this is possible because the output is binary.
|
||||
*
|
||||
* @param[in] pIn Input data pointer
|
||||
* @param[out] pOut_32 Output data pointer
|
||||
* @param[in] pThreshold Thresholds pointer (one per channel)
|
||||
* @param[in] dim_x X dimension
|
||||
* @param[in] dim_y Y dimension
|
||||
* @param[in] channels_num Channels number
|
||||
*/
|
||||
LITE_API_ENTRY
|
||||
void forward_lite_bn_is16os1ws16(const ai_i16 *pIn,
|
||||
ai_u32 *pOut_32,
|
||||
const ai_i16 *pThreshold,
|
||||
const ai_i16 dim_x,
|
||||
const ai_i16 dim_y,
|
||||
const ai_i16 channels_num);
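/*
 * Illustrative sketch (not part of this header): why a BN with binary output
 * can be folded into a per-channel threshold. sign(x*s + b) with s > 0 is
 * equivalent to comparing x against t = -b/s, so only one 16-bit threshold
 * per channel needs to be stored. The 32-channels-per-word bit packing below
 * is an assumption for illustration only.
 */
#include <stdint.h>

static uint32_t bn_threshold_pack32(const int16_t *x, const int16_t *thr,
                                    int c0 /* first of 32 channels */)
{
    uint32_t word = 0u;
    for (int k = 0; k < 32; k++) {
        if (x[c0 + k] > thr[c0 + k])      /* 1 if above threshold, else 0 */
            word |= (1u << k);
    }
    return word;
}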
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
/**
|
||||
 * @brief Batch Normalization with signed 8-bit input and output.
|
||||
*
|
||||
* @param[in] p_in Input data pointer
|
||||
* @param[out] p_out Output data pointer
|
||||
* @param[in] n_channel_inout nb channels
|
||||
* @param[in] n_elements nb elements
|
||||
* @param[in] in_scale input scale
|
||||
* @param[in] in_zeropoint input zero point
|
||||
* @param[in] out_scale output scale
|
||||
* @param[in] out_zeropoint output zero point
|
||||
* @param[in] pSc_scale pointer on scale scales
|
||||
* @param[in] pSc_zeropoint pointer on scale zero_point
|
||||
* @param[in] pScale_data pointer on scale input
|
||||
* @param[in] pBias_scale pointer on bias scales
|
||||
* @param[in] pBias_zeropoint pointer on bias zero_point
|
||||
* @param[in] pBias_data pointer on bias input
|
||||
* @param[in] bnl_param_sign sign of BNL parameters (0=unsigned, 1=signed)
|
||||
* @param[in] pBuffer_a scratch buffer for:
|
||||
* out factor: nb channels * sizeof(ai_i32) +
|
||||
* out offset: nb channels * sizeof(ai_i32) +
|
||||
* out shift: nb channels * sizeof(ai_i16)
|
||||
*/
|
||||
void forward_lite_bn_is8os8( const ai_i8 *p_in,
|
||||
ai_i8 *p_out,
|
||||
ai_size n_channel_inout,
|
||||
ai_size n_elements,
|
||||
ai_float in_scale,
|
||||
const ai_i32 in_zeropoint,
|
||||
ai_float out_scale,
|
||||
const ai_i32 out_zeropoint,
|
||||
const ai_float *pSc_scale,
|
||||
const ai_i8 *pSc_zeropoint,
|
||||
const ai_i8 *pData_scale,
|
||||
const ai_float *pBias_scale,
|
||||
const ai_i8 *pBias_zeropoint,
|
||||
const ai_i8 *pData_bias,
|
||||
ai_i16 bnl_param_sign,
|
||||
ai_i32 scratch_size,
|
||||
ai_i16 *pBuffer_a);
|
||||
|
||||
/**
|
||||
 * @brief Batch Normalization with signed 8-bit input and output,
 *        per-channel quantization
|
||||
*
|
||||
*
|
||||
* @param[in] p_in Input data pointer
|
||||
* @param[out] p_out Output data pointer
|
||||
* @param[in] n_channel_inout nb channels
|
||||
* @param[in] n_elements nb elements
|
||||
* @param[in] in_scale input scale
|
||||
* @param[in] in_zeropoint input zero point
|
||||
* @param[in] out_scale output scale
|
||||
* @param[in] out_zeropoint output zero point
|
||||
* @param[in] pSc_scale pointer on scale scales
|
||||
* @param[in] pSc_zeropoint pointer on scale zero_point
|
||||
* @param[in] pScale_data pointer on scale input
|
||||
* @param[in] pBias_scale pointer on bias scales
|
||||
* @param[in] pBias_zeropoint pointer on bias zero_point
|
||||
* @param[in] pBias_data pointer on bias input
|
||||
* @param[in] bnl_param_sign sign of BNL parameters (0=unsigned, 1=signed)
|
||||
* @param[in] pBuffer_a scratch buffer for:
|
||||
* out factor: nb channels * sizeof(ai_i32) +
|
||||
* out offset: nb channels * sizeof(ai_i32) +
|
||||
* out shift: nb channels * sizeof(ai_i16)
|
||||
*/
|
||||
void forward_lite_bn_is8os8_ch( const ai_i8 *p_in,
|
||||
ai_i8 *p_out,
|
||||
ai_size n_channel_inout,
|
||||
ai_size n_elements,
|
||||
ai_float in_scale,
|
||||
const ai_i32 in_zeropoint,
|
||||
ai_float out_scale,
|
||||
const ai_i32 out_zeropoint,
|
||||
const ai_float *pSc_scale,
|
||||
const ai_i8 *pSc_zeropoint,
|
||||
const ai_i8 *pData_scale,
|
||||
const ai_float *pBias_scale,
|
||||
const ai_i8 *pBias_zeropoint,
|
||||
const ai_i8 *pData_bias,
|
||||
ai_i16 bnl_param_sign,
|
||||
ai_i32 scratch_size,
|
||||
ai_i16 *pBuffer_a);
|
||||
|
||||
/**
|
||||
 * @brief Batch Normalization with unsigned 8-bit input and output
|
||||
*
|
||||
*
|
||||
* @param[in] p_in Input data pointer
|
||||
* @param[out] p_out Output data pointer
|
||||
* @param[in] n_channel_inout nb channels
|
||||
* @param[in] n_elements nb elements
|
||||
* @param[in] in_scale input scale
|
||||
* @param[in] in_zeropoint input zero point
|
||||
* @param[in] out_scale output scale
|
||||
* @param[in] out_zeropoint output zero point
|
||||
* @param[in] pSc_scale pointer on scale scales
|
||||
* @param[in] pSc_zeropoint pointer on scale zero_point
|
||||
* @param[in] pScale_data pointer on scale input
|
||||
* @param[in] pBias_scale pointer on bias scales
|
||||
* @param[in] pBias_zeropoint pointer on bias zero_point
|
||||
* @param[in] pBias_data pointer on bias input
|
||||
* @param[in] bnl_param_sign sign of BNL parameters (0=unsigned, 1=signed)
|
||||
* @param[in] pBuffer_a scratch buffer for:
|
||||
* out factor: nb channels * sizeof(ai_i32) +
|
||||
* out offset: nb channels * sizeof(ai_i32) +
|
||||
* out shift: nb channels * sizeof(ai_i16)
|
||||
*/
|
||||
void forward_lite_bn_iu8ou8( const ai_u8 *p_in,
|
||||
ai_u8 *p_out,
|
||||
ai_size n_channel_inout,
|
||||
ai_size n_elements,
|
||||
ai_float in_scale,
|
||||
const ai_i32 in_zeropoint_32,
|
||||
ai_float out_scale,
|
||||
const ai_i32 out_zeropoint_32,
|
||||
const ai_float *pSc_scale,
|
||||
const ai_i8 *pSc_zeropoint,
|
||||
const ai_i8 *pData_scale,
|
||||
const ai_float *pBias_scale,
|
||||
const ai_i8 *pBias_zeropoint,
|
||||
const ai_i8 *pData_bias,
|
||||
ai_i16 bnl_param_sign,
|
||||
ai_i32 scratch_size,
|
||||
ai_i16 *pBuffer_a);
|
||||
|
||||
/**
|
||||
 * @brief Batch Normalization with unsigned 8-bit input and output,
 *        per-channel quantization
|
||||
*
|
||||
*
|
||||
* @param[in] p_in Input data pointer
|
||||
* @param[out] p_out Output data pointer
|
||||
* @param[in] n_channel_inout nb channels
|
||||
* @param[in] n_elements nb elements
|
||||
* @param[in] in_scale input scale
|
||||
* @param[in] in_zeropoint input zero point
|
||||
* @param[in] out_scale output scale
|
||||
* @param[in] out_zeropoint output zero point
|
||||
* @param[in] pSc_scale pointer on scale scales
|
||||
* @param[in] pSc_zeropoint pointer on scale zero_point
|
||||
* @param[in] pScale_data pointer on scale input
|
||||
* @param[in] pBias_scale pointer on bias scales
|
||||
* @param[in] pBias_zeropoint pointer on bias zero_point
|
||||
* @param[in] pBias_data pointer on bias input
|
||||
* @param[in] bnl_param_sign sign of BNL parameters (0=unsigned, 1=signed)
|
||||
* @param[in] pBuffer_a scratch buffer for:
|
||||
* out factor: nb channels * sizeof(ai_i32) +
|
||||
* out offset: nb channels * sizeof(ai_i32) +
|
||||
* out shift: nb channels * sizeof(ai_i16)
|
||||
*/
|
||||
void forward_lite_bn_iu8ou8_ch( const ai_u8 *p_in,
|
||||
ai_u8 *p_out,
|
||||
ai_size n_channel_inout,
|
||||
ai_size n_elements,
|
||||
ai_float in_scale,
|
||||
const ai_i32 in_zeropoint,
|
||||
ai_float out_scale,
|
||||
const ai_i32 out_zeropoint,
|
||||
const ai_float *pSc_scale,
|
||||
const ai_i8 *pSc_zeropoint,
|
||||
const ai_i8 *pData_scale,
|
||||
const ai_float *pBias_scale,
|
||||
const ai_i8 *pBias_zeropoint,
|
||||
const ai_i8 *pData_bias,
|
||||
ai_i16 bnl_param_sign,
|
||||
ai_i32 scratch_size,
|
||||
ai_i16 *pBuffer_a);
|
||||
#endif /* LITE_BN_INTEGER_H */
477  lib/stai/libstai/include/lite_conv2d.h  Normal file
@@ -0,0 +1,477 @@
/**
|
||||
******************************************************************************
|
||||
* @file lite_conv2d.h
|
||||
* @author AIS
|
||||
* @brief header file of AI platform lite conv2d kernel datatypes
|
||||
******************************************************************************
|
||||
* @attention
|
||||
*
|
||||
* Copyright (c) 2021 STMicroelectronics.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This software is licensed under terms that can be found in the LICENSE file
|
||||
* in the root directory of this software component.
|
||||
* If no LICENSE file comes with this software, it is provided AS-IS.
|
||||
*
|
||||
******************************************************************************
|
||||
*/
|
||||
#ifndef LITE_CONV2D_H
|
||||
#define LITE_CONV2D_H
|
||||
|
||||
#include "ai_lite_interface.h"
|
||||
#include "lite_internal_apis.h"
|
||||
|
||||
/******************************************************************************/
|
||||
/* Forward Functions Section */
|
||||
/******************************************************************************/
|
||||
|
||||
/*!
|
||||
* @brief Handles 2D convolution with float input, float output and
|
||||
* float weights
|
||||
* @ingroup lite_conv2d
|
||||
*/
|
||||
LITE_API_ENTRY
|
||||
void forward_lite_conv2d_if32of32wf32(const ai_float *pDataIn_init,
|
||||
ai_float *pDataOut_init,
|
||||
const ai_ptr_const pWeights_init,
|
||||
const ai_ptr_const pBias_init,
|
||||
ai_float *pWeights_prefetch,
|
||||
const ai_size n_channel_in,
|
||||
const ai_size n_channel_out,
|
||||
const ai_size width_in,
|
||||
const ai_size height_in,
|
||||
const ai_size width_out,
|
||||
const ai_size height_out,
|
||||
const ai_size filt_width,
|
||||
const ai_size filt_height,
|
||||
const ai_u16 filt_pad_x,
|
||||
const ai_u16 filt_pad_y,
|
||||
const ai_u16 filt_stride_x,
|
||||
const ai_u16 filt_stride_y,
|
||||
const ai_size filt_height_dilated,
|
||||
const ai_size filt_width_dilated,
|
||||
const ai_u16 dilation_x,
|
||||
const ai_u16 dilation_y,
|
||||
const ai_size n_groups);
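/*
 * Illustrative sketch (not part of this header): how the dilated filter
 * extents passed to these conv2d kernels relate to the base kernel size and
 * dilation, and the resulting valid-style output dimension under the usual
 * convention. Plain ints stand in for the ai_* types.
 */
static inline int dilated_extent(int filt, int dilation)
{
    return (filt - 1) * dilation + 1;      /* e.g. 3x3 with dilation 2 -> 5 */
}

static inline int conv_out_dim(int in_dim, int filt, int dilation,
                               int pad_begin, int pad_end, int stride)
{
    return (in_dim + pad_begin + pad_end - dilated_extent(filt, dilation))
           / stride + 1;
}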
|
||||
|
||||
/*!
|
||||
* @brief Handles 2D depthwise convolution with float input, float output and
|
||||
* float weights
|
||||
* @ingroup lite_conv2d
|
||||
*/
|
||||
LITE_API_ENTRY
|
||||
void forward_lite_dw_if32of32wf32(const ai_float *pDataIn_init,
|
||||
ai_float *pDataOut_init,
|
||||
const ai_ptr_const pWeights_init,
|
||||
const ai_ptr_const pBias_init,
|
||||
const ai_size n_channel_in,
|
||||
const ai_size n_channel_out,
|
||||
const ai_size width_in,
|
||||
const ai_size height_in,
|
||||
const ai_size width_out,
|
||||
const ai_size height_out,
|
||||
const ai_size filt_width,
|
||||
const ai_size filt_height,
|
||||
const ai_u16 filt_pad_x,
|
||||
const ai_u16 filt_pad_y,
|
||||
const ai_u16 filt_stride_x,
|
||||
const ai_u16 filt_stride_y,
|
||||
const ai_size filt_height_dilated,
|
||||
const ai_size filt_width_dilated,
|
||||
const ai_u16 dilation_x,
|
||||
const ai_u16 dilation_y,
|
||||
const ai_size n_groups);
|
||||
|
||||
/*!
|
||||
* @brief Handles 2D grouped convolution with float input, float output and
|
||||
* float weights
|
||||
* @ingroup lite_conv2d
|
||||
*/
|
||||
LITE_API_ENTRY
|
||||
void forward_lite_conv2d_if32of32wf32_group(const ai_float *pDataIn_init,
|
||||
ai_float *pDataOut_init,
|
||||
const ai_ptr_const pWeights_init,
|
||||
const ai_ptr_const pBias_init,
|
||||
const ai_size n_channel_in,
|
||||
const ai_size n_channel_out,
|
||||
const ai_size width_in,
|
||||
const ai_size height_in,
|
||||
const ai_size width_out,
|
||||
const ai_size height_out,
|
||||
const ai_size filt_width,
|
||||
const ai_size filt_height,
|
||||
const ai_u16 filt_pad_x,
|
||||
const ai_u16 filt_pad_y,
|
||||
const ai_u16 filt_stride_x,
|
||||
const ai_u16 filt_stride_y,
|
||||
const ai_size filt_height_dilated,
|
||||
const ai_size filt_width_dilated,
|
||||
const ai_u16 dilation_x,
|
||||
const ai_u16 dilation_y,
|
||||
const ai_size n_groups);
|
||||
|
||||
/*!
|
||||
* @brief Handles dilated conv2d convolutions (valid padding)
|
||||
* @ingroup lite_conv2d
|
||||
*/
|
||||
LITE_API_ENTRY
|
||||
void
|
||||
forward_lite_conv2d_dilated_sssa8_ch(const ai_i8 *pData_in,
|
||||
const ai_u16 dim_im_in_x,
|
||||
const ai_u16 dim_im_in_y,
|
||||
const ai_u16 n_channel_in,
|
||||
const ai_i8 *pWeights,
|
||||
const ai_u16 n_channel_out,
|
||||
const ai_u16 dim_kernel_x,
|
||||
const ai_u16 dim_kernel_y,
|
||||
const ai_u16 stride_x,
|
||||
const ai_u16 stride_y,
|
||||
const ai_u16 dilation_x,
|
||||
const ai_u16 dilation_y,
|
||||
const ai_i32 *pBias,
|
||||
const ai_i8 in_zeropoint,
|
||||
const ai_i8 out_zeropoint,
|
||||
const ai_float in_scale,
|
||||
const ai_float out_scale,
|
||||
const ai_float *pWt_scale,
|
||||
const ai_layer_format_type out_ch_format,
|
||||
ai_i8 *pData_out,
|
||||
const ai_u16 dim_im_out_x,
|
||||
const ai_u16 dim_im_out_y,
|
||||
ai_u32 height_loop_cnt,
|
||||
const ai_u16 weights_prefetch_enabled,
|
||||
ai_i32 scratch_size,
|
||||
ai_i16 *pBuffer_a);
|
||||
|
||||
/*!
|
||||
* @brief Handles conv2d convolutions (valid padding) with number of channels >= 8
|
||||
* @ingroup lite_conv2d
|
||||
*/
|
||||
LITE_API_ENTRY
|
||||
void
|
||||
forward_lite_conv2d_deep_sssa8_ch(const ai_i8 *pData_in,
|
||||
const ai_u16 dim_im_in_x,
|
||||
const ai_u16 dim_im_in_y,
|
||||
const ai_u16 n_channel_in,
|
||||
const ai_i8 *pWeights,
|
||||
const ai_u16 n_channel_out,
|
||||
const ai_u16 dim_kernel_x,
|
||||
const ai_u16 dim_kernel_y,
|
||||
const ai_u16 stride_x,
|
||||
const ai_u16 stride_y,
|
||||
const ai_i32 *pBias,
|
||||
const ai_i8 in_zeropoint,
|
||||
const ai_i8 out_zeropoint,
|
||||
const ai_float in_scale,
|
||||
const ai_float out_scale,
|
||||
const ai_float *pWt_scale,
|
||||
const ai_layer_format_type out_ch_format,
|
||||
ai_i8 *pData_out,
|
||||
const ai_u16 dim_im_out_x,
|
||||
const ai_u16 dim_im_out_y,
|
||||
ai_u32 height_loop_cnt,
|
||||
const ai_u16 weights_prefetch_enabled,
|
||||
ai_i32 scratch_size,
|
||||
ai_i16 *pBuffer_a);
|
||||
/*!
|
||||
* @brief Handles conv2d convolutions (valid padding) with number of channels >= 8
|
||||
* Special forward function for 3x3 kernels and Stride = 1
|
||||
* @ingroup lite_conv2d
|
||||
*/
|
||||
LITE_API_ENTRY
|
||||
void
|
||||
forward_lite_conv2d_deep_3x3_sssa8_ch(const ai_i8 *pData_in,
|
||||
const ai_u16 dim_im_in_x,
|
||||
const ai_u16 dim_im_in_y,
|
||||
const ai_u16 n_channel_in,
|
||||
const ai_i8 *pWeights,
|
||||
const ai_u16 n_channel_out,
|
||||
const ai_i32 *pBias,
|
||||
const ai_i8 in_zeropoint,
|
||||
const ai_i8 out_zeropoint,
|
||||
const ai_float in_scale,
|
||||
const ai_float out_scale,
|
||||
const ai_float *pWt_scale,
|
||||
const ai_layer_format_type out_ch_format,
|
||||
ai_i8 *pData_out,
|
||||
const ai_u16 dim_im_out_x,
|
||||
const ai_u16 dim_im_out_y,
|
||||
ai_u32 height_loop_cnt,
|
||||
ai_i32 scratch_size,
|
||||
ai_i16 *pBuffer_a);
|
||||
|
||||
/*!
|
||||
* @brief Handles conv2d convolutions optimized by HSP HW
|
||||
* @ingroup lite_conv2d
|
||||
*/
|
||||
LITE_API_ENTRY
|
||||
void
|
||||
forward_lite_conv2d_hsp_1step_sssa8_ch(const ai_i8 *pData_in,
|
||||
const ai_u16 dim_im_in_x,
|
||||
const ai_u16 dim_im_in_y,
|
||||
const ai_u16 n_channel_in,
|
||||
const ai_i8 *pWeights,
|
||||
const ai_u16 n_channel_out,
|
||||
const ai_u16 dim_kernel_x,
|
||||
const ai_u16 dim_kernel_y,
|
||||
const ai_u16 stride_x,
|
||||
const ai_u16 stride_y,
|
||||
const ai_u16 padding_x,
|
||||
const ai_u16 padding_x_r,
|
||||
const ai_u16 padding_y,
|
||||
const ai_u16 padding_y_b,
|
||||
const ai_i32 *pBias,
|
||||
const ai_i8 in_zeropoint,
|
||||
const ai_i8 out_zeropoint,
|
||||
const ai_float in_scale,
|
||||
const ai_float out_scale,
|
||||
const ai_float *pWt_scale,
|
||||
const ai_layer_format_type out_ch_format,
|
||||
ai_i8 *pData_out,
|
||||
const ai_u16 dim_im_out_x,
|
||||
const ai_u16 dim_im_out_y,
|
||||
ai_i32 scratch_size,
|
||||
ai_i16 *pBuffer_a);
|
||||
LITE_API_ENTRY
|
||||
void
|
||||
forward_lite_conv2d_hsp_sssa8_ch(const ai_i8 *pData_in,
|
||||
const ai_u16 dim_im_in_x,
|
||||
const ai_u16 dim_im_in_y,
|
||||
const ai_u16 n_channel_in,
|
||||
const ai_i8 *pWeights,
|
||||
const ai_u16 n_channel_out,
|
||||
const ai_u16 dim_kernel_x,
|
||||
const ai_u16 dim_kernel_y,
|
||||
const ai_u16 stride_x,
|
||||
const ai_u16 stride_y,
|
||||
const ai_u16 padding_x,
|
||||
const ai_u16 padding_x_r,
|
||||
const ai_u16 padding_y,
|
||||
const ai_u16 padding_y_b,
|
||||
const ai_i32 *pBias,
|
||||
const ai_i8 in_zeropoint,
|
||||
const ai_i8 out_zeropoint,
|
||||
const ai_float in_scale,
|
||||
const ai_float out_scale,
|
||||
const ai_float *pWt_scale,
|
||||
const ai_layer_format_type out_ch_format,
|
||||
ai_i8 *pData_out,
|
||||
const ai_u16 dim_im_out_x,
|
||||
const ai_u16 dim_im_out_y,
|
||||
ai_i32 scratch_size,
|
||||
ai_i16 *pBuffer_a);
|
||||
|
||||
LITE_API_ENTRY
|
||||
void
|
||||
forward_lite_conv2d_hsp_3step_sssa8_ch(const ai_i8 *pData_in,
|
||||
const ai_u16 dim_im_in_x,
|
||||
const ai_u16 dim_im_in_y,
|
||||
const ai_u16 n_channel_in,
|
||||
const ai_i8 *pWeights,
|
||||
const ai_u16 n_channel_out,
|
||||
const ai_u16 dim_kernel_x,
|
||||
const ai_u16 dim_kernel_y,
|
||||
const ai_u16 stride_x,
|
||||
const ai_u16 stride_y,
|
||||
const ai_u16 padding_x,
|
||||
const ai_u16 padding_x_r,
|
||||
const ai_u16 padding_y,
|
||||
const ai_u16 padding_y_b,
|
||||
const ai_i32 *pBias,
|
||||
const ai_i8 in_zeropoint,
|
||||
const ai_i8 out_zeropoint,
|
||||
const ai_float in_scale,
|
||||
const ai_float out_scale,
|
||||
const ai_float *pWt_scale,
|
||||
const ai_layer_format_type out_ch_format,
|
||||
ai_i8 *pData_out,
|
||||
const ai_u16 dim_im_out_x,
|
||||
const ai_u16 dim_im_out_y,
|
||||
ai_i32 scratch_size,
|
||||
ai_i16 *pBuffer_a);
|
||||
|
||||
|
||||
/*!
 * @brief Handles conv2d convolutions with same padding or with number of channels < 8
 * @ingroup lite_conv2d
 */
LITE_API_ENTRY
void forward_lite_conv2d_sssa8_ch(
    const ai_i8 *pData_in, const ai_u16 dim_im_in_x, const ai_u16 dim_im_in_y,
    const ai_u16 n_channel_in, const ai_i8 *pWeights, const ai_u16 n_channel_out,
    const ai_u16 dim_kernel_x, const ai_u16 dim_kernel_y, const ai_u16 stride_x,
    const ai_u16 stride_y, const ai_u16 padding_x, const ai_u16 padding_y,
    const ai_i32 *pBias, const ai_i8 in_zeropoint, const ai_i8 out_zeropoint,
    const ai_float in_scale, const ai_float out_scale, const ai_float *pWt_scale,
    const ai_layer_format_type out_ch_format, ai_i8 *pData_out,
    const ai_u16 dim_im_out_x, const ai_u16 dim_im_out_y,
    const ai_u16 weights_prefetch_enabled, ai_i32 scratch_size, ai_i16 *pBuffer_a);

/*!
 * @brief Handles RGB conv2d convolutions
 * @ingroup lite_conv2d
 */
LITE_API_ENTRY
void forward_lite_conv2d_rgb_sssa8_ch(
    const ai_i8 *pData_in, const ai_u16 dim_im_in, const ai_i8 *pWeights,
    const ai_u16 n_channel_out, const ai_u16 dim_kernel, const ai_u16 padding,
    const ai_u16 stride, const ai_i32 *pBias, const ai_i8 in_zeropoint,
    const ai_i8 out_zeropoint, const ai_float in_scale, const ai_float out_scale,
    const ai_float *pWt_scale, const ai_layer_format_type out_ch_format,
    ai_i8 *pData_out, const ai_u16 dim_im_out, ai_i32 scratch_size, ai_i16 *pBuffer_a);

/*!
 * @brief Handles 2D convolution with float input, float output and
 *        float weights with pool fused
 * @ingroup lite_conv2d
 */
LITE_API_ENTRY
void forward_lite_conv2d_if32of32wf32_pool(
    const ai_float *pDataIn_init, ai_float *pDataOut_init, const ai_float *pWeights_init,
    const ai_float *pBias_init, ai_float *pScratch_init, ai_float *pWeights_prefetch,
    const ai_short_size n_channel_in, const ai_short_size n_channel_out,
    const ai_short_size width_in, const ai_short_size height_in,
    const ai_short_size width_out, const ai_short_size height_out,
    const ai_short_size filt_width, const ai_short_size filt_height,
    const ai_u16 filt_pad_x, const ai_u16 filt_pad_y,
    const ai_u16 filt_stride_x, const ai_u16 filt_stride_y,
    const ai_short_size filt_height_dilated, const ai_short_size filt_width_dilated,
    const ai_u16 dilation_x, const ai_u16 dilation_y, const ai_short_size n_groups,
    const ai_short_size width_conv_out, const ai_short_size height_conv_out,
    func_nl_lite nl_func_lite, ai_ptr_const nl_params, const ai_ptr_offset nl_params_step,
    const ai_ptr_offset nl_params_size, ai_handle pool_func,
    const ai_short_size pool_width, const ai_short_size pool_height,
    const ai_short_size pool_stride_x, const ai_short_size pool_stride_y,
    const ai_short_size pool_pad_x, const ai_short_size pool_pad_y);

/*!
 * @brief Handles 2D depthwise convolution with float input, float output and
 *        float weights with pool fused
 * @ingroup lite_conv2d
 */
LITE_API_ENTRY
void forward_lite_dw_if32of32wf32_pool(
    const ai_float *pDataIn_init, ai_float *pDataOut_init, const ai_float *pWeights_init,
    const ai_float *pBias_init, ai_float *pScratch_init,
    const ai_short_size n_channel_in, const ai_short_size n_channel_out,
    const ai_short_size width_in, const ai_short_size height_in,
    const ai_short_size width_out, const ai_short_size height_out,
    const ai_short_size filt_width, const ai_short_size filt_height,
    const ai_u16 filt_pad_x, const ai_u16 filt_pad_y,
    const ai_u16 filt_stride_x, const ai_u16 filt_stride_y,
    const ai_short_size filt_height_dilated, const ai_short_size filt_width_dilated,
    const ai_u16 dilation_x, const ai_u16 dilation_y, const ai_short_size n_groups,
    const ai_short_size width_conv_out, const ai_short_size height_conv_out,
    func_nl_lite nl_func_lite, ai_ptr_const nl_params, const ai_ptr_offset nl_params_step,
    const ai_ptr_offset nl_params_size, ai_handle pool_func,
    const ai_short_size pool_width, const ai_short_size pool_height,
    const ai_short_size pool_stride_x, const ai_short_size pool_stride_y,
    const ai_short_size pool_pad_x, const ai_short_size pool_pad_y);

/*!
 * @brief Handles 2D grouped convolution with float input, float output and
 *        float weights with pool fused
 * @ingroup lite_conv2d
 */
LITE_API_ENTRY
void forward_lite_conv2d_if32of32wf32_group_pool(
    const ai_float *pDataIn_init, ai_float *pDataOut_init, const ai_float *pWeights_init,
    const ai_float *pBias_init, ai_float *pScratch_init,
    const ai_short_size n_channel_in, const ai_short_size n_channel_out,
    const ai_short_size width_in, const ai_short_size height_in,
    const ai_short_size width_out, const ai_short_size height_out,
    const ai_short_size filt_width, const ai_short_size filt_height,
    const ai_u16 filt_pad_x, const ai_u16 filt_pad_y,
    const ai_u16 filt_stride_x, const ai_u16 filt_stride_y,
    const ai_short_size filt_height_dilated, const ai_short_size filt_width_dilated,
    const ai_u16 dilation_x, const ai_u16 dilation_y, const ai_short_size n_groups,
    const ai_short_size width_conv_out, const ai_short_size height_conv_out,
    func_nl_lite nl_func_lite, ai_ptr_const nl_params, const ai_ptr_offset nl_params_step,
    const ai_ptr_offset nl_params_size, ai_handle pool_func,
    const ai_short_size pool_width, const ai_short_size pool_height,
    const ai_short_size pool_stride_x, const ai_short_size pool_stride_y,
    const ai_short_size pool_pad_x, const ai_short_size pool_pad_y);

#endif /*LITE_CONV2D_H*/

568  lib/stai/libstai/include/lite_conv2d_dqnn.h  Normal file
@ -0,0 +1,568 @@
/**
  ******************************************************************************
  * @file    lite_conv2d_dqnn.h
  * @author  AIS
  * @brief   header file of AI platform lite dqnn conv kernel datatypes
  ******************************************************************************
  * @attention
  *
  * Copyright (c) 2021 STMicroelectronics.
  * All rights reserved.
  *
  * This software is licensed under terms that can be found in the LICENSE file
  * in the root directory of this software component.
  * If no LICENSE file comes with this software, it is provided AS-IS.
  *
  ******************************************************************************
  */
#ifndef LITE_CONV2D_DQNN_H
#define LITE_CONV2D_DQNN_H

#include "ai_lite_interface.h"

#define AI_16_OVERFLOW_CHECK(val_)  (val_ <= 32767)
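
/*
 * Illustrative usage sketch only (the accumulator `acc` and output buffer are
 * hypothetical, not part of this header): the macro checks that a value has
 * not exceeded the signed 16-bit upper bound before it is narrowed.
 *
 *   ai_i32 acc = dot_product_step();      // hypothetical 32-bit accumulator
 *   if (AI_16_OVERFLOW_CHECK(acc)) {
 *     p_out[i] = (ai_i16)acc;             // safe w.r.t. the +32767 bound
 *   }
 */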

/******************************************************************************/
/*  Forward Functions Section                                                 */
/******************************************************************************/

AI_API_DECLARE_BEGIN

/*!
 * @brief Handles 2D convolution with binary input, binary output and
 *        binary weights - with 0 padding (QKeras like) - Lite I/F
 * @ingroup lite_conv2d_dqnn
 */
LITE_API_ENTRY
void forward_lite_conv2d_is1os1ws1_bn_pad0(
    const ai_u32 *pDataIn_init, ai_u32 *pDataOut_init, const ai_u32 *pWeights_init,
    ai_float *pScratch_32, const ai_u32 n_channel_in, const ai_u32 n_channel_out,
    const ai_i32 width_in, const ai_i32 height_in, const ai_i32 width_out,
    const ai_i32 height_out, const ai_i32 filt_width, const ai_i32 filt_height,
    const ai_i32 filt_pad_x, const ai_i32 filt_pad_y, const ai_i32 filt_stride_x,
    const ai_i32 filt_stride_y, const ai_i32 *pThreshold);

/*!
 * @brief Handles 2D convolution with binary input, binary output and
 *        binary weights - with 0 padding (QKeras like) - Lite I/F
 *        - Optimized thanks to Optim0 assumptions
 * @ingroup lite_conv2d_dqnn
 */
LITE_API_ENTRY
void forward_lite_conv2d_is1os1ws1_bn_pad0_optim0(
    const ai_u32 *pDataIn_init, ai_u32 *pDataOut_init, const ai_u32 *pWeights_init,
    ai_float *pScratch_32, const ai_u32 n_channel_in, const ai_u32 n_channel_out,
    const ai_i32 width_in, const ai_i32 height_in, const ai_i32 width_out,
    const ai_i32 height_out, const ai_i32 filt_width, const ai_i32 filt_height,
    const ai_i32 filt_pad_x, const ai_i32 filt_pad_y, const ai_i32 filt_stride_x,
    const ai_i32 filt_stride_y, const ai_i32 *pThreshold);

/*!
 * @brief Handles 2D convolution with binary input, 8-bits output and
 *        binary weights - with 0 padding (QKeras like) - Lite I/F
 * @ingroup lite_conv2d_dqnn
 */
LITE_API_ENTRY
void forward_lite_conv2d_is1os8ws1_bn_pad0(
    const ai_u32 *pDataIn_init, ai_i8 *pDataOut_init, const ai_u32 *pWeights_init,
    ai_float *pScratch_32, const ai_u32 n_channel_in, const ai_u32 n_channel_out,
    const ai_i32 width_in, const ai_i32 height_in, const ai_i32 width_out,
    const ai_i32 height_out, const ai_i32 filt_width, const ai_i32 filt_height,
    const ai_i32 filt_pad_x, const ai_i32 filt_pad_y, const ai_i32 filt_stride_x,
    const ai_i32 filt_stride_y, const ai_float *pScale, const ai_float *pOffset);

/*!
 * @brief Handles 2D convolution with binary input, binary output and
 *        binary weights - with +1/-1 padding (Larq like) - Lite I/F
 * @ingroup lite_conv2d_dqnn
 */
LITE_API_ENTRY
void forward_lite_conv2d_is1os1ws1_bn_pad1(
    const ai_u32 *pDataIn_init, ai_u32 *pDataOut_init, const ai_u32 *pWeights_init,
    ai_float *pScratch_32, const ai_u32 n_channel_in, const ai_u32 n_channel_out,
    const ai_i32 width_in, const ai_i32 height_in, const ai_i32 width_out,
    const ai_i32 height_out, const ai_i32 filt_width, const ai_i32 filt_height,
    const ai_i32 filt_pad_x, const ai_i32 filt_pad_y, const ai_i32 filt_stride_x,
    const ai_i32 filt_stride_y, const ai_i32 *pThreshold, const ai_i32 pad_value);

/*!
 * @brief Handles 2D convolution with binary input, binary output and
 *        binary weights - with +1/-1 padding (Larq like) - Lite I/F
 *        - Optimized thanks to Optim2 assumptions
 * @ingroup lite_conv2d_dqnn
 */
LITE_API_ENTRY
void forward_lite_conv2d_is1os1ws1_bn_pad1_optim2(
    const ai_u32 *pDataIn_init, ai_u32 *pDataOut_init, const ai_u32 *pWeights_init,
    ai_float *pScratch_32, const ai_u32 n_channel_in, const ai_u32 n_channel_out,
    const ai_i32 width_in, const ai_i32 height_in, const ai_i32 width_out,
    const ai_i32 height_out, const ai_i32 filt_width, const ai_i32 filt_height,
    const ai_i32 filt_pad_x, const ai_i32 filt_pad_y, const ai_i32 filt_stride_x,
    const ai_i32 filt_stride_y, const ai_i32 *pThreshold, const ai_i32 pad_value);

/*!
 * @brief Handles 2D convolution with binary input, binary output and
 *        binary weights
 * @ingroup lite_conv2d_dqnn
 */
LITE_API_ENTRY
void forward_lite_conv2d_is1os1ws1_bn(
    const ai_u32 *pDataIn_init, ai_u32 *pDataOut_init, const ai_u32 *pWeights_init,
    ai_float *pScratch_32, const ai_u16 n_channel_in, const ai_u16 n_channel_out,
    const ai_u16 width_in, const ai_u16 height_in, const ai_u16 width_out,
    const ai_u16 height_out, const ai_u16 filt_width, const ai_u16 filt_height,
    const ai_u16 filt_pad_x, const ai_u16 filt_pad_y, const ai_u16 filt_stride_x,
    const ai_u16 filt_stride_y, const ai_i32 *pThreshold, const ai_u8 flatten_output);

/*!
 * @brief Handles 2D convolution with binary input, 8-bits output and
 *        binary weights - with +1/-1 padding (Larq like) - Lite I/F
 * @ingroup lite_conv2d_dqnn
 */
LITE_API_ENTRY
void forward_lite_conv2d_is1os8ws1_bn_pad1(
    const ai_u32 *pDataIn_init, ai_i8 *pDataOut_init, const ai_u32 *pWeights_init,
    ai_float *pScratch_32, const ai_u32 n_channel_in, const ai_u32 n_channel_out,
    const ai_i32 width_in, const ai_i32 height_in, const ai_i32 width_out,
    const ai_i32 height_out, const ai_i32 filt_width, const ai_i32 filt_height,
    const ai_i32 filt_pad_x, const ai_i32 filt_pad_y, const ai_i32 filt_stride_x,
    const ai_i32 filt_stride_y, const ai_float *pScale, const ai_float *pOffset,
    const ai_i32 pad_value);

/*!
 * @brief Handles 2D convolution with binary input, 8-bits output and
 *        binary weights - with +1/-1 padding (Larq like) - Lite I/F
 *        - Optimized thanks to Optim1 assumptions
 * @ingroup lite_conv2d_dqnn
 */
LITE_API_ENTRY
void forward_lite_conv2d_is1os8ws1_bn_pad1_optim1(
    const ai_u32 *pDataIn_init, ai_i8 *pDataOut_init, const ai_u32 *pWeights_init,
    ai_float *pScratch_32, const ai_u32 n_channel_in, const ai_u32 n_channel_out,
    const ai_i32 width_in, const ai_i32 height_in, const ai_i32 width_out,
    const ai_i32 height_out, const ai_i32 filt_width, const ai_i32 filt_height,
    const ai_i32 filt_pad_x, const ai_i32 filt_pad_y, const ai_i32 filt_stride_x,
    const ai_i32 filt_stride_y, const ai_float *pScale, const ai_float *pOffset,
    const ai_i32 pad_value);

/*!
 * @brief Handles 2D convolution with binary input, fixed point 16-bits output and
 *        binary weights - with 0 padding (QKeras like) - Lite I/F
 * @ingroup lite_conv2d_dqnn
 */
LITE_API_ENTRY
void forward_lite_conv2d_is1os16ws1_bn_pad0_fxp(
    const ai_u32 *pDataIn_init, ai_i16 *pDataOut_init, const ai_u32 *pWeights_init,
    ai_float *pScratch_32, const ai_u32 n_channel_in, const ai_u32 n_channel_out,
    const ai_i32 width_in, const ai_i32 height_in, const ai_i32 width_out,
    const ai_i32 height_out, const ai_i32 filt_width, const ai_i32 filt_height,
    const ai_i32 filt_pad_x, const ai_i32 filt_pad_y, const ai_i32 filt_stride_x,
    const ai_i32 filt_stride_y, const ai_float *pScale_init, const ai_float *pOffset_init);

/*!
 * @brief Handles 2D convolution with binary input, fixed point 16-bits output and
 *        binary weights - with +1/-1 padding (Larq like) - Lite I/F
 * @ingroup lite_conv2d_dqnn
 */
LITE_API_ENTRY
void forward_lite_conv2d_is1os16ws1_bn_pad1_fxp(
    const ai_u32 *pDataIn_init, ai_i16 *pDataOut_init, const ai_u32 *pWeights_init,
    ai_float *pScratch_32, const ai_u32 n_channel_in, const ai_u32 n_channel_out,
    const ai_i32 width_in, const ai_i32 height_in, const ai_i32 width_out,
    const ai_i32 height_out, const ai_i32 filt_width, const ai_i32 filt_height,
    const ai_i32 filt_pad_x, const ai_i32 filt_pad_y, const ai_i32 filt_stride_x,
    const ai_i32 filt_stride_y, const ai_float *pScale_init, const ai_float *pOffset_init,
    const ai_i32 pad_value);

/*!
 * @brief Handles 2D convolution with binary input, fixed point 16-bits output and
 *        binary weights - with +1/-1 padding (Larq like) - Lite I/F
 *        - Optimized thanks to Optim1 assumptions
 * @ingroup lite_conv2d_dqnn
 */
LITE_API_ENTRY
void forward_lite_conv2d_is1os16ws1_bn_pad1_optim1_fxp(
    const ai_u32 *pDataIn_init, ai_i16 *pDataOut_init, const ai_u32 *pWeights_init,
    ai_float *pScratch_32, const ai_u32 n_channel_in, const ai_u32 n_channel_out,
    const ai_i32 width_in, const ai_i32 height_in, const ai_i32 width_out,
    const ai_i32 height_out, const ai_i32 filt_width, const ai_i32 filt_height,
    const ai_i32 filt_pad_x, const ai_i32 filt_pad_y, const ai_i32 filt_stride_x,
    const ai_i32 filt_stride_y, const ai_float *pScale_init, const ai_float *pOffset_init,
    const ai_i32 pad_value);

/*!
 * @brief Handles 2D convolution with binary input, fixed point 16-bits unsigned output and
 *        binary weights - with +1/-1 padding (Larq like) - Lite I/F
 * @ingroup lite_conv2d_dqnn
 */
LITE_API_ENTRY
void forward_lite_conv2d_is1ou16ws1_bn_pad1_fxp(
    const ai_u32 *pDataIn_init, ai_u16 *pDataOut_init, const ai_u32 *pWeights_init,
    ai_float *pScratch_32, const ai_u32 n_channel_in, const ai_u32 n_channel_out,
    const ai_i32 width_in, const ai_i32 height_in, const ai_i32 width_out,
    const ai_i32 height_out, const ai_i32 filt_width, const ai_i32 filt_height,
    const ai_i32 filt_pad_x, const ai_i32 filt_pad_y, const ai_i32 filt_stride_x,
    const ai_i32 filt_stride_y, const ai_float *pScale_init, const ai_float *pOffset_init,
    const ai_i32 pad_value);

/*!
 * @brief Handles 2D convolution with binary input, fixed point 16-bits unsigned output and
 *        binary weights - with 0 padding (QKeras like) - Lite I/F
 * @ingroup lite_conv2d_dqnn
 */
LITE_API_ENTRY
void forward_lite_conv2d_is1ou16ws1_bn_pad0_fxp(
    const ai_u32 *pDataIn_init, ai_u16 *pDataOut_init, const ai_u32 *pWeights_init,
    ai_float *pScratch_32, const ai_u32 n_channel_in, const ai_u32 n_channel_out,
    const ai_i32 width_in, const ai_i32 height_in, const ai_i32 width_out,
    const ai_i32 height_out, const ai_i32 filt_width, const ai_i32 filt_height,
    const ai_i32 filt_pad_x, const ai_i32 filt_pad_y, const ai_i32 filt_stride_x,
    const ai_i32 filt_stride_y, const ai_float *pScale_init, const ai_float *pOffset_init);

/*!
 * @brief Handles 2D convolution with binary input, fixed point 16-bits unsigned output and
 *        binary weights - with +1/-1 padding (Larq like) - Lite I/F
 *        - Optimized thanks to Optim1 assumptions
 * @ingroup lite_conv2d_dqnn
 */
LITE_API_ENTRY
void forward_lite_conv2d_is1ou16ws1_bn_pad1_optim1_fxp(
    const ai_u32 *pDataIn_init, ai_u16 *pDataOut_init, const ai_u32 *pWeights_init,
    ai_float *pScratch_32, const ai_u32 n_channel_in, const ai_u32 n_channel_out,
    const ai_i32 width_in, const ai_i32 height_in, const ai_i32 width_out,
    const ai_i32 height_out, const ai_i32 filt_width, const ai_i32 filt_height,
    const ai_i32 filt_pad_x, const ai_i32 filt_pad_y, const ai_i32 filt_stride_x,
    const ai_i32 filt_stride_y, const ai_float *pScale_init, const ai_float *pOffset_init,
    const ai_i32 pad_value);

/*!
 * @brief Handles 2D convolution with 8-bits quantized Input and weights and
 *        binary output - Lite I/F
 * @ingroup lite_conv2d_dqnn
 */
LITE_API_ENTRY
void forward_lite_conv2d_is8os1ws8(
    const ai_i8 *pDataIn_init, ai_u32 *pDataOut_init, const ai_i8 *pWeights_init,
    ai_float *pScratch_32, const ai_u32 n_channel_in, const ai_u32 n_channel_out,
    const ai_i32 width_in, const ai_i32 height_in, const ai_i32 width_out,
    const ai_i32 height_out, const ai_i32 filt_width, const ai_i32 filt_height,
    const ai_i32 filt_pad_x, const ai_i32 filt_pad_y, const ai_i32 filt_stride_x,
    const ai_i32 filt_stride_y, const ai_i32 *pThreshold, const ai_i8 in_zeropoint);

/*!
 * @brief Handles 2D convolution with 8-bits quantized Input and weights and
 *        binary output - Lite I/F - Optimized thanks to Optim2 assumptions
 * @ingroup lite_conv2d_dqnn
 */
LITE_API_ENTRY
void forward_lite_conv2d_is8os1ws8_optim2(
    const ai_i8 *pDataIn_init, ai_u32 *pDataOut_init, const ai_i8 *pWeights_init,
    ai_float *pScratch_32, const ai_u32 n_channel_in, const ai_u32 n_channel_out,
    const ai_i32 width_in, const ai_i32 height_in, const ai_i32 width_out,
    const ai_i32 height_out, const ai_i32 filt_width, const ai_i32 filt_height,
    const ai_i32 filt_pad_x, const ai_i32 filt_pad_y, const ai_i32 filt_stride_x,
    const ai_i32 filt_stride_y, const ai_i32 *pThreshold, const ai_i8 in_zeropoint);

/*!
 * @brief Handles 2D convolution with 8-bits quantized Input and weights and
 *        binary output - quantized with DoReFa SotA quantizer, lite I/F
 * @ingroup lite_conv2d_dqnn
 */
LITE_API_ENTRY
void forward_lite_conv2d_dorefa_is8os1ws8(
    const ai_i8 *pDataIn_init, ai_u32 *pDataOut_init, const ai_u8 *pWeights_init,
    ai_float *pScratch_32, const ai_u32 n_channel_in, const ai_u32 n_channel_out,
    const ai_i32 width_in, const ai_i32 height_in, const ai_i32 width_out,
    const ai_i32 height_out, const ai_i32 filt_width, const ai_i32 filt_height,
    const ai_i32 filt_pad_x, const ai_i32 filt_pad_y, const ai_i32 filt_stride_x,
    const ai_i32 filt_stride_y, const ai_i32 *pThreshold, const ai_i8 in_zeropoint);

/*!
 * @brief Handles 2D convolution with 8-bits quantized input, output and weights
 *        - quantized with a different quantization per channel
 * @ingroup lite_conv2d_dqnn
 */
LITE_API_ENTRY
void forward_lite_conv2d_is8os8ws8_sssa_ch(
    const ai_i8 *pData_in, ai_i8 *pData_out, const ai_i8 *pWeights, const ai_i32 *pBias,
    ai_u16 *pBuffer_a, const ai_size width_in, const ai_size height_in,
    const ai_size width_out, const ai_size height_out, const ai_u16 n_channel_in,
    const ai_u16 n_channel_out, const ai_size filt_width, const ai_size filt_height,
    const ai_u16 filt_pad_x, const ai_u16 filt_pad_y, const ai_u16 filt_stride_x,
    const ai_u16 filt_stride_y, const ai_u16 dilation_x, const ai_u16 dilation_y,
    const ai_float in_scale, const ai_float out_scale, const ai_float *pWt_scale,
    const ai_i8 in_zeropoint, const ai_i8 out_zeropoint, const ai_i32 scratch_size);

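/*
 * Illustrative note (an assumption about the usual scheme, not taken from this
 * header): kernels that carry a per-channel weight scale array such as
 * pWt_scale[] typically requantize the int32 accumulator of output channel ch
 * with the standard affine formula,
 *
 *   acc_f = (ai_float)acc * (in_scale * pWt_scale[ch]) / out_scale;
 *   out   = saturate_i8(round(acc_f) + out_zeropoint);
 *
 * The exact rounding and saturation behaviour of the library may differ.
 */
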
/*!
 * @brief Handles 2D convolution with 16-bits quantized inputs, binary outputs and
 *        binary weights - Lite I/F. Vanilla version.
 * @ingroup lite_conv2d_dqnn
 */
LITE_API_ENTRY
void forward_lite_conv2d_is16os1ws1_bn_fxp(
    const ai_i16 *pIn, ai_u32 *pOut_32, const ai_u32 *pWeights, const ai_i32 *pThreshold,
    ai_i8 *pBufferA, const ai_i32 dim_kernel, const ai_i16 dim_im_in_x,
    const ai_i16 dim_im_in_y, const ai_i16 dim_im_out_x, const ai_i16 dim_im_out_y,
    const ai_i16 ch_im_in, const ai_i16 ch_im_out, const ai_i16 dim_kernel_x,
    const ai_i16 dim_kernel_y, const ai_i16 padding_x, const ai_i16 padding_y,
    const ai_i16 stride_x, const ai_i16 stride_y, const ai_i16 dilation_x,
    const ai_i16 dilation_y, const ai_i16 in_zeropoint);

/*!
 * @brief Handles 2D convolution with 16-bits quantized inputs, 16-bits quantized
 *        outputs and binary weights - Lite I/F
 * @ingroup lite_conv2d_dqnn
 */
LITE_API_ENTRY
void forward_lite_conv2d_is16os16ws1_fxp(
    const ai_i16 *pIn, ai_i16 *pOut, const ai_u32 *pWeights, ai_i8 *pBufferA,
    const ai_i16 dim_im_in_x, const ai_i16 dim_im_in_y, const ai_i16 dim_im_out_x,
    const ai_i16 dim_im_out_y, const ai_i16 ch_im_in, const ai_i16 ch_im_out,
    const ai_u32 dim_kernel, const ai_i16 dim_kernel_x, const ai_i16 dim_kernel_y,
    const ai_i16 padding_x, const ai_i16 padding_y, const ai_i16 stride_x,
    const ai_i16 stride_y, const ai_i16 dilation_x, const ai_i16 dilation_y,
    const ai_i16 in_zeropoint);

AI_API_DECLARE_END

#endif /*LITE_CONV2D_DQNN_H*/

213  lib/stai/libstai/include/lite_conv2d_is16.h  Normal file
@ -0,0 +1,213 @@
/**
  ******************************************************************************
  * @file    lite_conv2d_is16.h
  * @author  Giacomo Turati
  * @brief   header file of AI platform lite conv2d kernel (with signed int16 input)
  ******************************************************************************
  * @attention
  *
  * Copyright (c) 2024 STMicroelectronics.
  * All rights reserved.
  *
  * This software is licensed under terms that can be found in the LICENSE file
  * in the root directory of this software component.
  * If no LICENSE file comes with this software, it is provided AS-IS.
  *
  ******************************************************************************
  */
#ifndef LITE_CONV2D_IS16_H
#define LITE_CONV2D_IS16_H

#include "stai.h"
#include "ai_lite_interface.h"

/*!
 * @brief Conv2d layer with fixed-point int16_t weights (e.g., QKeras "auto_po2").
 * Supports signed integer 16 input and signed integer 16 output activations.
 * Both weights and bias (if any) must be quantized with 16 bits.
 * Manages different fixed-point scales between weights and bias (if any).
 * @param output Pointer to the output buffer
 * @param input Pointer to the input buffer
 * @param weights Pointer to the weights array
 * @param bias Pointer to the bias array (NULL if not available)
 * @param n_channel_in Number of input channels
 * @param n_channel_out Number of output channels, i.e., the number of conv2d hidden filters
 * @param width_in Input width
 * @param height_in Input height
 * @param width_out Output width
 * @param height_out Output height
 * @param filt_width Filters width
 * @param filt_height Filters height
 * @param filt_pad_x Filters pad width
 * @param filt_pad_y Filters pad height
 * @param stride_x Stride width
 * @param stride_y Stride height
 * @param shifts Array of fixed-point binary scales for the weights
 * @param bias_shifts Array of fixed-point binary scales for the bias
 */
LITE_API_ENTRY
void forward_lite_conv2d_is16os16ws16_fxp(
    int16_t* output, const int16_t* input, const int16_t* weights, const int16_t* bias,
    const ai_size n_channel_in, const ai_size n_channel_out,
    const ai_size width_in, const ai_size height_in,
    const ai_size width_out, const ai_size height_out,
    const ai_size filt_width, const ai_size filt_height,
    const ai_size filt_pad_x, const ai_size filt_pad_y,
    const uint16_t stride_x, const uint16_t stride_y,
    const uint8_t* shifts, const uint8_t* bias_shifts);

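/*
 * Minimal call sketch (illustrative only; the buffers, their sizes and layout
 * are hypothetical, only the prototype above comes from this header). A 3x3,
 * stride-1, no-padding layer with 8 input and 16 output channels on a 32x32
 * map could be driven roughly as:
 *
 *   int16_t in[8 * 32 * 32], out[16 * 30 * 30];
 *   int16_t w[16 * 8 * 3 * 3], b[16];
 *   uint8_t w_shift[16], b_shift[16];   // assumed per-filter shift arrays
 *
 *   forward_lite_conv2d_is16os16ws16_fxp(out, in, w, b,
 *       8, 16, 32, 32, 30, 30, 3, 3, 0, 0, 1, 1, w_shift, b_shift);
 */
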
/*!
 * @brief Conv2d layer with fixed-point int16_t weights (e.g., QKeras "auto_po2").
 * Supports signed integer 16 input and unsigned integer 16 output activations.
 * Both weights and bias (if any) must be quantized with 16 bits.
 * Manages different fixed-point scales between weights and bias (if any).
 * @param output Pointer to the output buffer
 * @param input Pointer to the input buffer
 * @param weights Pointer to the weights array
 * @param bias Pointer to the bias array (NULL if not available)
 * @param n_channel_in Number of input channels
 * @param n_channel_out Number of output channels, i.e., the number of conv2d hidden filters
 * @param width_in Input width
 * @param height_in Input height
 * @param width_out Output width
 * @param height_out Output height
 * @param filt_width Filters width
 * @param filt_height Filters height
 * @param filt_pad_x Filters pad width
 * @param filt_pad_y Filters pad height
 * @param stride_x Stride width
 * @param stride_y Stride height
 * @param shifts Array of fixed-point binary scales for the weights
 * @param bias_shifts Array of fixed-point binary scales for the bias
 */
LITE_API_ENTRY
void forward_lite_conv2d_is16ou16ws16_fxp(
    uint16_t* output, const int16_t* input, const int16_t* weights, const int16_t* bias,
    const ai_size n_channel_in, const ai_size n_channel_out,
    const ai_size width_in, const ai_size height_in,
    const ai_size width_out, const ai_size height_out,
    const ai_size filt_width, const ai_size filt_height,
    const ai_size filt_pad_x, const ai_size filt_pad_y,
    const uint16_t stride_x, const uint16_t stride_y,
    const uint8_t* shifts, const uint8_t* bias_shifts);

/*!
 * @brief Conv2d layer with fixed-point int16_t weights (e.g., QKeras "auto_po2").
 * Supports unsigned integer 16 input and signed integer 16 output activations.
 * Both weights and bias (if any) must be quantized with 16 bits.
 * Manages different fixed-point scales between weights and bias (if any).
 * @param output Pointer to the output buffer
 * @param input Pointer to the input buffer
 * @param weights Pointer to the weights array
 * @param bias Pointer to the bias array (NULL if not available)
 * @param n_channel_in Number of input channels
 * @param n_channel_out Number of output channels, i.e., the number of conv2d hidden filters
 * @param width_in Input width
 * @param height_in Input height
 * @param width_out Output width
 * @param height_out Output height
 * @param filt_width Filters width
 * @param filt_height Filters height
 * @param filt_pad_x Filters pad width
 * @param filt_pad_y Filters pad height
 * @param stride_x Stride width
 * @param stride_y Stride height
 * @param shifts Array of fixed-point binary scales for the weights
 * @param bias_shifts Array of fixed-point binary scales for the bias
 */
LITE_API_ENTRY
void forward_lite_conv2d_iu16os16ws16_fxp(
    int16_t* output, const uint16_t* input, const int16_t* weights, const int16_t* bias,
    const ai_size n_channel_in, const ai_size n_channel_out,
    const ai_size width_in, const ai_size height_in,
    const ai_size width_out, const ai_size height_out,
    const ai_size filt_width, const ai_size filt_height,
    const ai_size filt_pad_x, const ai_size filt_pad_y,
    const uint16_t stride_x, const uint16_t stride_y,
    const uint8_t* shifts, const uint8_t* bias_shifts);

/*!
 * @brief Conv2d layer with fixed-point int16_t weights (e.g., QKeras "auto_po2").
 * Supports unsigned integer 16 input and unsigned integer 16 output activations.
 * Both weights and bias (if any) must be quantized with 16 bits.
 * Manages different fixed-point scales between weights and bias (if any).
 * @param output Pointer to the output buffer
 * @param input Pointer to the input buffer
 * @param weights Pointer to the weights array
 * @param bias Pointer to the bias array (NULL if not available)
 * @param n_channel_in Number of input channels
 * @param n_channel_out Number of output channels, i.e., the number of conv2d hidden filters
 * @param width_in Input width
 * @param height_in Input height
 * @param width_out Output width
 * @param height_out Output height
 * @param filt_width Filters width
 * @param filt_height Filters height
 * @param filt_pad_x Filters pad width
 * @param filt_pad_y Filters pad height
 * @param stride_x Stride width
 * @param stride_y Stride height
 * @param shifts Array of fixed-point binary scales for the weights
 * @param bias_shifts Array of fixed-point binary scales for the bias
 */
LITE_API_ENTRY
void forward_lite_conv2d_iu16ou16ws16_fxp(
    uint16_t* output, const uint16_t* input, const int16_t* weights, const int16_t* bias,
    const ai_size n_channel_in, const ai_size n_channel_out,
    const ai_size width_in, const ai_size height_in,
    const ai_size width_out, const ai_size height_out,
    const ai_size filt_width, const ai_size filt_height,
    const ai_size filt_pad_x, const ai_size filt_pad_y,
    const uint16_t stride_x, const uint16_t stride_y,
    const uint8_t* shifts, const uint8_t* bias_shifts);

#endif /* LITE_CONV2D_IS16_H */

498  lib/stai/libstai/include/lite_conv2d_sssa8_ch.h  Normal file
@ -0,0 +1,498 @@
/**
  ******************************************************************************
  * @file    lite_conv2d_sssa8_ch.h
  * @author  AIS
  * @brief   ST header for signed symmetric signed antisymmetric 8 bits
  *          layers with channel quantization
  ******************************************************************************
  * @attention
  *
  * Copyright (c) 2021-2023 STMicroelectronics.
  * All rights reserved.
  *
  * This software is licensed under terms that can be found in the LICENSE file
  * in the root directory of this software component.
  * If no LICENSE file comes with this software, it is provided AS-IS.
  *
  ******************************************************************************
  */
#ifndef LITE_CONV2D_SSSA8_CH_H
#define LITE_CONV2D_SSSA8_CH_H

void forward_lite_conv2d_rgb_sssa8_ch(
    const ai_i8 *pData_in, const ai_u16 dim_im_in, const ai_i8 *pWeights,
    const ai_u16 n_channel_out, const ai_u16 dim_kernel, const ai_u16 padding,
    const ai_u16 stride, const ai_i32 *pBias, const ai_i8 in_zeropoint,
    const ai_i8 out_zeropoint, const ai_float in_scale, const ai_float out_scale,
    const ai_float *pWt_scale, const ai_layer_format_type out_ch_format,
    ai_i8 *pData_out, const ai_u16 dim_im_out, ai_i32 scratch_size, ai_i16 *pBuffer_a);

void forward_lite_conv2d_sssa8_ch(
    const ai_i8 *pData_in, const ai_u16 dim_im_in_x, const ai_u16 dim_im_in_y,
    const ai_u16 n_channel_in, const ai_i8 *pWeights, const ai_u16 n_channel_out,
    const ai_u16 dim_kernel_x, const ai_u16 dim_kernel_y, const ai_u16 stride_x,
    const ai_u16 stride_y, const ai_u16 padding_x, const ai_u16 padding_y,
    const ai_i32 *pBias, const ai_i8 in_zeropoint, const ai_i8 out_zeropoint,
    const ai_float in_scale, const ai_float out_scale, const ai_float *pWt_scale,
    const ai_layer_format_type out_ch_format, ai_i8 *pData_out,
    const ai_u16 dim_im_out_x, const ai_u16 dim_im_out_y,
    const ai_u16 weights_prefetch_enabled, ai_i32 scratch_size, ai_i16 *pBuffer_a);

void forward_lite_conv2d_hsp_1step_sssa8_ch(
    const ai_i8 *pData_in, const ai_u16 dim_im_in_x, const ai_u16 dim_im_in_y,
    const ai_u16 n_channel_in, const ai_i8 *pWeights, const ai_u16 n_channel_out,
    const ai_u16 dim_kernel_x, const ai_u16 dim_kernel_y, const ai_u16 stride_x,
    const ai_u16 stride_y, const ai_u16 padding_x, const ai_u16 padding_x_r,
    const ai_u16 padding_y, const ai_u16 padding_y_b, const ai_i32 *pBias,
    const ai_i8 in_zeropoint, const ai_i8 out_zeropoint, const ai_float in_scale,
    const ai_float out_scale, const ai_float *pWt_scale,
    const ai_layer_format_type out_ch_format, ai_i8 *pData_out,
    const ai_u16 dim_im_out_x, const ai_u16 dim_im_out_y,
    ai_i32 scratch_size, ai_i16 *pBuffer_a);

void forward_lite_conv2d_hsp_sssa8_ch(
    const ai_i8 *pData_in, const ai_u16 dim_im_in_x, const ai_u16 dim_im_in_y,
    const ai_u16 n_channel_in, const ai_i8 *pWeights, const ai_u16 n_channel_out,
    const ai_u16 dim_kernel_x, const ai_u16 dim_kernel_y, const ai_u16 stride_x,
    const ai_u16 stride_y, const ai_u16 padding_x, const ai_u16 padding_x_r,
    const ai_u16 padding_y, const ai_u16 padding_y_b, const ai_i32 *pBias,
    const ai_i8 in_zeropoint, const ai_i8 out_zeropoint, const ai_float in_scale,
    const ai_float out_scale, const ai_float *pWt_scale,
    const ai_layer_format_type out_ch_format, ai_i8 *pData_out,
    const ai_u16 dim_im_out_x, const ai_u16 dim_im_out_y,
    ai_i32 scratch_size, ai_i16 *pBuffer_a);

void forward_lite_conv2d_hsp_3step_sssa8_ch(
    const ai_i8 *pData_in, const ai_u16 dim_im_in_x, const ai_u16 dim_im_in_y,
    const ai_u16 n_channel_in, const ai_i8 *pWeights, const ai_u16 n_channel_out,
    const ai_u16 dim_kernel_x, const ai_u16 dim_kernel_y, const ai_u16 stride_x,
    const ai_u16 stride_y, const ai_u16 padding_x, const ai_u16 padding_x_r,
    const ai_u16 padding_y, const ai_u16 padding_y_b, const ai_i32 *pBias,
    const ai_i8 in_zeropoint, const ai_i8 out_zeropoint, const ai_float in_scale,
    const ai_float out_scale, const ai_float *pWt_scale,
    const ai_layer_format_type out_ch_format, ai_i8 *pData_out,
    const ai_u16 dim_im_out_x, const ai_u16 dim_im_out_y,
    ai_i32 scratch_size, ai_i16 *pBuffer_a);

void forward_lite_conv2d_dilated_sssa8_ch(
    const ai_i8 *pData_in, const ai_u16 dim_im_in_x, const ai_u16 dim_im_in_y,
    const ai_u16 n_channel_in, const ai_i8 *pWeights, const ai_u16 n_channel_out,
    const ai_u16 dim_kernel_x, const ai_u16 dim_kernel_y, const ai_u16 stride_x,
    const ai_u16 stride_y, const ai_u16 dilation_x, const ai_u16 dilation_y,
    const ai_i32 *pBias, const ai_i8 in_zeropoint, const ai_i8 out_zeropoint,
    const ai_float in_scale, const ai_float out_scale, const ai_float *pWt_scale,
    const ai_layer_format_type out_ch_format, ai_i8 *pData_out,
    const ai_u16 dim_im_out_x, const ai_u16 dim_im_out_y, ai_u32 height_loop_cnt_0,
    const ai_u16 weights_prefetch_enabled, ai_i32 scratch_size, ai_i16 *pBuffer_a);

void forward_lite_conv2d_deep_sssa8_ch(
    const ai_i8 *pData_in, const ai_u16 dim_im_in_x, const ai_u16 dim_im_in_y,
    const ai_u16 n_channel_in, const ai_i8 *pWeights, const ai_u16 n_channel_out,
    const ai_u16 dim_kernel_x, const ai_u16 dim_kernel_y, const ai_u16 stride_x,
    const ai_u16 stride_y, const ai_i32 *pBias, const ai_i8 in_zeropoint,
    const ai_i8 out_zeropoint, const ai_float in_scale, const ai_float out_scale,
    const ai_float *pWt_scale, const ai_layer_format_type out_ch_format,
    ai_i8 *pData_out, const ai_u16 dim_im_out_x, const ai_u16 dim_im_out_y,
    ai_u32 height_loop_cnt_0, const ai_u16 weights_prefetch_enabled,
    ai_i32 scratch_size, ai_i16 *pBuffer_a);

void forward_lite_conv2d_deep_3x3_sssa8_ch(
    const ai_i8 *pData_in, const ai_u16 dim_im_in_x, const ai_u16 dim_im_in_y,
    const ai_u16 n_channel_in, const ai_i8 *pWeights, const ai_u16 n_channel_out,
    const ai_i32 *pBias, const ai_i8 in_zeropoint, const ai_i8 out_zeropoint,
    const ai_float in_scale, const ai_float out_scale, const ai_float *pWt_scale,
    const ai_layer_format_type out_ch_format, ai_i8 *pData_out,
    const ai_u16 dim_im_out_x, const ai_u16 dim_im_out_y,
    ai_u32 height_loop_cnt_0, ai_i32 scratch_size, ai_i16 *pBuffer_a);

void forward_lite_pw_sssa8_ch(
    const ai_i8 *pData_in, const ai_u16 width_in, const ai_u16 height_in,
    const ai_u16 filt_stride_x, const ai_u16 filt_stride_y, const ai_u16 n_channel_in,
    const ai_i8 *pWeights, const ai_u16 n_channel_out, const ai_i32 *pBias,
    const ai_i8 in_zeropoint, const ai_i8 out_zeropoint, const ai_float in_scale,
    const ai_float out_scale, const ai_float *pWt_scale,
    const ai_layer_format_type out_ch_format, ai_i8 *pData_out,
    ai_u16 weights_prefetch_enabled, ai_i32 scratch_size, ai_i16 *pBuffer_a);

void forward_lite_pw_hsp_sssa8_ch(
    const ai_i8 *pData_in, const ai_u16 width_in, const ai_u16 height_in,
    const ai_u16 filt_stride_x, const ai_u16 filt_stride_y, const ai_u16 n_channel_in,
    const ai_i8 *pWeights, const ai_u16 n_channel_out, const ai_i32 *pBias,
    const ai_i8 in_zeropoint, const ai_i8 out_zeropoint, const ai_float in_scale,
    const ai_float out_scale, const ai_float *pWt_scale,
    const ai_layer_format_type out_ch_format, ai_i8 *pData_out,
    ai_i32 scratch_size, ai_i16 *pBuffer_a);

void forward_lite_pw_hsp_1step_sssa8_ch(
    const ai_i8 *pData_in, const ai_u16 width_in, const ai_u16 height_in,
    const ai_u16 filt_stride_x, const ai_u16 filt_stride_y, const ai_u16 n_channel_in,
    const ai_i8 *pWeights, const ai_u16 n_channel_out, const ai_i32 *pBias,
    const ai_i8 in_zeropoint, const ai_i8 out_zeropoint, const ai_float in_scale,
    const ai_float out_scale, const ai_float *pWt_scale,
    const ai_layer_format_type out_ch_format, ai_i8 *pData_out,
    ai_i32 scratch_size, ai_i16 *pBuffer_a);

void forward_lite_pw_hsp_3step_sssa8_ch(
    const ai_i8 *pData_in, const ai_u16 width_in, const ai_u16 height_in,
    const ai_u16 filt_stride_x, const ai_u16 filt_stride_y, const ai_u16 n_channel_in,
    const ai_i8 *pWeights, const ai_u16 n_channel_out, const ai_i32 *pBias,
    const ai_i8 in_zeropoint, const ai_i8 out_zeropoint, const ai_float in_scale,
    const ai_float out_scale, const ai_float *pWt_scale,
    const ai_layer_format_type out_ch_format, ai_i8 *pData_out,
    ai_i32 scratch_size, ai_i16 *pBuffer_a);

void forward_lite_dw_dm_sssa8_ch(
    const ai_i8 *Im_in, const ai_u16 dim_im_in_x, const ai_u16 dim_im_in_y,
    const ai_u16 ch_im_in, const ai_i8 *wt, const ai_u16 ch_im_out,
    const ai_u16 dim_kernel_x, const ai_u16 dim_kernel_y, const ai_u16 padding_x,
    const ai_u16 padding_y, const ai_u16 stride_x, const ai_u16 stride_y,
    const ai_i32 *bias, const ai_i8 In_ZeroPoint, const ai_i8 Out_ZeroPoint,
    const ai_float in_scale, const ai_float out_scale, const ai_float *pWt_scale,
    ai_i8 *Im_out, const ai_u16 dim_im_out_x, const ai_u16 dim_im_out_y,
    const ai_i32 nl_pool_fused, const ai_u32 scratch_size, ai_i16 *bufferA);

void forward_lite_dw_sssa8_ch(
    const ai_i8 *Im_in, const ai_u16 dim_im_in_x, const ai_u16 dim_im_in_y,
    const ai_u16 ch_im_in, const ai_i8 *wt, const ai_u16 dim_kernel_x,
    const ai_u16 dim_kernel_y, const ai_u16 padding_x, const ai_u16 padding_y,
    const ai_u16 stride_x, const ai_u16 stride_y, const ai_i32 *bias,
    const ai_i8 In_ZeroPoint, const ai_i8 Out_ZeroPoint, const ai_float in_scale,
    const ai_float out_scale, const ai_float *pWt_scale, ai_i8 *Im_out,
    const ai_u16 dim_im_out_x, const ai_u16 dim_im_out_y,
    const ai_i32 nl_pool_fused, const ai_u32 scratch_size, ai_i16 *bufferA);

void forward_lite_dw_hsp_sssa8_ch(
    const ai_i8 *Im_in, const ai_u16 dim_im_in_x, const ai_u16 dim_im_in_y,
    const ai_u16 ch_im_in, const ai_i8 *wt, const ai_u16 dim_kernel_x,
    const ai_u16 dim_kernel_y, const ai_u16 padding_x, const ai_u16 padding_x_r,
    const ai_u16 padding_y, const ai_u16 padding_y_b, const ai_u16 stride_x,
    const ai_u16 stride_y, const ai_i32 *bias, const ai_i8 In_ZeroPoint,
    const ai_i8 Out_ZeroPoint, const ai_float in_scale, const ai_float out_scale,
    const ai_float *pWt_scale, ai_i8 *Im_out, const ai_u16 dim_im_out_x,
    const ai_u16 dim_im_out_y, const ai_u32 scratch_size, ai_i16 *bufferA);

void forward_lite_dw_hsp_1step_sssa8_ch(
    const ai_i8 *Im_in, const ai_u16 dim_im_in_x, const ai_u16 dim_im_in_y,
    const ai_u16 ch_im_in, const ai_i8 *wt, const ai_u16 dim_kernel_x,
    const ai_u16 dim_kernel_y, const ai_u16 padding_x, const ai_u16 padding_x_r,
    const ai_u16 padding_y, const ai_u16 padding_y_b, const ai_u16 stride_x,
    const ai_u16 stride_y, const ai_i32 *bias, const ai_i8 In_ZeroPoint,
    const ai_i8 Out_ZeroPoint, const ai_float in_scale, const ai_float out_scale,
    const ai_float *pWt_scale, ai_i8 *Im_out, const ai_u16 dim_im_out_x,
    const ai_u16 dim_im_out_y, const ai_u32 scratch_size, ai_i16 *bufferA);

void forward_lite_dw_hsp_3Step_sssa8_ch(
    const ai_i8 *Im_in, const ai_u16 dim_im_in_x, const ai_u16 dim_im_in_y,
    const ai_u16 ch_im_in, const ai_i8 *wt, const ai_u16 dim_kernel_x,
    const ai_u16 dim_kernel_y, const ai_u16 padding_x, const ai_u16 padding_x_r,
    const ai_u16 padding_y, const ai_u16 padding_y_b, const ai_u16 stride_x,
    const ai_u16 stride_y, const ai_i32 *bias, const ai_i8 In_ZeroPoint,
    const ai_i8 Out_ZeroPoint, const ai_float in_scale, const ai_float out_scale,
    const ai_float *pWt_scale, ai_i8 *Im_out, const ai_u16 dim_im_out_x,
    const ai_u16 dim_im_out_y, const ai_u32 scratch_size, ai_i16 *bufferA);

void forward_lite_dw_3x3_sssa8_ch(
    const ai_i8 *Im_in, const ai_u16 dim_im_in_x, const ai_u16 dim_im_in_y,
    const ai_u16 ch_im_in, const ai_i8 *wt, const ai_u16 stride_x,
    const ai_u16 stride_y, const ai_i32 *bias, const ai_i8 In_ZeroPoint,
    const ai_i8 Out_ZeroPoint, const ai_float in_scale, const ai_float out_scale,
    const ai_float *pWt_scale, ai_i8 *Im_out, const ai_u16 dim_im_out_x,
    const ai_u16 dim_im_out_y, const ai_i32 nl_pool_fused,
    const ai_u32 scratch_size, ai_i16 *bufferA);

void forward_lite_dw_3x3_ch1st_sssa8_ch(
    const ai_i8 *Im_in, const ai_u16 dim_im_in_x, const ai_u16 dim_im_in_y,
    const ai_u16 ch_im_in, const ai_i8 *wt, const ai_u16 stride_x,
    const ai_u16 stride_y, const ai_i32 *bias, const ai_i8 In_ZeroPoint,
    const ai_i8 Out_ZeroPoint, const ai_float in_scale, const ai_float out_scale,
    const ai_float *pWt_scale, ai_i8 *Im_out, const ai_u16 dim_im_out_x,
    const ai_u16 dim_im_out_y, const ai_i32 nl_pool_fused,
    const ai_u32 scratch_size, ai_i16 *bufferA);

void forward_lite_dw_1xN_sssa8_ch(
    const ai_i8 *Im_in, const ai_u16 dim_im_in_y, const ai_u16 ch_im_in,
    const ai_i8 *wt, const ai_u16 dim_kernel_y, const ai_i32 *bias,
    const ai_i8 In_ZeroPoint, const ai_i8 Out_ZeroPoint, const ai_float in_scale,
    const ai_float out_scale, const ai_float *pWt_scale, ai_i8 *Im_out,
    const ai_u16 dim_im_out_y, const ai_i32 nl_pool_fused,
    const ai_u32 scratch_size, ai_i16 *bufferA);

#endif /* LITE_CONV2D_SSSA8_CH_H */

230  lib/stai/libstai/include/lite_convert_dqnn.h  Normal file
@ -0,0 +1,230 @@
/**
  ******************************************************************************
  * @file    lite_convert_dqnn.h
  * @author  AIS
  * @brief   header file of AI platform lite convert kernel datatypes
  ******************************************************************************
  * @attention
  *
  * Copyright (c) 2021 STMicroelectronics.
  * All rights reserved.
  *
  * This software is licensed under terms that can be found in the LICENSE file
  * in the root directory of this software component.
  * If no LICENSE file comes with this software, it is provided AS-IS.
  *
  ******************************************************************************
  */
#ifndef LITE_CONVERT_DQNN_H
#define LITE_CONVERT_DQNN_H

#include "ai_lite_interface.h"

/******************************************************************************/
/*  Forward Functions Section                                                 */
/******************************************************************************/

LITE_API_ENTRY
void forward_lite_node_convert_is1os8(const ai_pbits *p_in, ai_i8 *p_out,
    const ai_i32 n_channels, const ai_i32 n_pixels, const ai_i8 *n_values);

LITE_API_ENTRY
void forward_lite_node_convert_is1os16(const ai_pbits *p_in, ai_i16 *p_out,
    const ai_i32 n_channels, const ai_i32 n_pixels, const ai_i16 *n_values);

LITE_API_ENTRY
void forward_lite_node_convert_is1of32(const ai_pbits *p_in, ai_float *p_out,
    const ai_i32 n_channels, const ai_i32 n_pixels, const ai_float *n_values);

/*!
 * @brief Handles data conversion from 8-bits signed input to signed binary
 *        outputs - Lite API version
 * @ingroup lite_pw_dqnn
 */
LITE_API_ENTRY
void forward_lite_node_convert_is8os1(const ai_i8 *p_in, ai_pbits *p_out,
    const ai_i32 n_channels, const ai_i32 n_pixels, const ai_i8 zp, const ai_i8 pad);

LITE_API_ENTRY
void forward_lite_node_convert_is16os1(const ai_i16 *p_in, ai_pbits *p_out,
    const ai_i32 n_channels, const ai_i32 n_pixels, const ai_i8 zp, const ai_i8 pad);

LITE_API_ENTRY
void forward_lite_node_convert_if32os1(const ai_float *p_in, ai_pbits *p_out,
    const ai_i32 n_channels, const ai_i32 n_pixels, const ai_i8 zp, const ai_i8 pad);

LITE_API_ENTRY
void forward_lite_node_convert_integer_if32os8(const ai_float *p_in, ai_i8 *p_out,
    const ai_u32 size, const ai_float out_scale, const ai_i8 out_zeropoint);

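/*
 * Illustrative note (an assumption about the usual convention, not a statement
 * of the exact library math): a float-to-int8 convert node parameterized by
 * (out_scale, out_zeropoint) normally applies the standard affine quantization
 * to each of the `size` elements:
 *
 *   p_out[i] = saturate_i8(round(p_in[i] / out_scale) + out_zeropoint);
 */
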
LITE_API_ENTRY
void forward_lite_node_convert_integer_if32ou8(const ai_float *p_in, ai_u8 *p_out,
    const ai_u32 size, const ai_float out_scale, const ai_u8 out_zeropoint);

LITE_API_ENTRY
void forward_lite_node_convert_integer_is8of32(const ai_i8 *p_in, ai_float *p_out,
    const ai_u32 size, const ai_float in_scale, const ai_i8 in_zeropoint);

LITE_API_ENTRY
void forward_lite_node_convert_integer_iu8of32(const ai_u8 *p_in, ai_float *p_out,
    const ai_u32 size, const ai_float in_scale, const ai_u8 in_zeropoint);

LITE_API_ENTRY
void forward_lite_node_convert_if32os16(const ai_float *p_in, ai_i16 *p_out,
    const ai_u32 size, const ai_float out_scale, const ai_i16 out_zeropoint);

LITE_API_ENTRY
void forward_lite_node_convert_if32ou16(const ai_float *p_in, ai_u16 *p_out,
    const ai_u32 size, const ai_float out_scale, const ai_u16 out_zeropoint);

LITE_API_ENTRY
void forward_lite_node_convert_is16of32(const ai_i16 *p_in, ai_float *p_out,
    const ai_u32 size, const ai_float in_scale, const ai_i16 in_zeropoint);

LITE_API_ENTRY
void forward_lite_node_convert_iu16of32(const ai_u16 *p_in, ai_float *p_out,
    const ai_u32 size, const ai_float in_scale, const ai_u16 in_zeropoint);

LITE_API_ENTRY
void forward_lite_node_convert_integer_is8os8(const ai_i8 *p_in, ai_i8 *p_out,
    const ai_i32 n_elems, const ai_float scale_ratio, const ai_i16 in_zp, const ai_i16 out_zp);

LITE_API_ENTRY
void forward_lite_node_convert_integer_iu8ou8(const ai_u8 *p_in, ai_u8 *p_out,
    const ai_i32 n_elems, const ai_float scale_ratio, const ai_u8 in_zp, const ai_u8 out_zp);

LITE_API_ENTRY
void forward_lite_node_convert_integer_iu8os8(const ai_u8 *p_in, ai_i8 *p_out,
    const ai_i32 n_elems, const ai_float scale_ratio, const ai_u8 in_zp, const ai_i8 out_zp);

LITE_API_ENTRY
void forward_lite_node_convert_integer_iu8os8_fast(const ai_u8 *p_in, ai_i8 *p_out,
    const ai_i32 n_elems, const ai_float scale_ratio, const ai_u8 in_zp, const ai_i8 out_zp);

LITE_API_ENTRY
void forward_lite_node_convert_integer_is8ou8(const ai_i8 *p_in, ai_u8 *p_out,
    const ai_i32 n_elems, const ai_float scale_ratio, const ai_i8 in_zp, const ai_u8 out_zp);

LITE_API_ENTRY
void forward_lite_node_convert_integer_is8ou8_fast(const ai_i8 *p_in, ai_u8 *p_out,
    const ai_i32 n_elems, const ai_float scale_ratio, const ai_i8 in_zp, const ai_u8 out_zp);

LITE_API_ENTRY
void forward_lite_node_convert_is16ou16(const ai_i16 *p_in, ai_u16 *p_out,
    const ai_i32 n_elems, const ai_float scale_ratio, const ai_i16 in_zp, const ai_u16 out_zp);

#endif /*LITE_CONVERT_DQNN_H*/
118
lib/stai/libstai/include/lite_dense_if32.h
Normal file
@ -0,0 +1,118 @@
|
||||
/**
|
||||
******************************************************************************
|
||||
* @file lite_dense_if32.h
|
||||
* @author STMicroelectronics
|
||||
* @brief Definitions of runtime-lite dense core kernels (with float f32 input)
|
||||
******************************************************************************
|
||||
* @attention
|
||||
*
|
||||
* Copyright (c) 2022 STMicroelectronics.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This software is licensed under terms that can be found in the LICENSE file
|
||||
* in the root directory of this software component.
|
||||
* If no LICENSE file comes with this software, it is provided AS-IS.
|
||||
*
|
||||
******************************************************************************
|
||||
*/
|
||||
#ifndef LITE_DENSE_IF32_H
|
||||
#define LITE_DENSE_IF32_H
|
||||
|
||||
#include "ai_lite_interface.h"
|
||||
|
||||
/*!
|
||||
* @brief decompress the weights into a scratch buffer
|
||||
|
||||
* @ingroup lite_dense_if32
|
||||
* @param out pointer to the scratch buffer data
|
||||
* @param lut pointer to the compression dictionary
|
||||
* @param lut_bits bits used for compression (only 4 or 8 supported)
|
||||
* @param n_in if last dimension is not even, specify its size for padding
|
||||
* @param n_out number of elements to be decompressed
|
||||
*/
|
||||
LITE_API_ENTRY
|
||||
const uint8_t* lite_decompress_ilutof32(
|
||||
float* out, const uint8_t* data0,
|
||||
const float* lut, const uint16_t lut_bits,
|
||||
const ai_size n_in, const ai_size n_out);
|
||||
|
||||
|
||||
/*!
|
||||
* @brief C struct for a dense layer with signed float input, signed float output, and float weights.
|
||||
* @ingroup lite_dense_if32
|
||||
* @param output The pointer to output buffer.
|
||||
* @param input The pointer to input buffer.
|
||||
* @param weights The pointer to weights.
|
||||
* @param bias The pointer to bias (NULL if not available).
|
||||
* @param n_channel_in The number of channels of the input.
|
||||
* @param n_channel_out The number of channels of the output, i.e., the number of dense hidden neurons.
|
||||
*/
|
||||
typedef struct {
|
||||
ai_float* output;
|
||||
const ai_float* input;
|
||||
const ai_float* weights;
|
||||
const ai_float* bias;
|
||||
const ai_size n_channel_in;
|
||||
const ai_size n_channel_out;
|
||||
const ai_size n_elements;
|
||||
} forward_lite_dense_if32of32wf32_args;
|
||||
|
||||
|
||||
/*!
|
||||
* @brief Forward function for a dense layer with signed float input,
|
||||
* signed float output, and float weights.
|
||||
* @ingroup lite_dense_if32
|
||||
* @param args pointer to @ref forward_lite_dense_if32of32wf32_args structure
|
||||
*/
|
||||
LITE_API_ENTRY
|
||||
void forward_lite_dense_if32of32wf32(
|
||||
forward_lite_dense_if32of32wf32_args* args);
|
||||
|
||||
|
||||
/*!
|
||||
* @brief Forward function for a dense layer with signed float input,
|
||||
* signed float output, and 4bit LUT compressed weights.
|
||||
* @ingroup lite_dense_if32
|
||||
* @param output The pointer to output buffer.
|
||||
* @param weights_lut The pointer to compressed weights LUT table (16 entries).
|
||||
* @param input The pointer to input buffer.
|
||||
* @param weights_indeces The pointer to the compressed weights indices table (packed 4-bit buffer).
|
||||
* @param scratch_lut The pointer to a cache buffer used to prefetch the weights_lut values (optional).
|
||||
* @param bias The pointer to bias (NULL if not available).
|
||||
* @param n_channel_in The number of channels of the input.
|
||||
* @param n_channel_out The number of channels of the output, i.e., the number of dense hidden neurons.
|
||||
* @param n_elements The number of elements to process
|
||||
*/
|
||||
LITE_API_ENTRY
|
||||
void forward_lite_dense_if32of32wf32_lut4(
|
||||
ai_float* output, const ai_float* input,
|
||||
const ai_u8* weights_indeces, const ai_float* weights_lut, ai_float* scratch_lut,
|
||||
const ai_float* bias,
|
||||
const ai_size n_channel_in, const ai_size n_channel_out,
|
||||
const ai_size n_elements);
|
||||
|
||||
|
||||
/*!
|
||||
* @brief Forward function for a dense layer with signed float input,
|
||||
* signed float output, and 8bit LUT compressed weights.
|
||||
* @ingroup lite_dense_if32
|
||||
* @param output The pointer to output buffer.
|
||||
* @param input The pointer to input buffer.
|
||||
* @param weights_indeces The pointer to the compressed weights indices table (8-bit buffer).
|
||||
* @param weights_lut The pointer to compressed weights LUT table (256 entries).
|
||||
* @param scratch_lut The pointer to a cache buffer used to prefetch the weights_lut values (optional).
|
||||
* @param bias The pointer to bias (NULL if not available).
|
||||
* @param n_channel_in The number of channels of the input.
|
||||
* @param n_channel_out The number of channels of the output, i.e., the number of dense hidden neurons.
|
||||
* @param n_elements The number of elements to process
|
||||
*/
|
||||
LITE_API_ENTRY
|
||||
void forward_lite_dense_if32of32wf32_lut8(
|
||||
ai_float* output, const ai_float* input,
|
||||
const ai_u8* weights_indeces, const ai_float* weights_lut, ai_float* scratch_lut,
|
||||
const ai_float* bias,
|
||||
const ai_size n_channel_in, const ai_size n_channel_out,
|
||||
const ai_size n_elements);
|
||||
|
||||
|
||||
#endif /* LITE_DENSE_IF32_H */
|
||||
71
lib/stai/libstai/include/lite_dense_is1.h
Normal file
@ -0,0 +1,71 @@
|
||||
/**
|
||||
******************************************************************************
|
||||
* @file lite_dense_is1.h
|
||||
* @author AIS
|
||||
* @brief header file of AI platform lite dense kernel functions (signed binary input)
|
||||
******************************************************************************
|
||||
* @attention
|
||||
*
|
||||
* Copyright (c) 2023 STMicroelectronics.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This software is licensed under terms that can be found in the LICENSE file
|
||||
* in the root directory of this software component.
|
||||
* If no LICENSE file comes with this software, it is provided AS-IS.
|
||||
*
|
||||
******************************************************************************
|
||||
*/
|
||||
#ifndef LITE_DENSE_IS1_H
|
||||
#define LITE_DENSE_IS1_H
|
||||
|
||||
|
||||
#include "ai_lite_interface.h"
|
||||
|
||||
|
||||
/*!
|
||||
* @brief Forward function for a dense layer with signed binary input,
|
||||
* signed float output, and float weights.
|
||||
* @ingroup lite_dense_is1
|
||||
* @param output The pointer to output buffer.
|
||||
* @param input The pointer to input buffer.
|
||||
* @param weights The pointer to weights.
|
||||
* @param bias The pointer to bias (NULL if not available).
|
||||
* @param scratch The pointer to the scratch buffer (unused).
|
||||
* @param n_channel_in The number of channels of the input.
|
||||
* @param n_channel_out The number of channels of the output, i.e.,
|
||||
* the number of dense hidden neurons.
|
||||
*/
|
||||
LITE_API_ENTRY
|
||||
void forward_lite_dense_is1of32wf32(
|
||||
ai_float *output, const ai_pbits *input, const ai_float *weights,
|
||||
const ai_float *bias, ai_float *scratch,
|
||||
const ai_u32 n_channel_in, const ai_u32 n_channel_out
|
||||
);
|
||||
|
||||
|
||||
/*!
|
||||
* @brief Forward function for a dense layer with signed binary input,
|
||||
* signed float output, and float weights.
|
||||
* The BN is fused, i.e., the layer requires weights, scale, and offset, where
|
||||
* weights are those of the dense layer, scale is that of the BN, and the offset
|
||||
* corresponds to dense bias * bn scale + bn offset. If the parameters do not
|
||||
* agree with such convention, the behavior is undefined.
|
||||
* @ingroup lite_dense_is1
|
||||
* @param output The pointer to output buffer.
|
||||
* @param input The pointer to input buffer.
|
||||
* @param weights The pointer to weights.
|
||||
* @param scale The pointer to scale.
|
||||
* @param offset The pointer to offset.
|
||||
* @param scratch The pointer to the scratch buffer (unused).
|
||||
* @param n_channel_in The number of channels of the input.
|
||||
* @param n_channel_out The number of channels of the output, i.e.,
|
||||
* the number of dense hidden neurons.
|
||||
*/
|
||||
LITE_API_ENTRY
|
||||
void forward_lite_dense_is1of32wf32_bn(
|
||||
ai_float *output, const ai_pbits *input, const ai_float *weights,
|
||||
const ai_float *scale, const ai_float *offset, ai_float *scratch,
|
||||
const ai_u32 n_channel_in, const ai_u32 n_channel_out
|
||||
);
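For clarity, the fused-BN convention described above can be expressed directly as a small helper. A minimal sketch (illustrative names, not part of this header), using the documented relation offset[i] = dense_bias[i] * bn_scale[i] + bn_offset[i]:

static void fuse_dense_bias_into_bn_offset(ai_float *offset,
                                           const ai_float *dense_bias,
                                           const ai_float *bn_scale,
                                           const ai_float *bn_offset,
                                           const ai_u32 n_channel_out)
{
  for (ai_u32 i = 0; i < n_channel_out; i++) {
    /* offset fed to the *_bn kernels = dense bias * bn scale + bn offset */
    offset[i] = dense_bias[i] * bn_scale[i] + bn_offset[i];
  }
}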
|
||||
|
||||
#endif /* LITE_DENSE_IS1_H */
|
||||
140
lib/stai/libstai/include/lite_dense_is16.h
Normal file
@ -0,0 +1,140 @@
|
||||
|
||||
/**
|
||||
******************************************************************************
|
||||
* @file lite_dense_is16.h
|
||||
* @author Giacomo Turati
|
||||
* @brief header file of AI platform lite dense kernel (with signed int16 input)
|
||||
******************************************************************************
|
||||
* @attention
|
||||
*
|
||||
* Copyright (c) 2024 STMicroelectronics.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This software is licensed under terms that can be found in the LICENSE file
|
||||
* in the root directory of this software component.
|
||||
* If no LICENSE file comes with this software, it is provided AS-IS.
|
||||
*
|
||||
******************************************************************************
|
||||
*/
|
||||
#ifndef LITE_DENSE_IS16_H
|
||||
#define LITE_DENSE_IS16_H
|
||||
|
||||
|
||||
#include "stai.h"
|
||||
#include "ai_lite_interface.h"
|
||||
|
||||
/*!
|
||||
* @brief Dense layer with fixed-point int16_t weights (e.g., Qkeras "auto_po2").
|
||||
* Supports signed 16-bit integer input and signed 16-bit integer output activations.
|
||||
* Both weights and bias (if any) must be quantized with 16 bits.
|
||||
* Manages different fixed-point scales between weights and bias (if any).
|
||||
* @ingroup lite_dense_ws16
|
||||
* @param output The pointer to output buffer.
|
||||
* @param input The pointer to input buffer.
|
||||
* @param weights The pointer to weights.
|
||||
* @param n_channel_in The number of channels of the input.
|
||||
* @param n_channel_out The number of channels of the output, i.e.,
|
||||
* the number of dense hidden neurons.
|
||||
* @param shifts Array of fixed-point binary scales for the weights
|
||||
* @param bias_shifts Array of fixed-point binary scales for the bias
|
||||
*/
|
||||
LITE_API_ENTRY
|
||||
void forward_lite_dense_is16os16ws16_fxp(
|
||||
int16_t* output,
|
||||
const int16_t* input,
|
||||
const int16_t* weights,
|
||||
const int16_t* bias,
|
||||
const uint32_t n_channel_in,
|
||||
const uint32_t n_channel_out,
|
||||
const uint8_t* shifts,
|
||||
const uint8_t* bias_shifts
|
||||
);
|
||||
|
||||
/*!
|
||||
* @brief Dense layer with fixed-point int16_t weights (e.g., Qkeras "auto_po2").
|
||||
* Supports signed 16-bit integer input and unsigned 16-bit integer output activations.
|
||||
* Both weights and bias (if any) must be quantized with 16 bits.
|
||||
* Manages different fixed-point scales between weights and bias (if any).
|
||||
* @ingroup lite_dense_ws16
|
||||
* @param output The pointer to output buffer.
|
||||
* @param input The pointer to input buffer.
|
||||
* @param weights The pointer to weights.
|
||||
* @param n_channel_in The number of channels of the input.
|
||||
* @param n_channel_out The number of channels of the output, i.e.,
|
||||
* the number of dense hidden neurons.
|
||||
* @param shifts Array of fixed-point binary scales for the weights
|
||||
* @param bias_shifts Array of fixed-point binary scales for the bias
|
||||
*/
|
||||
|
||||
LITE_API_ENTRY
|
||||
void forward_lite_dense_is16ou16ws16_fxp(
|
||||
uint16_t* output,
|
||||
const int16_t* input,
|
||||
const int16_t* weights,
|
||||
const int16_t* bias,
|
||||
const uint32_t n_channel_in,
|
||||
const uint32_t n_channel_out,
|
||||
const uint8_t* shifts,
|
||||
const uint8_t* bias_shifts
|
||||
);
|
||||
|
||||
/*!
|
||||
* @brief Dense layer with fixed-point int16_t weights (e.g., Qkeras "auto_po2").
|
||||
* Supports unsigned 16-bit integer input and signed 16-bit integer output activations.
|
||||
* Both weights and bias (if any) must be quantized with 16 bits.
|
||||
* Manages different fixed-point scales between weights and bias (if any).
|
||||
* @ingroup lite_dense_ws16
|
||||
* @param output The pointer to output buffer.
|
||||
* @param input The pointer to input buffer.
|
||||
* @param weights The pointer to weights.
|
||||
* @param n_channel_in The number of channels of the input.
|
||||
* @param n_channel_out The number of channels of the output, i.e.,
|
||||
* the number of dense hidden neurons.
|
||||
* @param shifts Array of fixed-point binary scales for the weights
|
||||
* @param bias_shifts Array of fixed-point binary scales for the bias
|
||||
*/
|
||||
LITE_API_ENTRY
|
||||
void forward_lite_dense_iu16os16ws16_fxp(
|
||||
int16_t* output,
|
||||
const uint16_t* input,
|
||||
const int16_t* weights,
|
||||
const int16_t* bias,
|
||||
const uint32_t n_channel_in,
|
||||
const uint32_t n_channel_out,
|
||||
const uint8_t* shifts,
|
||||
const uint8_t* bias_shifts
|
||||
);
|
||||
|
||||
/*!
|
||||
* @brief Dense layer with fixed-point int16_t weights (e.g., Qkeras "auto_po2").
|
||||
* Supports unsigned 16-bit integer input and unsigned 16-bit integer output activations.
|
||||
* Both weights and bias (if any) must be quantized with 16 bits.
|
||||
* Manages different fixed-point scales between weights and bias (if any).
|
||||
* @ingroup lite_dense_ws16
|
||||
* @param output The pointer to output buffer.
|
||||
* @param input The pointer to input buffer.
|
||||
* @param weights The pointer to weights.
|
||||
* @param n_channel_in The number of channels of the input.
|
||||
* @param n_channel_out The number of channels of the output, i.e.,
|
||||
* the number of dense hidden neurons.
|
||||
* @param shifts Array of fixed-point binary scales for the weights
|
||||
* @param bias_shifts Array of fixed-point binary scales for the bias
|
||||
*/
|
||||
|
||||
/*!
|
||||
* @brief Unsigned input / unsigned output variant of the ws16 fixed-point dense kernel.
* @ingroup lite_dense_ws16
|
||||
*/
|
||||
LITE_API_ENTRY
|
||||
void forward_lite_dense_iu16ou16ws16_fxp(
|
||||
uint16_t* output,
|
||||
const uint16_t* input,
|
||||
const int16_t* weights,
|
||||
const int16_t* bias,
|
||||
const uint32_t n_channel_in,
|
||||
const uint32_t n_channel_out,
|
||||
const uint8_t* shifts,
|
||||
const uint8_t* bias_shifts
|
||||
);
|
||||
|
||||
#endif /* LITE_DENSE_IS16_H */
|
||||
170
lib/stai/libstai/include/lite_dense_is1ws1.h
Normal file
@ -0,0 +1,170 @@
|
||||
/**
|
||||
******************************************************************************
|
||||
* @file lite_dense_is1ws1.h
|
||||
* @author AIS
|
||||
* @brief header file of AI platform lite dense kernel datatypes
|
||||
******************************************************************************
|
||||
* @attention
|
||||
*
|
||||
* Copyright (c) 2022 STMicroelectronics.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This software is licensed under terms that can be found in the LICENSE file
|
||||
* in the root directory of this software component.
|
||||
* If no LICENSE file comes with this software, it is provided AS-IS.
|
||||
*
|
||||
******************************************************************************
|
||||
*/
|
||||
#ifndef LITE_DENSE_IS1WS1_H
|
||||
#define LITE_DENSE_IS1WS1_H
|
||||
|
||||
#include "stai.h"
|
||||
#include "ai_lite_interface.h"
|
||||
|
||||
/*!
|
||||
* @brief Forward function for a dense layer with signed binary input,
|
||||
* signed binary output, and signed binary weights.
|
||||
* @ingroup lite_dense_is1ws1
|
||||
* @param output The pointer to output buffer.
|
||||
* @param input The pointer to input buffer.
|
||||
* @param weights The pointer to weights.
|
||||
* @param bias The pointer to bias (NULL if not available).
|
||||
* @param scratch The pointer to the scratch buffer.
|
||||
* @param n_channel_in The number of channels of the input.
|
||||
* @param n_channel_out The number of channels of the output, i.e.,
|
||||
* the number of dense hidden neurons.
|
||||
*/
|
||||
LITE_API_ENTRY
|
||||
void forward_lite_dense_is1os1ws1(
|
||||
ai_pbits *output, const ai_pbits *input, const ai_pbits *weights,
|
||||
const ai_pbits *bias, ai_i32 *scratch,
|
||||
const ai_u32 n_channel_in, const ai_u32 n_channel_out
|
||||
);
|
||||
|
||||
/*!
|
||||
* @brief Forward function for a dense layer with signed binary input,
|
||||
* signed binary output, and signed binary weights.
|
||||
* The BN is fused, i.e., the layer requires weights, scale, and offset, where
|
||||
* weights are those of the dense layer, scale is that of the BN, and the offset
|
||||
* corresponds to dense bias * bn scale + bn offset. If the parameters do not
|
||||
* agree with such convention, the behavior is undefined.
|
||||
* @ingroup lite_dense_is1ws1
|
||||
* @param output The pointer to output buffer.
|
||||
* @param input The pointer to input buffer.
|
||||
* @param weights The pointer to weights.
|
||||
* @param scale The pointer to scale.
|
||||
* @param offset The pointer to offset.
|
||||
* @param scratch The pointer to the scratch buffer.
|
||||
* @param n_channel_in The number of channels of the input.
|
||||
* @param n_channel_out The number of channels of the output, i.e.,
|
||||
* the number of dense hidden neurons.
|
||||
*/
|
||||
LITE_API_ENTRY
|
||||
void forward_lite_dense_is1os1ws1_bn(
|
||||
ai_pbits *output, const ai_pbits *input, const ai_pbits *weights,
|
||||
const ai_float *scale, const ai_float *offset, ai_i32 *scratch,
|
||||
const ai_u32 n_channel_in, const ai_u32 n_channel_out
|
||||
);
|
||||
|
||||
|
||||
/*!
|
||||
* @brief Forward function for a dense layer with signed binary input,
|
||||
* signed 16bit output, and signed binary weights.
|
||||
* @ingroup lite_dense_is1ws1
|
||||
* @param output The pointer to output buffer.
|
||||
* @param input The pointer to input buffer.
|
||||
* @param weights The pointer to weights.
|
||||
* @param bias The pointer to bias (NULL if not available).
|
||||
* @param scratch The pointer to the scratch buffer (signed 32bit).
|
||||
* @param n_channel_in The number of channels of the input.
|
||||
* @param n_channel_out The number of channels of the output, i.e.,
|
||||
* the number of dense hidden neurons.
|
||||
*/
|
||||
LITE_API_ENTRY
|
||||
void forward_lite_dense_is1os16ws1(
|
||||
ai_i16 *output, const ai_pbits *input, const ai_pbits *weights,
|
||||
const ai_pbits *bias, ai_i32 *scratch,
|
||||
const ai_u32 n_channel_in, const ai_u32 n_channel_out);
|
||||
|
||||
|
||||
/*!
|
||||
* @brief Forward function for a dense layer with signed binary input,
|
||||
* signed 16bit output, and signed binary weights.
|
||||
* The BN is fused, i.e., the layer requires weights, scale, and offset, where
|
||||
* weights are those of the dense layer, scale is that of the BN, and the offset
|
||||
* corresponds to dense bias * bn scale + bn offset. If the parameters do not
|
||||
* agree with such convention, the behavior is undefined.
|
||||
* @ingroup lite_dense_is1ws1
|
||||
* @param output The pointer to output buffer.
|
||||
* @param input The pointer to input buffer.
|
||||
* @param weights The pointer to weights.
|
||||
* @param bias The pointer to bias (NULL if not available).
|
||||
* @param scratch The pointer to the scratch buffer (signed 32bit).
|
||||
* @param n_channel_in The number of channels of the input.
|
||||
* @param n_channel_out The number of channels of the output, i.e.,
|
||||
* the number of dense hidden neurons.
|
||||
*/
|
||||
LITE_API_ENTRY
|
||||
void forward_lite_dense_is1os16ws1_bn(
|
||||
ai_i16 *output, const ai_pbits *input, const ai_pbits *weights,
|
||||
const ai_float *scale, const ai_float *offset, ai_i32 *scratch,
|
||||
const ai_u32 n_channel_in, const ai_u32 n_channel_out);
|
||||
|
||||
|
||||
/*!
|
||||
* @brief Forward function for a dense layer with signed binary input,
|
||||
* signed float output, and signed binary weights.
|
||||
* @ingroup lite_dense_is1ws1
|
||||
* @param output The pointer to output buffer.
|
||||
* @param input The pointer to input buffer.
|
||||
* @param weights The pointer to weights.
|
||||
* @param bias The pointer to bias (NULL if not available).
|
||||
* @param scratch The pointer to the scratch buffer (unused).
|
||||
* @param n_channel_in The number of channels of the input.
|
||||
* @param n_channel_out The number of channels of the output, i.e.,
|
||||
* the number of dense hidden neurons.
|
||||
*/
|
||||
LITE_API_ENTRY
|
||||
void forward_lite_dense_is1of32ws1(
|
||||
ai_float *output, const ai_pbits *input, const ai_pbits *weights,
|
||||
const ai_pbits *bias, ai_i32 *scratch,
|
||||
const ai_u32 n_channel_in, const ai_u32 n_channel_out
|
||||
);
|
||||
|
||||
|
||||
/*!
|
||||
* @brief C struct for a dense layer with signed binary input,
|
||||
* signed float output, and signed binary weights.
|
||||
* The BN is fused, i.e., the layer requires weights, scale, and offset, where
|
||||
* weights are those of the dense layer, scale is that of the BN, and the offset
|
||||
* corresponds to dense bias * bn scale + bn offset. If the parameters do not
|
||||
* agree with such convention, the behavior is undefined.
|
||||
* @ingroup lite_dense_is1ws1
|
||||
* @param output The pointer to output buffer.
|
||||
* @param input The pointer to input buffer.
|
||||
* @param weights The pointer to weights.
|
||||
* @param scale The pointer to scale.
|
||||
* @param offset The pointer to offset.
|
||||
* @param scratch The pointer to the scratch buffer (unused).
|
||||
* @param n_channel_in The number of channels of the input.
|
||||
* @param n_channel_out The number of channels of the output, i.e.,
|
||||
* the number of dense hidden neurons.
|
||||
*/
|
||||
typedef struct {
|
||||
float* output;
|
||||
const stai_pbits* input;
|
||||
const stai_pbits* weights;
|
||||
const float* scale;
|
||||
const float* offset;
|
||||
int32_t* scratch;
|
||||
const uint32_t n_channel_in;
|
||||
const uint32_t n_channel_out;
|
||||
} forward_lite_dense_is1of32ws1_bn_args;
|
||||
|
||||
|
||||
LITE_API_ENTRY
|
||||
void forward_lite_dense_is1of32ws1_bn(
|
||||
forward_lite_dense_is1of32ws1_bn_args* args);
|
||||
|
||||
|
||||
#endif /*LITE_DENSE_IS1WS1_H*/
|
||||
53
lib/stai/libstai/include/lite_dense_is8os1ws1.h
Normal file
@ -0,0 +1,53 @@
|
||||
/**
|
||||
******************************************************************************
|
||||
* @file lite_dense_is8os1ws1.h
|
||||
* @author AIS
|
||||
* @brief header file of AI platform lite dense kernel datatypes
|
||||
******************************************************************************
|
||||
* @attention
|
||||
*
|
||||
* Copyright (c) 2023 STMicroelectronics.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This software is licensed under terms that can be found in the LICENSE file
|
||||
* in the root directory of this software component.
|
||||
* If no LICENSE file comes with this software, it is provided AS-IS.
|
||||
*
|
||||
******************************************************************************
|
||||
*/
|
||||
#ifndef LITE_DENSE_IS8OS1WS1_H
|
||||
#define LITE_DENSE_IS8OS1WS1_H
|
||||
|
||||
|
||||
#include "ai_lite_interface.h"
|
||||
|
||||
/******************************************************************************/
|
||||
/* Forward Functions Section */
|
||||
/******************************************************************************/
|
||||
|
||||
/*!
|
||||
* @brief Forward function for a dense layer with signed 8 bits input,
|
||||
* binary weights and binary output.
|
||||
* @ingroup lite_dense_is8os1ws1
|
||||
* @param out_ptr The pointer to output buffer.
|
||||
* @param data_in_init_ptr The pointer to input buffer.
|
||||
* @param weights_ptr The pointer to weights.
|
||||
* @param scratch_ptr The pointer to scratch buffer.
|
||||
* @param scratch_size The value of scratch tensor size.
|
||||
* @param n_channel_out The number of channels of the output, i.e.,
|
||||
* the number of dense hidden neurons.
|
||||
* @param n_channel_in The number of channels of the input.
|
||||
* @param threshold_ptr The pointer to the thresholds derived from the fused BN scale/offset.
|
||||
*/
|
||||
LITE_API_ENTRY
|
||||
void forward_lite_dense_is8os1ws1_bn_fxp(ai_pbits *out_ptr,
|
||||
const ai_i8 *data_in_init_ptr,
|
||||
const ai_pbits *weights_ptr,
|
||||
ai_i32 *scratch_ptr,
|
||||
const ai_u32 scratch_size,
|
||||
const ai_u32 n_channel_out,
|
||||
const ai_u32 n_channel_in,
|
||||
const ai_i32 *threshold_ptr);
|
||||
|
||||
#endif /*LITE_DENSE_IS8OS1WS1_H*/
|
||||
97
lib/stai/libstai/include/lite_dense_is8os8ws8.h
Normal file
@ -0,0 +1,97 @@
|
||||
/**
|
||||
******************************************************************************
|
||||
* @file lite_dense_is8os8ws8.h
|
||||
* @author AIS
|
||||
* @brief header file of AI platform lite dense kernel datatypes
|
||||
******************************************************************************
|
||||
* @attention
|
||||
*
|
||||
* Copyright (c) 2022 STMicroelectronics.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This software is licensed under terms that can be found in the LICENSE file
|
||||
* in the root directory of this software component.
|
||||
* If no LICENSE file comes with this software, it is provided AS-IS.
|
||||
*
|
||||
******************************************************************************
|
||||
*/
|
||||
#ifndef LITE_DENSE_IS8OS8WS8_H
|
||||
#define LITE_DENSE_IS8OS8WS8_H
|
||||
|
||||
#include "ai_lite_interface.h"
|
||||
|
||||
/******************************************************************************/
|
||||
/* Forward Functions Section */
|
||||
/******************************************************************************/
|
||||
|
||||
/*!
|
||||
* @brief Forward function for a dense layer with signed input,
|
||||
* signed output and signed weights all at 8 bits.
|
||||
* @ingroup lite_dense_is8os8ws8
|
||||
* @param input The pointer to input buffer.
|
||||
* @param output The pointer to output buffer.
|
||||
* @param weights The pointer to weights.
|
||||
* @param bias The pointer to bias (NULL if not available).
|
||||
* @param in_zeropoint The value of the zero point of the input.
|
||||
* @param out_zeropoint The value of the zero point of the output.
|
||||
* @param n_channel_in The number of channels of the input.
|
||||
* @param n_channel_out The number of channels of the output, i.e.,
|
||||
* the number of dense hidden neurons.
|
||||
* @param n_pixels Total number of pixels.
|
||||
*/
|
||||
LITE_API_ENTRY
|
||||
void forward_lite_dense_is8os8ws8(ai_i8 * pDataOut,
|
||||
const ai_i8 *pDataIn,
|
||||
const ai_i8 *pWeights,
|
||||
const ai_i32 *pBias,
|
||||
const ai_i8 in_zeropoint,
|
||||
const ai_i8 out_zeropoint,
|
||||
const ai_u16 n_channel_in,
|
||||
const ai_u16 n_channel_out,
|
||||
const ai_size n_pixels,
|
||||
const ai_float in_scale,
|
||||
const ai_float out_scale,
|
||||
const ai_float Wt_scale,
|
||||
ai_i16 *pBuffer_a);
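The scale and zero-point parameters above are consistent with a standard per-tensor affine quantization scheme. As a hedged, single-pixel reference only (assumed semantics; the real kernel also iterates over n_pixels and uses pBuffer_a as scratch):

#include <math.h>   /* lroundf */

static void dense_is8os8ws8_ref_pixel(ai_i8 *out, const ai_i8 *in, const ai_i8 *w,
                                      const ai_i32 *bias,
                                      const ai_i8 in_zp, const ai_i8 out_zp,
                                      const ai_u16 n_ch_in, const ai_u16 n_ch_out,
                                      const ai_float in_scale,
                                      const ai_float out_scale,
                                      const ai_float w_scale)
{
  const ai_float requant = (in_scale * w_scale) / out_scale;  /* assumed ratio */
  for (ai_u16 o = 0; o < n_ch_out; o++) {
    ai_i32 acc = bias ? bias[o] : 0;
    for (ai_u16 i = 0; i < n_ch_in; i++) {
      acc += ((ai_i32)in[i] - (ai_i32)in_zp) * (ai_i32)w[o * n_ch_in + i];
    }
    ai_i32 v = (ai_i32)lroundf((ai_float)acc * requant) + (ai_i32)out_zp;
    if (v > 127) v = 127;     /* saturate to int8 */
    if (v < -128) v = -128;
    out[o] = (ai_i8)v;
  }
}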
|
||||
|
||||
void forward_lite_dense_hsp_is8os8ws8(ai_i8 * pDataOut,
|
||||
const ai_i8 *pDataIn,
|
||||
const ai_i8 *pWeights,
|
||||
const ai_i32 *pBias,
|
||||
const ai_i8 in_zeropoint,
|
||||
const ai_i8 out_zeropoint,
|
||||
const ai_u16 n_channel_in,
|
||||
const ai_u16 n_channel_out,
|
||||
const ai_size n_pixels,
|
||||
const ai_float in_scale,
|
||||
const ai_float out_scale,
|
||||
const ai_float Wt_scale);
|
||||
|
||||
void forward_lite_dense_hsp_3step_is8os8ws8(ai_i8 * pDataOut,
|
||||
const ai_i8 *pDataIn,
|
||||
const ai_i8 *pWeights,
|
||||
const ai_i32 *pBias,
|
||||
const ai_i8 in_zeropoint,
|
||||
const ai_i8 out_zeropoint,
|
||||
const ai_u16 n_channel_in,
|
||||
const ai_u16 n_channel_out,
|
||||
const ai_size n_pixels,
|
||||
const ai_float in_scale,
|
||||
const ai_float out_scale,
|
||||
const ai_float Wt_scale);
|
||||
|
||||
void forward_lite_dense_is8os8ws8_ch(ai_i8 * pDataOut,
|
||||
const ai_i8 *pDataIn,
|
||||
const ai_i8 *pWeights,
|
||||
const ai_i32 *pBias,
|
||||
const ai_i8 in_zeropoint,
|
||||
const ai_i8 out_zeropoint,
|
||||
const ai_u16 n_channel_in,
|
||||
const ai_u16 n_channel_out,
|
||||
const ai_size n_pixels,
|
||||
const ai_float in_scale,
|
||||
const ai_float out_scale,
|
||||
const ai_float *pWt_scale,
|
||||
ai_i16 *pBuffer_a);
|
||||
|
||||
#endif /*LITE_DENSE_IS8OS8WS8_H*/
|
||||
171
lib/stai/libstai/include/lite_dense_ws1.h
Normal file
@ -0,0 +1,171 @@
|
||||
/**
|
||||
******************************************************************************
|
||||
* @file lite_dense_ws1.h
|
||||
* @author AIS
|
||||
* @brief header file of AI platform lite dense kernel datatypes (1bit weights)
|
||||
******************************************************************************
|
||||
* @attention
|
||||
*
|
||||
* Copyright (c) 2022 STMicroelectronics.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This software is licensed under terms that can be found in the LICENSE file
|
||||
* in the root directory of this software component.
|
||||
* If no LICENSE file comes with this software, it is provided AS-IS.
|
||||
*
|
||||
******************************************************************************
|
||||
*/
|
||||
#ifndef LITE_DENSE_WS1_H
|
||||
#define LITE_DENSE_WS1_H
|
||||
|
||||
|
||||
#include "stai.h"
|
||||
#include "ai_lite_interface.h"
|
||||
|
||||
|
||||
/*!
|
||||
* @brief Forward function for a dense layer with signed 16bit input,
|
||||
* signed 16bit output, binary weights and binary bias.
|
||||
* @ingroup lite_dense_ws1
|
||||
* @param output The pointer to output buffer.
|
||||
* @param input The pointer to input buffer.
|
||||
* @param weights The pointer to weights.
|
||||
* @param bias The pointer to bias.
|
||||
* @param scratch The pointer to the scratch buffer (unused).
|
||||
* @param n_channel_in The number of channels of the input.
|
||||
* @param n_channel_out The number of channels of the output, i.e.,
|
||||
* the number of dense hidden neurons.
|
||||
*/
|
||||
LITE_API_ENTRY
|
||||
void forward_lite_dense_is16os16ws1(
|
||||
ai_i16* output, const ai_i16* input,
|
||||
const ai_pbits* weights,
|
||||
const ai_pbits* bias, ai_i32* scratch,
|
||||
const ai_u32 n_channel_in, const ai_u32 n_channel_out);
|
||||
|
||||
|
||||
/*!
|
||||
* @brief Forward function for a dense layer with signed 16bit input,
|
||||
* signed 16bit output, binary weights and binary bias.
|
||||
* The BN is fused, i.e., the layer requires weights, scale, and offset, where
|
||||
* weights are those of the dense layer, scale is that of the BN, and the offset
|
||||
* corresponds to dense bias * bn scale + bn offset. If the parameters do not
|
||||
* agree with such convention, the behavior is undefined.
|
||||
* @ingroup lite_dense_ws1
|
||||
* @param output The pointer to output buffer.
|
||||
* @param input The pointer to input buffer.
|
||||
* @param weights The pointer to weights.
|
||||
* @param scale The pointer to scale.
|
||||
* @param offset The pointer to offset.
|
||||
* @param scratch The pointer to the scratch buffer (unused).
|
||||
* @param n_channel_in The number of channels of the input.
|
||||
* @param n_channel_out The number of channels of the output, i.e.,
|
||||
* the number of dense hidden neurons.
|
||||
*/
|
||||
LITE_API_ENTRY
|
||||
void forward_lite_dense_is16os16ws1_bn(
|
||||
ai_i16* output, const ai_i16* input,
|
||||
const ai_pbits* weights,
|
||||
const ai_float *scale, const ai_float *offset, ai_i32* scratch,
|
||||
const ai_u32 n_channel_in, const ai_u32 n_channel_out);
|
||||
|
||||
|
||||
/*!
|
||||
* @brief Forward function for a dense layer with signed f32 input,
|
||||
* binary output, binary weights and float bias.
|
||||
* @ingroup lite_dense_ws1
|
||||
* @param output The pointer to output buffer.
|
||||
* @param input The pointer to input buffer.
|
||||
* @param weights The pointer to weights.
|
||||
* @param bias The pointer to bias.
|
||||
* @param scratch The pointer to the scratch buffer (unused).
|
||||
* @param n_channel_in The number of channels of the input.
|
||||
* @param n_channel_out The number of channels of the output, i.e.,
|
||||
* the number of dense hidden neurons.
|
||||
*/
|
||||
LITE_API_ENTRY
|
||||
void forward_lite_dense_if32os1ws1(
|
||||
ai_pbits *output, const ai_float *input, const ai_pbits *weights,
|
||||
const ai_float *bias, ai_float *scratch,
|
||||
const ai_u32 n_channel_in, const ai_u32 n_channel_out);
|
||||
|
||||
|
||||
/*!
|
||||
* @brief C struct for a dense layer with signed f32 input,
|
||||
* binary output and binary weights.
|
||||
* The BN is fused, i.e., the layer requires weights, scale, and offset, where
|
||||
* weights are those of the dense layer, scale is that of the BN, and the offset
|
||||
* corresponds to dense bias * bn scale + bn offset. If the parameters do not
|
||||
* agree with such convention, the behavior is undefined.
|
||||
* @ingroup lite_dense_ws1
|
||||
* @param output The pointer to output buffer.
|
||||
* @param input The pointer to input buffer.
|
||||
* @param weights The pointer to weights.
|
||||
* @param scale The pointer to scale.
|
||||
* @param offset The pointer to offset.
|
||||
* @param scratch The pointer to the scratch buffer (unused).
|
||||
* @param n_channel_in The number of channels of the input.
|
||||
* @param n_channel_out The number of channels of the output, i.e.,
|
||||
* the number of dense hidden neurons.
|
||||
*/
|
||||
typedef struct {
|
||||
stai_pbits* output;
|
||||
const float* input;
|
||||
const stai_pbits* weights;
|
||||
const float* scale;
|
||||
const float* offset;
|
||||
float* scratch;
|
||||
const uint32_t n_channel_in;
|
||||
const uint32_t n_channel_out;
|
||||
} forward_lite_dense_if32os1ws1_bn_args;
|
||||
|
||||
|
||||
LITE_API_ENTRY
|
||||
void forward_lite_dense_if32os1ws1_bn(forward_lite_dense_if32os1ws1_bn_args* args);
|
||||
|
||||
|
||||
/*!
|
||||
* @brief Forward function for a dense layer with signed f32 input,
|
||||
* f32 output, and binary weights.
|
||||
* @ingroup lite_dense_ws1
|
||||
* @param output The pointer to output buffer.
|
||||
* @param input The pointer to input buffer.
|
||||
* @param weights The pointer to weights.
|
||||
* @param bias The pointer to binary bias.
|
||||
* @param scratch The pointer to the scratch buffer (unused).
|
||||
* @param n_channel_in The number of channels of the input.
|
||||
* @param n_channel_out The number of channels of the output, i.e.,
|
||||
* the number of dense hidden neurons.
|
||||
*/
|
||||
LITE_API_ENTRY
|
||||
void forward_lite_dense_if32of32ws1(
|
||||
ai_float* output, const ai_float* input,
|
||||
const ai_pbits* weights,
|
||||
const ai_pbits* bias, ai_float* scratch,
|
||||
const ai_u32 n_channel_in, const ai_u32 n_channel_out);
|
||||
|
||||
/*!
|
||||
* @brief Forward function for a dense layer with signed f32 input,
|
||||
* f32 output, and binary weights.
|
||||
* The BN is fused, i.e., the layer requires weights, scale, and offset, where
|
||||
* weights are those of the dense layer, scale is that of the BN, and the offset
|
||||
* corresponds to dense bias * bn scale + bn offset. If the parameters do not
|
||||
* agree with such convention, the behavior is undefined.
|
||||
* @ingroup lite_dense_ws1
|
||||
* @param output The pointer to output buffer.
|
||||
* @param input The pointer to input buffer.
|
||||
* @param weights The pointer to weights.
|
||||
* @param scale The pointer to scale.
|
||||
* @param offset The pointer to offset.
|
||||
* @param scratch The pointer to the scratch buffer (unused).
|
||||
* @param n_channel_in The number of channels of the input.
|
||||
* @param n_channel_out The number of channels of the output, i.e.,
|
||||
* the number of dense hidden neurons.
|
||||
*/
|
||||
LITE_API_ENTRY
|
||||
void forward_lite_dense_if32of32ws1_bn(
|
||||
ai_float *output, const ai_float *input, const ai_pbits *weights,
|
||||
const ai_float *scale, const ai_float *offset, ai_float *scratch,
|
||||
const ai_u32 n_channel_in, const ai_u32 n_channel_out);
|
||||
|
||||
#endif /* LITE_DENSE_WS1_H */
|
||||
253
lib/stai/libstai/include/lite_dw.h
Normal file
@ -0,0 +1,253 @@
|
||||
/**
|
||||
******************************************************************************
|
||||
* @file lite_dw.h
|
||||
* @author AIS
|
||||
* @brief header file of AI platform lite depthwise kernel datatypes
|
||||
******************************************************************************
|
||||
* @attention
|
||||
*
|
||||
* Copyright (c) 2021 STMicroelectronics.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This software is licensed under terms that can be found in the LICENSE file
|
||||
* in the root directory of this software component.
|
||||
* If no LICENSE file comes with this software, it is provided AS-IS.
|
||||
*
|
||||
******************************************************************************
|
||||
*/
|
||||
#ifndef LITE_DW_H
|
||||
#define LITE_DW_H
|
||||
|
||||
|
||||
#include "ai_lite_interface.h"
|
||||
|
||||
|
||||
|
||||
/******************************************************************************/
|
||||
/* Forward Functions Section */
|
||||
/******************************************************************************/
|
||||
|
||||
/*!
|
||||
* @brief Handles dw convolutions generic case (supports depth multiplier >= 1)
|
||||
* @ingroup lite_dw
|
||||
*/
|
||||
LITE_API_ENTRY
|
||||
void
|
||||
forward_lite_dw_dm_sssa8_ch(const ai_i8 *Im_in,
|
||||
const ai_u16 dim_im_in_x,
|
||||
const ai_u16 dim_im_in_y,
|
||||
const ai_u16 ch_im_in,
|
||||
const ai_i8 *wt,
|
||||
const ai_u16 ch_im_out,
|
||||
const ai_u16 dim_kernel_x,
|
||||
const ai_u16 dim_kernel_y,
|
||||
const ai_u16 padding_x,
|
||||
const ai_u16 padding_y,
|
||||
const ai_u16 stride_x,
|
||||
const ai_u16 stride_y,
|
||||
const ai_i32 *bias,
|
||||
const ai_i8 In_ZeroPoint,
|
||||
const ai_i8 Out_ZeroPoint,
|
||||
const ai_float in_scale,
|
||||
const ai_float out_scale,
|
||||
const ai_float *pWt_scale,
|
||||
ai_i8 *Im_out,
|
||||
const ai_u16 dim_im_out_x,
|
||||
const ai_u16 dim_im_out_y,
|
||||
const ai_i32 nl_pool_fused,
|
||||
const ai_u32 scratch_size,
|
||||
ai_i16 *bufferA);
|
||||
|
||||
/*!
|
||||
* @brief Handles dw convolutions with depth multiplier = 1 only
|
||||
* @ingroup lite_dw
|
||||
*/
|
||||
LITE_API_ENTRY
|
||||
void
|
||||
forward_lite_dw_sssa8_ch(const ai_i8 *Im_in,
|
||||
const ai_u16 dim_im_in_x,
|
||||
const ai_u16 dim_im_in_y,
|
||||
const ai_u16 ch_im_in,
|
||||
const ai_i8 *wt,
|
||||
const ai_u16 dim_kernel_x,
|
||||
const ai_u16 dim_kernel_y,
|
||||
const ai_u16 padding_x,
|
||||
const ai_u16 padding_y,
|
||||
const ai_u16 stride_x,
|
||||
const ai_u16 stride_y,
|
||||
const ai_i32 *bias,
|
||||
const ai_i8 In_ZeroPoint,
|
||||
const ai_i8 Out_ZeroPoint,
|
||||
const ai_float in_scale,
|
||||
const ai_float out_scale,
|
||||
const ai_float *pWt_scale,
|
||||
ai_i8 *Im_out,
|
||||
const ai_u16 dim_im_out_x,
|
||||
const ai_u16 dim_im_out_y,
|
||||
const ai_i32 nl_pool_fused,
|
||||
const ai_u32 scratch_size,
|
||||
ai_i16 *bufferA);
|
||||
|
||||
/* Variant optimized for HSP */
|
||||
void
|
||||
forward_lite_dw_hsp_1step_sssa8_ch(const ai_i8 *Im_in,
|
||||
const ai_u16 dim_im_in_x,
|
||||
const ai_u16 dim_im_in_y,
|
||||
const ai_u16 ch_im_in,
|
||||
const ai_i8 *wt,
|
||||
const ai_u16 dim_kernel_x,
|
||||
const ai_u16 dim_kernel_y,
|
||||
const ai_u16 padding_x,
|
||||
const ai_u16 padding_x_r,
|
||||
const ai_u16 padding_y,
|
||||
const ai_u16 padding_y_b,
|
||||
const ai_u16 stride_x,
|
||||
const ai_u16 stride_y,
|
||||
const ai_i32 *bias,
|
||||
const ai_i8 In_ZeroPoint,
|
||||
const ai_i8 Out_ZeroPoint,
|
||||
const ai_float in_scale,
|
||||
const ai_float out_scale,
|
||||
const ai_float *pWt_scale,
|
||||
ai_i8 *Im_out,
|
||||
const ai_u16 dim_im_out_x,
|
||||
const ai_u16 dim_im_out_y,
|
||||
const ai_u32 scratch_size,
|
||||
ai_i16 *bufferA);
|
||||
|
||||
void
|
||||
forward_lite_dw_hsp_sssa8_ch(const ai_i8 *Im_in,
|
||||
const ai_u16 dim_im_in_x,
|
||||
const ai_u16 dim_im_in_y,
|
||||
const ai_u16 ch_im_in,
|
||||
const ai_i8 *wt,
|
||||
const ai_u16 dim_kernel_x,
|
||||
const ai_u16 dim_kernel_y,
|
||||
const ai_u16 padding_x,
|
||||
const ai_u16 padding_x_r,
|
||||
const ai_u16 padding_y,
|
||||
const ai_u16 padding_y_b,
|
||||
const ai_u16 stride_x,
|
||||
const ai_u16 stride_y,
|
||||
const ai_i32 *bias,
|
||||
const ai_i8 In_ZeroPoint,
|
||||
const ai_i8 Out_ZeroPoint,
|
||||
const ai_float in_scale,
|
||||
const ai_float out_scale,
|
||||
const ai_float *pWt_scale,
|
||||
ai_i8 *Im_out,
|
||||
const ai_u16 dim_im_out_x,
|
||||
const ai_u16 dim_im_out_y,
|
||||
const ai_u32 scratch_size,
|
||||
ai_i16 *bufferA);
|
||||
|
||||
|
||||
/* Variant optimized for HSP: Large tensors */
|
||||
void
|
||||
forward_lite_dw_hsp_3step_sssa8_ch(const ai_i8 *Im_in,
|
||||
const ai_u16 dim_im_in_x,
|
||||
const ai_u16 dim_im_in_y,
|
||||
const ai_u16 ch_im_in,
|
||||
const ai_i8 *wt,
|
||||
const ai_u16 dim_kernel_x,
|
||||
const ai_u16 dim_kernel_y,
|
||||
const ai_u16 padding_x,
|
||||
const ai_u16 padding_x_r,
|
||||
const ai_u16 padding_y,
|
||||
const ai_u16 padding_y_b,
|
||||
const ai_u16 stride_x,
|
||||
const ai_u16 stride_y,
|
||||
const ai_i32 *bias,
|
||||
const ai_i8 In_ZeroPoint,
|
||||
const ai_i8 Out_ZeroPoint,
|
||||
const ai_float in_scale,
|
||||
const ai_float out_scale,
|
||||
const ai_float *pWt_scale,
|
||||
ai_i8 *Im_out,
|
||||
const ai_u16 dim_im_out_x,
|
||||
const ai_u16 dim_im_out_y,
|
||||
const ai_u32 scratch_size,
|
||||
ai_i16 *bufferA);
|
||||
|
||||
/*!
|
||||
* @brief Handles dw convolutions with depth multiplier = 1, valid padding
|
||||
* and 3*3 kernel size
|
||||
* @ingroup lite_dw
|
||||
*/
|
||||
LITE_API_ENTRY
|
||||
void
|
||||
forward_lite_dw_3x3_sssa8_ch(const ai_i8 *Im_in,
|
||||
const ai_u16 dim_im_in_x,
|
||||
const ai_u16 dim_im_in_y,
|
||||
const ai_u16 ch_im_in,
|
||||
const ai_i8 *wt,
|
||||
const ai_u16 stride_x,
|
||||
const ai_u16 stride_y,
|
||||
const ai_i32 *bias,
|
||||
const ai_i8 In_ZeroPoint,
|
||||
const ai_i8 Out_ZeroPoint,
|
||||
const ai_float in_scale,
|
||||
const ai_float out_scale,
|
||||
const ai_float *pWt_scale,
|
||||
ai_i8 *Im_out,
|
||||
const ai_u16 dim_im_out_x,
|
||||
const ai_u16 dim_im_out_y,
|
||||
const ai_i32 nl_pool_fused,
|
||||
const ai_u32 scratch_size,
|
||||
ai_i16 *bufferA);
|
||||
|
||||
/*!
|
||||
* @brief Handles dw convolutions with depth multiplier = 1, valid padding,
|
||||
* 3*3 kernel size, stride_x = 1 and weights/input are channel first
|
||||
* @ingroup lite_dw
|
||||
*/
|
||||
LITE_API_ENTRY
|
||||
void
|
||||
forward_lite_dw_3x3_ch1st_sssa8_ch(const ai_i8 *Im_in,
|
||||
const ai_u16 dim_im_in_x,
|
||||
const ai_u16 dim_im_in_y,
|
||||
const ai_u16 ch_im_in,
|
||||
const ai_i8 *wt,
|
||||
const ai_u16 stride_x,
|
||||
const ai_u16 stride_y,
|
||||
const ai_i32 *bias,
|
||||
const ai_i8 In_ZeroPoint,
|
||||
const ai_i8 Out_ZeroPoint,
|
||||
const ai_float in_scale,
|
||||
const ai_float out_scale,
|
||||
const ai_float *pWt_scale,
|
||||
ai_i8 *Im_out,
|
||||
const ai_u16 dim_im_out_x,
|
||||
const ai_u16 dim_im_out_y,
|
||||
const ai_i32 nl_pool_fused,
|
||||
const ai_u32 scratch_size,
|
||||
ai_i16 *bufferA);
|
||||
|
||||
|
||||
/*!
|
||||
* @brief Handles dw convolutions with depth multiplier = 1, valid padding,
|
||||
* 1*N kernel size, stride_x = 1
|
||||
* @ingroup lite_dw
|
||||
*/
|
||||
LITE_API_ENTRY
|
||||
void
|
||||
forward_lite_dw_1xN_sssa8_ch(const ai_i8 *Im_in,
|
||||
const ai_u16 dim_im_in_y,
|
||||
const ai_u16 ch_im_in,
|
||||
const ai_i8 *wt,
|
||||
const ai_u16 dim_kernel_y,
|
||||
const ai_i32 *bias,
|
||||
const ai_i8 In_ZeroPoint,
|
||||
const ai_i8 Out_ZeroPoint,
|
||||
const ai_float in_scale,
|
||||
const ai_float out_scale,
|
||||
const ai_float *pWt_scale,
|
||||
ai_i8 *Im_out,
|
||||
const ai_u16 dim_im_out_y,
|
||||
const ai_i32 nl_pool_fused,
|
||||
const ai_u32 scratch_size,
|
||||
ai_i16 *bufferA);
|
||||
|
||||
|
||||
|
||||
#endif /*LITE_DW_H*/
|
||||
131
lib/stai/libstai/include/lite_dw_dqnn.h
Normal file
@ -0,0 +1,131 @@
|
||||
/**
|
||||
******************************************************************************
|
||||
* @file lite_dw_dqnn.h
|
||||
* @author AIS
|
||||
* @brief header file of AI platform lite integer depthwise kernel datatypes
|
||||
******************************************************************************
|
||||
* @attention
|
||||
*
|
||||
* Copyright (c) 2021 STMicroelectronics.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This software is licensed under terms that can be found in the LICENSE file
|
||||
* in the root directory of this software component.
|
||||
* If no LICENSE file comes with this software, it is provided AS-IS.
|
||||
*
|
||||
******************************************************************************
|
||||
*/
|
||||
#ifndef LITE_DW_DQNN_H
|
||||
#define LITE_DW_DQNN_H
|
||||
|
||||
|
||||
#include "ai_lite_interface.h"
|
||||
|
||||
|
||||
|
||||
/******************************************************************************/
|
||||
/* Forward Functions Section */
|
||||
/******************************************************************************/
|
||||
|
||||
/*!
|
||||
* @brief Handles 2D DW convolution with binary input, binary output and
|
||||
* binary weights - with 0 padding (QKeras like) - Lite I/F
|
||||
* @ingroup lite_conv2d_dqnn
|
||||
*/
|
||||
LITE_API_ENTRY
|
||||
void forward_lite_dw_is1os1ws1_bn_pad0(const ai_u32 *pDataIn_init,
|
||||
ai_u32 * pDataOut_init,
|
||||
const ai_u32 *pWeights_init,
|
||||
ai_float *pScratch_32,
|
||||
const ai_u32 n_channel_in,
|
||||
const ai_u32 n_channel_out,
|
||||
const ai_i32 width_in,
|
||||
const ai_i32 height_in,
|
||||
const ai_i32 width_out,
|
||||
const ai_i32 height_out,
|
||||
const ai_i32 filt_width,
|
||||
const ai_i32 filt_height,
|
||||
const ai_i32 filt_pad_x,
|
||||
const ai_i32 filt_pad_y,
|
||||
const ai_i32 filt_stride_x,
|
||||
const ai_i32 filt_stride_y,
|
||||
const ai_i32 *pThreshold);
|
||||
|
||||
/*!
|
||||
* @brief Handles 2D DW convolution with binary input, binary output and
|
||||
* binary weights - with 0 padding (QKeras like) - Lite I/F
|
||||
* - Optimized thanks to Optim3 assumptions
|
||||
* @ingroup lite_conv2d_dqnn
|
||||
*/
|
||||
LITE_API_ENTRY
|
||||
void forward_lite_dw_is1os1ws1_bn_pad0_optim3(const ai_u32 *pDataIn_init,
|
||||
ai_u32 * pDataOut_init,
|
||||
const ai_u32 *pWeights_init,
|
||||
ai_float *pScratch_32,
|
||||
const ai_u32 n_channel_in,
|
||||
const ai_u32 n_channel_out,
|
||||
const ai_i32 width_in,
|
||||
const ai_i32 height_in,
|
||||
const ai_i32 width_out,
|
||||
const ai_i32 height_out,
|
||||
const ai_i32 filt_width,
|
||||
const ai_i32 filt_height,
|
||||
const ai_i32 filt_pad_x,
|
||||
const ai_i32 filt_pad_y,
|
||||
const ai_i32 filt_stride_x,
|
||||
const ai_i32 filt_stride_y,
|
||||
const ai_i32 *pThreshold);
|
||||
|
||||
/*!
|
||||
* @brief Handles 2D DW convolution with binary input, binary output and
|
||||
* binary weights - with +1/-1 padding (Larq like) - Lite I/F
|
||||
* @ingroup lite_conv2d_dqnn
|
||||
*/
|
||||
LITE_API_ENTRY
|
||||
void forward_lite_dw_is1os1ws1_bn_pad1(const ai_u32 *pDataIn_init,
|
||||
ai_u32 * pDataOut_init,
|
||||
const ai_u32 *pWeights_init,
|
||||
ai_float *pScratch_32,
|
||||
const ai_u32 n_channel_in,
|
||||
const ai_u32 n_channel_out,
|
||||
const ai_i32 width_in,
|
||||
const ai_i32 height_in,
|
||||
const ai_i32 width_out,
|
||||
const ai_i32 height_out,
|
||||
const ai_i32 filt_width,
|
||||
const ai_i32 filt_height,
|
||||
const ai_i32 filt_pad_x,
|
||||
const ai_i32 filt_pad_y,
|
||||
const ai_i32 filt_stride_x,
|
||||
const ai_i32 filt_stride_y,
|
||||
const ai_i32 *pThreshold,
|
||||
const ai_i32 pad_value);
|
||||
|
||||
/*!
|
||||
* @brief Handles 2D DW convolution with binary input, binary output and
|
||||
* binary weights - with +1/-1 padding (Larq like) - Lite I/F
|
||||
* - Optimized thanks to Optim3 assumptions
|
||||
* @ingroup lite_conv2d_dqnn
|
||||
*/
|
||||
LITE_API_ENTRY
|
||||
void forward_lite_dw_is1os1ws1_bn_pad1_optim3(const ai_u32 *pDataIn_init,
|
||||
ai_u32 * pDataOut_init,
|
||||
const ai_u32 *pWeights_init,
|
||||
ai_float *pScratch_32,
|
||||
const ai_u32 n_channel_in,
|
||||
const ai_u32 n_channel_out,
|
||||
const ai_i32 width_in,
|
||||
const ai_i32 height_in,
|
||||
const ai_i32 width_out,
|
||||
const ai_i32 height_out,
|
||||
const ai_i32 filt_width,
|
||||
const ai_i32 filt_height,
|
||||
const ai_i32 filt_pad_x,
|
||||
const ai_i32 filt_pad_y,
|
||||
const ai_i32 filt_stride_x,
|
||||
const ai_i32 filt_stride_y,
|
||||
const ai_i32 *pThreshold,
|
||||
const ai_i32 pad_value);
|
||||
|
||||
|
||||
#endif /*LITE_DW_DQNN_H*/
|
||||
243
lib/stai/libstai/include/lite_generic_float.h
Normal file
@ -0,0 +1,243 @@
|
||||
/**
|
||||
******************************************************************************
|
||||
* @file lite_generic_float.h
|
||||
* @author AIS
|
||||
* @brief header file of AI platform lite generic float kernel datatypes
|
||||
******************************************************************************
|
||||
* @attention
|
||||
*
|
||||
* Copyright (c) 2021 STMicroelectronics.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This software is licensed under terms that can be found in the LICENSE file
|
||||
* in the root directory of this software component.
|
||||
* If no LICENSE file comes with this software, it is provided AS-IS.
|
||||
*
|
||||
******************************************************************************
|
||||
*/
|
||||
#ifndef LITE_GENERIC_FLOAT_H
|
||||
#define LITE_GENERIC_FLOAT_H
|
||||
|
||||
#include "ai_lite_interface.h"
|
||||
#include "layers_generic.h"
|
||||
|
||||
/*****************************************************************************/
|
||||
/* Generic Forward Functions Section */
|
||||
/*****************************************************************************/
|
||||
|
||||
/** Reduce Generic Kernels *************************************************/
|
||||
LITE_API_ENTRY
|
||||
void forward_lite_func_reduce_l1_if32of32(
|
||||
ai_float* out_ptr, const ai_float* in_ptr,
|
||||
const ai_size out_size, const ai_size in_step,
|
||||
const ai_size axis_size, const ai_size axis_step);
|
||||
|
||||
|
||||
LITE_API_ENTRY
|
||||
void forward_lite_func_reduce_l2_if32of32(
|
||||
ai_float* out_ptr, const ai_float* in_ptr,
|
||||
const ai_size out_size, const ai_size in_step,
|
||||
const ai_size axis_size, const ai_size axis_step);
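Shape handling in these reduce kernels is expressed through flat strides. As an assumption-laden sketch (the indexing convention is not documented here), an L2 reduction compatible with the prototype above could be:

#include <math.h>   /* sqrtf */

static void reduce_l2_ref(ai_float *out_ptr, const ai_float *in_ptr,
                          const ai_size out_size, const ai_size in_step,
                          const ai_size axis_size, const ai_size axis_step)
{
  for (ai_size o = 0; o < out_size; o++) {
    ai_float acc = 0.0f;
    for (ai_size a = 0; a < axis_size; a++) {
      const ai_float v = in_ptr[o * in_step + a * axis_step];  /* assumed layout */
      acc += v * v;
    }
    out_ptr[o] = sqrtf(acc);
  }
}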
|
||||
|
||||
/** GatherND Kernels **************************************************/
|
||||
|
||||
/*!
|
||||
* @brief C struct for a gather_nd layer.
|
||||
* @ingroup lite_generic
|
||||
* @param src_in pointer to the input buffer.
|
||||
* @param dst_out pointer to the output buffer.
|
||||
* @param index_data indices to select slices of input tensor.
|
||||
* @param height_in H dimension of input tensor.
|
||||
* @param width_in W dimension of input tensor.
|
||||
* @param n_channel_in CH dimension of input tensor.
|
||||
* @param height_index H dimension of indices tensor.
|
||||
* @param width_index W dimension of indices tensor.
|
||||
* @param d_in D dimension of input tensor.
|
||||
* @param ch_index CH dimension of indices tensor.
|
||||
* @param ch_stride_in CH stride of input tensor.
|
||||
*/
|
||||
typedef struct {
|
||||
stai_ptr src_in;
|
||||
stai_ptr dst_out;
|
||||
ai_i32* index_data;
|
||||
ai_size height_in;
|
||||
ai_size width_in;
|
||||
ai_size n_channel_in;
|
||||
ai_size height_index;
|
||||
ai_size width_index;
|
||||
ai_size d_in;
|
||||
ai_size ch_index;
|
||||
int32_t ch_stride_in;
|
||||
} forward_lite_gather_nd_args;
|
||||
|
||||
|
||||
LITE_API_ENTRY
|
||||
void forward_lite_gather_nd(
|
||||
forward_lite_gather_nd_args* args);
|
||||
|
||||
/** GatherND channel first Kernels **************************************************/
|
||||
|
||||
/*!
|
||||
* @brief C struct for a gather_nd layer (Channel first).
|
||||
* @ingroup lite_generic
|
||||
* @param src_in pointer to the input buffer.
|
||||
* @param dst_out pointer to the output buffer.
|
||||
* @param index_data indices to select slices of input tensor.
|
||||
* @param height_in H dimension of input tensor.
|
||||
* @param width_in W dimension of input tensor.
|
||||
* @param n_channel_in CH dimension of input tensor.
|
||||
* @param height_index H dimension of indices tensor.
|
||||
* @param width_index W dimension of indices tensor.
|
||||
* @param ch_index CH dimension of indices tensor.
|
||||
* @param ch_stride_in CH stride of input tensor.
|
||||
* @param height_out H dimension of output tensor.
|
||||
* @param width_out W dimension of output tensor.
|
||||
* @param d_out D dimension of output tensor.
|
||||
* @param ch_out CH dimension of output tensor.
|
||||
*/
|
||||
typedef struct {
|
||||
stai_ptr src_in;
|
||||
stai_ptr dst_out;
|
||||
ai_i32* index_data;
|
||||
ai_size height_in;
|
||||
ai_size width_in;
|
||||
ai_size n_channel_in;
|
||||
ai_size height_index;
|
||||
ai_size width_index;
|
||||
ai_size ch_index;
|
||||
int32_t ch_stride_in;
|
||||
ai_size height_out;
|
||||
ai_size width_out;
|
||||
ai_size d_out;
|
||||
ai_size ch_out;
|
||||
} forward_lite_gather_nd_channel_first_args;
|
||||
|
||||
|
||||
LITE_API_ENTRY
|
||||
void forward_lite_gather_nd_channel_first(
|
||||
forward_lite_gather_nd_channel_first_args* args);
|
||||
|
||||
/** ScatterND Kernels **************************************************/
|
||||
|
||||
/*!
|
||||
* @brief C struct for a scatter_nd layer.
|
||||
* @ingroup lite_generic
|
||||
* @param src_in pointer to the input buffer.
|
||||
* @param dst_out pointer to the output buffer.
|
||||
* @param index_data indices to select slices of input tensor.
|
||||
* @param update_data values to be inserted into the input tensor.
|
||||
* @param height_in H dimension of input tensor.
|
||||
* @param width_in W dimension of input tensor.
|
||||
* @param n_channel_in CH dimension of input tensor.
|
||||
* @param height_index H dimension of indices tensor.
|
||||
* @param width_index W dimension of indices tensor.
|
||||
* @param d_in D dimension of input tensor.
|
||||
* @param ch_index CH dimension of indices tensor.
|
||||
* @param ch_stride_in CH stride of input tensor.
|
||||
*/
|
||||
typedef struct {
|
||||
stai_ptr src_in;
|
||||
stai_ptr dst_out;
|
||||
ai_i32* index_data;
|
||||
stai_ptr update_data;
|
||||
ai_scatter_nd_reduction reduction;
|
||||
func_binary func;
|
||||
ai_size height_in;
|
||||
ai_size width_in;
|
||||
ai_size n_channel_in;
|
||||
ai_size height_index;
|
||||
ai_size width_index;
|
||||
ai_size d_index;
|
||||
ai_size d_in;
|
||||
ai_size ch_index;
|
||||
int32_t ch_stride_in;
|
||||
} forward_lite_scatter_nd_args;
|
||||
|
||||
|
||||
LITE_API_ENTRY
|
||||
void forward_lite_scatter_nd(
|
||||
forward_lite_scatter_nd_args* args);
|
||||
|
||||
/** Split Generic Kernels ****************************************************/

/*!
 * @brief C struct for a generic split layer.
 * @ingroup lite_generic
 * @param outputs_ptr list of pointers for the outputs buffers.
 * @param n_outputs_ptr the number of outputs.
 * @param n_outer_elems the number of elements to copy in a single split.
 * @param input_ptr the pointer to input buffer to split.
 * @param splits_strides the pointer to array defining outputs split strides.
 * @param splits_step the offset between split strides.
 */
typedef struct {
  stai_ptr* outputs_ptr;
  const stai_size n_outputs_ptr;
  const stai_size n_outer_elems;
  const stai_ptr input_ptr;
  const int32_t* splits_strides;
  const stai_size splits_step;
} forward_lite_split_generic_args;

LITE_API_ENTRY
void forward_lite_split_generic(
  forward_lite_split_generic_args* args);

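/* Illustrative usage sketch (not part of this header): splitting one input
 * buffer into two output buffers. All names and sizes are hypothetical; the
 * const members mean the struct is normally built with an initializer. */
#if 0
static void example_split_generic(stai_ptr in, stai_ptr out0, stai_ptr out1)
{
  stai_ptr outputs[2] = { out0, out1 };
  const int32_t splits_strides[2] = { 16, 16 };  /* hypothetical per-output strides */
  forward_lite_split_generic_args args = {
    .outputs_ptr = outputs, .n_outputs_ptr = 2, .n_outer_elems = 4,
    .input_ptr = in, .splits_strides = splits_strides, .splits_step = 1
  };
  forward_lite_split_generic(&args);
}
#endif
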
/** TopK Generic Kernels *****************************************************/

/*!
 * @brief Handles TopK selection along axis 0 with float32 input and
 *        float32 output - Lite I/F
 * @ingroup lite_generic
 */
LITE_API_ENTRY
void forward_lite_topK_axis_0_if32of32(
  const ai_float *pDataIn_init,
  ai_float *pDataOut_values_init,
  ai_i32 *pDataOut_index_init,
  const ai_size height_in,
  const ai_size width_in,
  const ai_size n_channel_in,
  const ai_size k, ai_i16 largest,
  void (*f)(const ai_float* inputs, ai_float* values, ai_i32* indices, ai_size k, ai_size n_elements, ai_i32 stride, ai_i16 largest)
);

/*!
 * @brief Handles TopK selection along axis 1 with float32 input and
 *        float32 output - Lite I/F
 * @ingroup lite_generic
 */
LITE_API_ENTRY
void forward_lite_topK_axis_1_if32of32(
  const ai_float *pDataIn_init,
  ai_float *pDataOut_values_init,
  ai_i32 *pDataOut_index_init,
  const ai_size height_in,
  const ai_size width_in,
  const ai_size n_channel_in,
  const ai_size k, ai_i16 largest,
  void (*f)(const ai_float* inputs, ai_float* values, ai_i32* indices, ai_size k, ai_size n_elements, ai_i32 stride, ai_i16 largest)
);

/*!
 * @brief Handles TopK selection along axis 2 with float32 input and
 *        float32 output - Lite I/F
 * @ingroup lite_generic
 */
LITE_API_ENTRY
void forward_lite_topK_axis_2_if32of32(
  const ai_float *pDataIn_init,
  ai_float *pDataOut_values_init,
  ai_i32 *pDataOut_index_init,
  const ai_size height_in,
  const ai_size width_in,
  const ai_size n_channel_in,
  const ai_size k, ai_i16 largest,
  void (*f)(const ai_float* inputs, ai_float* values, ai_i32* indices, ai_size k, ai_size n_elements, ai_i32 stride, ai_i16 largest)
);

#endif /*LITE_GENERIC_FLOAT_H*/
58
lib/stai/libstai/include/lite_gru_f32.h
Normal file
@ -0,0 +1,58 @@
/**
  ******************************************************************************
  * @file    lite_gru_f32.h
  * @author  AIS
  * @brief   header file of AI platform lite gru kernel datatypes
  ******************************************************************************
  * @attention
  *
  * Copyright (c) 2023 STMicroelectronics.
  * All rights reserved.
  *
  * This software is licensed under terms that can be found in the LICENSE file
  * in the root directory of this software component.
  * If no LICENSE file comes with this software, it is provided AS-IS.
  *
  ******************************************************************************
  */
#ifndef LITE_GRU_F32_H
#define LITE_GRU_F32_H

#include "ai_lite_interface.h"

/*!
 * @brief Forward function for a stateless GRU (gated recurrent unit) layer with
 *        signed float input, signed float output, and float parameters.
 * @ingroup lite_gru_f32
 * @param output The pointer to output buffer.
 * @param input The pointer to input buffer.
 * @param gru_kernel The pointer to gru kernel param.
 * @param gru_recurrent The pointer to gru recurrent param.
 * @param gru_bias The pointer to bias.
 * @param gru_scratch The pointer to GRU scratch.
 * @param n_units The number of GRU cells (dimensionality of output space).
 * @param n_timesteps The number of timesteps of the input sequence.
 * @param n_features The number of features of the input sequence.
 * @param activation_nl The activation function used to update memory state.
 * @param recurrent_nl The activation function to use for the recurrent step.
 * @param return_seq If True, returns the full output sequence, else only the last output.
 * @param go_backwards If True, process the input sequence backwards.
 * @param reverse_seq If True, reverse the input sequence.
 * @param reset_after Whether to apply reset gate after (True) or before (False) matmul.
 * @param activation_param The parameters for activation_nl (can be NULL).
 * @param recurrent_param The parameters for recurrent_nl (can be NULL).
 * @param initial_hidden Initial state of hidden layer (can be NULL).
 */
LITE_API_ENTRY
void forward_lite_gru_if32of32wf32(
  ai_float* output, const ai_float* input, const ai_float* gru_kernel,
  const ai_float* gru_recurrent, const ai_float* gru_bias, ai_float* gru_scratch,
  const ai_u32 n_units, const ai_size n_timesteps, const ai_size n_features,
  ai_handle activation_nl, ai_handle recurrent_nl, ai_bool return_seq,
  ai_bool go_backwards, ai_bool reverse_seq, ai_bool reset_after,
  const ai_float* activation_param, const ai_float* recurrent_param,
  const ai_float* initial_hidden);

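/* Illustrative call sketch (not part of this header): running the stateless
 * GRU forward on a single sequence. Buffer sizes and nonlinearity handles are
 * hypothetical placeholders normally supplied by the generated network code. */
#if 0
static void example_gru_forward(const ai_float* x, const ai_float* kernel,
                                const ai_float* recurrent, const ai_float* bias,
                                ai_float* scratch, ai_float* y,
                                ai_handle sigmoid_nl, ai_handle tanh_nl)
{
  forward_lite_gru_if32of32wf32(
    y, x, kernel, recurrent, bias, scratch,
    /* n_units */ 16, /* n_timesteps */ 10, /* n_features */ 8,
    tanh_nl, sigmoid_nl, /* return_seq */ 0,
    /* go_backwards */ 0, /* reverse_seq */ 0, /* reset_after */ 1,
    NULL, NULL, /* initial_hidden */ NULL);
}
#endif
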
#endif /* LITE_GRU_F32_H */
93
lib/stai/libstai/include/lite_internal_apis.h
Normal file
@ -0,0 +1,93 @@
/**
  ******************************************************************************
  * @file    lite_internal_apis.h
  * @author  STMicroelectronics
  * @brief
  ******************************************************************************
  * @attention
  *
  * Copyright (c) 2023 STMicroelectronics.
  * All rights reserved.
  *
  * This software is licensed under terms that can be found in the LICENSE file
  * in the root directory of this software component.
  * If no LICENSE file comes with this software, it is provided AS-IS.
  *
  ******************************************************************************
  */

#ifndef LITE_INTERNAL_APIS
#define LITE_INTERNAL_APIS

#include "ai_platform.h"
#include "ai_lite_interface.h"

/* lite_nl_generic_float */
#define LITE_NL_ENTRY(nl_id_, nl_name_, nl_op_, nl_op_args_) \
/** \
 * @brief lite function for a templated non-linearity nl_op_. \
 * @ingroup lite_nl_generic_float \
 * @param out_ptr The pointer to output buffer. \
 * @param in_ptr The pointer to input buffer. \
 * @param in_size The size of the input. \
 * @param params opaque handler to optional NL params (not used). \
 */ \
LITE_API_ENTRY \
void forward_lite_nl_ ## nl_name_ ## _if32of32( \
  ai_handle out_ptr, const ai_handle in_ptr, const ai_i32 in_size, const ai_handle params);

#include "lite_nl_list.h"

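/* Illustrative note (not part of this header): lite_nl_list.h is an X-macro
 * list, so each LITE_NL_ENTRY line it contains expands through the macro above
 * into one prototype. For example, the `tanh` entry expands roughly as below. */
#if 0
LITE_API_ENTRY
void forward_lite_nl_tanh_if32of32(
  ai_handle out_ptr, const ai_handle in_ptr, const ai_i32 in_size, const ai_handle params);
#endif
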
/**
 * @brief lite function for a float softmax non-linearity where the softmax is applied per channel.
 * @ingroup lite_nl_generic_float
 * @param output The pointer to output buffer.
 * @param input The pointer to input buffer.
 * @param in_size The size of the input.
 * @param inner_loop_cnt The size of the inner loop (elements after the selected axis).
 * @param axis_elem The elements number along the selected axis.
 */
LITE_API_ENTRY
void forward_lite_nl_softmax_if32of32(
  ai_handle out_ptr, const ai_handle in_ptr,
  const ai_size in_size, const ai_size inner_loop_cnt, const ai_size axis_elem);

/**
 * @brief lite function for a float softmax zero channel non-linearity where the softmax is applied per channel.
 * @ingroup lite_nl_generic_float
 * @param output The pointer to output buffer.
 * @param input The pointer to input buffer.
 * @param in_size The size of the input.
 * @param ch_size The size of each channel.
 * @param in_ch_step The input channel step.
 * @param out_ch_step The output channel step.
 */
LITE_API_ENTRY
void forward_lite_nl_softmax_zero_channel_if32of32(
  ai_handle out_ptr, const ai_handle in_ptr, const ai_i32 in_size, const ai_size ch_size,
  const ai_i32 in_ch_step, const ai_i32 out_ch_step);

/*!
 * @typedef (*func_nl_lite)
 * @ingroup layers_nl
 * @brief Function pointer for generic non linear transform
 * this function pointer abstracts a generic non linear layer.
 * see @ref nl_func_tanh_array_f32 and similar as examples.
 */
typedef void (*func_nl_lite)(ai_handle out_ptr, const ai_handle in_ptr,
                             const ai_i32 in_size, const ai_handle params);

/**
 * @brief lite function for a float gelu non-linearity.
 * @ingroup lite_nl_generic_float
 * @param out_ptr The pointer to output buffer.
 * @param in_ptr The pointer to input buffer.
 * @param in_size The size of the input.
 * @param params opaque handler to optional NL params.
 */
LITE_API_ENTRY
void forward_lite_nl_gelu_if32of32(
  ai_handle out_ptr, const ai_handle in_ptr, const ai_i32 in_size, const ai_handle params);

#endif /* LITE_INTERNAL_APIS */
94
lib/stai/libstai/include/lite_lstm.h
Normal file
@ -0,0 +1,94 @@
/**
  ******************************************************************************
  * @file    lite_lstm.h
  * @author  AIS
  * @brief   header file of AI platform lite lstm kernel
  ******************************************************************************
  * @attention
  *
  * Copyright (c) 2022 STMicroelectronics.
  * All rights reserved.
  *
  * This software is licensed under terms that can be found in the LICENSE file
  * in the root directory of this software component.
  * If no LICENSE file comes with this software, it is provided AS-IS.
  *
  ******************************************************************************
  */
#ifndef LITE_LSTM_H
#define LITE_LSTM_H

#include "ai_lite_interface.h"
#include "ai_math_helpers.h"

#include "lite_internal_apis.h"

enum {
  AI_LITE_LSTM_INPUT  = 0,
  AI_LITE_LSTM_FORGET = 1,
  AI_LITE_LSTM_CELL   = 2,
  AI_LITE_LSTM_OUTPUT = 3,
  AI_LITE_LSTM_MAX
};

void forward_lite_lstm_if32of32wf32(
  AI_CONST ai_float* kernel, AI_CONST ai_float* recurrent, AI_CONST ai_float* bias,
  func_nl_lite activation_nl,          /**< activation nonlinearity (input to cell) */
  AI_CONST ai_float* activation_param, /**< activation NL parameters */
  func_nl_lite recurrent_nl,           /**< recurrent nonlinearity (hidden to cell) */
  AI_CONST ai_float* recurrent_param,  /**< recurrent NL parameters */
  ai_float* hidden, AI_CONST ai_float* initial_hidden, ai_float* out_hidden,
  ai_u16 n_features, ai_u16 n_cell,
  AI_CONST ai_float* peepholes, ai_handle state,
  ai_float* cell, AI_CONST ai_float* initial_cell,
  func_nl_lite out_nl, AI_CONST ai_float* out_param, ai_float cell_clip,
  ai_float* data_in, ai_float** data_out, ai_ptr_offset *out_offset, ai_float* gates,
  ai_i32 timesteps, ai_i32 nb_t_out,
  ai_bool go_backwards, ai_bool reverse_seq, ai_bool stateful, ai_bool return_state);

void forward_lite_lstm_is8os8ws8(
  AI_CONST ai_i8* kernel[AI_LITE_LSTM_MAX],
  AI_CONST ai_i8* recurrent[AI_LITE_LSTM_MAX],
  AI_CONST ai_i32* bias[AI_LITE_LSTM_MAX],
  AI_CONST ai_i8* initial_hidden, ai_i8* out_hidden,
  ai_u16 batch_size, ai_u16 n_features, ai_u16 n_cell,
  AI_CONST ai_i16* initial_cell, ai_i16* out_cell,
  ai_i8* data_in, const ai_i8 in_zeropoint, const ai_float in_scale,
  ai_i8* data_out, const ai_i8 out_zeropoint, const ai_float out_scale,
  const ai_float kernel_scale[AI_LITE_LSTM_MAX],
  const ai_float recurrent_scale[AI_LITE_LSTM_MAX],
  ai_i32 timesteps, ai_i32 nb_t_out,
  ai_bool go_backwards, ai_bool reverse_seq, ai_bool stateful,
  ai_bool time_major, ai_bool return_state,
  ai_i32 scratch_size, ai_i8 *p_scratch_data);

#endif /* LITE_LSTM_H */

157
lib/stai/libstai/include/lite_maxpool_dqnn.h
Normal file
@ -0,0 +1,157 @@
/**
  ******************************************************************************
  * @file    lite_maxpool_dqnn.h
  * @author  AIS
  * @brief   header file of AI platform lite dqnn maxpool kernel datatypes
  ******************************************************************************
  * @attention
  *
  * Copyright (c) 2021 STMicroelectronics.
  * All rights reserved.
  *
  * This software is licensed under terms that can be found in the LICENSE file
  * in the root directory of this software component.
  * If no LICENSE file comes with this software, it is provided AS-IS.
  *
  ******************************************************************************
  */
#ifndef LITE_MAXPOOL_DQNN_H
#define LITE_MAXPOOL_DQNN_H

#include "ai_lite_interface.h"

/******************************************************************************/
/*  Forward Functions Section                                                 */
/******************************************************************************/

/*!
 * @brief Handles maxpool with binary input and binary output - Lite I/F
 * @ingroup lite_maxpool_dqnn
 */
LITE_API_ENTRY
void forward_lite_maxpool_is1os1(const ai_u32 *pDataIn_init, ai_u32 *pDataOut_init,
  const ai_i32 width_in, const ai_i32 width_out,
  const ai_i32 height_in, const ai_i32 height_out,
  const ai_u32 n_channel_in, const ai_u32 n_channel_out,
  const ai_i32 pool_width, const ai_i32 pool_height,
  const ai_i32 pool_pad_x, const ai_i32 pool_pad_y,
  const ai_i32 pool_stride_x, const ai_i32 pool_stride_y,
  const ai_u32 pool_pad_value, ai_float *pScratch_32);

/*!
 * @brief Handles maxpool with 8 bits signed input and output with a positive scale of the input - Lite I/F
 * @ingroup lite_maxpool_dqnn
 */
LITE_API_ENTRY
void forward_lite_maxpool_is8os8_scalepos(const ai_i8 *pDataIn, ai_i8 *pDataOut,
  const ai_u16 dim_im_in_x, const ai_u16 dim_im_in_y, const ai_u16 ch_im_in,
  const ai_u16 dim_kernel_x, const ai_u16 dim_kernel_y,
  const ai_u16 padding_x, const ai_u16 padding_y,
  const ai_u16 stride_x, const ai_u16 stride_y,
  const ai_u16 dim_im_out_x, const ai_u16 dim_im_out_y,
  const ai_float InOut_ScaleRatio, const ai_i8 In_ZeroPoint, const ai_i8 Out_ZeroPoint);

/*!
 * @brief Handles maxpool with 8 bits signed input and output with a negative scale of the input - Lite I/F
 * @ingroup lite_maxpool_dqnn
 */
LITE_API_ENTRY
void forward_lite_maxpool_is8os8_scaleneg(const ai_i8 *pDataIn, ai_i8 *pDataOut,
  const ai_u16 dim_im_in_x, const ai_u16 dim_im_in_y, const ai_u16 ch_im_in,
  const ai_u16 dim_kernel_x, const ai_u16 dim_kernel_y,
  const ai_u16 padding_x, const ai_u16 padding_y,
  const ai_u16 stride_x, const ai_u16 stride_y,
  const ai_u16 dim_im_out_x, const ai_u16 dim_im_out_y,
  const ai_float InOut_ScaleRatio, const ai_i8 In_ZeroPoint, const ai_i8 Out_ZeroPoint);

/*!
 * @brief Handles maxpool with 8 bits unsigned input and output with a positive scale of the input - Lite I/F
 * @ingroup lite_maxpool_dqnn
 */
LITE_API_ENTRY
void forward_lite_maxpool_iu8ou8_scalepos(const ai_u8 *pDataIn, ai_u8 *pDataOut,
  const ai_u16 dim_im_in_x, const ai_u16 dim_im_in_y, const ai_u16 ch_im_in,
  const ai_u16 dim_kernel_x, const ai_u16 dim_kernel_y,
  const ai_u16 padding_x, const ai_u16 padding_y,
  const ai_u16 stride_x, const ai_u16 stride_y,
  const ai_u16 dim_im_out_x, const ai_u16 dim_im_out_y,
  const ai_float InOut_ScaleRatio, const ai_u8 In_ZeroPoint, const ai_u8 Out_ZeroPoint);

/*!
 * @brief Handles maxpool with 8 bits unsigned input and output with a negative scale of the input - Lite I/F
 * @ingroup lite_maxpool_dqnn
 */
LITE_API_ENTRY
void forward_lite_maxpool_iu8ou8_scaleneg(const ai_u8 *pDataIn, ai_u8 *pDataOut,
  const ai_u16 dim_im_in_x, const ai_u16 dim_im_in_y, const ai_u16 ch_im_in,
  const ai_u16 dim_kernel_x, const ai_u16 dim_kernel_y,
  const ai_u16 padding_x, const ai_u16 padding_y,
  const ai_u16 stride_x, const ai_u16 stride_y,
  const ai_u16 dim_im_out_x, const ai_u16 dim_im_out_y,
  const ai_float InOut_ScaleRatio, const ai_u8 In_ZeroPoint, const ai_u8 Out_ZeroPoint);

/*!
 * @brief Handles maxpool with 16 bits signed input and output with a positive scale of the input - Lite I/F
 * @ingroup lite_maxpool_dqnn
 */
LITE_API_ENTRY
void forward_lite_maxpool_is16os16_scalepos(const ai_i16 *pApInput, ai_i16 *pApOutput,
  const ai_u16 dim_im_in_x, const ai_u16 dim_im_in_y, const ai_u16 ch_im_in,
  const ai_u16 dim_kernel_x, const ai_u16 dim_kernel_y,
  const ai_u16 padding_x, const ai_u16 padding_y,
  const ai_u16 stride_x, const ai_u16 stride_y,
  const ai_u16 dim_im_out_x, const ai_u16 dim_im_out_y,
  const ai_float InOut_ScaleRatio, const ai_i16 In_ZeroPoint, const ai_i16 Out_ZeroPoint);

/*!
 * @brief Handles maxpool with 16 bits unsigned input and output with a positive scale of the input - Lite I/F
 * @ingroup lite_maxpool_dqnn
 */
LITE_API_ENTRY
void forward_lite_maxpool_iu16ou16_scalepos(const ai_u16 *pApInput, ai_u16 *pApOutput,
  const ai_u16 dim_im_in_x, const ai_u16 dim_im_in_y, const ai_u16 ch_im_in,
  const ai_u16 dim_kernel_x, const ai_u16 dim_kernel_y,
  const ai_u16 padding_x, const ai_u16 padding_y,
  const ai_u16 stride_x, const ai_u16 stride_y,
  const ai_u16 dim_im_out_x, const ai_u16 dim_im_out_y,
  const ai_float InOut_ScaleRatio, const ai_u16 In_ZeroPoint, const ai_u16 Out_ZeroPoint);

#endif /*LITE_MAXPOOL_DQNN_H*/
63
lib/stai/libstai/include/lite_nl_generic_integer.h
Normal file
@ -0,0 +1,63 @@
/**
  ******************************************************************************
  * @file    lite_nl_generic_integer.h
  * @author  AIS
  * @brief   header file of AI platform lite integer non linearities
  ******************************************************************************
  * @attention
  *
  * Copyright (c) 2023 STMicroelectronics.
  * All rights reserved.
  *
  * This software is licensed under terms that can be found in the LICENSE file
  * in the root directory of this software component.
  * If no LICENSE file comes with this software, it is provided AS-IS.
  *
  ******************************************************************************
  */
#ifndef LITE_NL_GENERIC_INTEGER_H
#define LITE_NL_GENERIC_INTEGER_H

#include "ai_lite_interface.h"

/**
 * @brief forward lite function for a s8 softmax non-linearity where the softmax is applied per channel.
 * @ingroup lite_nl_generic_integer
 * @param output The pointer to output buffer (s8).
 * @param input The pointer to input buffer (s8).
 * @param in_size The size of the input (including channels).
 * @param inner_loop_cnt The size after the selected axis.
 * @param axis_elem The number of elements along the selected axis.
 * @param mult
 * @param shift
 * @param min_diff
 */
LITE_API_ENTRY
void forward_lite_nl_softmax_is8os8(
  ai_i8* out_ptr, const ai_i8* in_ptr,
  const ai_size in_size, const ai_size inner_loop_cnt, const ai_size axis_elem,
  const ai_i32 mult, const ai_i32 shift, const ai_i32 min_diff,
  ai_i32* scratch);

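/* Illustrative call sketch (not part of this header): applying the s8 softmax
 * over the last axis of a 1x1x10 tensor. The fixed-point parameters `mult`,
 * `shift` and `min_diff` are hypothetical; in practice they come from the
 * quantization parameters emitted by the code generator. */
#if 0
static void example_softmax_is8os8(const ai_i8 in[10], ai_i8 out[10], ai_i32 scratch[10])
{
  forward_lite_nl_softmax_is8os8(out, in,
    /* in_size */ 10, /* inner_loop_cnt */ 1, /* axis_elem */ 10,
    /* mult */ 1717986918, /* shift */ 1, /* min_diff */ -128, scratch);
}
#endif
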
/**
 * @brief forward lite function for a u8 softmax non-linearity where the softmax is applied per channel.
 * @ingroup lite_nl_generic_integer
 * @param output The pointer to output buffer (u8).
 * @param input The pointer to input buffer (u8).
 * @param in_size The size of the input (including channels).
 * @param inner_loop_cnt The size after the selected axis.
 * @param axis_elem The number of elements along the selected axis.
 * @param mult
 * @param shift
 * @param min_diff
 */
LITE_API_ENTRY
void forward_lite_nl_softmax_iu8ou8(
  ai_u8* out_ptr, const ai_u8* in_ptr,
  const ai_size in_size, const ai_size inner_loop_cnt, const ai_size axis_elem,
  const ai_i32 mult, const ai_i32 shift, const ai_i32 min_diff,
  ai_i32* scratch);

#endif /* LITE_NL_GENERIC_INTEGER_H */
75
lib/stai/libstai/include/lite_nl_list.h
Normal file
@ -0,0 +1,75 @@
/**
  ******************************************************************************
  * @file    lite_nl_list.h
  * @author  STMicroelectronics
  * @brief   header file of lite supported non-linearities routines
  ******************************************************************************
  * @attention
  *
  * Copyright (c) 2022 STMicroelectronics.
  * All rights reserved.
  *
  * This software is licensed under terms that can be found in the LICENSE file
  * in the root directory of this software component.
  * If no LICENSE file comes with this software, it is provided AS-IS.
  *
  ******************************************************************************
  */

// #define LITE_NL_ENTRY(nl_id_, nl_name_, nl_op_, nl_op_args_)

/* No sentry. This is deliberate!! */

LITE_NL_ENTRY(1, abs, AI_ABS, 1)
LITE_NL_ENTRY(2, acos, AI_MATH_ACOS, 1)
LITE_NL_ENTRY(3, acosh, AI_MATH_ACOSH, 1)
LITE_NL_ENTRY(4, asin, AI_MATH_ASIN, 1)
LITE_NL_ENTRY(5, asinh, AI_MATH_ASINH, 1)
LITE_NL_ENTRY(6, atan, AI_MATH_ATAN, 1)
LITE_NL_ENTRY(7, atanh, AI_MATH_ATANH, 1)
LITE_NL_ENTRY(8, ceil, AI_CEIL, 1)
LITE_NL_ENTRY(9, cos, AI_MATH_COS, 1)
LITE_NL_ENTRY(10, cosh, AI_MATH_COSH, 1)
LITE_NL_ENTRY(11, erf, AI_MATH_ERF, 1)
LITE_NL_ENTRY(12, exp, AI_MATH_EXP, 1)
LITE_NL_ENTRY(13, floor, AI_FLOOR, 1)
LITE_NL_ENTRY(14, hardmax, /**/, 0)
LITE_NL_ENTRY(15, log, AI_MATH_LOG, 1)
LITE_NL_ENTRY(16, logistic, AI_MATH_LOGISTIC, 1)
LITE_NL_ENTRY(17, neg, AI_NEG, 1)
LITE_NL_ENTRY(18, rsqrt, AI_MATH_RSQRT, 1)
LITE_NL_ENTRY(19, sin, AI_MATH_SIN, 1)
LITE_NL_ENTRY(20, sinh, AI_MATH_SINH, 1)
LITE_NL_ENTRY(21, tan, AI_MATH_TAN, 1)
LITE_NL_ENTRY(22, square, AI_MATH_SQUARE, 1)
LITE_NL_ENTRY(23, reciprocal, AI_RECIPROCAL, 1)
LITE_NL_ENTRY(24, round, AI_ROUND, 1)
LITE_NL_ENTRY(25, sigmoid, AI_MATH_SIGMOID, 1)
LITE_NL_ENTRY(26, swish, AI_MATH_SWISH, 1)
LITE_NL_ENTRY(27, hard_swish, AI_MATH_HARD_SWISH, 1)
LITE_NL_ENTRY(28, sign, AI_SIGN, 1)
LITE_NL_ENTRY(29, sqrt, AI_MATH_SQRT, 1)
// LITE_NL_ENTRY(30, softmax, /**/, 0)  // for future changes
// LITE_NL_ENTRY(31, softmax_zero_channel, /**/, 0)  // for future changes
LITE_NL_ENTRY(32, soft_plus, AI_MATH_SOFT_PLUS, 1)
LITE_NL_ENTRY(33, soft_sign, AI_MATH_SOFT_SIGN, 1)
LITE_NL_ENTRY(34, tanh, AI_MATH_TANH, 1)
LITE_NL_ENTRY(35, prelu, /**/, 0)
LITE_NL_ENTRY(36, relu, AI_MATH_RELU, 1)
LITE_NL_ENTRY(37, relu_generic, /**/, 0)

LITE_NL_ENTRY(101, elu, AI_MATH_ELU, 2)
LITE_NL_ENTRY(102, relu_thresholded, AI_MATH_RELU_THRESHOLDED, 2)

LITE_NL_ENTRY(201, clip, AI_CLAMP, 3)
LITE_NL_ENTRY(202, hard_sigmoid, AI_MATH_HARD_SIGMOID, 3)
LITE_NL_ENTRY(203, selu, AI_MATH_SELU, 3)
// LITE_NL_ENTRY(204, gelu, AI_MATH_GELU, 2)

#undef LITE_NL_ENTRY
#undef LITE_NL_IIF_0
#undef LITE_NL_IIF_1
#undef LITE_NL_IIF_2
#undef LITE_NL_IIF_3
52
lib/stai/libstai/include/lite_norm_f32.h
Normal file
@ -0,0 +1,52 @@
/**
  ******************************************************************************
  * @file    lite_norm_f32.h
  * @author  AIS
  * @brief   header file of AI platform norm in lite mode
  ******************************************************************************
  * @attention
  *
  * Copyright (c) 2023 STMicroelectronics.
  * All rights reserved.
  *
  * This software is licensed under terms that can be found in the LICENSE file
  * in the root directory of this software component.
  * If no LICENSE file comes with this software, it is provided AS-IS.
  *
  ******************************************************************************
  */
#ifndef LITE_NORM_F32_H
#define LITE_NORM_F32_H
#pragma once

#include "ai_lite_interface.h"

enum ai_lite_norm_type_ {
  AI_LITE_NORM_NONE = 0,
  AI_LITE_NORM_L1   = 1,
  AI_LITE_NORM_L2   = 2,
  AI_LITE_NORM_MAX  = 3,
};

/*!
 * @brief Forward function for a normalization (norm) layer with
 *        signed float input, signed float output, and float parameters.
 * @ingroup lite_norm_f32
 * @param output The pointer to output buffer.
 * @param input The pointer to input buffer.
 * @param ai_lite_norm_type The norm type (see enum ai_lite_norm_type_).
 * @param exponent The norm exponent.
 * @param n_axis The number of elements along the normalized axis.
 * @param n_axis_stride The stride along the normalized axis.
 * @param n_el The number of elements in the input tensor.
 * @param scale Whether to apply scaling.
 */
LITE_API_ENTRY
void forward_lite_norm_if32of32(ai_float* output,
                                const ai_float* input,
                                const ai_u32 ai_lite_norm_type,
                                const ai_float exponent,
                                const ai_size n_axis,
                                const ai_size n_axis_stride,
                                const ai_size n_el,
                                ai_bool scale);

#endif /* LITE_NORM_F32_H */
53
lib/stai/libstai/include/lite_operators.h
Normal file
@ -0,0 +1,53 @@
/**
  ******************************************************************************
  * @file    lite_operators.h
  * @author  AIS
  * @brief   main header file of AI platform lite operators list
  ******************************************************************************
  * @attention
  *
  * Copyright (c) 2021 STMicroelectronics.
  * All rights reserved.
  *
  * This software is licensed under terms that can be found in the LICENSE file
  * in the root directory of this software component.
  * If no LICENSE file comes with this software, it is provided AS-IS.
  *
  ******************************************************************************
  */
#ifndef LITE_OPERATORS_H
#define LITE_OPERATORS_H

#include "lite_internal_apis.h"

#include "lite_bn_f32.h"
#include "lite_bn_integer.h"
#include "lite_conv2d.h"
#include "lite_conv2d_dqnn.h"
#include "lite_conv2d_is16.h"
#include "lite_convert_dqnn.h"
#include "lite_dense_if32.h"
#include "lite_dense_is1.h"
#include "lite_dense_is16.h"
#include "lite_dense_is1ws1.h"
#include "lite_dense_ws1.h"
#include "lite_gru_f32.h"
#include "lite_dw_dqnn.h"
#include "lite_pw_dqnn.h"
#include "lite_conv2d_sssa8_ch.h"

#include "lite_dense_is8os8ws8.h"
#include "lite_dense_is8os1ws1.h"
#include "lite_generic_float.h"
#include "lite_pool_f32.h"
#include "lite_maxpool_dqnn.h"
#include "lite_nl_generic_integer.h"
#include "lite_pad_generic.h"
#include "lite_pad_dqnn.h"
#include "lite_upsample_generic.h"
#include "lite_resize.h"
#include "lite_lstm.h"
#include "lite_argminmax.h"
#include "lite_pool_is8os8.h"

#endif /* LITE_OPERATORS_H */
48
lib/stai/libstai/include/lite_pad_dqnn.h
Normal file
@ -0,0 +1,48 @@
/**
  ******************************************************************************
  * @file    lite_pad_dqnn.h
  * @author  AIS
  * @brief   header file of AI platform lite padding kernel datatypes
  ******************************************************************************
  * @attention
  *
  * Copyright (c) 2021 STMicroelectronics.
  * All rights reserved.
  *
  * This software is licensed under terms that can be found in the LICENSE file
  * in the root directory of this software component.
  * If no LICENSE file comes with this software, it is provided AS-IS.
  *
  ******************************************************************************
  */
#ifndef LITE_PADDING_DQNN_H
#define LITE_PADDING_DQNN_H

#include "ai_lite_interface.h"

/******************************************************************************/
/*  Forward Functions Section                                                 */
/******************************************************************************/

/*!
 * @brief Handles padding with binary input and binary output - Lite I/F
 * @ingroup lite_padding_dqnn
 */
LITE_API_ENTRY
void forward_lite_pad_is1os1(const ai_u32 *pDataIn_init, ai_u32 *pDataOut_init,
                             const ai_i32 width_in, const ai_i32 width_out,
                             const ai_i32 height_in, const ai_i32 height_out,
                             const ai_u32 n_channel_out, const ai_i32 mode,
                             const ai_u16 pads_x, const ai_u16 pads_y,
                             const ai_u16 pads_x_r, const ai_u16 pads_y_b,
                             const ai_u32 pad_value);

#endif /*LITE_PADDING_DQNN_H*/
111
lib/stai/libstai/include/lite_pad_generic.h
Normal file
@ -0,0 +1,111 @@
/**
  ******************************************************************************
  * @file    lite_pad_generic.h
  * @author  AIS
  * @brief   header file of AI platform lite padding kernel datatypes
  ******************************************************************************
  * @attention
  *
  * Copyright (c) 2022 STMicroelectronics.
  * All rights reserved.
  *
  * This software is licensed under terms that can be found in the LICENSE file
  * in the root directory of this software component.
  * If no LICENSE file comes with this software, it is provided AS-IS.
  *
  ******************************************************************************
  */
#ifndef LITE_PAD_GENERIC_H
#define LITE_PAD_GENERIC_H

#include "ai_lite_interface.h"

/******************************************************************************/
/*  Forward Functions Section                                                 */
/******************************************************************************/

/*!
 * @brief Handles padding with 8 bits input/output in constant mode - Lite I/F
 *        Channel 1st Format Input and Output
 * @ingroup lite_padding_dqnn
 */

/* Variant used for padding pattern = (1, 1, 1, 1) */
LITE_API_ENTRY
void forward_lite_pad_8bit_ch1st_3x3_constant_P1111(ai_ptr_const in_data_tensor,
                                                    ai_ptr out_data_tensor,
                                                    const ai_handle fill_value,
                                                    const ai_i32 height_in,
                                                    const ai_i32 channel_in,
                                                    const ai_ptr_offset ch_stride_in,
                                                    const ai_ptr_offset h_stride_in,
                                                    const ai_ptr_offset h_stride_pad);

/* Variant used for padding pattern = (0, 0, 2, 2) */
LITE_API_ENTRY
void forward_lite_pad_8bit_ch1st_3x3_constant_P0022(ai_ptr_const in_data_tensor,
                                                    ai_ptr out_data_tensor,
                                                    const ai_handle fill_value,
                                                    const ai_i32 height_in,
                                                    const ai_i32 channel_in,
                                                    const ai_ptr_offset ch_stride_in,
                                                    const ai_ptr_offset h_stride_in,
                                                    const ai_ptr_offset h_stride_pad);

/*!
 * @brief Handles padding with 8 bits input/output in constant mode - Lite I/F
 * @ingroup lite_padding_dqnn
 */
LITE_API_ENTRY
void forward_lite_pad_constant(ai_ptr_const in_data, ai_ptr out_data,
                               const ai_handle fill_value, const ai_i16 in_bits,
                               const ai_i32 height_in,
                               const ai_ptr_offset ch_stride_in,
                               const ai_ptr_offset h_stride_in,
                               const ai_ptr_offset h_stride_pad,
                               const ai_ptr_offset h_stride_pad_b,
                               const ai_ptr_offset w_stride_pad,
                               const ai_ptr_offset w_stride_pad_r);

/*!
 * @brief Handles padding with 8 bits input/output in edge mode - Lite I/F
 * @ingroup lite_padding_dqnn
 */
void forward_lite_pad_edge(ai_ptr_const in_data_tensor, ai_ptr out_data,
                           const ai_i32 height_in,
                           const ai_i16 pads_y, const ai_i16 pads_x_r,
                           const ai_ptr_offset h_stride_in,
                           const ai_ptr_offset w_stride_in,
                           const ai_ptr_offset h_stride_out,
                           const ai_ptr_offset h_stride_pad,
                           const ai_ptr_offset w_stride_pad,
                           const ai_ptr_offset h_stride_pad_b);

/*!
 * @brief Handles padding with 8 bits input/output in reflect mode - Lite I/F
 * @ingroup lite_padding_dqnn
 */
void forward_lite_pad_reflect(ai_ptr_const in_data, ai_ptr out_data,
                              const ai_i32 depth, const ai_i32 height_in, const ai_i32 width_in,
                              const ai_i32 height_out, const ai_i32 width_out,
                              const ai_ptr_offset h_stride_in, const ai_ptr_offset w_stride_in,
                              const ai_ptr_offset h_stride_out, const ai_ptr_offset w_stride_out,
                              const ai_i16 pads_x, const ai_i16 pads_y, const ai_i16 pads_y_b,
                              const ai_ptr_offset h_stride_pad,
                              const ai_ptr_offset w_stride_pad,
                              const ai_ptr_offset w_stride_pad_r);

#endif /* LITE_PAD_GENERIC_H */
69
lib/stai/libstai/include/lite_pool_f32.h
Normal file
@ -0,0 +1,69 @@
/**
  ******************************************************************************
  * @file    lite_pool_f32.h
  * @author  AIS
  * @brief   header file of AI platform lite float pooling kernel datatypes
  ******************************************************************************
  * @attention
  *
  * Copyright (c) 2021 STMicroelectronics.
  * All rights reserved.
  *
  * This software is licensed under terms that can be found in the LICENSE file
  * in the root directory of this software component.
  * If no LICENSE file comes with this software, it is provided AS-IS.
  *
  ******************************************************************************
  */
#ifndef LITE_POOL_F32_H
#define LITE_POOL_F32_H

#include "ai_lite_interface.h"

#define FUNC_POOL(handle) \
  ((func_pool)(handle))

/*!
 * @typedef (*func_pool)
 * @ingroup layers_pool
 * @brief Function pointer for generic pooling transform
 * this function pointer abstracts a generic pooling layer.
 * see @ref pool_func_ap_array_f32 as examples
 */
typedef void (*func_pool)(ai_float* in,
                          const ai_u16 dim_im_in_x, const ai_u16 dim_im_in_y,
                          const ai_u16 ch_im_in,
                          const ai_u16 dim_kernel_x, const ai_u16 dim_kernel_y,
                          const ai_u16 padding_x, const ai_u16 padding_y,
                          const ai_u16 stride_x, const ai_u16 stride_y,
                          const ai_u16 dim_im_out_x, const ai_u16 dim_im_out_y,
                          ai_float* out);

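/* Illustrative usage sketch (not part of this header): the FUNC_POOL macro
 * casts an opaque ai_handle back to a func_pool pointer so a pooling kernel
 * selected at runtime can be invoked generically. Dimensions are hypothetical. */
#if 0
static void example_func_pool_dispatch(ai_handle pool_handle, ai_float* in, ai_float* out)
{
  /* pool_handle may wrap pool_func_mp_array_f32 or pool_func_ap_array_f32 */
  FUNC_POOL(pool_handle)(in,
    /* dim_im_in_x */ 8, /* dim_im_in_y */ 8, /* ch_im_in */ 4,
    /* dim_kernel_x */ 2, /* dim_kernel_y */ 2,
    /* padding_x */ 0, /* padding_y */ 0,
    /* stride_x */ 2, /* stride_y */ 2,
    /* dim_im_out_x */ 4, /* dim_im_out_y */ 4, out);
}
#endif
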
/******************************************************************************/
/*  Pooling Functions Section                                                 */
/******************************************************************************/

AI_INTERNAL_API
void pool_func_mp_array_f32(ai_float* pData_in,
                            const ai_u16 dim_im_in_x, const ai_u16 dim_im_in_y,
                            const ai_u16 ch_im_in,
                            const ai_u16 dim_kernel_x, const ai_u16 dim_kernel_y,
                            const ai_u16 padding_x, const ai_u16 padding_y,
                            const ai_u16 stride_x, const ai_u16 stride_y,
                            const ai_u16 dim_im_out_x, const ai_u16 dim_im_out_y,
                            ai_float* pData_out);

AI_INTERNAL_API
void pool_func_ap_array_f32(ai_float *pData_in,
                            const ai_u16 dim_im_in_x, const ai_u16 dim_im_in_y,
                            const ai_u16 ch_im_in,
                            const ai_u16 dim_kernel_x, const ai_u16 dim_kernel_y,
                            const ai_u16 padding_x, const ai_u16 padding_y,
                            const ai_u16 stride_x, const ai_u16 stride_y,
                            const ai_u16 dim_im_out_x, const ai_u16 dim_im_out_y,
                            ai_float *pData_out);

#endif /* LITE_POOL_F32_H */
62
lib/stai/libstai/include/lite_pool_is8os8.h
Normal file
@ -0,0 +1,62 @@
/**
  ******************************************************************************
  * @file    lite_pool_is8os8.h
  * @author  AIS
  * @brief   header file of AI platform lite integer pooling function
  ******************************************************************************
  * @attention
  *
  * Copyright (c) 2022 STMicroelectronics.
  * All rights reserved.
  *
  * This software is licensed under terms that can be found in the LICENSE file
  * in the root directory of this software component.
  * If no LICENSE file comes with this software, it is provided AS-IS.
  *
  ******************************************************************************
  */
#ifndef LITE_POOL_IS8OS8
#define LITE_POOL_IS8OS8

#include "ai_lite_interface.h"

/**
 * @brief lite function for average pooling.
 * @ingroup lite_nl_generic_integer
 * @param input The pointer to input buffer.
 * @param output The pointer to output buffer.
 * @param[in] dim_im_in_x dimension of the input width
 * @param[in] dim_im_in_y dimension of the input height
 * @param[in] ch_im_in number of the input channel
 * @param[in] dim_kernel_x dimension of the kernel width
 * @param[in] dim_kernel_y dimension of the kernel height
 * @param[in] padding_x first dimension of the padding
 * @param[in] padding_y second dimension of the padding
 * @param[in] stride_x first dimension of the stride
 * @param[in] stride_y second dimension of the stride
 * @param[in] dim_im_out_x dimension of the output width
 * @param[in] dim_im_out_y dimension of the output height
 * @param[in] in_scale input scale
 * @param[in] in_zeropoint input zero point
 * @param[in] out_scale output scale
 * @param[in] out_zeropoint output zero point
 */
void forward_lite_avepool_is8os8(const ai_i8 *pData_in, ai_i8* pData_out,
                                 const ai_u16 dim_im_in_x, const ai_u16 dim_im_in_y,
                                 const ai_u16 ch_im_in,
                                 const ai_u16 dim_kernel_x, const ai_u16 dim_kernel_y,
                                 const ai_u16 padding_x, const ai_u16 padding_y,
                                 const ai_u16 stride_x, const ai_u16 stride_y,
                                 const ai_u16 dim_im_out_x, const ai_u16 dim_im_out_y,
                                 const ai_float in_scale, const ai_i8 in_zeropoint,
                                 const ai_float out_scale, const ai_i8 out_zeropoint);

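/* Illustrative call sketch (not part of this header): 2x2 average pooling,
 * stride 2, on an 8x8x4 s8 tensor. The quantization scales and zero points
 * are hypothetical placeholders. */
#if 0
static void example_avepool_is8os8(const ai_i8* in, ai_i8* out)
{
  forward_lite_avepool_is8os8(in, out,
    /* dim_im_in_x */ 8, /* dim_im_in_y */ 8, /* ch_im_in */ 4,
    /* dim_kernel_x */ 2, /* dim_kernel_y */ 2,
    /* padding_x */ 0, /* padding_y */ 0,
    /* stride_x */ 2, /* stride_y */ 2,
    /* dim_im_out_x */ 4, /* dim_im_out_y */ 4,
    /* in_scale */ 0.05f, /* in_zeropoint */ 0,
    /* out_scale */ 0.05f, /* out_zeropoint */ 0);
}
#endif
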
#endif /* LITE_POOL_IS8OS8 */

117
lib/stai/libstai/include/lite_pw.h
Normal file
@ -0,0 +1,117 @@
/**
  ******************************************************************************
  * @file    lite_pw.h
  * @author  AIS
  * @brief   header file of AI platform lite pointwise kernel datatypes
  ******************************************************************************
  * @attention
  *
  * Copyright (c) 2021 STMicroelectronics.
  * All rights reserved.
  *
  * This software is licensed under terms that can be found in the LICENSE file
  * in the root directory of this software component.
  * If no LICENSE file comes with this software, it is provided AS-IS.
  *
  ******************************************************************************
  */
#ifndef LITE_PW_H
#define LITE_PW_H

#include "ai_lite_interface.h"

/******************************************************************************/
/*  Forward Functions Section                                                 */
/******************************************************************************/

/*!
 * @brief Handles pw convolutions generic case
 * @ingroup lite_pw
 */
LITE_API_ENTRY
void
forward_lite_pw_sssa8_ch(const ai_i8 *pData_in,
                         const ai_u16 width_in, const ai_u16 height_in,
                         const ai_u16 filt_stride_x, const ai_u16 filt_stride_y,
                         const ai_u16 n_channel_in, const ai_i8 *pWeights,
                         const ai_u16 n_channel_out, const ai_i32 *pBias,
                         const ai_i8 in_zeropoint, const ai_i8 out_zeropoint,
                         const ai_float in_scale, const ai_float out_scale,
                         const ai_float *pWt_scale,
                         const ai_layer_format_type out_ch_format,
                         ai_i8 *pData_out,
                         ai_u16 weights_prefetch_enabled,
                         ai_i32 scratch_size, ai_i16 *pBuffer_a);

void
forward_lite_pw_hsp_sssa8_ch(const ai_i8 *pData_in,
                             const ai_u16 width_in, const ai_u16 height_in,
                             const ai_u16 filt_stride_x, const ai_u16 filt_stride_y,
                             const ai_u16 n_channel_in, const ai_i8 *pWeights,
                             const ai_u16 n_channel_out, const ai_i32 *pBias,
                             const ai_i8 in_zeropoint, const ai_i8 out_zeropoint,
                             const ai_float in_scale, const ai_float out_scale,
                             const ai_float *pWt_scale,
                             const ai_layer_format_type out_ch_format,
                             ai_i8 *pData_out,
                             ai_i32 scratch_size, ai_i16 *pBuffer_a);

void
forward_lite_pw_hsp_1step_sssa8_ch(const ai_i8 *pData_in,
                                   const ai_u16 width_in, const ai_u16 height_in,
                                   const ai_u16 filt_stride_x, const ai_u16 filt_stride_y,
                                   const ai_u16 n_channel_in, const ai_i8 *pWeights,
                                   const ai_u16 n_channel_out, const ai_i32 *pBias,
                                   const ai_i8 in_zeropoint, const ai_i8 out_zeropoint,
                                   const ai_float in_scale, const ai_float out_scale,
                                   const ai_float *pWt_scale,
                                   const ai_layer_format_type out_ch_format,
                                   ai_i8 *pData_out,
                                   ai_i32 scratch_size, ai_i16 *pBuffer_a);

void
forward_lite_pw_hsp_3step_sssa8_ch(const ai_i8 *pData_in,
                                   const ai_u16 width_in, const ai_u16 height_in,
                                   const ai_u16 filt_stride_x, const ai_u16 filt_stride_y,
                                   const ai_u16 n_channel_in, const ai_i8 *pWeights,
                                   const ai_u16 n_channel_out, const ai_i32 *pBias,
                                   const ai_i8 in_zeropoint, const ai_i8 out_zeropoint,
                                   const ai_float in_scale, const ai_float out_scale,
                                   const ai_float *pWt_scale,
                                   const ai_layer_format_type out_ch_format,
                                   ai_i8 *pData_out,
                                   ai_i32 scratch_size, ai_i16 *pBuffer_a);

#endif /*LITE_PW_H*/
127
lib/stai/libstai/include/lite_pw_dqnn.h
Normal file
@ -0,0 +1,127 @@
/**
  ******************************************************************************
  * @file    lite_pw_dqnn.h
  * @author  AIS
  * @brief   header file of AI platform lite dqnn pointwise kernel datatypes
  ******************************************************************************
  * @attention
  *
  * Copyright (c) 2021 STMicroelectronics.
  * All rights reserved.
  *
  * This software is licensed under terms that can be found in the LICENSE file
  * in the root directory of this software component.
  * If no LICENSE file comes with this software, it is provided AS-IS.
  *
  ******************************************************************************
  */
#ifndef LITE_PW_DQNN_H
#define LITE_PW_DQNN_H

#include "ai_lite_interface.h"

/******************************************************************************/
/*  Forward Functions Section                                                 */
/******************************************************************************/

/*!
 * @brief Handles point wise convolution with binary input, binary output and
 *        binary weights - Lite API version
 * @ingroup lite_pw_dqnn
 */
LITE_API_ENTRY
void forward_lite_pw_is1os1ws1_bn(const ai_u32 *pDataIn_init, ai_u32 *pDataOut_init,
                                  const ai_u32 *pWeights_init,
                                  const ai_u32 n_channel_in, const ai_u32 n_channel_out,
                                  const ai_i32 width_out, const ai_i32 height_out,
                                  const ai_i32 *pThreshold);

/*!
 * @brief Handles point wise convolution with binary input, binary output and
 *        binary weights - Lite API version - Optimized thanks to Optim2
 *        assumptions
 * @ingroup lite_pw_dqnn
 */
LITE_API_ENTRY
void forward_lite_pw_is1os1ws1_bn_optim2(const ai_u32 *pDataIn_init, ai_u32 *pDataOut_init,
                                         const ai_u32 *pWeights_init,
                                         const ai_u32 n_channel_in, const ai_u32 n_channel_out,
                                         const ai_i32 width_out, const ai_i32 height_out,
                                         const ai_i32 *pThreshold);

/*!
 * @brief Handles point wise convolution with binary input, 8-bits output and
 *        binary weights - Lite API version
 * @ingroup lite_pw_dqnn
 */
LITE_API_ENTRY
void forward_lite_pw_is1os8ws1_bn(const ai_u32 *pDataIn_init, ai_i8 *pDataOut_init,
                                  const ai_u32 *pWeights_init,
                                  const ai_u32 n_channel_in, const ai_u32 n_channel_out,
                                  const ai_i32 width_out, const ai_i32 height_out,
                                  const ai_float *pScale, const ai_float *pOffset);

/*!
 * @brief Handles point wise convolution with binary input, 8-bits output and
 *        binary weights - Lite API version - Optimized thanks to Optim1
 *        assumptions
 * @ingroup lite_pw_dqnn
 */
LITE_API_ENTRY
void forward_lite_pw_is1os8ws1_bn_optim1(const ai_u32 *pDataIn_init, ai_i8 *pDataOut_init,
                                         const ai_u32 *pWeights_init,
                                         const ai_u32 n_channel_in, const ai_u32 n_channel_out,
                                         const ai_i32 width_out, const ai_i32 height_out,
                                         const ai_float *pScale, const ai_float *pOffset);

/*!
 * @brief Handles point-wise convolution with binary input, float32 output
 *        and binary weights - Lite API version
 * @ingroup lite_pw_dqnn
 */
LITE_API_ENTRY
void forward_lite_pw_is1of32ws1_bn(const ai_u32 *pDataIn_init, ai_float *pDataOut_init,
                                   const ai_u32 *pWeights_init,
                                   const ai_u32 n_channel_in, const ai_u32 n_channel_out,
                                   const ai_i32 width_out, const ai_i32 height_out,
                                   const ai_float *pScale, const ai_float *pOffset);

/*!
 * @brief Handles point-wise convolution with binary input, float32 output
 *        and binary weights - Lite API version - Optimized thanks to Optim1
 *        assumptions
 * @ingroup lite_pw_dqnn
 */
LITE_API_ENTRY
void forward_lite_pw_is1of32ws1_bn_optim1(const ai_u32 *pDataIn_init, ai_float *pDataOut_init,
                                          const ai_u32 *pWeights_init,
                                          const ai_u32 n_channel_in, const ai_u32 n_channel_out,
                                          const ai_i32 width_out, const ai_i32 height_out,
                                          const ai_float *pScale, const ai_float *pOffset);

#endif /*LITE_PW_DQNN_H*/
79
lib/stai/libstai/include/lite_resize.h
Normal file
@ -0,0 +1,79 @@
/**
  ******************************************************************************
  * @file    lite_resize.h
  * @author  AIS
  * @brief   header file of AI platform lite resize kernel datatypes
  ******************************************************************************
  * @attention
  *
  * Copyright (c) 2023 STMicroelectronics.
  * All rights reserved.
  *
  * This software is licensed under terms that can be found in the LICENSE file
  * in the root directory of this software component.
  * If no LICENSE file comes with this software, it is provided AS-IS.
  *
  ******************************************************************************
  */
#ifndef LITE_RESIZE_H
#define LITE_RESIZE_H
#pragma once

#include "ai_lite_interface.h"

void forward_lite_resize_nearest(ai_ptr in_data, ai_ptr out_data,
                                 const ai_size width_in, const ai_size height_in,
                                 const ai_size n_channel_in, const ai_ptr_offset stride_ch,
                                 const ai_float width_scale, const ai_float height_scale,
                                 const ai_size width_out, const ai_size height_out,
                                 const ai_nearest_mode mode,
                                 const ai_coord_transf_mode coord_transf_mode,
                                 const ai_handle extrapol_val,
                                 const ai_float* roi);

void forward_lite_resize_bilinear_if32of32(const ai_float* in_data, ai_float* out_data,
                                           const ai_size width_in, const ai_size height_in,
                                           const ai_size n_channel_in,
                                           const ai_float width_scale, const ai_float height_scale,
                                           const ai_size width_out, const ai_size height_out,
                                           const ai_coord_transf_mode coord_transf_mode,
                                           const ai_handle extrapol_val,
                                           const ai_float* roi);

void forward_lite_resize_bilinear_is8os8(const ai_i8* in_data, ai_i8* out_data,
                                         const ai_size width_in, const ai_size height_in,
                                         const ai_size n_channel_in,
                                         const ai_float width_scale, const ai_float height_scale,
                                         const ai_size width_out, const ai_size height_out,
                                         const ai_coord_transf_mode coord_transf_mode,
                                         const ai_handle extrapol_val,
                                         const ai_float* roi);

void forward_lite_resize_bilinear_is16os16(const ai_i16* in_data, ai_i16* out_data,
                                           const ai_size width_in, const ai_size height_in,
                                           const ai_size n_channel_in,
                                           const ai_float width_scale, const ai_float height_scale,
                                           const ai_size width_out, const ai_size height_out,
                                           const ai_coord_transf_mode coord_transf_mode,
                                           const ai_handle extrapol_val,
                                           const ai_float* roi);

#endif /*LITE_RESIZE_H*/
239
lib/stai/libstai/include/lite_upsample.h
Normal file
@ -0,0 +1,239 @@
/**
  ******************************************************************************
  * @file    lite_upsample.h
  * @author  AIS
  * @brief   header file of AI platform lite upsample kernel datatypes
  ******************************************************************************
  * @attention
  *
  * Copyright (c) 2021 STMicroelectronics.
  * All rights reserved.
  *
  * This software is licensed under terms that can be found in the LICENSE file
  * in the root directory of this software component.
  * If no LICENSE file comes with this software, it is provided AS-IS.
  *
  ******************************************************************************
  */
#ifndef LITE_UPSAMPLE_H
#define LITE_UPSAMPLE_H
#pragma once

#include "ai_lite_interface.h"

/**
 * @brief Function to upsample in bilinear mode.
 *        The number of output channels is the same as the number of input channels.
 *        Input and output types are float.
 *
 * @param[in] in_data input data, to be upsampled
 * @param[out] out_data upsampled output data
 * @param[in] width_in input data width
 * @param[in] height_in input data height
 * @param[in] width_scale width_out/width_in scale ratio
 * @param[in] height_scale height_out/height_in scale ratio
 * @param[in] width_out output data width
 * @param[in] height_out output data height
 * @param[in] channel_in input/output channels.
 * @param[in] center centered coordinates
 */
void forward_lite_upsample_bilinear_if32of32(const ai_float* in_data, ai_float* out_data,
                                             const ai_size width_in, const ai_size height_in,
                                             const ai_float width_scale, const ai_float height_scale,
                                             const ai_size width_out, const ai_size height_out,
                                             const ai_size n_channel, const ai_bool center);

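/* Illustrative call sketch (not part of this header): upsampling a 4x4xC float
 * tensor to 8x8xC. The scale ratios follow the width_out/width_in convention
 * documented above; the buffers and channel count are hypothetical. */
#if 0
static void example_upsample_bilinear_f32(const ai_float* in, ai_float* out)
{
  const ai_size w_in = 4, h_in = 4, w_out = 8, h_out = 8, n_ch = 3;
  forward_lite_upsample_bilinear_if32of32(in, out,
    w_in, h_in,
    (ai_float)w_out / (ai_float)w_in,   /* width_scale  = 2.0f */
    (ai_float)h_out / (ai_float)h_in,   /* height_scale = 2.0f */
    w_out, h_out, n_ch, /* center */ 0);
}
#endif
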
/**
|
||||
* @brief Function to upsample in bilinear mode.
|
||||
* The number of output channels is the same as the number of input channels.
|
||||
* Input and output types are signed int8.
|
||||
*
|
||||
* @param[in] in_data input data, to be upsampled
|
||||
* @param[out] out_data upsampled output data
|
||||
* @param[in] width_in input data width
|
||||
* @param[in] height_in input data height
|
||||
* @param[in] width_scale width_out/width_in scale ratio
|
||||
* @param[in] height_scale height_out/height_in scale ratio
|
||||
* @param[in] width_out output data width
|
||||
* @param[in] height_out output data height
|
||||
* @param[in] channel_in input/output channels.
|
||||
* @param[in] center centered coordinates
|
||||
*/
|
||||
void forward_lite_upsample_bilinear_is8os8(const ai_i8* in_data,
|
||||
ai_i8* out_data,
|
||||
const ai_size width_in,
|
||||
const ai_size height_in,
|
||||
const ai_float width_scale,
|
||||
const ai_float height_scale,
|
||||
const ai_size width_out,
|
||||
const ai_size height_out,
|
||||
const ai_size n_channel,
|
||||
const ai_bool center);
|
||||
|
||||
/**
|
||||
* @brief Function to upsample in bilinear mode.
|
||||
* The number of output channels is the same as the number of input channels.
|
||||
* Input and output types are unsinged int8.
|
||||
*
|
||||
* @param[in] in_data input data, to be upsampled
|
||||
* @param[out] out_data upsampled output data
|
||||
* @param[in] width_in input data width
|
||||
* @param[in] height_in input data height
|
||||
* @param[in] width_scale width_out/width_in scale ratio
|
||||
* @param[in] height_scale height_out/height_in scale ratio
|
||||
* @param[in] width_out output data width
|
||||
* @param[in] height_out output data height
|
||||
* @param[in] n_channel input/output channels.
|
||||
* @param[in] center centered coordinates
|
||||
*/
|
||||
void forward_lite_upsample_bilinear_iu8ou8(const ai_u8* in_data,
|
||||
ai_u8* out_data,
|
||||
const ai_size width_in,
|
||||
const ai_size height_in,
|
||||
const ai_float width_scale,
|
||||
const ai_float height_scale,
|
||||
const ai_size width_out,
|
||||
const ai_size height_out,
|
||||
const ai_size n_channel,
|
||||
const ai_bool center);
|
||||
|
||||
/**
|
||||
* @brief Function to upsample in bilinear mode.
|
||||
* The number of output channels is the same as the number of input channels.
|
||||
* Input and output types are signed int16.
|
||||
*
|
||||
* @param[in] in_data input data, to be upsampled
|
||||
* @param[out] out_data upsampled output data
|
||||
* @param[in] width_in input data width
|
||||
* @param[in] height_in input data height
|
||||
* @param[in] width_scale width_out/width_in scale ratio
|
||||
* @param[in] height_scale height_out/height_in scale ratio
|
||||
* @param[in] width_out output data width
|
||||
* @param[in] height_out output data height
|
||||
* @param[in] n_channel input/output channels.
|
||||
* @param[in] center centered coordinates
|
||||
*/
|
||||
void forward_lite_upsample_bilinear_is16os16(const ai_i16* in_data,
|
||||
ai_i16* out_data,
|
||||
const ai_size width_in,
|
||||
const ai_size height_in,
|
||||
const ai_float width_scale,
|
||||
const ai_float height_scale,
|
||||
const ai_size width_out,
|
||||
const ai_size height_out,
|
||||
const ai_size n_channel,
|
||||
const ai_bool center);
|
||||
|
||||
/**
|
||||
* @brief Function to upsample in bilinear mode.
|
||||
* The number of output channels is the same as the number of input channels.
|
||||
* Input and output types are unsigned int16.
|
||||
*
|
||||
* @param[in] in_data input data, to be upsampled
|
||||
* @param[out] out_data upsampled output data
|
||||
* @param[in] width_in input data width
|
||||
* @param[in] height_in input data height
|
||||
* @param[in] width_scale width_out/width_in scale ratio
|
||||
* @param[in] height_scale height_out/height_in scale ratio
|
||||
* @param[in] width_out output data width
|
||||
* @param[in] height_out output data height
|
||||
* @param[in] n_channel input/output channels.
|
||||
* @param[in] center centered coordinates
|
||||
*/
|
||||
void forward_lite_upsample_bilinear_iu16ou16(const ai_u16* in_data,
|
||||
ai_u16* out_data,
|
||||
const ai_size width_in,
|
||||
const ai_size height_in,
|
||||
const ai_float width_scale,
|
||||
const ai_float height_scale,
|
||||
const ai_size width_out,
|
||||
const ai_size height_out,
|
||||
const ai_size n_channel,
|
||||
const ai_bool center);
|
||||
|
||||
/**
|
||||
* @brief Function to upsample in zero mode.
|
||||
* The number of output channels is the same as the number of input channels.
|
||||
* Input and output types are signed int8.
|
||||
*
|
||||
* @param[in] in_data input data, to be upsampled
|
||||
* @param[out] out_data upsampled output data
|
||||
* @param[in] width_in input data width
|
||||
* @param[in] height_in input data height
|
||||
* @param[in] width_scale width_out/width_in scale ratio
|
||||
* @param[in] height_scale height_out/height_in scale ratio
|
||||
* @param[in] width_out output data width
|
||||
* @param[in] height_out output data height
|
||||
* @param[in] channel_in input/output channels.
|
||||
* @param[in] zero_s8 out zeropoint value
|
||||
*/
|
||||
void forward_lite_upsample_zeros_is8os8( const ai_i8 *in_data,
|
||||
ai_i8 *out_data,
|
||||
const ai_size width_in,
|
||||
const ai_size height_in,
|
||||
const ai_float width_scale,
|
||||
const ai_float height_scale,
|
||||
const ai_size width_out,
|
||||
const ai_size height_out,
|
||||
const ai_size channel_in,
|
||||
const ai_i8 zero_s8);
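/*
 * Illustrative sketch of zero-insertion ("zero mode") upsampling of a quantized
 * s8 tensor by a factor of 2, assuming an output zero-point of -128; all sizes
 * are placeholders for the example.
 */
#include "lite_upsample.h"

static void example_upsample_zeros_s8(const ai_i8 *src, ai_i8 *dst)
{
  forward_lite_upsample_zeros_is8os8(src, dst,
                                     8, 8,         /* width_in, height_in */
                                     2.0f, 2.0f,   /* width_scale, height_scale */
                                     16, 16,       /* width_out, height_out */
                                     4,            /* channel_in */
                                     (ai_i8)-128); /* zero_s8: inserted zero-point value */
}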
|
||||
|
||||
/**
|
||||
* @brief Function to upsample in zero mode.
|
||||
* The number of output channels is the same as the number of input channels.
|
||||
* Input and output types are signed int16.
|
||||
*
|
||||
* @param[in] in_data input data, to be upsampled
|
||||
* @param[out] out_data upsampled output data
|
||||
* @param[in] width_in input data width
|
||||
* @param[in] height_in input data height
|
||||
* @param[in] width_scale width_out/width_in scale ratio
|
||||
* @param[in] height_scale height_out/height_in scale ratio
|
||||
* @param[in] width_out output data width
|
||||
* @param[in] height_out output data height
|
||||
* @param[in] channel_in input/output channels.
|
||||
* @param[in] zero_s16 out zeropoint value
|
||||
*/
|
||||
void forward_lite_upsample_zeros_is16os16( const ai_i16 *in_data,
|
||||
ai_i16 *out_data,
|
||||
const ai_size width_in,
|
||||
const ai_size height_in,
|
||||
const ai_float width_scale,
|
||||
const ai_float height_scale,
|
||||
const ai_size channel_in,
|
||||
const ai_size width_out,
|
||||
const ai_size height_out,
|
||||
const ai_i16 zero_s16);
|
||||
|
||||
/**
|
||||
* @brief Function to upsample in zero mode.
|
||||
* The number of output channels is the same as the number of input channels.
|
||||
* Input and output types are float.
|
||||
*
|
||||
* @param[in] in_data input data, to be upsampled
|
||||
* @param[out] out_data upsampled output data
|
||||
* @param[in] width_in input data width
|
||||
* @param[in] height_in input data height
|
||||
* @param[in] width_scale width_out/width_in scale ratio
|
||||
* @param[in] height_scale height_out/height_in scale ratio
|
||||
* @param[in] width_out output data width
|
||||
* @param[in] height_out output data height
|
||||
* @param[in] channel_in input/output channels.
|
||||
*/
|
||||
void forward_lite_upsample_zeros_if32of32( const ai_float *in_data,
|
||||
ai_float *out_data,
|
||||
const ai_size width_in,
|
||||
const ai_size height_in,
|
||||
const ai_float width_scale,
|
||||
const ai_float height_scale,
|
||||
const ai_size channel_in,
|
||||
const ai_size width_out,
|
||||
const ai_size height_out);
|
||||
|
||||
#endif /* LITE_UPSAMPLE_H */
|
||||
58
lib/stai/libstai/include/lite_upsample_generic.h
Normal file
@ -0,0 +1,58 @@
|
||||
/**
|
||||
******************************************************************************
|
||||
* @file lite_upsample_generic.h
|
||||
* @author AIS
|
||||
* @brief header file of AI platform lite upsample kernel datatypes
|
||||
******************************************************************************
|
||||
* @attention
|
||||
*
|
||||
* Copyright (c) 2021 STMicroelectronics.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This software is licensed under terms that can be found in the LICENSE file
|
||||
* in the root directory of this software component.
|
||||
* If no LICENSE file comes with this software, it is provided AS-IS.
|
||||
*
|
||||
******************************************************************************
|
||||
*/
|
||||
#ifndef LITE_UPSAMPLE_GENERIC_H
|
||||
#define LITE_UPSAMPLE_GENERIC_H
|
||||
|
||||
|
||||
#include "ai_lite_interface.h"
|
||||
|
||||
|
||||
void forward_lite_upsample_generic_nearest(const ai_u8* in_data,
|
||||
ai_u8* out_data,
|
||||
const ai_size width_in,
|
||||
const ai_size width_out,
|
||||
const ai_float width_scale,
|
||||
const ai_size height_out,
|
||||
const ai_float height_scale,
|
||||
const ai_u32 output_tensor_w_stride,
|
||||
const ai_float offset_round_coeff);
|
||||
|
||||
void forward_lite_upsample_nearest(ai_ptr in_data,
|
||||
ai_ptr out_data,
|
||||
const ai_size width_in,
|
||||
const ai_size height_in,
|
||||
const ai_float width_scale,
|
||||
const ai_float height_scale,
|
||||
const ai_size width_out,
|
||||
const ai_size height_out,
|
||||
const ai_ptr_offset stride_w,
|
||||
const ai_float offset_round_coeff);
|
||||
|
||||
void forward_lite_upsample_zeros( ai_ptr in_data,
|
||||
ai_ptr out_data,
|
||||
const ai_size width_in,
|
||||
const ai_size height_in,
|
||||
const ai_float width_scale,
|
||||
const ai_float height_scale,
|
||||
const ai_size width_out,
|
||||
const ai_size height_out,
|
||||
const ai_ptr_offset stride_ch,
|
||||
const ai_ptr_offset stride_w,
|
||||
const ai_handle p_zero_value);
|
||||
|
||||
#endif /*LITE_UPSAMPLE_GENERIC_H*/
|
||||
799
lib/stai/libstai/include/ll_aton.h
Normal file
@ -0,0 +1,799 @@
|
||||
/**
|
||||
******************************************************************************
|
||||
* @file ll_aton.h
|
||||
* @author SRA Artificial Intelligence & Embedded Architectures
|
||||
* @brief Header file of ATON LL module.
|
||||
******************************************************************************
|
||||
* @attention
|
||||
*
|
||||
* Copyright (c) 2024 STMicroelectronics.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This software is licensed under terms that can be found in the LICENSE file
|
||||
* in the root directory of this software component.
|
||||
* If no LICENSE file comes with this software, it is provided AS-IS.
|
||||
*
|
||||
******************************************************************************
|
||||
*/
|
||||
|
||||
#ifndef __LL_ATON_H
|
||||
#define __LL_ATON_H
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C"
|
||||
{
|
||||
#endif
|
||||
|
||||
#include <assert.h>
|
||||
#include <stddef.h>
|
||||
#include <stdint.h>
|
||||
|
||||
#include "ll_aton_attributes.h"
|
||||
#include "ll_aton_config.h"
|
||||
|
||||
#if (LL_ATON_PLATFORM != LL_ATON_PLAT_EC_TRACE)
|
||||
#include "ll_aton_osal.h"
|
||||
#include "ll_aton_platform.h"
|
||||
#endif // LL_ATON_PLATFORM != LL_ATON_PLAT_EC_TRACE
|
||||
|
||||
/** @defgroup ATON_LL ATON_LL_Driver
|
||||
* @{
|
||||
*/
|
||||
|
||||
/* LL ATON error codes */
|
||||
#define LL_ATON_OK (0)
|
||||
#define LL_ATON_INVALID_ID (-1)
|
||||
#define LL_ATON_INVALID_PARAM (-2)
|
||||
#define LL_ATON_TIMEOUT (-3)
|
||||
|
||||
/* this is needed to avoid some compilers (e.g. KEIL) that observe a strict semantic about conversion of
|
||||
* pointers to integers in const initializers
|
||||
*/
|
||||
typedef union
|
||||
{
|
||||
unsigned char *p;
|
||||
uintptr_t i;
|
||||
} ll_aton_pointer;
|
||||
|
||||
/* Method that translates an address from physical to virtual */
|
||||
unsigned char *LL_Address_Physical2Virtual(unsigned char *address);
|
||||
|
||||
/* Method that translates an address from virtual to physical */
|
||||
unsigned char *LL_Address_Virtual2Physical(unsigned char *address);
|
||||
|
||||
/**
|
||||
* @brief ATON User Configuration macros
|
||||
*/
|
||||
/* Set the macro below to 1 if you want to enable the generation of ATON event interrupts */
|
||||
#ifndef LL_ATON_EN_EVENT_IRQ
|
||||
#define LL_ATON_EN_EVENT_IRQ 1
|
||||
#endif
|
||||
|
||||
/* Set the macro below to 1 if you want to enable the generation of ATON configuration error interrupts */
|
||||
#ifndef LL_ATON_EN_ERROR_IRQ
|
||||
#define LL_ATON_EN_ERROR_IRQ 1
|
||||
#endif
|
||||
|
||||
/** @defgroup ATON_INIT ATON Global initialization/deinitialization functions
|
||||
* @{
|
||||
*/
|
||||
int LL_ATON_Init(void);
|
||||
int LL_ATON_DeInit(void);
|
||||
/**
|
||||
* @}
|
||||
*/
|
||||
|
||||
/**
|
||||
* @brief List of acceleration units types
|
||||
*/
|
||||
enum AccelUnitsType
|
||||
{
|
||||
STRENG = 0,
|
||||
STRENG64,
|
||||
CONVACC,
|
||||
DECUN,
|
||||
ACTIV,
|
||||
ARITH,
|
||||
POOL,
|
||||
IMC,
|
||||
RECBUF,
|
||||
};
|
||||
|
||||
typedef struct
|
||||
{
|
||||
enum AccelUnitsType unit_type;
|
||||
unsigned short unit_num;
|
||||
} AccelUnits;
|
||||
|
||||
/**
|
||||
* @brief Converts an Aton unit cardinal group id into a global one
|
||||
* @param type enum specifying the unit group
|
||||
* @param id Cardinal id of the unit in the group
|
||||
* @retval ATON Unit index
|
||||
* @todo Add boundary checks
|
||||
*/
|
||||
static inline AccelUnits LL_ATON_GetUnit_From_Cardinal_ID(enum AccelUnitsType type, int id)
|
||||
{
|
||||
return (AccelUnits){type, (unsigned short)id};
|
||||
}
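/*
 * Illustrative sketch, assuming typical usage: build the descriptor of the
 * first convolutional accelerator from its cardinal id and enable it through
 * LL_ATON_EnableUnits_Init() (declared further below in this header). Unit
 * numbers are placeholders.
 */
#include "ll_aton.h"

static int example_enable_conv0(void)
{
  const LL_ATON_EnableUnits_InitTypeDef units[] = {
    {.unit = LL_ATON_GetUnit_From_Cardinal_ID(CONVACC, 0)},
  };

  return LL_ATON_EnableUnits_Init(units, 1);
}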
|
||||
|
||||
/**
|
||||
* @brief Activation Unit function types
|
||||
*/
|
||||
typedef enum
|
||||
{
|
||||
ACTIV_RELU = 1,
|
||||
ACTIV_PRELU,
|
||||
ACTIV_TRELU,
|
||||
ACTIV_FUNC,
|
||||
ACTIV_LUT
|
||||
} LL_Activacc_Op;
|
||||
|
||||
/**
|
||||
* @brief Activation unit Accelerator configuration structure
|
||||
*/
|
||||
typedef struct
|
||||
{
|
||||
unsigned rounding_f : 1; /**< Input feature data rounding control: 1=enable,0=disable */
|
||||
unsigned saturation_f : 1; /**< Input feature data saturation control: 1=enable,0=disable */
|
||||
unsigned round_mode_f : 2; /**< Input feature round mode */
|
||||
unsigned inbytes_f : 2; /**< Input data width in bytes. Valid values are 1, 2 or 3 bytes */
|
||||
unsigned outbytes_f : 2; /**< Input feature output bytes after shift. Valid values are 1 or 2 bytes */
|
||||
unsigned rounding_o : 1; /**< Output rounding control, 1=enable,0=disable */
|
||||
unsigned saturation_o : 1; /**< Output saturation control,1=enable,0=disable */
|
||||
unsigned round_mode_o : 1; /**< Output rounding mode */
|
||||
unsigned relu_mode_o : 1; /**< Apply Relu operation before rounding */
|
||||
unsigned outbytes_o : 2; /**< Number of output bytes: 1, 2 or 3 */
|
||||
unsigned signedop : 1; /**< Signed/unsigned activations: 0: unsigned activations, 1 signed */
|
||||
unsigned char shift_f; /**< Input feature data shift. Negative values represent left shifts */
|
||||
unsigned char shift_o; /**< Optional right shift to be applied to the function evaluator final result */
|
||||
unsigned parameter; /**< ReLU parameter */
|
||||
unsigned parameter_2; /**< Zero offset for TRELU operation for use in scale/offset integer arithmetic.
|
||||
* Needs zp alignment */
|
||||
unsigned nbytes; /**< Number of bytes of input data */
|
||||
ll_aton_pointer ROM0_vector; /**< Address of ROM0 coefficients table */
|
||||
ll_aton_pointer ROM1_vector; /**< Address of ROM1 coefficients table */
|
||||
ll_aton_pointer LUT_vector; /**< Address of LUT coefficients table */
|
||||
unsigned ROM0_nbytes; /**< Length of ROM0 table */
|
||||
unsigned ROM1_nbytes; /**< Length of ROM1 table */
|
||||
unsigned char shift_b; /**< Optional left shift to be applied to coefficient B */
|
||||
unsigned char shift_c; /**< Optional left shift to be applied to coefficient C */
|
||||
unsigned char shift_norm; /**< Function input range normalization left shift parameter */
|
||||
unsigned char bwidth; /**< Number of MSB bits of the input activation to be used to address ROM0.
|
||||
* This field configures the number of outer segments (max outer segments = 32).
|
||||
* Valid values range = 0,1,2,3,4 and 5 corresponding to 1,2,4,8,16 and 32 outer segment(s)
|
||||
* respectively */
|
||||
int fsub; /**< Feature data subtract value */
|
||||
LL_Activacc_Op operation; /**< Activation type. See LL_Activacc_Op */
|
||||
} LL_Activacc_InitTypeDef;
|
||||
|
||||
/** @defgroup LL_ACTIV Activation unit configuration functions
|
||||
* @{
|
||||
*/
|
||||
int LL_Activacc_Init(int id, const LL_Activacc_InitTypeDef *Activacc_InitStruct);
|
||||
/**
|
||||
* @}
|
||||
*/
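/*
 * Illustrative sketch: configuring activation unit 0 for a plain ReLU on
 * signed 8-bit features. The chosen field values are assumptions made for the
 * example, not recommended settings.
 */
#include "ll_aton.h"

static int example_activ_relu(void)
{
  LL_Activacc_InitTypeDef cfg = {0};

  cfg.operation    = ACTIV_RELU;
  cfg.signedop     = 1;  /* signed activations */
  cfg.inbytes_f    = 1;  /* 8-bit input features */
  cfg.outbytes_o   = 1;  /* 8-bit output */
  cfg.saturation_o = 1;  /* saturate the result */
  cfg.nbytes       = 1;  /* number of bytes of input data */

  return LL_Activacc_Init(0, &cfg); /* 0 = first activation unit */
}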
|
||||
|
||||
/**
|
||||
* @brief Arithmetic unit operations
|
||||
*/
|
||||
typedef enum
|
||||
{
|
||||
ARITH_AFFINE = 1,
|
||||
ARITH_MIN,
|
||||
ARITH_MAX,
|
||||
ARITH_MUL,
|
||||
ARITH_X_AND_Y,
|
||||
ARITH_X_OR_Y,
|
||||
ARITH_NOT_X,
|
||||
ARITH_X_XOR_Y,
|
||||
ARITH_X_EQ_Y,
|
||||
ARITH_X_LT_Y,
|
||||
ARITH_X_LE_Y,
|
||||
ARITH_X_GT_Y,
|
||||
ARITH_X_GE_Y,
|
||||
ARITH_ABS_X,
|
||||
ARITH_SIGN_X,
|
||||
ARITH_CLIP
|
||||
} LL_Arithacc_Op;
|
||||
|
||||
/**
|
||||
* @brief Arithmetic constant broadcast modes
|
||||
*/
|
||||
typedef enum
|
||||
{
|
||||
ARITH_BCAST_NONE,
|
||||
ARITH_BCAST_CHAN,
|
||||
ARITH_BCAST_HEIGHT,
|
||||
ARITH_BCAST_WIDTH,
|
||||
ARITH_BCAST_HEIGHT_WIDTH,
|
||||
ARITH_BCAST_SCALAR,
|
||||
} LL_Arithacc_Bcast;
|
||||
|
||||
/**
|
||||
* @brief Arithmetic unit Accelerator configuration structure
|
||||
*/
|
||||
typedef struct
|
||||
{
|
||||
unsigned rounding_x : 1; /**< Input feature data rounding for stream X */
|
||||
unsigned saturation_x : 1; /**< Input feature data saturation for stream X */
|
||||
unsigned round_mode_x : 2; /**< Input feature data rounding mode for stream X */
|
||||
unsigned inbytes_x : 2; /**< Input data width in bytes for stream X. Valid values are 1, 2 or 3 bytes */
|
||||
unsigned outbytes_x : 2; /**< Number of output bytes to use for input feature data of stream X after rounding or
|
||||
* saturation. Valid values are 1 or 2 bytes */
|
||||
signed char shift_x; /**< Input feature data shift for stream X. Use negative values for left shift */
|
||||
unsigned rounding_y : 1; /**< Input feature data rounding for stream Y */
|
||||
unsigned saturation_y : 1; /**< Input feature data saturation for stream Y */
|
||||
unsigned round_mode_y : 2; /**< Input feature data rounding mode for stream Y */
|
||||
unsigned inbytes_y : 2; /**< Input data width in bytes for stream Y. Valid values are 1, 2 or 3 bytes */
|
||||
unsigned outbytes_y : 2; /**< Number of output bytes to use for input feature data of stream Y after rounding or
|
||||
* saturation. Valid values are 1 or 2 bytes */
|
||||
unsigned combinebc : 1; /**< Combine coeff B and C to form a 32b coeff BC = {B[15:0],C[15:0]} */
|
||||
unsigned clipout : 1; /**< Controls output clipping to range specified by clip range configuration, 1=enable,
|
||||
* 0=disable */
|
||||
signed char shift_y; /**< Input feature data shift for stream Y. Use negative values for left shift */
|
||||
unsigned rounding_o : 1; /**< Rounding control, 1=enable, 0=disable */
|
||||
unsigned saturation_o : 1; /**< Saturation control, 1=enable, 0=disable */
|
||||
unsigned round_mode_o : 1; /**< Output rounding mode control */
|
||||
unsigned relu_mode_o : 1; /**< Apply Relu operation before rounding */
|
||||
unsigned outbytes_o : 2; /**< Number of output bytes: 1 or 2 */
|
||||
unsigned char shift_o; /**< Optional right shift to apply to final result of operation */
|
||||
unsigned scalar : 1; /**< Set Scalar/Vector mode */
|
||||
unsigned dualinput : 1; /**< Dual input control, 1=both X,Y streams valid, 0=only X stream is valid */
|
||||
LL_Arithacc_Op operation; /**< Arithmetic operation to be applied. See LL_Arithacc_Op */
|
||||
LL_Arithacc_Bcast bcast; /**< Set constant broadcast modes. See LL_Arithacc_Bcast */
|
||||
unsigned char Ax_shift; /**< Optional right shift to result of Ax */
|
||||
unsigned char By_shift; /**< Optional right shift to result of By */
|
||||
unsigned char C_shift; /**< Optional left shift to apply to C */
|
||||
unsigned fWidth; /**< Feature width */
|
||||
unsigned fHeight; /**< Feature height */
|
||||
unsigned short fChannels; /**< Number of feature channels */
|
||||
unsigned short batchDepth; /**< Batch depth */
|
||||
short clipmin; /**< Signed 16b value specifying output clip min */
|
||||
short clipmax; /**< Signed 16b value specifying output clip max */
|
||||
short A_scalar; /**< Scalar coefficient A */
|
||||
short B_scalar; /**< Scalar coefficient B */
|
||||
short C_scalar; /**< Scalar coefficient C */
|
||||
ll_aton_pointer A_vector; /**< Address of A vector table */
|
||||
ll_aton_pointer B_vector; /**< Address of B vector table */
|
||||
ll_aton_pointer C_vector; /**< Address of C vector table */
|
||||
unsigned char vec_precision[3]; /**< Number of bits for A, B and C vectors */
|
||||
} LL_Arithacc_InitTypeDef;
|
||||
|
||||
/** @defgroup LL_ARITH Arithmetic Unit configuration functions
|
||||
* @{
|
||||
*/
|
||||
int LL_Arithacc_Init(int id, const LL_Arithacc_InitTypeDef *Arithacc_InitStruct);
|
||||
/**
|
||||
* @}
|
||||
*/
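/*
 * Illustrative sketch: elementwise multiplication of two 8-bit feature streams
 * on arithmetic unit 0. The tensor geometry is an assumption for the example.
 */
#include "ll_aton.h"

static int example_arith_mul(void)
{
  LL_Arithacc_InitTypeDef cfg = {0};

  cfg.operation    = ARITH_MUL;
  cfg.dualinput    = 1;  /* both X and Y streams carry data */
  cfg.inbytes_x    = 1;
  cfg.inbytes_y    = 1;
  cfg.outbytes_o   = 1;
  cfg.saturation_o = 1;
  cfg.fWidth       = 32; /* assumed feature width */
  cfg.fHeight      = 32; /* assumed feature height */
  cfg.fChannels    = 8;  /* assumed channel count */
  cfg.batchDepth   = 1;

  return LL_Arithacc_Init(0, &cfg);
}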
|
||||
|
||||
typedef enum
|
||||
{
|
||||
AFILT_MODE_NONE = 0,
|
||||
AFILT_MODE_PIXELDROP = 1,
|
||||
AFILT_MODE_FRAMEDROP = 2,
|
||||
AFILT_MODE_FRAMEZERO = 3
|
||||
} LL_Convacc_Afilt_Mode;
|
||||
|
||||
/**
|
||||
* @brief Convolutional Accelerator configuration structure
|
||||
*/
|
||||
typedef struct
|
||||
{
|
||||
unsigned rounding_f : 1; /**< Input feature data rounding */
|
||||
unsigned saturation_f : 1; /**< Input feature data saturation */
|
||||
unsigned round_mode_f : 2; /**< Output data rounding mode */
|
||||
unsigned inbytes_f : 2; /**< Input data width in bytes */
|
||||
unsigned rounding_o : 1; /**< Output data rounding after right shift */
|
||||
unsigned saturation_o : 1; /**< Output saturation */
|
||||
unsigned round_mode_o : 1; /**< Output data rounding mode */
|
||||
unsigned relu_mode_o : 1; /**< Apply Relu operation before rounding */
|
||||
unsigned outbytes_o : 2; /**< Output data width in bytes */
|
||||
unsigned simd : 2; /**< Enable 8x8bit (1) or 16x8bit (2) SIMD mode */
|
||||
unsigned accumulate : 1; /**< Sum and synchronize with stream link input #2 */
|
||||
unsigned accumulate_first : 1; /**< No sum and synchronization with stream link input #2 for the first frame */
|
||||
unsigned accumulate_gen_first : 1; /**< Generate first accumulator input frame internally */
|
||||
unsigned fstat : 1; /**< Feature data stationary */
|
||||
unsigned raw_o : 1; /**< Use RAW file output format */
|
||||
unsigned kt1_mode : 1; /**< Load kernel from T1 buffer */
|
||||
unsigned deepmode : 1; /**< Enable Deep1x1 optimized mode */
|
||||
unsigned dss2mode : 1; /**< Enable DSS2 (depth separable stride 2) optimized mode */
|
||||
unsigned f_unsigned : 1; /**< Feature data unsigned */
|
||||
unsigned k_unsigned : 1; /**< Kernel data unsigned */
|
||||
unsigned kseten : 2; /**< Enable kernel set 0 (bit 0) or 1 (bit 1) if KT1 is 1,
|
||||
* otherwise select byte 1 (0), byte 2 (1), byte 3 (2) or
|
||||
* all bytes (Deep1x1 mode only) (3) of kernel stream in SIMD mode */
|
||||
unsigned char shift_f; /**< Input feature data shift */
|
||||
unsigned char shift_a; /**< Accumulator data input signed left shift */
|
||||
unsigned char shift_o; /**< Result data output signed right shift */
|
||||
unsigned fWidth; /**< Feature data width */
|
||||
unsigned fHeight; /**< Feature data height */
|
||||
unsigned char kernelWidth; /**< Kernel width */
|
||||
unsigned char kernelHeight; /**< Kernel height */
|
||||
unsigned char nKernels; /**< Total number of parallel kernels */
|
||||
unsigned short batchDepth; /**< Batch Depth */
|
||||
unsigned char hstride; /**< Horizontal stride */
|
||||
unsigned char vstride; /**< Vertical stride */
|
||||
unsigned short left_padding; /**< Number of vertical left dummy columns */
|
||||
unsigned short right_padding; /**< Number of vertical right dummy columns */
|
||||
unsigned short top_padding; /**< Number of horizontal top dummy lines */
|
||||
unsigned short bot_padding; /**< Number of horizontal bottom dummy lines */
|
||||
unsigned short left_crop; /**< Left feature data boundary */
|
||||
unsigned short right_crop; /**< Right feature data boundary */
|
||||
unsigned short top_crop; /**< Top feature data boundary */
|
||||
unsigned short bot_crop; /**< Bottom feature data boundary */
|
||||
unsigned short fstatcnt; /**< Number of frames before next reload of feature stationary frame */
|
||||
LL_Convacc_Afilt_Mode afilt_mode; /**< Accumulator port filter mode. See LL_Convacc_Afilt_Mode */
|
||||
unsigned char afilt_tot; /**< Total number of accumulation tensors */
|
||||
unsigned char afilt_first; /**< First accumulation tensor */
|
||||
unsigned char afilt_last; /**< Last accumulation tensor */
|
||||
unsigned char kfilt_tot; /**< Total number of kernels */
|
||||
unsigned char kfilt_first; /**< First kernel */
|
||||
unsigned char kfilt_last; /**< Last kernel */
|
||||
int fsub; /**< Feature data subtract value */
|
||||
short zfbias; /**< Bias added to zero frames */
|
||||
} LL_Convacc_InitTypeDef;
|
||||
|
||||
/** @defgroup LL_CONVACC Convolutional accelerator unit configuration functions
|
||||
* @{
|
||||
*/
|
||||
int LL_Convacc_Init(int id, const LL_Convacc_InitTypeDef *Convacc_InitStruct);
|
||||
/**
|
||||
* @}
|
||||
*/
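/*
 * Illustrative sketch: a 3x3, stride-1 convolution with one line/column of
 * padding on each side, signed 8-bit features and kernels. All sizes are
 * placeholders chosen for the example.
 */
#include "ll_aton.h"

static int example_conv_3x3(void)
{
  LL_Convacc_InitTypeDef cfg = {0};

  cfg.inbytes_f     = 1;
  cfg.outbytes_o    = 1;
  cfg.saturation_o  = 1;
  cfg.fWidth        = 64;
  cfg.fHeight       = 64;
  cfg.kernelWidth   = 3;
  cfg.kernelHeight  = 3;
  cfg.nKernels      = 8;
  cfg.batchDepth    = 1;
  cfg.hstride       = 1;
  cfg.vstride       = 1;
  cfg.left_padding  = 1;
  cfg.right_padding = 1;
  cfg.top_padding   = 1;
  cfg.bot_padding   = 1;

  return LL_Convacc_Init(0, &cfg);
}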
|
||||
|
||||
/**
|
||||
* @brief Pooling Acceleration supported operations
|
||||
*/
|
||||
typedef enum
|
||||
{
|
||||
POOL_MAX = 1,
|
||||
POOL_MIN,
|
||||
POOL_AVG,
|
||||
POOL_GMAX,
|
||||
POOL_GMIN,
|
||||
POOL_GAVG
|
||||
} LL_Poolacc_Op;
|
||||
|
||||
/**
|
||||
* @brief Pooling Accelerator configuration structure
|
||||
*/
|
||||
typedef struct
|
||||
{
|
||||
LL_Poolacc_Op operation; /**< Pooling operation type. See LL_Poolacc_Op */
|
||||
unsigned avgnopad : 1; /**< Average pooling operation without padding */
|
||||
unsigned short inputX; /**< Size X of the input feature */
|
||||
unsigned short inputY; /**< Size Y of the input feature */
|
||||
unsigned short outputX; /**< Size X of the output data */
|
||||
unsigned short outputY; /**< Size Y of the output data */
|
||||
unsigned char poolWinX; /**< Size X of pooling window */
|
||||
unsigned char poolWinY; /**< Size Y of pooling window */
|
||||
unsigned char strideX; /**< Stride value in X direction */
|
||||
unsigned char strideY; /**< Stride value in Y direction */
|
||||
unsigned short topCrop; /**< Top cropping size */
|
||||
unsigned short bottomCrop; /**< Bottom cropping size */
|
||||
unsigned short leftCrop; /**< Left cropping size */
|
||||
unsigned short rightCrop; /**< Right cropping size */
|
||||
unsigned short topPad; /**< Top padding size */
|
||||
unsigned short bottomPad; /**< Bottom padding size */
|
||||
unsigned short leftPad; /**< Left padding size */
|
||||
unsigned short rightPad; /**< Right padding size */
|
||||
unsigned short batchSize; /**< Batch size */
|
||||
unsigned char shift_f; /**< Input feature data shift */
|
||||
unsigned char shift_o; /**< Optional right shift to apply to the average pooling output */
|
||||
unsigned dualLine : 1; /**< Enable dual line, allows each linebuffer line to work as 2 lines,
|
||||
* applicable for 8-bit data */
|
||||
unsigned nbytes : 2; /**< input data number of bytes */
|
||||
unsigned rounding_f : 1; /**< Input feature data rounding */
|
||||
unsigned saturation_f : 1; /**< Input feature data saturation */
|
||||
unsigned round_mode_f : 2; /**< Rounding mode to apply to input feature data */
|
||||
unsigned inbytes_f : 2; /**< Input data width in bytes. Valid values are 1, 2 or 3 bytes */
|
||||
unsigned outbytes_f : 2; /**< Number of output bytes to use for final result after rounding or saturation.
|
||||
* Valid values are 1 or 2 bytes */
|
||||
unsigned rounding_o : 1; /**< Enable output rounding using round-to-nearest (round up)
|
||||
* (applicable to average pooling operations) */
|
||||
unsigned saturation_o : 1; /**< Enable output saturation (applicable to average pooling operations) */
|
||||
unsigned round_mode_o : 1; /**< Rounding mode to apply to output feature data */
|
||||
unsigned relu_mode_o : 1; /**< Apply Relu operation before rounding */
|
||||
unsigned outbytes_o : 2; /**< Number of output bytes to use for final result after rounding or saturation.
|
||||
* Valid values are 1 or 2 bytes */
|
||||
short mulval; /**< Constant to be multiplied with the accumulated sum of the pooling window.
|
||||
* For average operation, it represents the reciprocal of the divisor in 16-bit fixed point.
|
||||
* The average is computed by multiplying this constant with the accumulated sum and then applying the
|
||||
* relevant right shift at the output. (Applicable to average pooling operations) */
|
||||
unsigned pad_val_en : 1; /**< Enable padding value */
|
||||
short pad_val; /**< Padding value to be used for padding operation */
|
||||
} LL_Poolacc_InitTypeDef;
|
||||
|
||||
/**
|
||||
* @brief Epoch Controller configuration structure
|
||||
*/
|
||||
typedef struct
|
||||
{
|
||||
uint32_t blobaddr; /**< Blob code start address. Must be 8 byte aligned */
|
||||
unsigned stepmode : 1; /**< Enable step mode. Used for debugging purposes */
|
||||
} LL_EpochCtrl_InitTypeDef;
|
||||
|
||||
/** @defgroup LL_POOL Pooling unit configuration functions
|
||||
* @{
|
||||
*/
|
||||
int LL_Poolacc_Init(int id, const LL_Poolacc_InitTypeDef *conf);
|
||||
/**
|
||||
* @}
|
||||
*/
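/*
 * Illustrative sketch: a 2x2, stride-2 max pooling of an assumed 32x32 input
 * producing a 16x16 output on pooling unit 0. Geometry values are assumptions
 * for the example.
 */
#include "ll_aton.h"

static int example_pool_max_2x2(void)
{
  LL_Poolacc_InitTypeDef cfg = {0};

  cfg.operation = POOL_MAX;
  cfg.inputX    = 32;
  cfg.inputY    = 32;
  cfg.outputX   = 16;
  cfg.outputY   = 16;
  cfg.poolWinX  = 2;
  cfg.poolWinY  = 2;
  cfg.strideX   = 2;
  cfg.strideY   = 2;
  cfg.batchSize = 1;
  cfg.nbytes    = 1; /* 8-bit input data */

  return LL_Poolacc_Init(0, &cfg);
}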
|
||||
|
||||
/**
|
||||
* @brief Streaming engine configuration structure
|
||||
*/
|
||||
typedef struct
|
||||
{
|
||||
unsigned dir : 1; /**< Stream Direction: 0 input, 1 output */
|
||||
unsigned raw : 1; /**< Set RAW mode (1) or raster mode (0) */
|
||||
unsigned raw_out : 1; /**< Force RAW output (bus to stream only)
|
||||
* even if the engine is programmed in raster mode */
|
||||
unsigned continuous : 1; /**< Do not restart address pointer at end of frame */
|
||||
unsigned noblk : 1; /**< Do not use blocks wider than the native bus size */
|
||||
unsigned noinc : 1; /**< Do not increment address */
|
||||
unsigned align_right : 1; /**< Alignment for data on switch (default left) */
|
||||
unsigned mem_lsb : 1; /**< For when nbits_in != nbits_out to decide which bits are read/written
|
||||
* (default msb) */
|
||||
unsigned sync_with_other : 1; /**< Enable synchronizations signals between engines */
|
||||
unsigned nbits_unsigned : 1; /**< Disable sign extension */
|
||||
unsigned bus_cid : 3; /**< Set Compartment ID cache attribute */
|
||||
unsigned cacheable : 1; /**< Set cacheable bus attribute */
|
||||
unsigned cache_allocate : 1; /**< Set cache allocate bus attribute */
|
||||
unsigned bus_pfetch : 1; /**< Enable bus prefetch */
|
||||
unsigned cache_linesize : 2; /**< Cache Line size: 0 -> 64B, 1 -> 128B, 2 -> 256B, 3 -> 512B */
|
||||
unsigned cipher_en : 1; /**< Enable ciphering: 0 -> disable, 1-> enable */
|
||||
unsigned key_sel : 1; /**< Bus Interface key to be used for ciphering (0, 1) */
|
||||
unsigned char sync_dma; /**< Synchronization signals source engine */
|
||||
ll_aton_pointer addr_base; /**< Source/Destination base address */
|
||||
unsigned offset_start; /**< Offset of the Source/Destination start address from the base address */
|
||||
unsigned offset_end; /**< Offset of the Source/Destination end address from the base address */
|
||||
unsigned offset_limit; /**< Offset of the Stream engine address limit from the base address.
|
||||
* Used to prevent prefetch beyond memory boundaries */
|
||||
unsigned frame_count; /**< Number of frames to transfer */
|
||||
unsigned fwidth; /**< Frame width (pixel per line) */
|
||||
unsigned fheight; /**< Frame height (number of lines) */
|
||||
unsigned batch_depth; /**< Batch depth (subpix per pixel) */
|
||||
unsigned batch_offset; /**< Offset (bytes) between batches */
|
||||
unsigned frame_offset; /**< Offset between multiple frames within frame repetition loop */
|
||||
unsigned line_offset; /**< Offset between lines within a frame.
|
||||
* If set to zero it's derived from width and batch_offset */
|
||||
unsigned loop_offset; /**< Offset between frame repetition loops */
|
||||
unsigned frame_loop_cnt; /**< Number of frames to loop */
|
||||
unsigned frame_tot_cnt; /**< Frame limit */
|
||||
unsigned char nbits_in; /**< Data size in bits if reading */
|
||||
unsigned char nbits_out; /**< Data size in bits if writing */
|
||||
} LL_Streng_TensorInitTypeDef;
|
||||
|
||||
static inline unsigned char *LL_Streng_addr_start(const LL_Streng_TensorInitTypeDef *conf)
|
||||
{
|
||||
return conf->addr_base.p + conf->offset_start;
|
||||
}
|
||||
|
||||
static inline unsigned char *LL_Streng_addr_end(const LL_Streng_TensorInitTypeDef *conf)
|
||||
{
|
||||
return conf->addr_base.p + conf->offset_end;
|
||||
}
|
||||
|
||||
static inline unsigned char *LL_Streng_addr_limit(const LL_Streng_TensorInitTypeDef *conf)
|
||||
{
|
||||
return conf->addr_base.p + conf->offset_limit;
|
||||
}
|
||||
|
||||
static inline uint32_t LL_Streng_len(const LL_Streng_TensorInitTypeDef *conf)
|
||||
{
|
||||
return conf->offset_end - conf->offset_start;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Streaming engine External Sync configuration structure
|
||||
*/
|
||||
typedef struct
|
||||
{
|
||||
unsigned int enable : 1; /**< Enable/disable external sync feature (0, 1) */
|
||||
unsigned int trig_source : 4; /**< Trigger source signal ID [0..3] */
|
||||
unsigned int lines; /**< Number of lines associated to each trigger rising edge */
|
||||
unsigned int lines_offset; /**< Number of lines after which the special offset will be applied */
|
||||
unsigned int offset; /**< Special line offset */
|
||||
} LL_Streng_ExtSyncTypedef;
|
||||
|
||||
/** @defgroup LL_STRENG Streaming Engine configuration and operation functions
|
||||
* @{
|
||||
*/
|
||||
int LL_Streng_TensorInit(int id, const LL_Streng_TensorInitTypeDef *, int n);
|
||||
int LL_Streng_ExtSyncInit(int id, LL_Streng_ExtSyncTypedef *);
|
||||
int LL_Streng_Wait(uint32_t mask);
|
||||
/**
|
||||
* @}
|
||||
*/
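/*
 * Illustrative sketch: descriptor for streaming engine 0 reading one 8-bit
 * 32x32x3 raster frame from a user buffer. Buffer, engine id and geometry are
 * assumptions for the example.
 */
#include "ll_aton.h"

static int example_streng_input(unsigned char *buf, unsigned buf_size)
{
  LL_Streng_TensorInitTypeDef cfg = {0};

  cfg.dir          = 0;            /* 0 = input stream (memory to switch) */
  cfg.addr_base.p  = buf;
  cfg.offset_start = 0;
  cfg.offset_end   = 32 * 32 * 3;  /* one frame of 8-bit data */
  cfg.offset_limit = buf_size;     /* guards against prefetch beyond the pool */
  cfg.frame_count  = 1;
  cfg.fwidth       = 32;
  cfg.fheight      = 32;
  cfg.batch_depth  = 3;
  cfg.nbits_in     = 8;
  cfg.nbits_out    = 8;

  /* LL_Streng_addr_start(&cfg) now evaluates to buf + 0 */
  return LL_Streng_TensorInit(0, &cfg, 1);
}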
|
||||
|
||||
/** @defgroup LL_BUSIF Bus Interface configuration functions
|
||||
* @{
|
||||
*/
|
||||
int LL_Busif_SetKeys(int id, int key, uint64_t key_low, uint64_t key_hi);
|
||||
/**
|
||||
* @}
|
||||
*/
|
||||
|
||||
enum SwitchUnitsType
|
||||
{
|
||||
STRSWITCH = 0,
|
||||
STRSWITCH64,
|
||||
STRSWITCH_VC,
|
||||
};
|
||||
|
||||
extern unsigned __atonn_getSrcPortID(enum SwitchUnitsType sut, unsigned char su_num, enum AccelUnitsType aut,
|
||||
unsigned char au_num, unsigned char port);
|
||||
extern unsigned __atonn_getDstPortID(enum SwitchUnitsType sut, unsigned char su_num, enum AccelUnitsType aut,
|
||||
unsigned char au_num, unsigned char port);
|
||||
|
||||
#if (LL_ATON_PLATFORM != LL_ATON_PLAT_EC_TRACE)
|
||||
|
||||
typedef unsigned int SourcePort;
|
||||
#define ATONN_SRCPORT(S, J, U, I, P) ATON_##S##_##J##_LINK_##U##_##I##_##P
|
||||
// Convert SourcePort in ID needed to configure HW
|
||||
#define ATONN_SRCPORT_ID(S) (S)
|
||||
|
||||
typedef unsigned int DestPort;
|
||||
#define ATONN_DSTPORT(S, J, U, I, P) ATON_##S##_DST_OFFSET(J, ATON_##S##_##J##_DST##U##_##I##_##P##_IDX)
|
||||
|
||||
// Convert DestPort in ID needed to configure HW
|
||||
#define ATONN_DSTPORT_ID(D) (D)
|
||||
|
||||
#else
|
||||
|
||||
typedef struct
|
||||
{
|
||||
enum SwitchUnitsType s;
|
||||
unsigned char s_num;
|
||||
enum AccelUnitsType u;
|
||||
unsigned char u_num;
|
||||
unsigned char port;
|
||||
} SourcePort;
|
||||
#define ATONN_SRCPORT(S, J, U, I, P) \
|
||||
{ \
|
||||
.s = S, .s_num = J, .u = U, .u_num = I, .port = P \
|
||||
}
|
||||
static inline unsigned _atonn_getSrcPortID(SourcePort s)
|
||||
{
|
||||
return __atonn_getSrcPortID(s.s, s.s_num, s.u, s.u_num, s.port);
|
||||
}
|
||||
#define ATONN_SRCPORT_ID(S) _atonn_getSrcPortID(S)
|
||||
|
||||
typedef struct
|
||||
{
|
||||
enum SwitchUnitsType s;
|
||||
unsigned char s_num;
|
||||
enum AccelUnitsType u;
|
||||
unsigned char u_num;
|
||||
unsigned char port;
|
||||
} DestPort;
|
||||
#define ATONN_DSTPORT(S, J, U, I, P) \
|
||||
{ \
|
||||
.s = S, .s_num = J, .u = U, .u_num = I, .port = P \
|
||||
}
|
||||
static inline unsigned _atonn_getDstPortID(DestPort d)
|
||||
{
|
||||
return __atonn_getDstPortID(d.s, d.s_num, d.u, d.u_num, d.port);
|
||||
}
|
||||
#define ATONN_DSTPORT_ID(D) _atonn_getDstPortID(D)
|
||||
|
||||
#endif
|
||||
|
||||
/**
|
||||
* @brief Stream Switch source ports identifiers
|
||||
*/
|
||||
#define STRENG_SRC(I, P) ATONN_SRCPORT(STRSWITCH, 0, STRENG, I, P)
|
||||
#define CONVACC_SRC(I, P) ATONN_SRCPORT(STRSWITCH, 0, CONVACC, I, P)
|
||||
#define DECUN_SRC(I, P) ATONN_SRCPORT(STRSWITCH, 0, DECUN, I, P)
|
||||
#define ACTIV_SRC(I, P) ATONN_SRCPORT(STRSWITCH, 0, ACTIV, I, P)
|
||||
#define ARITH_SRC(I, P) ATONN_SRCPORT(STRSWITCH, 0, ARITH, I, P)
|
||||
#define POOL_SRC(I, P) ATONN_SRCPORT(STRSWITCH, 0, POOL, I, P)
|
||||
#define RECBUF_SRC(I, P) ATONN_SRCPORT(STRSWITCH, 0, RECBUF, I, P)
|
||||
|
||||
/**
|
||||
* @brief Stream Switch destination ports identifiers
|
||||
*/
|
||||
#define STRENG_DST(I, P) ATONN_DSTPORT(STRSWITCH, 0, STRENG, I, P)
|
||||
#define CONVACC_DST(I, P) ATONN_DSTPORT(STRSWITCH, 0, CONVACC, I, P)
|
||||
#define DECUN_DST(I, P) ATONN_DSTPORT(STRSWITCH, 0, DECUN, I, P)
|
||||
#define ACTIV_DST(I, P) ATONN_DSTPORT(STRSWITCH, 0, ACTIV, I, P)
|
||||
#define ARITH_DST(I, P) ATONN_DSTPORT(STRSWITCH, 0, ARITH, I, P)
|
||||
#define POOL_DST(I, P) ATONN_DSTPORT(STRSWITCH, 0, POOL, I, P)
|
||||
#define RECBUF_DST(I, P) ATONN_DSTPORT(STRSWITCH, 0, RECBUF, I, P)
|
||||
|
||||
/**
|
||||
* @brief Streaming switch configuration structure
|
||||
*/
|
||||
#define ATON_SWITCH_CONTEXT_NUM 2
|
||||
|
||||
#if ATON_SWITCH_CONTEXT_NUM == 2
|
||||
#define LL_Switch_Init_Dest() .dest
|
||||
#define LL_Switch_Init_Source(x) .source##x
|
||||
#define LL_Switch_Init_Context(x) .context##x
|
||||
#define LL_Switch_Init_Frames(x) .frames##x
|
||||
typedef struct
|
||||
{
|
||||
SourcePort source0; /**< Must be one of SourcePort */
|
||||
SourcePort source1; /**< Must be one of SourcePort */
|
||||
DestPort dest; /**< Must be one of DestPort */
|
||||
unsigned char frames0;
|
||||
unsigned char frames1;
|
||||
unsigned context0 : 1;
|
||||
unsigned context1 : 1;
|
||||
} LL_Switch_InitTypeDef;
|
||||
#else
|
||||
#define LL_Switch_Init_Dest() .dest
|
||||
#define LL_Switch_Init_Source(x) .source[x]
|
||||
#define LL_Switch_Init_Context(x) .context[x]
|
||||
#define LL_Switch_Init_Frames(x) .frames[x]
|
||||
typedef struct
|
||||
{
|
||||
SourcePort source[ATON_SWITCH_CONTEXT_NUM]; /**< Must be one of SourcePort */
|
||||
DestPort dest; /**< Must be one of DestPort */
|
||||
unsigned char context[ATON_SWITCH_CONTEXT_NUM];
|
||||
unsigned char frames[ATON_SWITCH_CONTEXT_NUM];
|
||||
} LL_Switch_InitTypeDef;
|
||||
#endif
|
||||
|
||||
typedef LL_Switch_InitTypeDef LL_Switch_DeinitTypeDef;
|
||||
|
||||
/**
|
||||
* @brief Streaming switch with virtual channels configuration structure
|
||||
*/
|
||||
|
||||
#define LL_SwitchVC_Init_Dest() .dest
|
||||
#define LL_SwitchVC_Init_Source() .source
|
||||
typedef struct
|
||||
{
|
||||
SourcePort source; /**< Must be one of SourcePort */
|
||||
DestPort dest; /**< Must be one of DestPort */
|
||||
} LL_SwitchVC_InitTypeDef;
|
||||
|
||||
typedef LL_SwitchVC_InitTypeDef LL_SwitchVC_DeinitTypeDef;
|
||||
|
||||
/** @defgroup STRSWTCH_VC Streaming Switch with virtual channels connection/disconnection functions
|
||||
* @{
|
||||
*/
|
||||
int LL_SwitchVC_Init_NoReset(const LL_SwitchVC_InitTypeDef *LL_SwitchVC_InitStruct, int n);
|
||||
int LL_SwitchVC_Init(const LL_SwitchVC_InitTypeDef *LL_SwitchVC_InitStruct, int n);
|
||||
int LL_SwitchVC_Deinit(const LL_SwitchVC_DeinitTypeDef *LL_SwitchVC_DenitStruct, int n);
|
||||
int LL_SwitchVC_Deinit_Fine_Grained(const LL_SwitchVC_DeinitTypeDef *LL_SwitchVC_DenitStruct, int n);
|
||||
/**
|
||||
* @}
|
||||
*/
|
||||
|
||||
/** @defgroup STRSWTCH Streaming Switch connection/disconnection functions
|
||||
* @{
|
||||
*/
|
||||
int LL_Switch_Init_NoReset(const LL_Switch_InitTypeDef *LL_Switch_InitStruct, int n);
|
||||
int LL_Switch_Init(const LL_Switch_InitTypeDef *LL_Switch_InitStruct, int n);
|
||||
int LL_Switch_Deinit(const LL_Switch_DeinitTypeDef *LL_Switch_DenitStruct, int n);
|
||||
int LL_Switch_Deinit_Fine_Grained(const LL_Switch_DeinitTypeDef *LL_Switch_DenitStruct, int n);
|
||||
/**
|
||||
* @}
|
||||
*/
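/*
 * Illustrative sketch, assuming the platform headers provide the referenced
 * ports: route streaming engine 0 to input port 0 of convolutional
 * accelerator 0 on context 0, using the initializer macros defined above.
 * Unit and port numbers are placeholders.
 */
#include "ll_aton.h"

static int example_connect_streng_to_conv(void)
{
  const LL_Switch_InitTypeDef route[] = {
    {
      LL_Switch_Init_Dest()     = CONVACC_DST(0, 0),
      LL_Switch_Init_Source(0)  = STRENG_SRC(0, 0),
      LL_Switch_Init_Context(0) = 0,
      LL_Switch_Init_Frames(0)  = 1,
    },
  };

  return LL_Switch_Init(route, 1);
}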
|
||||
|
||||
/**
|
||||
* @brief Decompression Unit configuration structure
|
||||
*/
|
||||
typedef struct
|
||||
{
|
||||
unsigned short nCVperCB; /**< Number of CodeVectors per CodeBook */
|
||||
unsigned char nCWperCV; /**< Number of CodeWords per CodeVector */
|
||||
unsigned char nRCWlastCV; /**< Number of read CodeWords from the last CodeVector */
|
||||
unsigned char nFormatBytes; /**< Number of bytes of a CodeWord */
|
||||
unsigned short nBatches; /**< Number of consecutive Batches used with a CodeBook */
|
||||
unsigned noDualInput : 1; /**< Disable the CodeBook stream link */
|
||||
unsigned noOverWrite : 1; /**< Disable CodeBooks overwriting */
|
||||
ll_aton_pointer CBs_vector; /**< Pointer to CodeBooks storage */
|
||||
unsigned CBs_size; /**< Size of CodeBooks in Memory */
|
||||
} LL_Decun_InitTypeDef;
|
||||
|
||||
/** @defgroup LL_DECUN Decompression Unit configuration functions
|
||||
* @{
|
||||
*/
|
||||
int LL_Decun_Init(int id, const LL_Decun_InitTypeDef *LL_Decun_InitStruct);
|
||||
/**
|
||||
* @}
|
||||
*/
|
||||
|
||||
/** @defgroup LL_EPOCHCTRL Epoch controller functions
|
||||
* @{
|
||||
*/
|
||||
int LL_EpochCtrl_Init(int id, const LL_EpochCtrl_InitTypeDef *conf);
|
||||
int LL_EpochCtrl_Step(int id);
|
||||
int LL_EpochCtrl_Wait(uint32_t mask);
|
||||
unsigned int LL_EpochCtrl_GetBlobSize(uint32_t *eb_addr);
|
||||
/**
|
||||
* @}
|
||||
*/
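/*
 * Illustrative sketch: pointing epoch controller 0 at a pre-built command blob
 * and waiting for completion. The blob symbol and the wait mask value are
 * assumptions made for the example.
 */
#include "ll_aton.h"

extern const uint8_t network_ec_blob[]; /* assumed: blob produced offline, 8-byte aligned */

static int example_run_epoch_blob(void)
{
  LL_EpochCtrl_InitTypeDef cfg = {
    .blobaddr = (uint32_t)(uintptr_t)network_ec_blob,
    .stepmode = 0, /* free-running, no single-step debugging */
  };

  int err = LL_EpochCtrl_Init(0, &cfg);
  if (err != LL_ATON_OK)
    return err;

  return LL_EpochCtrl_Wait(1u << 0); /* assumed: bit 0 selects epoch controller 0 */
}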
|
||||
|
||||
/**
|
||||
* @brief Structure defining a unit to be activated
|
||||
*/
|
||||
typedef struct
|
||||
{
|
||||
AccelUnits unit; /**< Must be one of AccelUnits */
|
||||
// unsigned int flags; // To be implemented e.g. clear, etc.
|
||||
} LL_ATON_EnableUnits_InitTypeDef;
|
||||
|
||||
typedef LL_ATON_EnableUnits_InitTypeDef LL_ATON_DisableUnits_InitTypeDef;
|
||||
|
||||
/** @addtogroup ATON_LL_UNITS ATON Units enabling/disabling functions
|
||||
* @{
|
||||
*/
|
||||
int LL_ATON_EnableUnits_Init(const LL_ATON_EnableUnits_InitTypeDef *LL_ATON_EnableUnits_InitStruct, int n);
|
||||
int LL_ATON_DisableUnits_Init(const LL_ATON_DisableUnits_InitTypeDef *LL_ATON_DisableUnits_InitStruct, int n);
|
||||
/**
|
||||
* @}
|
||||
*/
|
||||
|
||||
/** @addtogroup ATON Clock Gating functions
|
||||
* @{
|
||||
*/
|
||||
void LL_ATON_EnableClock(unsigned int clock);
|
||||
void LL_ATON_DisableClock(unsigned int clock);
|
||||
/**
|
||||
* @}
|
||||
*/
|
||||
|
||||
/** @defgroup Helper functions (use just for debug/testing purposes)
|
||||
* @{
|
||||
*/
|
||||
|
||||
/**
|
||||
* @brief DMA version of the memcpy functionality; this function can be overridden if a system DMA is available
|
||||
* @param dst destination memory address
|
||||
* @param src source memory address
|
||||
* @param src_limit memory pool end address of `src`
|
||||
* @param n number of bytes to be transferred
|
||||
* @param dst_cached Destination under cache flag
|
||||
* @param src_cached Source under cache flag
|
||||
* @retval Error code, e.g. invalid ID, invalid parameters, not idle, ...
|
||||
*
|
||||
* @note This function completely undermines correct integration of SW operators (or any other
* functionality which calls it) in any of the three ATON runtime scheduling modes. In other words,
* `LL_ATON_Dma_memcpy()` and its usage are incompatible with the ATON runtime. Therefore either
* `memcpy()` should be used in its place, or calls to `LL_ATON_Dma_memcpy()` need to be transformed
* into a sequence of "epoch blocks" which can be integrated with the ATON runtime (as an example see
* the ATON-accelerated implementation of operator `Concat`).
|
||||
*/
|
||||
void *LL_ATON_Dma_memcpy(void *dst, void *src, void *src_limit, size_t n, int dst_cached, int src_cached);
|
||||
|
||||
/**
|
||||
* @}
|
||||
*/
|
||||
|
||||
/** @defgroup Watchdog management functions. Used for polling mode only
|
||||
* @{
|
||||
*/
|
||||
int startWatchdog(uint32_t timeout);
|
||||
int checkWatchdog(void);
|
||||
|
||||
/**
|
||||
* @}
|
||||
*/
|
||||
|
||||
/** @defgroup External Trigger functions. Used to trigger external units (e.g. HSP) using ATON interrupt lines
|
||||
* @{
|
||||
*/
|
||||
int LL_TriggerHigh(int irq);
|
||||
int LL_TriggerLow(int irq);
|
||||
/**
|
||||
* @}
|
||||
*/
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif
|
||||
|
||||
/**
|
||||
* @}
|
||||
*/
|
||||
612
lib/stai/libstai/include/ll_aton_NN_interface.h
Normal file
@ -0,0 +1,612 @@
|
||||
/**
|
||||
******************************************************************************
|
||||
* @file ll_aton_NN_interface.h
|
||||
* @author SRA Artificial Intelligence & Embedded Architectures
|
||||
* @brief Interface that defines a NN generated by the AtoNN Compiler.
|
||||
******************************************************************************
|
||||
* @attention
|
||||
*
|
||||
* Copyright (c) 2024 STMicroelectronics.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This software is licensed under terms that can be found in the LICENSE file
|
||||
* in the root directory of this software component.
|
||||
* If no LICENSE file comes with this software, it is provided AS-IS.
|
||||
*
|
||||
******************************************************************************
|
||||
*/
|
||||
|
||||
#ifndef __LL_ATON_NN_INTERFACE_H
|
||||
#define __LL_ATON_NN_INTERFACE_H
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C"
|
||||
{
|
||||
#endif
|
||||
|
||||
#include <assert.h>
|
||||
#include <stdbool.h>
|
||||
#include <stddef.h>
|
||||
#include <stdint.h>
|
||||
|
||||
#include "ll_aton_config.h"
|
||||
|
||||
#include "ll_aton_attributes.h"
|
||||
#include "ll_aton_util.h"
|
||||
|
||||
/* this is needed to avoid some compilers (e.g. KEIL) that observe a strict semantic about conversion of
|
||||
* pointers to integers in const initializers
|
||||
*/
|
||||
typedef union
|
||||
{
|
||||
unsigned char *p;
|
||||
uintptr_t i;
|
||||
} __LL_address_t;
|
||||
|
||||
typedef void (*EpochBlock_FuncPtr_t)(const void *epoch_block);
|
||||
|
||||
typedef enum LL_ATON_RT_RetValues
|
||||
{
|
||||
LL_ATON_RT_NO_WFE = 0,
|
||||
LL_ATON_RT_WFE,
|
||||
LL_ATON_RT_DONE,
|
||||
} LL_ATON_RT_RetValues_t;
|
||||
|
||||
typedef enum LL_ATON_RT_Callbacktype
|
||||
{
|
||||
LL_ATON_RT_Callbacktype_PRE_START, /**< Callback called before start_epoch_block */
|
||||
LL_ATON_RT_Callbacktype_POST_START, /**< Callback called after start_epoch_block */
|
||||
LL_ATON_RT_Callbacktype_PRE_END, /**< Callback called before end_epoch_block */
|
||||
LL_ATON_RT_Callbacktype_POST_END, /**< Callback called after end_epoch_block */
|
||||
LL_ATON_RT_Callbacktype_NN_Init, /**< Callback called after `LL_ATON_RT_Init_Network`,
|
||||
* NOTE: 3rd parameter passed is `NULL` */
|
||||
LL_ATON_RT_Callbacktype_NN_DeInit, /**< Callback called after `LL_ATON_RT_DeInit_Network`,
|
||||
* NOTE: 3rd parameter passed is `NULL` */
|
||||
LL_ATON_RT_Callbacktype_RT_Init, /**< Callback called after `LL_ATON_RT_RuntimeInit` */
|
||||
LL_ATON_RT_Callbacktype_RT_Deinit, /**< Callback called before `LL_ATON_RT_RuntimeDeInit` */
|
||||
} LL_ATON_RT_Callbacktype_t;
|
||||
|
||||
typedef enum LL_ATON_User_IO_Result
|
||||
{
|
||||
LL_ATON_User_IO_NOERROR, /**< */
|
||||
LL_ATON_User_IO_WRONG_ALIGN, /**< */
|
||||
LL_ATON_User_IO_WRONG_SIZE, /**< */
|
||||
LL_ATON_User_IO_WRONG_INDEX, /**< */
|
||||
} LL_ATON_User_IO_Result_t;
|
||||
|
||||
typedef enum
|
||||
{
|
||||
EpochBlock_Flags_NONE = 0x0, /**< */
|
||||
EpochBlock_Flags_epoch_start = (0x1 << 0), /**< First EpochBlock of an Epoch */
|
||||
EpochBlock_Flags_epoch_end = (0x1 << 1), /**< Last EpochBlock of an Epoch */
|
||||
EpochBlock_Flags_blob = (0x1 << 2), /**< Item is an Epoch Blob */
|
||||
EpochBlock_Flags_last_eb = (0x1 << 3), /**< Last EpochBlock */
|
||||
EpochBlock_Flags_pure_hw = (0x1 << 4), /**< Pure HW EpochBlock */
|
||||
EpochBlock_Flags_pure_sw = (0x1 << 5), /**< Pure SW EpochBlock */
|
||||
EpochBlock_Flags_hybrid = (0x1 << 6), /**< Hybrid EpochBlock (i.e. mixed HW/SW) */
|
||||
EpochBlock_Flags_internal = (0x1 << 7), /**< ATON lib internal EpochBlock (used to implement hybrid epochs) */
|
||||
} EpochBlock_Flags_t;
|
||||
|
||||
typedef struct
|
||||
{
|
||||
EpochBlock_FuncPtr_t start_epoch_block; /**< Method to execute the EpochBlock */
|
||||
EpochBlock_FuncPtr_t end_epoch_block; /**< Method to be executed when the EpochBlock ends */
|
||||
uintptr_t blob_address; /**< Blob address (in case this EpochBlock represents an epoch blob) */
|
||||
uint32_t wait_mask; /**< Mask needed to check when an EpochBlock ends
|
||||
* - if epoch blob: number (not bitmask) of epoch controller unit to use
|
||||
* - otherwise: bitmask with all output streaming engines to wait for before ending epoch */
|
||||
uint16_t flags; /**< EpochBlock flags */
|
||||
#ifdef LL_ATON_EB_DBG_INFO
|
||||
int16_t epoch_num; /**< Epoch number / First epoch number within blob */
|
||||
int16_t last_epoch_num; /**< Epoch number / Last epoch number within blob */
|
||||
uint32_t in_streng_mask; /**< Debug information about input streaming engines used in epoch */
|
||||
uint32_t out_streng_mask; /**< Debug information about output streaming engines used in epoch */
|
||||
uint64_t estimated_npu_cycles; /**< Debug information estimates for NPU cycles in epoch w/o memory penalty */
|
||||
uint64_t estimated_tot_cycles; /**< Debug information estimates for NPU cycles in epoch w/ memory penalty */
|
||||
#endif // LL_ATON_EB_DBG_INFO
|
||||
} EpochBlock_ItemTypeDef;
|
||||
|
||||
/**
|
||||
* @brief Checks if the pointed element is the last one of an array of `const EpochBlock_ItemTypeDef`
|
||||
*
|
||||
*/
|
||||
static inline bool EpochBlock_IsLastEpochBlock(const EpochBlock_ItemTypeDef *eb);
|
||||
|
||||
/**
|
||||
* @brief Checks if the pointed element is the first EpochBlock of an Epoch
|
||||
*
|
||||
*/
|
||||
static inline bool EpochBlock_IsEpochStart(const EpochBlock_ItemTypeDef *eb);
|
||||
|
||||
/**
|
||||
* @brief Checks if the pointed element is the last EpochBlock of an Epoch
|
||||
*
|
||||
*/
|
||||
static inline bool EpochBlock_IsEpochEnd(const EpochBlock_ItemTypeDef *eb);
|
||||
|
||||
/**
|
||||
* @brief Checks if the pointed element is an Epoch Blob
|
||||
*
|
||||
*/
|
||||
static inline bool EpochBlock_IsEpochBlob(const EpochBlock_ItemTypeDef *eb);
|
||||
|
||||
/**
|
||||
* @brief Checks if the pointed element is a pure SW epoch
|
||||
*
|
||||
*/
|
||||
static inline bool EpochBlock_IsEpochPureSW(const EpochBlock_ItemTypeDef *eb);
|
||||
|
||||
/**
|
||||
* @brief Checks if the pointed element is a pure HW or mixed SW/HW epoch
|
||||
*
|
||||
*/
|
||||
static inline bool EpochBlock_IsEpochPureHW(const EpochBlock_ItemTypeDef *eb);
|
||||
|
||||
/**
|
||||
* @brief Checks if the pointed element is a hybrid epoch
|
||||
*
|
||||
*/
|
||||
static inline bool EpochBlock_IsEpochHybrid(const EpochBlock_ItemTypeDef *eb);
|
||||
|
||||
/**
|
||||
* @brief Checks if the pointed element is an internal epoch
|
||||
*
|
||||
*/
|
||||
static inline bool EpochBlock_IsEpochInternal(const EpochBlock_ItemTypeDef *eb);
|
||||
|
||||
/**
|
||||
* @brief Returns the Epoch controller id to use
|
||||
*
|
||||
*/
|
||||
static inline uint32_t EpochBlock_EpochControllerUnit(const EpochBlock_ItemTypeDef *eb);
|
||||
|
||||
/**
|
||||
* @brief Returns the address of the configuration of the epoch controller (the blob address)
|
||||
*
|
||||
*/
|
||||
static inline uintptr_t EpochBlock_EpochBlobAddr(const EpochBlock_ItemTypeDef *eb);
|
||||
|
||||
/**
|
||||
* @brief ATON buffer types definition
|
||||
*/
|
||||
|
||||
typedef enum
|
||||
{
|
||||
DataType_UNDEFINED = 0,
|
||||
DataType_FLOAT = 1,
|
||||
DataType_UINT8 = 2,
|
||||
DataType_INT8 = 3,
|
||||
DataType_UINT16 = 4,
|
||||
DataType_INT16 = 5,
|
||||
DataType_INT32 = 6,
|
||||
DataType_INT64 = 7,
|
||||
DataType_STRING = 8,
|
||||
DataType_BOOL = 9,
|
||||
DataType_FLOAT16 = 10,
|
||||
DataType_DOUBLE = 11,
|
||||
DataType_UINT32 = 12,
|
||||
DataType_UINT64 = 13,
|
||||
DataType_COMPLEX64 = 14,
|
||||
DataType_COMPLEX128 = 15,
|
||||
DataType_BFLOAT16 = 16,
|
||||
DataType_FXP = 100 // AtoNN specific
|
||||
} Buffer_DataType_TypeDef;
|
||||
|
||||
/**
|
||||
* @brief ATON buffer Channel position
|
||||
*/
|
||||
|
||||
typedef enum
|
||||
{
|
||||
CHPos_UNDEFINED = 0, /**< No channel present */
|
||||
CHPos_First = 1, /**< Channel First ( ...B C H W )*/
|
||||
CHPos_Last = 2, /**< Channel Last ( ...B H W C ) */
|
||||
CHPos_Mixed = 3, /**< Channel with Batch(b) ( ...B C/b H W b ) */
|
||||
} Buffer_CHPos_TypeDef;
|
||||
|
||||
/**
|
||||
* @brief ATON buffer definition
|
||||
*/
|
||||
typedef struct
|
||||
{
|
||||
const char *name; /**< Buffer name. NULL if end of list */
|
||||
__LL_address_t addr_base; /**< Buffer base address */
|
||||
uint32_t offset_start; /**< Offset of the buffer start address from the base address */
|
||||
uint32_t offset_end; /**< Offset of the buffer end address from the base address
|
||||
* (first bytes address beyond buffer length) */
|
||||
uint32_t offset_limit; /**< Offset of the limiter address from the base address,
|
||||
* (needed for configuring streaming engines) */
|
||||
uint8_t is_user_allocated; /**< */
|
||||
uint8_t is_param; /**< */
|
||||
uint16_t epoch; /**< */
|
||||
uint32_t batch; /**< */
|
||||
const uint32_t *mem_shape; /**< shape as seen by the user in memory (only valid for input/output buffers) */
|
||||
uint16_t mem_ndims; /**< Number of dimensions of mem_shape (Length of mem_shape) */
|
||||
Buffer_CHPos_TypeDef chpos; /**< Position of channels dimension in mem shape */
|
||||
Buffer_DataType_TypeDef type; /**< */
|
||||
int8_t Qm; /**< */
|
||||
int8_t Qn; /**< */
|
||||
uint8_t Qunsigned; /**< */
|
||||
uint8_t ndims; /**< */
|
||||
uint8_t nbits; /**< */
|
||||
uint8_t per_channel; /**< */
|
||||
const uint32_t *shape; /**< */
|
||||
const float *scale; /**< */
|
||||
const int16_t *offset; /**< This can become int8 or uint8 based on the Qunsigned field.
|
||||
* (This field Must have the same format of the quantized value) */
|
||||
} LL_Buffer_InfoTypeDef;
|
||||
|
||||
/**
|
||||
* @brief returns the base address of the mem pool the buffer is allocated in
|
||||
*
|
||||
*/
|
||||
static inline unsigned char *LL_Buffer_addr_base(const LL_Buffer_InfoTypeDef *buf);
|
||||
|
||||
/**
|
||||
* @brief returns the start address of the buffer
|
||||
*
|
||||
*/
|
||||
static inline unsigned char *LL_Buffer_addr_start(const LL_Buffer_InfoTypeDef *buf);
|
||||
|
||||
/**
|
||||
* @brief returns the end address of the buffer
|
||||
*
|
||||
*/
|
||||
static inline unsigned char *LL_Buffer_addr_end(const LL_Buffer_InfoTypeDef *buf);
|
||||
|
||||
/**
|
||||
* @brief returns the limit address of the buffer
|
||||
*
|
||||
*/
|
||||
static inline unsigned char *LL_Buffer_addr_limit(const LL_Buffer_InfoTypeDef *buf);
|
||||
|
||||
/**
|
||||
* @brief returns the length of the buffer
|
||||
*
|
||||
*/
|
||||
static inline uint32_t LL_Buffer_len(const LL_Buffer_InfoTypeDef *buf);
|
||||
|
||||
/**
|
||||
* @brief returns the buffer elements number of bits
|
||||
*
|
||||
*/
|
||||
static inline uint32_t LL_Buffer_bits(const LL_Buffer_InfoTypeDef *buf);
|
||||
|
||||
/** @defgroup ATONN_COMPILER Functions autogenerated by the AtoNN compiler
|
||||
* @{
|
||||
*/
|
||||
|
||||
/**
|
||||
* @brief Initialize a Network internal structures for the Epoch Controller
|
||||
* @note This function is generated by the AtoNN compiler when called without a network name
|
||||
* (i.e. without option `--network-name`)
|
||||
* @note Use macro `LL_ATON_DECLARE_NAMED_NN_PROTOS(network_name)` instead when the network has been generated
|
||||
* (by the AtoNN compiler) with a network name (i.e. with option `--network-name`)
|
||||
* @retval returns whether the action succeeded or an error occurred
|
||||
*/
|
||||
extern bool LL_ATON_EC_Network_Init_Default(void);
|
||||
|
||||
/**
|
||||
* @brief Update a Network internal structures for the Epoch Controller before the execution of an Inference
|
||||
* @note This function is generated by the AtoNN compiler when called without a network name
|
||||
* (i.e. without option `--network-name`)
|
||||
* @note Use macro `LL_ATON_DECLARE_NAMED_NN_PROTOS(network_name)` instead when the network has been generated
|
||||
* (by the AtoNN compiler) with a network name (i.e. with option `--network-name`)
|
||||
* @retval returns whether the action succeeded or an error occurred
|
||||
*/
|
||||
extern bool LL_ATON_EC_Inference_Init_Default(void);
|
||||
|
||||
/**
|
||||
* @brief Sets user allocated inputs (one at a time)
|
||||
* @note This function is generated by the AtoNN compiler when called without a network name
|
||||
* (i.e. without option `--network-name`)
|
||||
* @note Use macro `LL_ATON_DECLARE_NAMED_NN_PROTOS(network_name)` instead when the network has been generated
|
||||
* (by the AtoNN compiler) with a network name (i.e. with option `--network-name`)
|
||||
* @param num zero base index of the input buffer to set
|
||||
* @param buffer pointer to the area used to store this input
|
||||
* @param size size of the memory reserved for this input
|
||||
*/
|
||||
extern LL_ATON_User_IO_Result_t LL_ATON_Set_User_Input_Buffer_Default(uint32_t num, void *buffer, uint32_t size);
|
||||
|
||||
/**
|
||||
* @brief Gets user allocated inputs (one at a time)
|
||||
* @note This function is generated by the AtoNN compiler when called without a network name
|
||||
* (i.e. without option `--network-name`)
|
||||
* @note Use macro `LL_ATON_DECLARE_NAMED_NN_PROTOS(network_name)` instead when the network has been generated
|
||||
* (by the AtoNN compiler) with a network name (i.e. with option `--network-name`)
|
||||
* @param num zero-based index of the input buffer to get
|
||||
* @retval returns a pointer to the specified user allocated input
|
||||
*/
|
||||
extern void *LL_ATON_Get_User_Input_Buffer_Default(uint32_t num);
|
||||
|
||||
/**
|
||||
* @brief Sets user allocated outputs (one at a time)
|
||||
* @note This function is generated by the AtoNN compiler when called without a network name
|
||||
* (i.e. without option `--network-name`)
|
||||
* @note Use macro `LL_ATON_DECLARE_NAMED_NN_PROTOS(network_name)` instead when the network has been generated
|
||||
* (by the AtoNN compiler) with a network name (i.e. with option `--network-name`)
|
||||
* @param num zero-based index of the output buffer to set
|
||||
* @param buffer pointer to the area used to store this output
|
||||
* @param size size of the memory reserved for this output
|
||||
*/
|
||||
extern LL_ATON_User_IO_Result_t LL_ATON_Set_User_Output_Buffer_Default(uint32_t num, void *buffer, uint32_t size);
|
||||
|
||||
/**
|
||||
* @brief Gets user allocated outputs (one at a time)
|
||||
* @note This function is generated by the AtoNN compiler when called without a network name
|
||||
* (i.e. without option `--network-name`)
|
||||
* @note Use macro `LL_ATON_DECLARE_NAMED_NN_PROTOS(network_name)` instead when the network has been generated
|
||||
* (by the AtoNN compiler) with a network name (i.e. with option `--network-name`)
|
||||
* @param num zero-based index of the output buffer to get
|
||||
* @retval returns a pointer to the specified user allocated output
|
||||
*/
|
||||
extern void *LL_ATON_Get_User_Output_Buffer_Default(uint32_t num);
|
||||
|
||||
/**
|
||||
* @brief Returns an array of structures describing the epoch blocks of the NN to execute
|
||||
* @note This function is generated by the AtoNN compiler when called without a network name
|
||||
* (i.e. without option `--network-name`)
|
||||
* @note Use macro `LL_ATON_DECLARE_NAMED_NN_PROTOS(network_name)` instead when the network has been generated
|
||||
* (by the AtoNN compiler) with a network name (i.e. with option `--network-name`)
|
||||
* @retval returns a pointer to an array of `const EpochBlock_ItemTypeDef`,
|
||||
* the entry whose `flags` contain `EpochBlock_Flags_last_eb` identifies the last (empty) EpochBlock (i.e. we are done)
|
||||
* (see helper function `EpochBlock_IsLastEpochBlock()`)
|
||||
*/
|
||||
extern const EpochBlock_ItemTypeDef *LL_ATON_EpochBlockItems_Default(void);
|
||||
|
||||
/**
|
||||
* @brief Returns an array of structures describing output buffers
|
||||
* @note This function is generated by the AtoNN compiler when called without a network name
|
||||
* (i.e. without option `--network-name`)
|
||||
* @note Use macro `LL_ATON_DECLARE_NAMED_NN_PROTOS(network_name)` instead when the network has been generated
|
||||
* (by the AtoNN compiler) with a network name (i.e. with option `--network-name`)
|
||||
* @retval returns a pointer to the array of LL_Buffer_InfoTypeDef, name is NULL for the last one
|
||||
*/
|
||||
extern const LL_Buffer_InfoTypeDef *LL_ATON_Output_Buffers_Info_Default(void);
|
||||
|
||||
/**
|
||||
* @brief Returns an array of structures describing input buffers
|
||||
* @note This function is generated by the AtoNN compiler when called without a network name
|
||||
* (i.e. without option `--network-name`)
|
||||
* @note Use macro `LL_ATON_DECLARE_NAMED_NN_PROTOS(network_name)` instead when the network has been generated
|
||||
* (by the AtoNN compiler) with a network name (i.e. with option `--network-name`)
|
||||
* @retval Returns a pointer to the array of LL_Buffer_InfoTypeDef, name is NULL for the last one
|
||||
*/
|
||||
extern const LL_Buffer_InfoTypeDef *LL_ATON_Input_Buffers_Info_Default(void);
|
||||
|
||||
/**
|
||||
* @brief Returns an array of structures describing epoch output transient buffers
|
||||
* @note This function is generated by the AtoNN compiler when called without a network name
|
||||
* (i.e. without option `--network-name`)
|
||||
* @note Use macro `LL_ATON_DECLARE_NAMED_NN_PROTOS(network_name)` instead when the network has been generated
|
||||
* (by the AtoNN compiler) with a network name (i.e. with option `--network-name`)
|
||||
* @retval Returns a pointer to the array of LL_Buffer_InfoTypeDef, name is NULL for the last one
|
||||
*/
|
||||
extern const LL_Buffer_InfoTypeDef *LL_ATON_Internal_Buffers_Info_Default(void);
|
||||
|
||||
/**
|
||||
* @brief Declare the function prototypes for named NN interface functions generated by the AtoNN compiler
|
||||
* @param network_name name of the network as provided by option `--network-name`
|
||||
*/
|
||||
#define LL_ATON_DECLARE_NAMED_NN_PROTOS(network_name) \
|
||||
extern bool LL_ATON_EC_Network_Init_##network_name(void); \
|
||||
extern bool LL_ATON_EC_Inference_Init_##network_name(void); \
|
||||
extern LL_ATON_User_IO_Result_t LL_ATON_Set_User_Input_Buffer_##network_name(uint32_t num, void *buffer, \
|
||||
uint32_t size); \
|
||||
extern void *LL_ATON_Get_User_Input_Buffer_##network_name(uint32_t num); \
|
||||
extern LL_ATON_User_IO_Result_t LL_ATON_Set_User_Output_Buffer_##network_name(uint32_t num, void *buffer, \
|
||||
uint32_t size); \
|
||||
extern void *LL_ATON_Get_User_Output_Buffer_##network_name(uint32_t num); \
|
||||
extern const EpochBlock_ItemTypeDef *LL_ATON_EpochBlockItems_##network_name(void); \
|
||||
extern const LL_Buffer_InfoTypeDef *LL_ATON_Output_Buffers_Info_##network_name(void); \
|
||||
extern const LL_Buffer_InfoTypeDef *LL_ATON_Input_Buffers_Info_##network_name(void); \
|
||||
extern const LL_Buffer_InfoTypeDef *LL_ATON_Internal_Buffers_Info_##network_name(void);
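Illustrative usage (not part of this diff), with `mynetwork` as a placeholder for the value passed to `--network-name`:
/* Expands to the prototypes LL_ATON_EC_Network_Init_mynetwork(),
   LL_ATON_Set_User_Input_Buffer_mynetwork(), LL_ATON_EpochBlockItems_mynetwork(), etc. */
LL_ATON_DECLARE_NAMED_NN_PROTOS(mynetwork);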
|
||||
|
||||
/**
|
||||
* @brief Type definitions for NN interface functions
|
||||
*/
|
||||
typedef bool (*NN_EC_Hook_TypeDef)(void);
|
||||
typedef LL_ATON_User_IO_Result_t (*NN_InputSetter_TypeDef)(uint32_t num, void *buffer, uint32_t size);
|
||||
typedef void *(*NN_InputGetter_TypeDef)(uint32_t num);
|
||||
typedef LL_ATON_User_IO_Result_t (*NN_OutputSetter_TypeDef)(uint32_t num, void *buffer, uint32_t size);
|
||||
typedef void *(*NN_OutputGetter_TypeDef)(uint32_t num);
|
||||
typedef const EpochBlock_ItemTypeDef *(*NN_EpochBlockItems_TypeDef)(void);
|
||||
typedef const LL_Buffer_InfoTypeDef *(*NN_Buffers_Info_TypeDef)(void);
|
||||
|
||||
typedef void (*TraceRuntime_FuncPtr_t)(LL_ATON_RT_Callbacktype_t ctype);
|
||||
|
||||
struct __nn_instance_struct; // forward declaration
|
||||
typedef struct __nn_instance_struct NN_Instance_TypeDef;
|
||||
typedef void (*TraceEpochBlock_FuncPtr_t)(LL_ATON_RT_Callbacktype_t ctype, const NN_Instance_TypeDef *nn_instance,
|
||||
const EpochBlock_ItemTypeDef *epoch_block);
|
||||
|
||||
typedef struct
|
||||
{
|
||||
const char *network_name;
|
||||
NN_EC_Hook_TypeDef ec_network_init;
|
||||
NN_EC_Hook_TypeDef ec_inference_init;
|
||||
NN_InputSetter_TypeDef input_setter;
|
||||
NN_InputGetter_TypeDef input_getter;
|
||||
NN_OutputSetter_TypeDef output_setter;
|
||||
NN_OutputGetter_TypeDef output_getter;
|
||||
NN_EpochBlockItems_TypeDef epoch_block_items;
|
||||
NN_Buffers_Info_TypeDef output_buffers_info;
|
||||
NN_Buffers_Info_TypeDef input_buffers_info;
|
||||
NN_Buffers_Info_TypeDef internal_buffers_info;
|
||||
} NN_Interface_TypeDef;
|
||||
|
||||
typedef struct
|
||||
{
|
||||
const EpochBlock_ItemTypeDef *volatile current_epoch_block; // pointer to current epoch block
|
||||
const EpochBlock_ItemTypeDef *volatile first_epoch_block; // pointer to first epoch block in current epoch list
|
||||
const EpochBlock_ItemTypeDef *volatile next_epoch_block; // pointer to epoch block to be inserted
|
||||
|
||||
const EpochBlock_ItemTypeDef *volatile saved_current_epoch_block; // pointer to saved current epoch list
|
||||
const EpochBlock_ItemTypeDef
|
||||
*volatile saved_first_epoch_block; // pointer to saved first epoch block in current epoch list
|
||||
|
||||
bool inference_started; // inference has been started
|
||||
|
||||
#if (LL_ATON_RT_MODE == LL_ATON_RT_ASYNC)
|
||||
volatile uint32_t triggered_events; // currently triggered events/IRQs in current epoch
|
||||
volatile bool current_epoch_block_started; // has current epoch block already been started
|
||||
#endif // (LL_ATON_RT_MODE == LL_ATON_RT_ASYNC)
|
||||
|
||||
#ifndef NDEBUG
|
||||
volatile uint32_t
|
||||
nr_of_epoch_blocks; // number of epoch blocks in network (also includes the terminating empty epoch block)
|
||||
volatile uint32_t saved_nr_of_epoch_blocks; // number of epoch blocks in saved network (also includes the terminating
|
||||
// empty epoch block)
|
||||
#endif // NDEBUG
|
||||
|
||||
TraceEpochBlock_FuncPtr_t epoch_callback_function; // epoch callback function
|
||||
|
||||
#if defined(LL_ATON_RT_RELOC)
|
||||
uint32_t inst_reloc;
|
||||
#endif
|
||||
|
||||
} NN_Execution_State_TypeDef;
|
||||
|
||||
struct __nn_instance_struct
|
||||
{
|
||||
const NN_Interface_TypeDef *network;
|
||||
NN_Execution_State_TypeDef exec_state;
|
||||
};
|
||||
|
||||
/**
|
||||
* @brief Declare and fill a constant named NN interface object
|
||||
* @param nn_if_name name of the network as provided by option `--network-name`
|
||||
*/
|
||||
#define LL_ATON_DECLARE_NAMED_NN_INTERFACE(nn_if_name) \
|
||||
LL_ATON_DECLARE_NAMED_NN_PROTOS(nn_if_name); \
|
||||
\
|
||||
static const NN_Interface_TypeDef NN_Interface_##nn_if_name = { \
|
||||
.network_name = #nn_if_name, \
|
||||
.ec_network_init = &LL_ATON_EC_Network_Init_##nn_if_name, \
|
||||
.ec_inference_init = &LL_ATON_EC_Inference_Init_##nn_if_name, \
|
||||
.input_setter = &LL_ATON_Set_User_Input_Buffer_##nn_if_name, \
|
||||
.input_getter = &LL_ATON_Get_User_Input_Buffer_##nn_if_name, \
|
||||
.output_setter = &LL_ATON_Set_User_Output_Buffer_##nn_if_name, \
|
||||
.output_getter = &LL_ATON_Get_User_Output_Buffer_##nn_if_name, \
|
||||
.epoch_block_items = &LL_ATON_EpochBlockItems_##nn_if_name, \
|
||||
.output_buffers_info = &LL_ATON_Output_Buffers_Info_##nn_if_name, \
|
||||
.input_buffers_info = &LL_ATON_Input_Buffers_Info_##nn_if_name, \
|
||||
.internal_buffers_info = &LL_ATON_Internal_Buffers_Info_##nn_if_name}
|
||||
|
||||
/**
|
||||
* @brief Declare and fill a non-constant named NN execution instance
|
||||
* @param nn_exec_name typically the name of the network as provided by option `--network-name`
|
||||
* @param _nn_if_name pointer to network interface
|
||||
*/
|
||||
#define LL_ATON_DECLARE_NAMED_NN_INSTANCE(nn_exec_name, _nn_if_name) \
|
||||
static NN_Instance_TypeDef NN_Instance_##nn_exec_name = {.network = _nn_if_name, .exec_state = {0}}
|
||||
|
||||
/**
|
||||
* @brief Declare and fill a non-constant named NN execution instance and constant network interface,
|
||||
* which get linked together (by this macro).
|
||||
* @param nn_name name of the network as provided by option `--network-name`
|
||||
*/
|
||||
#define LL_ATON_DECLARE_NAMED_NN_INSTANCE_AND_INTERFACE(nn_name) \
|
||||
LL_ATON_DECLARE_NAMED_NN_INTERFACE(nn_name); \
|
||||
LL_ATON_DECLARE_NAMED_NN_INSTANCE(nn_name, &NN_Interface_##nn_name);
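Illustrative usage (not part of this diff), again with `mynetwork` as a placeholder network name; the interface function pointers then become reachable through the instance:
LL_ATON_DECLARE_NAMED_NN_INSTANCE_AND_INTERFACE(mynetwork);

/* Illustrative sketch only: query the output buffer descriptors through the instance. */
static const LL_Buffer_InfoTypeDef *example_outputs(void)
{
  return NN_Instance_mynetwork.network->output_buffers_info();
}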
|
||||
|
||||
/**
|
||||
* @}
|
||||
*/
|
||||
|
||||
static inline bool EpochBlock_IsLastEpochBlock(const EpochBlock_ItemTypeDef *eb)
|
||||
{
|
||||
return ((eb->flags & EpochBlock_Flags_last_eb) != 0);
|
||||
}
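An illustrative sketch (not part of this diff) of walking the epoch block array returned by `LL_ATON_EpochBlockItems_Default()` up to the terminating (empty) block:
static uint32_t example_count_epoch_blocks(void)
{
  uint32_t count = 0;
  const EpochBlock_ItemTypeDef *eb = LL_ATON_EpochBlockItems_Default();
  while (!EpochBlock_IsLastEpochBlock(eb)) /* last_eb flag marks the terminating entry */
  {
    count++;
    eb++;
  }
  return count;
}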
|
||||
|
||||
static inline bool EpochBlock_IsEpochStart(const EpochBlock_ItemTypeDef *eb)
|
||||
{
|
||||
return ((eb->flags & EpochBlock_Flags_epoch_start) != 0);
|
||||
}
|
||||
|
||||
static inline bool EpochBlock_IsEpochEnd(const EpochBlock_ItemTypeDef *eb)
|
||||
{
|
||||
return ((eb->flags & EpochBlock_Flags_epoch_end) != 0);
|
||||
}
|
||||
|
||||
static inline bool EpochBlock_IsEpochBlob(const EpochBlock_ItemTypeDef *eb)
|
||||
{
|
||||
return ((eb->flags & EpochBlock_Flags_blob) != 0);
|
||||
}
|
||||
|
||||
static inline bool EpochBlock_IsEpochPureSW(const EpochBlock_ItemTypeDef *eb)
|
||||
{
|
||||
return ((eb->flags & EpochBlock_Flags_pure_sw) != 0);
|
||||
}
|
||||
|
||||
static inline bool EpochBlock_IsEpochPureHW(const EpochBlock_ItemTypeDef *eb)
|
||||
{
|
||||
return ((eb->flags & EpochBlock_Flags_pure_hw) != 0);
|
||||
}
|
||||
|
||||
static inline bool EpochBlock_IsEpochHybrid(const EpochBlock_ItemTypeDef *eb)
|
||||
{
|
||||
return ((eb->flags & EpochBlock_Flags_hybrid) != 0);
|
||||
}
|
||||
|
||||
static inline bool EpochBlock_IsEpochInternal(const EpochBlock_ItemTypeDef *eb)
|
||||
{
|
||||
return ((eb->flags & EpochBlock_Flags_internal) != 0);
|
||||
}
|
||||
|
||||
static inline uint32_t EpochBlock_EpochControllerUnit(const EpochBlock_ItemTypeDef *eb)
|
||||
{
|
||||
LL_ATON_ASSERT(EpochBlock_IsEpochBlob(eb));
|
||||
return eb->wait_mask;
|
||||
}
|
||||
|
||||
static inline uintptr_t EpochBlock_EpochBlobAddr(const EpochBlock_ItemTypeDef *eb)
|
||||
{
|
||||
LL_ATON_ASSERT(EpochBlock_IsEpochBlob(eb));
|
||||
return eb->blob_address;
|
||||
}
|
||||
|
||||
static inline unsigned char *LL_Buffer_addr_base(const LL_Buffer_InfoTypeDef *buf)
|
||||
{
|
||||
if (buf->is_user_allocated)
|
||||
{
|
||||
unsigned char **tmp = (unsigned char **)buf->addr_base.p;
|
||||
return *tmp;
|
||||
}
|
||||
return buf->addr_base.p;
|
||||
}
|
||||
|
||||
static inline unsigned char *LL_Buffer_addr_start(const LL_Buffer_InfoTypeDef *buf)
|
||||
{
|
||||
return LL_Buffer_addr_base(buf) + buf->offset_start;
|
||||
}
|
||||
|
||||
static inline unsigned char *LL_Buffer_addr_end(const LL_Buffer_InfoTypeDef *buf)
|
||||
{
|
||||
return LL_Buffer_addr_base(buf) + buf->offset_end;
|
||||
}
|
||||
|
||||
static inline unsigned char *LL_Buffer_addr_limit(const LL_Buffer_InfoTypeDef *buf)
|
||||
{
|
||||
return LL_Buffer_addr_base(buf) + buf->offset_limit;
|
||||
}
|
||||
|
||||
static inline uint32_t LL_Buffer_len(const LL_Buffer_InfoTypeDef *buf)
|
||||
{
|
||||
return buf->offset_end - buf->offset_start;
|
||||
}
|
||||
|
||||
static inline uint32_t LL_Buffer_bits(const LL_Buffer_InfoTypeDef *buf)
|
||||
{
|
||||
return buf->Qm + buf->Qn + (buf->Qunsigned ? 0 : 1);
|
||||
}
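An illustrative sketch (not part of this diff) that walks the default network's output buffer descriptors with the accessors above; it assumes the descriptor exposes a `name` field that is NULL for the terminating entry, as the @retval notes earlier state:
static void example_dump_outputs(void)
{
  for (const LL_Buffer_InfoTypeDef *buf = LL_ATON_Output_Buffers_Info_Default();
       (buf != NULL) && (buf->name != NULL); /* assumed terminating condition */
       buf++)
  {
    unsigned char *data = LL_Buffer_addr_start(buf); /* first valid byte of the buffer */
    uint32_t len = LL_Buffer_len(buf);               /* length in bytes */
    uint32_t bits = LL_Buffer_bits(buf);             /* bits per element */
    (void)data;
    (void)len;
    (void)bits;
  }
}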
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif
|
||||
|
||||
/**
|
||||
* @}
|
||||
*/
|
||||
81
lib/stai/libstai/include/ll_aton_attributes.h
Normal file
@ -0,0 +1,81 @@
|
||||
/**
|
||||
******************************************************************************
|
||||
* @file ll_aton_attributes.h
|
||||
* @author SRA Artificial Intelligence & Embedded Architectures
|
||||
* @brief Header file of ATON library attributes handling.
|
||||
******************************************************************************
|
||||
* @attention
|
||||
*
|
||||
* Copyright (c) 2024 STMicroelectronics.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This software is licensed under terms that can be found in the LICENSE file
|
||||
* in the root directory of this software component.
|
||||
* If no LICENSE file comes with this software, it is provided AS-IS.
|
||||
*
|
||||
******************************************************************************
|
||||
*/
|
||||
|
||||
#ifndef __LL_ATON_ATTRIBUTES_H
|
||||
#define __LL_ATON_ATTRIBUTES_H
|
||||
|
||||
/*
|
||||
* Exported attributes handling:
|
||||
* LL_ATON_API_ENTRY
|
||||
* LL_ATON_ALIGNED(x)
|
||||
* LL_ATON_LIB_UNUSED(x)
|
||||
* LL_ATON_CONCAT(a, b)
|
||||
* LL_ATON_CONCAT3(a, b, c)
|
||||
* LL_ATON_WEAK
|
||||
*
|
||||
*/
|
||||
|
||||
/* Exported attributes handling */
|
||||
|
||||
#if defined(__clang__)
|
||||
#undef __weak
|
||||
#define __weak __attribute__((weak))
|
||||
#endif
|
||||
#if defined(__GNUC__)
|
||||
#ifndef __weak
|
||||
#define __weak __attribute__((weak))
|
||||
#endif /* __weak */
|
||||
#endif /* __GNUC__ */
|
||||
#define LL_ATON_WEAK __weak
|
||||
|
||||
#define LL_ATON_LIB_UNUSED(x) ((void)(x)) // prevent possible compiler warnings due to unused variables
|
||||
|
||||
#define __LL_ATON_CONCAT_ARG(a, b) a##b
|
||||
#define LL_ATON_CONCAT(a, b) __LL_ATON_CONCAT_ARG(a, b)
|
||||
#define LL_ATON_CONCAT3(a, b, c) LL_ATON_CONCAT(a, LL_ATON_CONCAT(b, c))
|
||||
|
||||
/* Alignment macros borrowed from ST.AI (file `stai.h`) */
|
||||
#if defined(_MSC_VER)
|
||||
#define LL_ATON_API_ENTRY __declspec(dllexport)
|
||||
#define LL_ATON_ALIGNED(x) __declspec(align(x))
|
||||
#elif defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
|
||||
#define LL_ATON_API_ENTRY /* LL_ATON_API_ENTRY */
|
||||
#define LL_ATON_ALIGNED(x) LL_ATON_CONCAT(LL_ATON_ALIGNED_, x)
|
||||
#define LL_ATON_ALIGNED_1 _Pragma("data_alignment = 1")
|
||||
#define LL_ATON_ALIGNED_2 _Pragma("data_alignment = 2")
|
||||
#define LL_ATON_ALIGNED_4 _Pragma("data_alignment = 4")
|
||||
#define LL_ATON_ALIGNED_8 _Pragma("data_alignment = 8")
|
||||
#define LL_ATON_ALIGNED_16 _Pragma("data_alignment = 16")
|
||||
#define LL_ATON_ALIGNED_32 _Pragma("data_alignment = 32")
|
||||
#define LL_ATON_ALIGNED_64 _Pragma("data_alignment = 64")
|
||||
#elif defined(__CC_ARM)
|
||||
#define LL_ATON_API_ENTRY __attribute__((visibility("default")))
|
||||
#define LL_ATON_ALIGNED(x) __attribute__((aligned(x)))
|
||||
/* Keil disallows anonymous union initialization by default */
|
||||
#pragma anon_unions
|
||||
#elif defined(__GNUC__)
|
||||
// #define LL_ATON_API_ENTRY __attribute__((visibility("default")))
|
||||
#define LL_ATON_API_ENTRY /* LL_ATON_API_ENTRY */
|
||||
#define LL_ATON_ALIGNED(x) __attribute__((aligned(x)))
|
||||
#else
|
||||
/* Dynamic libraries are not supported by the compiler */
|
||||
#define LL_ATON_API_ENTRY /* LL_ATON_API_ENTRY */
|
||||
#define LL_ATON_ALIGNED(x) /* LL_ATON_ALIGNED(x) */
|
||||
#endif
|
||||
|
||||
#endif
|
||||
210
lib/stai/libstai/include/ll_aton_caches_interface.h
Normal file
@ -0,0 +1,210 @@
|
||||
/**
|
||||
******************************************************************************
|
||||
* @file ll_aton_caches_interface.h
|
||||
* @author SRA Artificial Intelligence & Embedded Architectures
|
||||
* @brief Header file for defining and implementing generic cache handling
|
||||
* functions for the application writer
|
||||
******************************************************************************
|
||||
* @attention
|
||||
*
|
||||
* Copyright (c) 2024 STMicroelectronics.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This software is licensed under terms that can be found in the LICENSE file
|
||||
* in the root directory of this software component.
|
||||
* If no LICENSE file comes with this software, it is provided AS-IS.
|
||||
*
|
||||
******************************************************************************
|
||||
*/
|
||||
|
||||
#ifndef __LL_ATON_CACHES_H
|
||||
#define __LL_ATON_CACHES_H
|
||||
|
||||
#include <stdint.h>
|
||||
|
||||
#include "ll_aton_osal.h"
|
||||
#include "ll_aton_platform.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C"
|
||||
{
|
||||
#endif
|
||||
|
||||
#if (LL_ATON_PLATFORM == LL_ATON_PLAT_STM32N6)
|
||||
|
||||
/*
|
||||
* Note: For relocatable mode, the functions are implemented in ll_aton_reloc_callbacks.c.
|
||||
* Only the declarations are required here, to avoid inlining them into the relocatable binary model.
|
||||
*/
|
||||
|
||||
/*** MCU cache maintenance functions ***/
|
||||
|
||||
/**
|
||||
* @brief perform MCU cache clean maintenance operation on an address range
|
||||
* @details whenever the content of a buffer is changed by the application (which is especially the case for input
|
||||
* buffers) cache maintenance MUST be taken into account before being able to run the network.
|
||||
* @param[in] virtual_addr start address (host-side/virtual) of address range
|
||||
* @param[in] size size of address range
|
||||
*
|
||||
* @note the address range should fulfill alignment constraints with respect to the MCU cache line size
|
||||
* for both its `address` & `size` (to better correspond to what this operation will actually do)!
|
||||
* @note this function is intended to handle the case where a buffer has been filled by the MCU/processor (thus
|
||||
* passing through the MCU cache) and should be called AFTER the buffer has been filled
|
||||
*/
|
||||
#if defined(LL_ATON_RT_RELOC) && defined(BUILD_AI_NETWORK_RELOC)
|
||||
void LL_ATON_Cache_MCU_Clean_Range(uintptr_t virtual_addr, uint32_t size);
|
||||
#else
|
||||
static inline void LL_ATON_Cache_MCU_Clean_Range(uintptr_t virtual_addr, uint32_t size)
|
||||
{
|
||||
LL_ATON_OSAL_LOCK_MCU_CACHE();
|
||||
mcu_cache_clean_range(virtual_addr, virtual_addr + size);
|
||||
LL_ATON_OSAL_UNLOCK_MCU_CACHE();
|
||||
}
|
||||
#endif
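A hedged sketch (not part of this diff) of the documented usage pattern: the CPU fills an input buffer and the range is cleaned afterwards so the data leaves the MCU cache before the accelerator reads it; buffer and size are placeholders:
static void example_publish_input(uint8_t *buf, uint32_t size)
{
  /* ... application code fills buf[0..size-1] via the CPU here ... */
  LL_ATON_Cache_MCU_Clean_Range((uintptr_t)buf, size); /* called AFTER the buffer is filled */
}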
|
||||
|
||||
/**
|
||||
* @brief perform MCU cache invalidate maintenance operation on an address range
|
||||
* @details whenever the content of a buffer is changed by the application (which is especially the case for input
|
||||
* buffers) cache maintenance MUST be taken into account before being able to run the network.
|
||||
* @param[in] virtual_addr start address (host-side/virtual) of address range
|
||||
* @param[in] size size of address range
|
||||
*
|
||||
* @note the address range should fulfill alignment constraints with respect to the MCU cache line size
|
||||
* for both its `address` & `size` (to better correspond to what this operation will actually do)!
|
||||
* @note this function is intended to handle the case where a buffer has been filled by-passing the MCU/processor
|
||||
* cache (e.g. using a DMA) and should be called BEFORE the buffer gets filled
|
||||
*/
|
||||
#if defined(LL_ATON_RT_RELOC) && defined(BUILD_AI_NETWORK_RELOC)
|
||||
void LL_ATON_Cache_MCU_Invalidate_Range(uintptr_t virtual_addr, uint32_t size);
|
||||
#else
|
||||
static inline void LL_ATON_Cache_MCU_Invalidate_Range(uintptr_t virtual_addr, uint32_t size)
|
||||
{
|
||||
LL_ATON_OSAL_LOCK_MCU_CACHE();
|
||||
mcu_cache_invalidate_range(virtual_addr, virtual_addr + size);
|
||||
LL_ATON_OSAL_UNLOCK_MCU_CACHE();
|
||||
}
|
||||
#endif
|
||||
|
||||
/**
|
||||
* @brief perform MCU cache clean & invalidate maintenance operation on an address range
|
||||
* @details whenever the content of a buffer is changed by the application (which is especially the case for input
|
||||
* buffers) cache maintenance MUST be taken into account before being able to run the network.
|
||||
* @param[in] virtual_addr start address (host-side/virtual) of address range
|
||||
* @param[in] size size of address range
|
||||
*
|
||||
* @note the address range should fulfill alignment constraints with respect to the MCU cache line size
|
||||
* for both its `address` & `size` (to better correspond to what this operation will actually do)!
|
||||
* @note this function is intended to handle the case where a buffer has been filled by the MCU/processor
|
||||
* (thus passing through the MCU cache) and is going to be modified immediately afterwards, bypassing
|
||||
* the MCU/processor cache (e.g. using a DMA). It should be called AFTER the buffer has been
|
||||
* filled
|
||||
*/
|
||||
#if defined(LL_ATON_RT_RELOC) && defined(BUILD_AI_NETWORK_RELOC)
|
||||
void LL_ATON_Cache_MCU_Clean_Invalidate_Range(uintptr_t virtual_addr, uint32_t size);
|
||||
#else
|
||||
static inline void LL_ATON_Cache_MCU_Clean_Invalidate_Range(uintptr_t virtual_addr, uint32_t size)
|
||||
{
|
||||
LL_ATON_OSAL_LOCK_MCU_CACHE();
|
||||
mcu_cache_clean_invalidate_range(virtual_addr, virtual_addr + size);
|
||||
LL_ATON_OSAL_UNLOCK_MCU_CACHE();
|
||||
}
|
||||
#endif
|
||||
|
||||
/*** NPU cache maintenance functions ***/
|
||||
|
||||
/**
|
||||
* @brief perform NPU cache clean maintenance operation on an address range
|
||||
* @details whenever the content of a buffer is changed by the application (which is especially the case for input
|
||||
* buffers) cache maintenance MUST be taken into account before being able to run the network.
|
||||
* @param[in] virtual_addr start address (host-side/virtual) of address range
|
||||
* @param[in] size size of address range
|
||||
*
|
||||
* @note this cache maintenance function only needs to be called for buffers which are NPU cacheable
|
||||
* @note the address range should fulfill alignment constraints with respect to the NPU cache line size
|
||||
* for both its `address` & `size` (to better correspond to what this operation will actually do)!
|
||||
* @note this function is intended to handle the case where a buffer has been filled passing through the NPU cache
|
||||
* and should be called AFTER the buffer has been filled
|
||||
*/
|
||||
#if defined(LL_ATON_RT_RELOC) && defined(BUILD_AI_NETWORK_RELOC)
|
||||
void LL_ATON_Cache_NPU_Clean_Range(uintptr_t virtual_addr, uint32_t size);
|
||||
#else
|
||||
static inline void LL_ATON_Cache_NPU_Clean_Range(uintptr_t virtual_addr, uint32_t size)
|
||||
{
|
||||
LL_ATON_OSAL_LOCK_NPU_CACHE();
|
||||
npu_cache_clean_range(ATON_LIB_VIRTUAL_TO_PHYSICAL_ADDR(virtual_addr),
|
||||
ATON_LIB_VIRTUAL_TO_PHYSICAL_ADDR(virtual_addr + size));
|
||||
LL_ATON_OSAL_UNLOCK_NPU_CACHE();
|
||||
}
|
||||
#endif
|
||||
|
||||
/**
|
||||
* @brief perform NPU cache clean & invalidate maintenance operation on an address range
|
||||
* @details whenever the content of a buffer is changed by the application (which is especially the case for input
|
||||
* buffers) cache maintenance MUST be taken into account before being able to run the network.
|
||||
* @param[in] virtual_addr start address (host-side/virtual) of address range
|
||||
* @param[in] size size of address range
|
||||
*
|
||||
* @note this cache maintenance function only needs to be called for buffers which are NPU cacheable
|
||||
* @note the address range should fulfill alignment constraints with respect to the NPU cache line size
|
||||
* for both its `address` & `size` (to better correspond to what this operation will actually do)!
|
||||
* @note this function is intended to handle the case where a buffer is NPU cacheable and has been filled by-passing
|
||||
* the NPU cache and should be called BEFORE the buffer gets filled
|
||||
* @note the NPU cache provides only a "clean & invalidate range" (and not a - pure - "invalidate range") cache
|
||||
* maintenance function which will be called by "stai_ext_cache_npu_clean_invalidate_range()", therefore it is
|
||||
* even more important to call it BEFORE the buffer gets filled
|
||||
*/
|
||||
#if defined(LL_ATON_RT_RELOC) && defined(BUILD_AI_NETWORK_RELOC)
|
||||
void LL_ATON_Cache_NPU_Clean_Invalidate_Range(uintptr_t virtual_addr, uint32_t size);
|
||||
#else
|
||||
static inline void LL_ATON_Cache_NPU_Clean_Invalidate_Range(uintptr_t virtual_addr, uint32_t size)
|
||||
{
|
||||
/* NOTE: The ATON NPU cache does not provide a pure invalidate-range function, but only a clean-invalidate range
|
||||
function! One has to take this into account when using `stai_ext_cache_npu_clean_invalidate_range`. */
|
||||
LL_ATON_OSAL_LOCK_NPU_CACHE();
|
||||
npu_cache_clean_invalidate_range(ATON_LIB_VIRTUAL_TO_PHYSICAL_ADDR(virtual_addr),
|
||||
ATON_LIB_VIRTUAL_TO_PHYSICAL_ADDR(virtual_addr + size));
|
||||
LL_ATON_OSAL_UNLOCK_NPU_CACHE();
|
||||
}
|
||||
#endif
|
||||
|
||||
/**
|
||||
* @brief perform NPU cache invalidate maintenance operation.
|
||||
* @details The whole NPU cache is invalidated.
|
||||
*/
|
||||
#if defined(LL_ATON_RT_RELOC) && defined(BUILD_AI_NETWORK_RELOC)
|
||||
void LL_ATON_Cache_NPU_Invalidate(void);
|
||||
#else
|
||||
static inline void LL_ATON_Cache_NPU_Invalidate(void)
|
||||
{
|
||||
LL_ATON_OSAL_LOCK_NPU_CACHE();
|
||||
npu_cache_invalidate();
|
||||
LL_ATON_OSAL_UNLOCK_NPU_CACHE();
|
||||
}
|
||||
#endif
|
||||
|
||||
#else // (LL_ATON_PLATFORM != LL_ATON_PLAT_STM32N6)
|
||||
/* MCU */
|
||||
static inline void LL_ATON_Cache_MCU_Clean_Range(uintptr_t virtual_addr, uint32_t size)
|
||||
{
|
||||
}
|
||||
static inline void LL_ATON_Cache_MCU_Invalidate_Range(uintptr_t virtual_addr, uint32_t size)
|
||||
{
|
||||
}
|
||||
static inline void LL_ATON_Cache_MCU_Clean_Invalidate_Range(uintptr_t virtual_addr, uint32_t size)
|
||||
{
|
||||
}
|
||||
|
||||
/* NPU */
|
||||
static inline void LL_ATON_Cache_NPU_Clean_Range(uintptr_t virtual_addr, uint32_t size)
|
||||
{
|
||||
}
|
||||
static inline void LL_ATON_Cache_NPU_Clean_Invalidate_Range(uintptr_t virtual_addr, uint32_t size)
|
||||
{
|
||||
}
|
||||
#endif // (LL_ATON_PLATFORM != LL_ATON_PLAT_STM32N6)
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif // __LL_ATON_CACHES_H
|
||||
89
lib/stai/libstai/include/ll_aton_cipher.h
Normal file
@ -0,0 +1,89 @@
|
||||
/**
|
||||
******************************************************************************
|
||||
* @file ll_aton_cipher.h
|
||||
* @author SRA Artificial Intelligence & Embedded Architectures
|
||||
* @brief Header file of ATON LL module.
|
||||
******************************************************************************
|
||||
* @attention
|
||||
*
|
||||
* Copyright (c) 2024 STMicroelectronics.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This software is licensed under terms that can be found in the LICENSE file
|
||||
* in the root directory of this software component.
|
||||
* If no LICENSE file comes with this software, it is provided AS-IS.
|
||||
*
|
||||
******************************************************************************
|
||||
*/
|
||||
|
||||
#ifndef __LL_ATON_CIPHER_H
|
||||
#define __LL_ATON_CIPHER_H
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C"
|
||||
{
|
||||
#endif
|
||||
|
||||
#include <stdint.h>
|
||||
|
||||
/**
|
||||
* @brief Encryption configuration structure for Streaming Engines and Epoch Controller units
|
||||
*/
|
||||
typedef struct
|
||||
{
|
||||
unsigned int enable; /**< Enable/disable encryption (0, 1) */
|
||||
uint64_t encryption_id; /**< 43 bit Encryption ID */
|
||||
unsigned int rounds; /**< Number of encryption rounds: 0->12 rounds, 1->9 rounds */
|
||||
unsigned int key_sel; /**< Bus Interface encryption key selection (0, 1) */
|
||||
unsigned int increment; /**< Encryption ID increment rate: 0 -> no increment, <n> -> +1 every n frames */
|
||||
} LL_Streng_EncryptionTypedef;
|
||||
|
||||
typedef enum
|
||||
{
|
||||
CYPHER_CACHE_NONE = 0,
|
||||
CYPHER_CACHE_SRC,
|
||||
CYPHER_CACHE_DST,
|
||||
} CypherCacheSourceMask;
|
||||
|
||||
typedef enum
|
||||
{
|
||||
CYPHER_DISABLE_MASK = 0,
|
||||
CYPHER_SRC_MASK,
|
||||
CYPHER_DST_MASK,
|
||||
} CypherEnableMask;
|
||||
|
||||
/**
|
||||
* @brief Cyphering configuration structure for DmaCypher function
|
||||
*/
|
||||
|
||||
typedef struct
|
||||
{
|
||||
uint32_t srcAdd; /**< Transfer source address */
|
||||
uint32_t dstAdd; /**< Transfer destination address */
|
||||
uint32_t len; /**< Transfer size */
|
||||
CypherCacheSourceMask cypherCacheMask; /**< Cache usage mask:
|
||||
* 0-no cache
|
||||
* 1-cache source
|
||||
* 2-cache destination */
|
||||
CypherEnableMask cypherEnableMask; /**< Cyphering channel mask:
|
||||
* 0-no cypher
|
||||
* 1-cypher source
|
||||
* 2-cypher destination */
|
||||
uint64_t busIfKeyLsb; /**< Bus interface LSB Key */
|
||||
uint64_t busIfKeyMsb; /**< Bus interface MSB Key */
|
||||
} LL_Cypher_InitTypeDef;
|
||||
|
||||
#define CYPHER_SRC_STRENG_ID 0 /**< Stream engine used for source data in Dma/Cypher function */
|
||||
#define CYPHER_DST_STRENG_ID 1 /**< Stream engine used for destination data in Dma/Cypher function */
|
||||
#define CYPHER_CACHE_SIZE 0x40000 /**< N6 cache size */
|
||||
|
||||
int LL_Streng_EncryptionInit(int id, LL_Streng_EncryptionTypedef *conf);
|
||||
int LL_Streng_WeightEncryptionInit(int id);
|
||||
int LL_EpochCtrl_EncryptionInit(int id, LL_Streng_EncryptionTypedef *conf);
|
||||
int LL_DmaCypherInit(LL_Cypher_InitTypeDef *cypherInfo);
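An illustrative sketch (not part of this diff) of filling the encryption configuration documented above; every value, including the stream engine id, is a placeholder chosen for the example:
static int example_enable_stream_encryption(void)
{
  LL_Streng_EncryptionTypedef enc = {
      .enable = 1,        /* turn encryption on */
      .encryption_id = 1, /* 43-bit encryption ID (placeholder) */
      .rounds = 0,        /* 0 -> 12 rounds (see field comment above) */
      .key_sel = 0,       /* bus interface encryption key 0 */
      .increment = 0,     /* do not increment the ID across frames */
  };
  return LL_Streng_EncryptionInit(CYPHER_SRC_STRENG_ID, &enc);
}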
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif //__LL_ATON_CIPHER_H
|
||||
Some files were not shown because too many files have changed in this diff.