Commit b4bad33623 (mirror of https://github.com/openmv/openmv.git, synced 2025-11-04 14:49:50 +08:00)
@@ -1,34 +0,0 @@
/**
 ******************************************************************************
 * @file ai_common_config.h
 * @author AST Embedded Analytics Research Platform
 * @date 18-May-2018
 * @brief header file of AI platform common compile configuration defines
 ******************************************************************************
 * @attention
 *
 * <h2><center>© Copyright (c) 2018 STMicroelectronics.
 * All rights reserved.</center></h2>
 *
 * This software component is licensed by ST under Ultimate Liberty license
 * SLA0044, the "License"; You may not use this file except in compliance with
 * the License. You may obtain a copy of the License at:
 * www.st.com/SLA0044
 *
 ******************************************************************************
 */
#ifndef __AI_COMMON_CONFIG_H_
#define __AI_COMMON_CONFIG_H_
#pragma once

/*!
 * @defgroup layers Layers Compilation Config Definitions
 * @brief definition
 *
 */

#define HAS_PROFILE_FLOAT
#define HAS_PROFILE_FIXED


#endif /*__AI_COMMON_CONFIG_H_*/
@@ -1,152 +0,0 @@
/**
 ******************************************************************************
 * @file ai_datatypes_defines.h
 * @author AST Embedded Analytics Research Platform
 * @date 18-Oct-2017
 * @brief Definitions of AI platform private APIs types
 ******************************************************************************
 * @attention
 *
 * <h2><center>© Copyright (c) 2017 STMicroelectronics.
 * All rights reserved.</center></h2>
 *
 * This software component is licensed by ST under Ultimate Liberty license
 * SLA0044, the "License"; You may not use this file except in compliance with
 * the License. You may obtain a copy of the License at:
 * www.st.com/SLA0044
 *
 ******************************************************************************
 */

#ifndef __AI_DATATYPES_DEFINES_H__
#define __AI_DATATYPES_DEFINES_H__
#pragma once

#include "ai_platform.h"

/*!
 * @defgroup datatypes_defines Internal Datatypes Defines Header
 * @brief Data structures used internally to implement neural networks
 *
 */

/* define to track datatypes used by codegen */
#define AI_INTERFACE_TYPE           /* AI_INTERFACE_TYPE */

#define AI_INTERNAL_API             /* AI_INTERNAL_API */

#define AI_CONST                    const
#define AI_STATIC                   static
#define AI_STATIC_CONST             static const

/******************************************************************************/
/* NOP operation used by codegen */
#define AI_NOP                      /* NOP */

#define AI_WRAP_FUNC(fn_)           do { fn_ } while (0);

#define AI_CAT(a, ...)              AI_PRIMITIVE_CAT(a, __VA_ARGS__)
#define AI_PRIMITIVE_CAT(a, ...)    a ## __VA_ARGS__

/******************************************************************************/
#ifdef HAS_AI_ASSERT
  #include <assert.h>
  #define AI_ASSERT(cond) \
    { assert(cond); }
#else
  #define AI_ASSERT(cond) \
    AI_WRAP_FUNC(AI_NOP)
#endif  /*HAS_AI_ASSERT*/

/******************************************************************************/
#define AI_NO_PACKED_STRUCTS

/* Macro for defining packed structures (compiler dependent).
 * This just reduces memory requirements, but is not required.
 */
#if defined(AI_NO_PACKED_STRUCTS)
  /* Disable struct packing */
  #define AI_PACKED_STRUCT_START    /* AI_PACKED_STRUCT_START */
  #define AI_PACKED_STRUCT_END      /* AI_PACKED_STRUCT_END */
  #define AI_PACKED                 /* AI_PACKED */
#elif defined(__GNUC__) || defined(__clang__)
  /* For GCC and clang */
  #define AI_PACKED_STRUCT_START    /* AI_PACKED_STRUCT_START */
  #define AI_PACKED_STRUCT_END      /* AI_PACKED_STRUCT_END */
  #define AI_PACKED                 __attribute__((packed))
#elif defined(__ICCARM__) || defined (__IAR_SYSTEMS_ICC__) || defined(__CC_ARM)
  /* For IAR ARM and Keil MDK-ARM compilers */
  #define AI_PACKED_STRUCT_START    _Pragma("pack(push, 1)")
  #define AI_PACKED_STRUCT_END      _Pragma("pack(pop)")
  #define AI_PACKED                 /* AI_PACKED */
#elif defined(_MSC_VER) && (_MSC_VER >= 1500)
  /* For Microsoft Visual C++ */
  #define AI_PACKED_STRUCT_START    __pragma(pack(push, 1))
  #define AI_PACKED_STRUCT_END      __pragma(pack(pop))
  #define AI_PACKED                 /* AI_PACKED */
#else
  /* Unknown compiler */
  #define AI_PACKED_STRUCT_START    /* AI_PACKED_STRUCT_START */
  #define AI_PACKED_STRUCT_END      /* AI_PACKED_STRUCT_END */
  #define AI_PACKED                 /* AI_PACKED */
#endif  /* AI_NO_PACKED_STRUCTS */
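/* Usage sketch (illustrative only; the struct and field names below are
 * hypothetical, not part of this header). The packing macros wrap a struct
 * definition so that, on compilers that support it, no padding is inserted
 * between members:
 *
 *   AI_PACKED_STRUCT_START
 *   typedef struct AI_PACKED {
 *     ai_u8  tag;     // 1 byte
 *     ai_u32 value;   // 4 bytes; with packing enabled sizeof() is 5, not 8
 *   } example_packed_entry;
 *   AI_PACKED_STRUCT_END
 */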

/******************************************************************************/
#define AI_STRINGIFY_ARG(contents)        # contents
#define AI_STRINGIFY(macro_or_string)     AI_STRINGIFY_ARG (macro_or_string)

/******************************************************************************/
#if defined(_MSC_VER)
  #define AI_DECLARE_STATIC          static __inline
  #define AI_ALIGNED_TYPE(type, x)   type __declspec(align(x))
  #define AI_INTERFACE_ENTRY         __declspec(dllexport)
#elif defined(__ICCARM__) || defined (__IAR_SYSTEMS_ICC__)
  #define AI_DECLARE_STATIC          static inline
  #define AI_ALIGNED_TYPE(type, x)   type
  #define AI_INTERFACE_ENTRY         /* AI_INTERFACE_ENTRY */
#else /* _MSC_VER */
  #define AI_DECLARE_STATIC          static __inline
  #define AI_ALIGNED_TYPE(type, x)   type __attribute__ ((aligned(x)))
  #define AI_INTERFACE_ENTRY         __attribute__((visibility("default")))
#endif /* _MSC_VER */

/******************************************************************************/
#define AI_ALIGN_MASKED(value, mask)   ( ((value)+(mask))&(~(mask)) )

#define AI_GET_REVISION(major, minor, micro) ( \
  ((ai_u32)(major)<<24) | \
  ((ai_u32)(minor)<<16) | \
  ((ai_u32)(micro)<< 8) )

#define AI_GET_VERSION_STRING(major, minor, micro) \
  AI_STRINGIFY_ARG(major) "." \
  AI_STRINGIFY_ARG(minor) "." \
  AI_STRINGIFY_ARG(micro) \
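/* Worked example (illustrative): with major=1, minor=2, micro=0 the two macros
 * above expand to
 *   AI_GET_REVISION(1, 2, 0)       -> 0x01020000  (1<<24 | 2<<16 | 0<<8)
 *   AI_GET_VERSION_STRING(1, 2, 0) -> "1" "." "2" "." "0", i.e. "1.2.0"
 * after adjacent string literal concatenation.
 */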

#define AI_PACK(...) \
  __VA_ARGS__


#define AI_PACK_TENSORS_PTR(...) \
  AI_PACK(__VA_ARGS__)

#define AI_PACK_INFO(size_)  (ai_tensor_info[1]) { { \
  .buffer = (ai_buffer[size_])AI_STRUCT_INIT, \
  .state = (ai_tensor_state[size_])AI_STRUCT_INIT, \
} }

#define AI_CR                       "\r\n"

#if (defined HAS_AI_DEBUG || defined HAS_DEBUG_LIB)
  #define AI_DEBUG(...)             __VA_ARGS__
#else
  #define AI_DEBUG(...)             AI_WRAP_FUNC(AI_NOP)
#endif

#define AI_FLAG_NONE                (0x0)
#define AI_FLAG_SET(mask, flag)     (mask) |= (flag)
#define AI_FLAG_UNSET(mask, flag)   (mask) &= (~(flag))
#define AI_FLAG_IS_SET(mask, flag)  ( (flag)==((mask)&(flag)) )
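/* Usage sketch (illustrative; `flags` and the flag value 0x2 are hypothetical):
 *   ai_u32 flags = AI_FLAG_NONE;
 *   AI_FLAG_SET(flags, 0x2);                  // flags == 0x2
 *   if (AI_FLAG_IS_SET(flags, 0x2)) { ... }   // true
 *   AI_FLAG_UNSET(flags, 0x2);                // flags == AI_FLAG_NONE
 */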

#endif /*__AI_DATATYPES_DEFINES_H__*/
@@ -1,473 +0,0 @@
/**
 ******************************************************************************
 * @file ai_datatypes_format.h
 * @author AST Embedded Analytics Research Platform
 * @date 18-Oct-2017
 * @brief Definitions of AI platform private format handling routines
 ******************************************************************************
 * @attention
 *
 * <h2><center>© Copyright (c) 2017 STMicroelectronics.
 * All rights reserved.</center></h2>
 *
 * This software component is licensed by ST under Ultimate Liberty license
 * SLA0044, the "License"; You may not use this file except in compliance with
 * the License. You may obtain a copy of the License at:
 * www.st.com/SLA0044
 *
 ******************************************************************************
 */

#ifndef __AI_DATATYPES_FORMAT_H__
#define __AI_DATATYPES_FORMAT_H__
#pragma once

#include "ai_platform.h"
#include "ai_datatypes_defines.h"

#include "core_datatypes.h"

/*!
 * @defgroup ai_datatypes_format Definition and Macro of array and buffer formats
 * @brief Type definition and implementation of internal @ref ai_array and
 * @ref ai_buffer formats.
 * @details The library handles two different kinds of formats: an internal format
 * that is part of the @ref ai_array struct and is a packed 32-bit representation
 * of the format attributes, and a public format (used in public APIs) associated
 * with the @ref ai_buffer struct, defined as an enum in @ref ai_platform.h,
 * that is just an enum type. Converters are provided in this header file to
 * convert from one format representation to another.
 * Some MSB bits are reserved in both formats to code bit flags useful to
 * declare special attributes. Three flags are actually implemented in both
 * formats: the @ref AI_BUFFER_FMT_FLAG_CONST and @ref AI_FMT_FLAG_CONST used
 * to tag read-only memory buffers, @ref AI_BUFFER_FMT_FLAG_STATIC and
 * @ref AI_FMT_FLAG_STATIC to mark statically allocated memory buffers and
 * @ref AI_FMT_FLAG_SCRATCH_BUFFER to tag temporary scratch buffers.
 * All the formats are declared in a tuple-organized table header named
 * @ref formats_list.h that enumerates all the formats available for the library.
 * A new format can be added easily by adding a new FMT_ENTRY() as required.
 * The preprocessor automatically generates the code for the handling of the
 * format according to these tuple entries. A rationale for the methodology can
 * be found here:
 * - https://codecraft.co/2012/10/29/how-enums-spread-disease-and-how-to-cure-it/
 *
 * The 32-bit internal format fields are organized as follows:
 *
 *  MSB                                                                    LSB
 *  31            25  24      23      21       17        14       7        0
 * /---------------------------------------------------------------------------/
 * / ATTR. FLAGS    | FLOAT |  SIGN  |  LDIV  |  TYPE  |  PBITS  |  BITS | FBITS /
 * /---------------------------------------------------------------------------/
 * Where:
 * - FLAGS: reserved bits to store additional format attributes (e.g.
 *   I/O / STATIC flags, etc.)
 * - FLOAT: 1 bit marking the format as a floating point type
 * - SIGN : 1 bit marking the format as a signed type
 * - LDIV : 2 bits holding a log2 value that is used to compute the element size
 *   for some special formats such as the compressed ones. It is a shift
 *   factor, usually set to zero
 * - TYPE : 4 bits marking the format "family" type. Actually 5 families are coded,
 *   @ref AI_FMT_FLOAT (float types)
 *   @ref AI_FMT_Q (fixed-point types in Qm.n format)
 *   @ref AI_FMT_LUT4 (compressed lookup 16 formats)
 *   @ref AI_FMT_LUT8 (compressed lookup 256 formats)
 * - PBITS: 3 bits used to set the number of padding bits
 *   (per element) to handle special aligned formats. E.g. a 6 bit format
 *   where each element is stored byte aligned (8 bits) has 2 padding bits.
 *   Usually this is set to 0
 * - BITS : 7 bits setting the total number of bits of the element, padding bits
 *   excluded. The bits are thus = sign bit + fractional bits + integer bits.
 *   The number of integer bits can thus be obtained using the @ref
 *   AI_FMT_GET_IBITS() macro.
 * - FBITS: 7 bits setting the number of fractional bits in the format
 *
 *
 * A reference code snippet for usage is the test unit that uses this header:
 *
 * \include test/test_lcut_formats.cpp
 *
 */
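/* Worked example (illustrative): a signed 8-bit Q7 element (s.7, no padding)
 * would be encoded with FLOAT=0, SIGN=1, LDIV=0, TYPE=AI_FMT_Q, PBITS=0,
 * BITS=8, FBITS=7. Using the field positions and biases defined below:
 *   SIGN  -> 1<<23      = 0x00800000
 *   TYPE  -> 0x2<<17    = 0x00040000
 *   BITS  -> (8+0)<<7   = 0x00000400
 *   FBITS -> (7+64)<<0  = 0x00000047   (FBITS is stored with a +64 bias)
 * giving the packed value 0x00840447, for which AI_FMT_GET_IBITS() yields
 * 8 - 7 - 1 = 0 integer bits.
 */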

/*!
 * Format bitfields definition. NOTE: the 7 MSB are masked off
 * for (optional) attribute setting using flags. See @ref AI_FMT_FLAG_CONST,
 * which is used for marking data as constant / read-only
 */

/* 1 bit field to identify floating point values */
#define _FMT_FLOAT_MASK       (0x1)
#define _FMT_FLOAT_BITS       (24)

/*! 1 bit sign info */
#define _FMT_SIGN_MASK        (0x1)
#define _FMT_SIGN_BITS        (23)

/*! fractional bits field (i.e. for Q formats see @ref AI_FMT_Q) */
#define _FMT_FBITS_MASK       (0x7F)
#define _FMT_FBITS_BITS       (0)
#define _FMT_FBITS_BIAS       ((_FMT_FBITS_MASK+1) >> 1)

/*! TOTAL number of bits (fractional+integer+sign) (excluded padding ones) */
#define _FMT_BITS_MASK        (0x7F)
#define _FMT_BITS_BITS        (7)
#define _FMT_BITS_BIAS        (0)

/*! Padding bits for handling formats not aligned to multiples of 8 bits */
#define _FMT_PBITS_MASK       (0x7)
#define _FMT_PBITS_BITS       (14)

/*! bits reserved for identifying the family format, e.g. float, fixed-point..*/
#define _FMT_TYPE_MASK        (0xF)
#define _FMT_TYPE_BITS        (17)

#define _FMT_LDIV_MASK        (0x3)
#define _FMT_LDIV_BITS        (21)


/******************************************************************************/
#define AI_FMT_OBJ(fmt_)      ((ai_array_format)(fmt_))

/*!
 * Only 25 LSB bits are used for storing actual format bits. 7 bits are reserved
 * for format attributes, see @ref AI_FMT_FLAG_CONST flag
 */
#define AI_FMT_FLAG_BITS            (25)
#define AI_FMT_MASK                 ((0x1<<AI_FMT_FLAG_BITS)-1)

#define AI_FMT_FLAG_CONST           (0x1<<30)
#define AI_FMT_FLAG_STATIC          (0x1<<29)
#define AI_FMT_FLAG_SCRATCH_BUFFER  (0x1<<28)
#define AI_FMT_FLAG_IS_IO           (0x1<<27)
#define AI_FMT_FLAG_VISITED         (0x1<<26)

/******************************************************************************/
/*!
 * Format "Class" type : this identifies the family of the format:
 * float, integer, fixed point (i.e. Q format), compressed via lookup table
 */
#define AI_FMT_NONE                 (0x0)
#define AI_FMT_FLOAT                (0x1)
#define AI_FMT_Q                    (0x2)
#define AI_FMT_LUT4                 (0x4)
#define AI_FMT_LUT8                 (0x8)

#define AI_FMT_QMASK \
  ( (_FMT_FBITS_MASK<<_FMT_FBITS_BITS) | (_FMT_BITS_MASK<<_FMT_BITS_BITS) )

#define AI_FMT_GET(val_) \
  ( (AI_FMT_OBJ(val_)) & AI_FMT_MASK )

#define AI_FMT_MASK_Q(val_) \
  ( AI_FMT_OBJ(val_) & (~(AI_FMT_QMASK)) )

#define AI_FMT_GET_Q(val_) \
  ( AI_FMT_MASK_Q(val_) | AI_FMT_SET_BITS(0) | AI_FMT_SET_FBITS(0) )

#define AI_FMT_GET_FLAGS(val_) \
  ( ((AI_FMT_OBJ(val_)) & (~AI_FMT_MASK)) >> AI_FMT_FLAG_BITS )

#define AI_FMT_SAME(fmt1_, fmt2_) \
  ( AI_FMT_GET(fmt1_) == AI_FMT_GET(fmt2_) )

#define _FMT_SET(val, mask, bits)   AI_FMT_OBJ(((val)&(mask))<<(bits))
#define _FMT_GET(fmt, mask, bits)   ((AI_FMT_OBJ(fmt)>>(bits))&(mask))

#define AI_FMT_SET_FLOAT(val)       _FMT_SET(val, _FMT_FLOAT_MASK, _FMT_FLOAT_BITS)
#define AI_FMT_GET_FLOAT(fmt)       _FMT_GET(fmt, _FMT_FLOAT_MASK, _FMT_FLOAT_BITS)
#define AI_FMT_SET_SIGN(val)        _FMT_SET(val, _FMT_SIGN_MASK, _FMT_SIGN_BITS)
#define AI_FMT_GET_SIGN(fmt)        _FMT_GET(fmt, _FMT_SIGN_MASK, _FMT_SIGN_BITS)
#define AI_FMT_SET_PBITS(val)       _FMT_SET(val, _FMT_PBITS_MASK, _FMT_PBITS_BITS)
#define AI_FMT_GET_PBITS(fmt)       _FMT_GET(fmt, _FMT_PBITS_MASK, _FMT_PBITS_BITS)
#define AI_FMT_SET_TYPE(val)        _FMT_SET(val, _FMT_TYPE_MASK, _FMT_TYPE_BITS)
#define AI_FMT_GET_TYPE(fmt)        _FMT_GET(fmt, _FMT_TYPE_MASK, _FMT_TYPE_BITS)
#define AI_FMT_SET_LDIV(val)        _FMT_SET(val, _FMT_LDIV_MASK, _FMT_LDIV_BITS)
#define AI_FMT_GET_LDIV(fmt)        _FMT_GET(fmt, _FMT_LDIV_MASK, _FMT_LDIV_BITS)

#define AI_FMT_SET_BITS(val) \
  _FMT_SET((val) + _FMT_BITS_BIAS, _FMT_BITS_MASK, _FMT_BITS_BITS)
#define AI_FMT_GET_BITS(fmt) \
  ((ai_i8)_FMT_GET(fmt, _FMT_BITS_MASK, _FMT_BITS_BITS) - _FMT_BITS_BIAS)
#define AI_FMT_SET_FBITS(val) \
  _FMT_SET((val) + _FMT_FBITS_BIAS, _FMT_FBITS_MASK, _FMT_FBITS_BITS)
#define AI_FMT_GET_FBITS(fmt) \
  ((ai_i8)_FMT_GET(fmt, _FMT_FBITS_MASK, _FMT_FBITS_BITS) - _FMT_FBITS_BIAS)

/*!
 * The total number of bits for a given format is supposed to be the sum of the
 * bits + padding bits. This means that the number of integer bits is derived
 * as follows: int_bits = bits - fbits (fractional bits) - 1 (for the sign)
 */
#define AI_FMT_GET_BITS_SIZE(fmt_) \
  (AI_FMT_GET_BITS(fmt_)+AI_FMT_GET_PBITS(fmt_))

/*! Macro used to compute the integer bits for a format */
#define AI_FMT_GET_IBITS(fmt_) \
  ((ai_i16)AI_FMT_GET_BITS(fmt_)-AI_FMT_GET_FBITS(fmt_)-AI_FMT_GET_SIGN(fmt_))
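/* Sketch (illustrative): composing and inspecting a format word with the
 * setter/getter macros above, e.g. for an unsigned 8-bit Q format with 0
 * fractional bits:
 *   ai_array_format f = AI_FMT_SET_TYPE(AI_FMT_Q) | AI_FMT_SET_SIGN(0) |
 *                       AI_FMT_SET_BITS(8) | AI_FMT_SET_FBITS(0);
 *   AI_FMT_GET_BITS(f)   -> 8
 *   AI_FMT_GET_FBITS(f)  -> 0
 *   AI_FMT_GET_IBITS(f)  -> 8   (no sign bit, no fractional bits)
 */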

/*! ai_buffer format handlers section *****************************************/

#define AI_BUFFER_FMT_MASK_Q(fmt_) \
  ( AI_BUFFER_FMT_OBJ(fmt_) & 0xFFFFC000 )

#define AI_BUFFER_FMT_GET_Q(fmt_) \
  ( AI_BUFFER_FMT_MASK_Q(fmt_) | AI_BUFFER_FMT_SET_FBITS(0) | \
    AI_BUFFER_FMT_SET_FBITS(0) )

#define AI_BUFFER_FMT_SET_Q(bits_, fbits_) \
  AI_BUFFER_FMT_SET(AI_BUFFER_FMT_TYPE_Q, 1, 0, bits_, fbits_)

#define AI_BUFFER_FMT_IS_Q(fmt_) \
  ( (AI_BUFFER_FMT_TYPE_Q==AI_BUFFER_FMT_GET_TYPE(fmt_)) && \
    (1==AI_BUFFER_FMT_GET_SIGN(fmt_)) )

#define AI_BUFFER_FMT_SET_UQ(bits_, fbits_) \
  AI_BUFFER_FMT_SET(AI_BUFFER_FMT_TYPE_Q, 0, 0, bits_, fbits_)

#define AI_BUFFER_FMT_IS_UQ(fmt_) \
  ( (AI_BUFFER_FMT_TYPE_Q==AI_BUFFER_FMT_GET_TYPE(fmt_)) && \
    (0==AI_BUFFER_FMT_GET_SIGN(fmt_)) )

/*! Q ai_array format handlers ************************************************/
#define AI_ARRAY_FMT_SET_Q(bits_, fbits_) \
  ( AI_FMT_MASK_Q(AI_ARRAY_FORMAT_Q) | AI_FMT_SET_BITS(bits_) | AI_FMT_SET_FBITS(fbits_) )

#define AI_ARRAY_FMT_IS_Q(fmt_) \
  ( AI_FMT_GET(AI_FMT_MASK_Q(AI_ARRAY_FORMAT_Q))==AI_FMT_GET(AI_FMT_MASK_Q(fmt_)) )

#define AI_ARRAY_FMT_SET_UQ(bits_, fbits_) \
  ( AI_FMT_MASK_Q(AI_ARRAY_FORMAT_UQ) | AI_FMT_SET_BITS(bits_) | AI_FMT_SET_FBITS(fbits_) )

#define AI_ARRAY_FMT_IS_UQ(fmt_) \
  ( AI_FMT_GET(AI_FMT_MASK_Q(AI_ARRAY_FORMAT_UQ))==AI_FMT_GET(AI_FMT_MASK_Q(fmt_)) )

AI_DEPRECATED
/* Alias for AI_ARRAY_FMT_SET_Q */
#define AI_ARRAY_FMT_SET_SQ(bits_, fbits_) \
  AI_ARRAY_FMT_SET_Q(bits_, fbits_)

AI_DEPRECATED
/* Alias for AI_ARRAY_FMT_IS_Q */
#define AI_ARRAY_FMT_IS_SQ(fmt_) \
  AI_ARRAY_FMT_IS_Q(fmt_)

/*! ai_array section **********************************************************/
#define AI_ARRAY_FMT_ENTRY(name_) \
  AI_CONCAT(AI_ARRAY_FORMAT_, name_)

#define AI_ARRAY_FMT_NAME(fmt_) \
  ai_array_fmt_name(fmt_)

#define AI_ARRAY_FMT_VALID(fmt_) \
  ai_array_fmt_valid(fmt_)

#define AI_ARRAY_FMT_EXPORTED(fmt_) \
  ai_array_fmt_exported(fmt_)

#define AI_ARRAY_FMT_GET_FORMATS(formats_) \
  ai_array_fmt_get_formats(formats_)

#define AI_ARRAY_TO_BUFFER_FMT(fmt_) \
  ai_array_to_buffer_fmt(fmt_)

#define AI_ARRAY_GET_BYTE_SIZE(fmt_, count_) \
  ai_array_get_byte_size(fmt_, count_)

#define AI_ARRAY_GET_DATA_BYTE_SIZE(fmt_, count_) \
  ai_array_get_data_byte_size(fmt_, count_)

#define AI_ARRAY_GET_ELEMS_FROM_SIZE(fmt_, size_) \
  ai_array_get_elems_from_size(fmt_, size_)


AI_API_DECLARE_BEGIN

/*!
 * @typedef ai_array_format
 * @ingroup ai_datatypes_format
 * @brief Generic Data Format Specifier for @ref ai_array (32bits packed info)
 */
typedef int32_t ai_array_format;

/*!
 * @enum internal data format enums
 * @ingroup ai_datatypes_format
 * @brief Generic Data Format Specifier (32bits packed info)
 */
typedef enum {
#define FMT_ENTRY(exp_, name_, type_id_, sign_bit_, float_bit_, \
  pbits_, bits_, fbits_, ldiv_bits_) \
  AI_ARRAY_FMT_ENTRY(name_) = (AI_FMT_SET_FLOAT(float_bit_) | \
                               AI_FMT_SET_SIGN(sign_bit_) | \
                               AI_FMT_SET_BITS(bits_) | \
                               AI_FMT_SET_FBITS(fbits_) | \
                               AI_FMT_SET_PBITS(pbits_) | \
                               AI_FMT_SET_TYPE(type_id_) | \
                               AI_FMT_SET_LDIV(ldiv_bits_)),
#include "formats_list.h"
} ai_array_format_entry;

/*!
 * @brief Get a human readable string from the format ID value
 * @ingroup ai_datatypes_format
 * @param[in] type the @ref ai_array_format to print out
 * @return a string with a human readable name of the format
 */
AI_INTERNAL_API
const char* ai_array_fmt_name(const ai_array_format type);

/*!
 * @brief Check if @ref ai_array_format is exportable to an @ref ai_buffer_format
 * @ingroup ai_datatypes_format
 * @param[in] type the ai_array_format to check
 * @return true if the format is exported, false otherwise
 */
AI_INTERNAL_API
ai_bool ai_array_fmt_exported(const ai_array_format type);

/*!
 * @brief Check if @ref ai_array_format is a valid format present in the list of
 * supported formats
 * @ingroup ai_datatypes_format
 * @param[in] type the ai_array_format to check
 * @return true if the format is valid, false otherwise
 */
AI_INTERNAL_API
ai_bool ai_array_fmt_valid(const ai_array_format type);

/*!
 * @brief Get the complete list of supported @ref ai_array_format formats
 * @ingroup ai_datatypes_format
 * @param[out] formats a pointer to an array with all supported formats listed
 * @return the number of supported formats
 */
AI_INTERNAL_API
ai_size ai_array_fmt_get_formats(const ai_array_format** formats);

/*! ai_buffer section *********************************************************
 * Only 25 LSB bits are used for storing actual format bits. 7 bits are reserved
 * for format attributes, see @ref AI_FMT_FLAG_CONST flag
 */

#define AI_BUFFER_FMT_ENTRY(name_) \
  AI_CONCAT(AI_BUFFER_FORMAT_, name_)

#define AI_BUFFER_FMT_NAME(type_) \
  ai_buffer_fmt_name(type_)

#define AI_BUFFER_FMT_VALID(type_) \
  ai_buffer_fmt_valid(type_)

#define AI_BUFFER_FMT_GET_FORMATS(formats_) \
  ai_buffer_fmt_get_formats(formats_)

#define AI_BUFFER_TO_ARRAY_FMT(fmt_) \
  ai_buffer_to_array_fmt(fmt_)

#define AI_BUFFER_GET_BITS_SIZE(fmt) \
  AI_ARRAY_GET_BITS_SIZE(AI_BUFFER_TO_ARRAY_FMT(fmt))


/*!
 * @brief Get a human readable string from the format ID value
 * @ingroup ai_datatypes_format
 * @param[in] type the @ref ai_buffer_format to print out
 * @return a string with a human readable name of the format
 */
AI_INTERNAL_API
const char* ai_buffer_fmt_name(
  const ai_buffer_format type);

/*!
 * @brief Check if @ref ai_buffer_format is a valid format present in the list
 * of supported formats
 * @ingroup ai_datatypes_format
 * @param[in] type the @ref ai_buffer_format to check
 * @return true if the format is valid, false otherwise
 */
AI_INTERNAL_API
ai_bool ai_buffer_fmt_valid(
  const ai_buffer_format type);

/*!
 * @brief Get the complete list of supported @ref ai_buffer_format formats
 * @ingroup ai_datatypes_format
 * @param[out] formats a pointer to an array with all supported formats listed
 * @return the number of supported formats
 */
AI_INTERNAL_API
ai_size ai_buffer_fmt_get_formats(
  const ai_buffer_format** formats);

/*! Conversions section *******************************************************/
/*!
 * @brief Convert from ai_array_format to ai_buffer_format.
 * @ingroup ai_datatypes_format
 * @param fmt the input ai_array_format to convert
 * @return the converted format as a ai_buffer_format
 */
AI_INTERNAL_API
ai_buffer_format ai_array_to_buffer_fmt(
  const ai_array_format fmt);

/*!
 * @brief Convert from ai_buffer_format to ai_array_format.
 * @ingroup ai_datatypes_format
 * @param fmt the input ai_buffer_format to convert
 * @return the converted format as a ai_array_format
 */
AI_INTERNAL_API
ai_array_format ai_buffer_to_array_fmt(
  const ai_buffer_format fmt);

/** helpers section ***********************************************************/
/*!
 * @brief Computes the size in bytes given an ai_array_format and number of
 * array elements.
 * @details This routine computes from the number of elements of the array its
 * size in bytes. If the array is referred by a tensor structure, it is the task
 * of the latter to handle per-dimension padding (e.g. to align odd rows in a
 * 4-bit matrix). At array level the padding elements MUST be included in the
 * number of elements.
 * @ingroup ai_datatypes_format
 * @param[in] fmt the input array format as an ai_array_format
 * @param[in] count the number of elements stored in the data array
 * @return the size in bytes of the array given the specific format and number
 * of elements (including padding elements)
 */
AI_INTERNAL_API
ai_size ai_array_get_byte_size(
  const ai_array_format fmt, const ai_size count);

/*!
 * @brief Computes the size in bytes given an ai_array_format and number of
 * array elements of the data fields (e.g. LUT table size excluded).
 * @details This routine computes from the number of elements of the array its
 * size in bytes. If the array is referred by a tensor structure, it is the task
 * of the latter to handle per-dimension padding (e.g. to align odd rows in a
 * 4-bit matrix). At array level the padding elements MUST be included in the
 * number of elements.
 * @ingroup ai_datatypes_format
 * @param[in] fmt the input array format as an ai_array_format
 * @param[in] count the number of elements stored in the data array
 * @return the size in bytes of the array given the specific format and number
 * of elements (including padding elements)
 */
AI_INTERNAL_API
ai_size ai_array_get_data_byte_size(
  const ai_array_format fmt, const ai_size count);

/*!
 * @brief Computes the number of elements from ai_array_format and
 * the size in bytes of the array.
 * @ingroup ai_datatypes_format
 * @param fmt the input array format as an ai_array_format
 * @param byte_size the size in bytes of the array
 * @return the number of elements that could be stored given the format
 */
AI_INTERNAL_API
ai_size ai_array_get_elems_from_size(
  const ai_array_format fmt, const ai_size byte_size);

AI_API_DECLARE_END

#endif /*__AI_DATATYPES_FORMAT_H__*/
@@ -1,379 +0,0 @@
/**
 ******************************************************************************
 * @file ai_datatypes_internal.h
 * @author AST Embedded Analytics Research Platform
 * @date 01-May-2017
 * @brief Definitions of AI platform private APIs types
 ******************************************************************************
 * @attention
 *
 * <h2><center>© Copyright (c) 2017 STMicroelectronics.
 * All rights reserved.</center></h2>
 *
 * This software component is licensed by ST under Ultimate Liberty license
 * SLA0044, the "License"; You may not use this file except in compliance with
 * the License. You may obtain a copy of the License at:
 * www.st.com/SLA0044
 *
 ******************************************************************************
 */

#ifndef __AI_DATATYPES_INTERNAL_H__
#define __AI_DATATYPES_INTERNAL_H__
#pragma once

#include <string.h>
#include "ai_platform.h"
#include "ai_platform_interface.h"


/*!
 * @defgroup datatypes_internal Internal Datatypes
 * @brief Data structures used internally to implement neural networks
 *
 * The layers are defined as structs; a generic layer type defines the basic
 * layer parameters and type-specific parameters are handled by specializations
 * implemented as a C union. The layers also keep a pointer to the parent
 * network and the next layer in the network.
 * The input, output and parameters are tensors with a hard-coded maximum
 * dimension of 4. Tensors are floating point arrays with a notion of size.
 * The network is a linked list of layers, and thus it stores only the pointer
 * to the first layer.
 */

/*!
 * @section Offsets
 * @ingroup datatypes_internal
 * Macros to handle (byte) stride addressing on tensors. The `AI_PTR` macro
 * is always used to cast a pointer to a byte array. The `AI_OFFSET_X` macros
 * are used to compute (byte) offsets of, respectively, adjacent row elements,
 * col elements, channel elements and `channel_in` elements.
 * @{
 */

/** Count Variable Number of Arguments (up to 64 elements) ********************/
#define AI_NUMARGS(...) \
  PP_NARG_(__VA_ARGS__,PP_RSEQ_N())
#define PP_NARG_(...) \
  PP_ARG_N(__VA_ARGS__)
#define PP_ARG_N( \
  _1, _2, _3, _4, _5, _6, _7, _8, _9,_10, \
  _11,_12,_13,_14,_15,_16,_17,_18,_19,_20, \
  _21,_22,_23,_24,_25,_26,_27,_28,_29,_30, \
  _31,_32,_33,_34,_35,_36,_37,_38,_39,_40, \
  _41,_42,_43,_44,_45,_46,_47,_48,_49,_50, \
  _51,_52,_53,_54,_55,_56,_57,_58,_59,_60, \
  _61,_62,_63,N,...) N
#define PP_RSEQ_N() \
  63,62,61,60, \
  59,58,57,56,55,54,53,52,51,50, \
  49,48,47,46,45,44,43,42,41,40, \
  39,38,37,36,35,34,33,32,31,30, \
  29,28,27,26,25,24,23,22,21,20, \
  19,18,17,16,15,14,13,12,11,10, \
  9,8,7,6,5,4,3,2,1,0
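/* Worked example (illustrative): the PP_NARG construction above counts its
 * arguments by shifting the descending sequence produced by PP_RSEQ_N(), so
 *   AI_NUMARGS(a)        -> 1
 *   AI_NUMARGS(a, b, c)  -> 3
 */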

#define AI_PTR_ALIGN(ptr, alignment) \
  ( (((ai_uptr)(ptr))+((ai_uptr)(alignment)-1))&(~((ai_uptr)(alignment)-1)) )
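/* Worked example (illustrative): with a 4-byte alignment,
 *   AI_PTR_ALIGN(0x2001, 4) -> (0x2001 + 3) & ~3 = 0x2004
 *   AI_PTR_ALIGN(0x2004, 4) -> 0x2004 (already aligned)
 */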


/*! AI_STORAGE_KLASS SECTION ************************************/
#define AI_STORAGE_KLASS_TYPE(s_) \
  ( (s_)->type )

#define AI_STORAGE_KLASS_SIZE(s_) \
  ( (s_)->size )

#define AI_STORAGE_KLASS_DATA(s_, type_) \
  ( (type_*)((s_)->data) )

#define AI_STORAGE_KLASS_COPY(dst_, dst_type_, src_, src_type_) \
{ \
  AI_ASSERT(AI_STORAGE_KLASS_SIZE(src_)>=AI_STORAGE_KLASS_SIZE(dst_)) \
  AI_STORAGE_KLASS_SIZE(dst_) = AI_STORAGE_KLASS_SIZE(src_); \
  for (ai_size i=0; i<AI_STORAGE_KLASS_SIZE(dst_); i++ ) { \
    AI_STORAGE_KLASS_DATA(dst_, dst_type_)[i] = \
      AI_STORAGE_KLASS_DATA(src_, src_type_)[i]; \
  } \
}

#define AI_STORAGE_KLASS_DUMP(s_, pfx_, post_, fmt_, type_) \
{ \
  AI_ASSERT(s_) \
  printf(pfx_, AI_STORAGE_KLASS_SIZE(s_)); \
  for ( ai_u32 i=0; i<AI_STORAGE_KLASS_SIZE(s_); i++ ) { \
    if ( (i % 8)==0 ) printf("\n "); \
    printf(fmt_, AI_STORAGE_KLASS_DATA(s_, type_)[i]); \
  } \
  printf(post_); \
}

/*! AI_SHAPES SECTION ************************************/
#define AI_SHAPE_2D_H(shape_) \
  AI_SHAPE_ELEM(shape_, AI_SHAPE_2D_HEIGHT)

#define AI_SHAPE_2D_W(shape_) \
  AI_SHAPE_ELEM(shape_, AI_SHAPE_2D_WIDTH)

#define AI_SHAPE_ELEM(shape_, pos_) \
  AI_STORAGE_KLASS_DATA(shape_, ai_shape_dimension)[pos_]

#define AI_SHAPE_SIZE(shape_) \
  AI_STORAGE_KLASS_SIZE(shape_)

#define AI_SHAPE_CLONE(dst_, src_) \
  AI_STORAGE_KLASS_COPY(dst_, ai_shape_dimension, src_, ai_shape_dimension)

//#define AI_SHAPE_BATCH(shape_)  AI_SHAPE_ELEM((shape_), AI_SHAPE_BATCH_CHANNEL)
#define AI_SHAPE_H(shape_)       AI_SHAPE_ELEM((shape_), AI_SHAPE_HEIGHT)
#define AI_SHAPE_W(shape_)       AI_SHAPE_ELEM((shape_), AI_SHAPE_WIDTH)
#define AI_SHAPE_CH(shape_)      AI_SHAPE_ELEM((shape_), AI_SHAPE_CHANNEL)
#define AI_SHAPE_IN_CH(shape_)   AI_SHAPE_ELEM((shape_), AI_SHAPE_IN_CHANNEL)

#define AI_CONV_SHAPE_H          AI_SHAPE_W
#define AI_CONV_SHAPE_W          AI_SHAPE_CH
#define AI_CONV_SHAPE_CH         AI_SHAPE_H
#define AI_CONV_SHAPE_IN_CH      AI_SHAPE_IN_CH

/*! AI_STRIDES SECTION ***********************************/
#define AI_STRIDE_2D_H(stride_) \
  AI_STRIDE_ELEM((stride_), AI_SHAPE_2D_HEIGHT)

#define AI_STRIDE_2D_W(stride_) \
  AI_STRIDE_ELEM((stride_), AI_SHAPE_2D_WIDTH)

#define AI_STRIDE_ELEM(stride_, pos_) \
  AI_STORAGE_KLASS_DATA(stride_, ai_stride_dimension)[pos_]

#define AI_STRIDE_SIZE(stride_) \
  AI_STORAGE_KLASS_SIZE(stride_)

#define AI_STRIDE_CLONE(dst_, src_) \
  AI_STORAGE_KLASS_COPY(dst_, ai_stride_dimension, src_, ai_stride_dimension)

//#define AI_STRIDE_BATCH(stride)  AI_STRIDE_ELEM((stride), AI_SHAPE_BATCH_CHANNEL)
#define AI_STRIDE_H(stride)      AI_STRIDE_ELEM((stride), AI_SHAPE_HEIGHT)
#define AI_STRIDE_W(stride)      AI_STRIDE_ELEM((stride), AI_SHAPE_WIDTH)
#define AI_STRIDE_CH(stride)     AI_STRIDE_ELEM((stride), AI_SHAPE_CHANNEL)
#define AI_STRIDE_IN_CH(stride)  AI_STRIDE_ELEM((stride), AI_SHAPE_IN_CHANNEL)

/*! AI_TENSORS SECTION ***********************************/
#define AI_TENSOR_KLASS(tensor_) \
  ( (tensor_) ? (tensor_)->klass : NULL )

#define AI_TENSOR_SHAPE(tensor_) \
  ( &((tensor_)->shape) )

#define AI_TENSOR_STRIDE(tensor_) \
  ( &((tensor_)->stride) )

#define AI_TENSOR_INFO(tensor_) \
  ( &((tensor_)->info) )

#define AI_TENSOR_DATA(tensor_) \
  ( (tensor_) ? (tensor_)->data : NULL )

#define AI_TENSOR_ID(tensor_) \
  ( (tensor_) ? AI_TENSOR_INFO(tensor_)->id : 0 )

#define AI_TENSOR_FLAGS(tensor_) \
  ( (tensor_) ? AI_TENSOR_INFO(tensor_)->flags : 0 )

#define AI_TENSOR_DATA_SIZE(tensor_) \
  ( (tensor_) ? AI_TENSOR_INFO(tensor_)->data_size : 0 )

/*! AI_OFFSETS SECTION ***********************************/
//#define AI_OFFSET_BATCH(b, stride)  ((ai_ptr_offset)(b)  * AI_STRIDE_BATCH(stride))
#define AI_OFFSET_H(y, stride)      ((ai_ptr_offset)(y)  * AI_STRIDE_H(stride))
#define AI_OFFSET_W(x, stride)      ((ai_ptr_offset)(x)  * AI_STRIDE_W(stride))
#define AI_OFFSET_CH(ch, stride)    ((ai_ptr_offset)(ch) * AI_STRIDE_CH(stride))
#define AI_OFFSET_IN_CH(ch, stride) ((ai_ptr_offset)(ch) * \
                                      AI_STRIDE_IN_CH(stride))
#define AI_OFFSET(y, x, ch, in_ch, stride) ( \
  AI_OFFSET_H((y), (stride)) + AI_OFFSET_W((x), (stride)) + \
  AI_OFFSET_CH((ch), (stride)) + AI_OFFSET_IN_CH((in_ch), (stride)) )
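/* Usage sketch (illustrative; `t`, `y`, `x`, `ch` are hypothetical): the byte
 * address of one tensor element is its base data pointer plus the
 * stride-weighted offsets, e.g.
 *   ai_ptr p = (ai_ptr)AI_TENSOR_DATA(t) +
 *              AI_OFFSET(y, x, ch, 0, AI_TENSOR_STRIDE(t));
 */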

/*! @} */

#define AI_GET_CONV_OUT_SIZE(in_size, filt_size, pad_l, pad_r, filt_stride) \
  ((((in_size) - (filt_size) + (pad_l) + (pad_r)) / (filt_stride)) + 1)
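/* Worked example (illustrative): a 1-D convolution over 32 input samples with
 * a 3-tap filter, padding 1 on each side and stride 2:
 *   AI_GET_CONV_OUT_SIZE(32, 3, 1, 1, 2) -> ((32 - 3 + 1 + 1) / 2) + 1 = 16
 */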


/** Tensors datatypes defines handlers ****************************************/
#define AI_TENSOR_SIZE(tensor_) \
  ( AI_SHAPE_H(AI_TENSOR_SHAPE(tensor_)) * AI_SHAPE_W(AI_TENSOR_SHAPE(tensor_)) * \
    AI_SHAPE_CH(AI_TENSOR_SHAPE(tensor_)) * AI_SHAPE_IN_CH(AI_TENSOR_SHAPE(tensor_)) )

#define AI_TENSOR_BYTE_SIZE(tensor_) \
  ( AI_SHAPE_H(AI_TENSOR_SHAPE(tensor_)) * AI_STRIDE_H(AI_TENSOR_STRIDE(tensor_)) )

/******************************************************************************/

/** Integer tensor info extraction ********************************************/
#define AI_INTQ_INFO_LIST_SCALE_ARRAY(list_, type_) \
  ( ((list_) && (list_)->info) \
    ? ((type_*)((list_)->info->scale)) : NULL )

#define AI_INTQ_INFO_LIST_ZEROPOINT_ARRAY(list_, type_) \
  ( ((list_) && (list_)->info) \
    ? ((type_*)((list_)->info->zeropoint)) : NULL )

#define AI_KLASS_GET_INTQ_INFO_LIST(tensor_) \
  ((ai_intq_info_list*)((tensor_)->klass))


AI_API_DECLARE_BEGIN

/*!
 * @typedef ai_offset
 * @ingroup ai_datatypes_internal
 * @brief Generic index offset type
 */
typedef int32_t ai_offset;

/*!
 * @typedef ai_vec4_float
 * @ingroup ai_datatypes_internal
 * @brief 32bit X 4 float (optimization for embedded MCU)
 */
typedef struct _ai_vec4_float {
  ai_float a1;
  ai_float a2;
  ai_float a3;
  ai_float a4;
} ai_vec4_float;

#define AI_VEC4_FLOAT(ptr_) \
  _get_vec4_float((ai_handle)(ptr_))

AI_DECLARE_STATIC
ai_vec4_float _get_vec4_float(const ai_handle fptr)
{
  return *((const ai_vec4_float*)fptr);
}

/*!
 * @typedef (*func_copy_tensor)
 * @ingroup datatypes_internal
 * @brief Function pointer for generic tensor copy routines;
 * this function pointer abstracts a generic tensor copy routine.
 */
typedef ai_bool (*func_copy_tensor)(ai_tensor* dst, const ai_tensor* src);

/*!
 * @brief Check whether 2 shapes have identical dimensions.
 * @ingroup datatypes_internal
 * @param shape0 the 1st tensor shape to compare
 * @param shape1 the 2nd tensor shape to compare
 * @return true if shape0 and shape1 have same dimensions. false otherwise
 */
AI_DECLARE_STATIC
ai_bool ai_shape_is_same(
  const ai_shape* shape0, const ai_shape* shape1)
{
  AI_ASSERT(shape0 && shape1)
  AI_ASSERT(AI_SHAPE_SIZE(shape0)==AI_SHAPE_SIZE(shape1))
  ai_size dim = AI_SHAPE_SIZE(shape0);
  while ( dim>0 ) {
    dim--;
    if ( AI_SHAPE_ELEM(shape0, dim)!=AI_SHAPE_ELEM(shape1, dim) )
      return false;
  }
  return true;
}

/*!
 * @brief Check if shape0 is a subshape of shape1
 * @ingroup datatypes_internal
 * @param shape0 the 1st tensor shape to compare
 * @param shape1 the 2nd tensor shape to compare
 * @return true if shape0 is a subshape of shape1 (all shape0 dimensions are
 * smaller than or equal to the shape1 ones). false otherwise
 */
AI_DECLARE_STATIC
ai_bool ai_shape_is_subshape(
  const ai_shape* shape0, const ai_shape* shape1)
{
  AI_ASSERT(shape0 && shape1)
  AI_ASSERT(AI_SHAPE_SIZE(shape0)==AI_SHAPE_SIZE(shape1))
  ai_size dim = AI_SHAPE_SIZE(shape0);
  while ( dim ) {
    dim--;
    if ( AI_SHAPE_ELEM(shape0, dim)>AI_SHAPE_ELEM(shape1, dim) )
      return false;
  }
  return true;
}

/*!
 * @brief Computes the total size of a tensor given its dimensions.
 * @ingroup datatypes_internal
 * @param shape the tensor shape
 */
AI_DECLARE_STATIC
ai_size ai_shape_get_size(const ai_shape* shape)
{
  AI_ASSERT(shape)
  AI_ASSERT(AI_SHAPE_SIZE(shape)==AI_SHAPE_MAX_DIMENSION)
  ai_size dim = AI_SHAPE_SIZE(shape);
  ai_size size = 1;
  while ( dim>0 ) {
    dim--;
    size *= AI_SHAPE_ELEM(shape, dim);
  }
  return size;
}

/*!
 * @brief Computes the size of the input image discarding the channels.
 * @ingroup datatypes_internal
 * @param shape the tensor shape
 */
AI_DECLARE_STATIC
ai_size ai_shape_get_npixels(const ai_shape* shape)
{
  AI_ASSERT(shape)
  const ai_size npixels = AI_SHAPE_W(shape) * AI_SHAPE_H(shape);
  return npixels;
}

/*!
 * @brief Map from ai_buffer data struct to ai_array data struct.
 * @ingroup datatypes_internal
 * @param buf a pointer to the ai_buffer to be mapped to ai_array
 * @return an initialized @ref ai_array struct representing same data
 */
AI_DECLARE_STATIC
ai_array ai_from_buffer_to_array(const ai_buffer* buf)
{
  AI_ASSERT(buf)
  const ai_u32 size = AI_BUFFER_SIZE(buf) * buf->n_batches;

  AI_ARRAY_OBJ_DECLARE(a, AI_BUFFER_TO_ARRAY_FMT(AI_BUFFER_FMT_OBJ(buf->format)),
                       buf->data, buf->data, size, AI_CONST);
  return a;
}

/*!
 * @brief Map from ai_array data struct to ai_buffer data struct.
 * @ingroup datatypes_internal
 * @param array a pointer to the ai_array to be mapped to ai_buffer
 * @return an initialized @ref ai_buffer struct representing same data
 */
AI_DECLARE_STATIC
ai_buffer ai_from_array_to_buffer(const ai_array* array)
{
  AI_ASSERT(array)
  const ai_buffer b = AI_BUFFER_OBJ_INIT(AI_ARRAY_TO_BUFFER_FMT(array->format), \
                                         1, 1, array->size, 1, array->data_start);
  return b;
}

AI_API_DECLARE_END

#endif /*__AI_DATATYPES_INTERNAL_H__*/
@@ -1,140 +0,0 @@
/**
 * Copyright (c) 2017 rxi
 *
 * This library is free software; you can redistribute it and/or modify it
 * under the terms of the MIT license. See `log.c` for details.
 */

#ifndef AI_LOG_H_
#define AI_LOG_H_
#pragma once

#include <stdio.h>
#include <stdint.h>
#include <stdarg.h>

#ifdef __cplusplus
extern "C" {
#else
#include <stdbool.h>
#endif

/*!
 * @defgroup log Core logger class definition and implementation
 * @brief Data structures and defines used to implement logger module
 * functionalities
 */

#define LOG_VERSION   "0.3.0"
#define LOG_CR        "\r\n"

/***** Compilation options: define/undef as required **************************/
#define LOG_USE_COLOR
/* #define LOG_INFO_SOURCE_CODE */

#ifndef HAS_STM32
  #define LOG_USE_FILE
  #define LOG_INFO_TIME
  #define LOG_INFO_SOURCE_CODE_STRIP_FILE_PATHS '/'
#else
  #define LOG_INFO_SOURCE_CODE_STRIP_FILE_PATHS '\\'
#endif

/******************************************************************************/
#define LOG_SUDO      (0x0)
#define LOG_FATAL     (0x1)
#define LOG_ERROR     (0x2)
#define LOG_WARN      (0x3)
#define LOG_INFO      (0x4)
#define LOG_DEBUG     (0x5)
#define LOG_TRACE     (0x6)


/*!
 * @typedef log_LockFn
 * @ingroup ai_log
 * @brief callback function for locking implementation (e.g. mutexes, etc.)
 */
typedef void (*log_LockFn)(const void *udata, const bool lock);

/*!
 * @typedef log_MsgFn
 * @ingroup ai_log
 * @brief callback for listening to logged channels
 */
typedef void (*log_MsgFn)(
  const void *udata, const uint8_t level,
  const char* msg, const uint32_t len);

/*!
 * @brief Get global log context handle
 * @ingroup ai_log
 */
void* ai_log_acquire(void);

/*!
 * @brief Set global log level
 * @ingroup ai_log
 */
void ai_log_set_level(const uint8_t level);

/*!
 * @brief Set global log quiet mode (no messages are emitted)
 * @ingroup ai_log
 */
void ai_log_set_quiet(const bool enable);

/*!
 * @brief Set callback for log messages locking
 * @ingroup ai_log
 */
void ai_log_set_lock(log_LockFn fn, const void *udata);

/*!
 * @brief Push a new listener with a given log level onto the log stack
 * @ingroup ai_log
 * @param[in] level the log level for this channel
 * @param[in] fn the callback function invoked when a message is available
 * @param[in] udata a pointer to the caller environment that is provided back
 * when the callback is called
 * @return 0 if OK, otherwise a value > 0 indicating the current size of the stack
 */
uint8_t ai_log_channel_push(const uint8_t level, log_MsgFn fn, const void *udata);
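/* Usage sketch (illustrative; `my_sink` is a hypothetical listener matching the
 * log_MsgFn signature above):
 *   static void my_sink(const void *udata, const uint8_t level,
 *                       const char* msg, const uint32_t len) {
 *     (void)udata; (void)level; (void)len;
 *     fputs(msg, stderr);        // forward every message to stderr
 *   }
 *   ...
 *   ai_log_set_level(LOG_INFO);
 *   ai_log_channel_push(LOG_INFO, my_sink, NULL);
 */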

/*!
 * @brief Pop a previously pushed listener from the log stack
 * @ingroup ai_log
 * @param[in] fn the callback function registered during @ref log_channel_push
 * @param[in] udata a pointer to the caller environment registered during @ref
 * log_channel_push
 * @return 0 if OK, otherwise a value > 0 indicating the max size of the callback stack
 */
uint8_t ai_log_channel_pop(log_MsgFn fn, const void *udata);

#ifdef LOG_USE_FILE
/*!
 * @brief Enable dumping of all logged messages to a file as well.
 * @details NB: the quiet option does not apply to file logging. File log
 * messages are recorded even when the log is in quiet mode.
 * @ingroup ai_log
 * @param[out] fp the file pointer of the file used to log the messages
 */
void ai_log_set_fp(FILE *fp);
#endif

/*!
 * @brief Main routine: always invoke it through the defined macros
 * @ingroup ai_log
 * @param[in] level the log level of the input message
 * @param[in] file the string containing the __FILE__ info about the source file
 * generating the message to log
 * @param[in] line the __LINE__ source line number
 * @param[in] fmt the varargs format of the string to print
 */
void ai_log_log(const uint8_t level, const char *file,
                const int line, const char *fmt, ...);

#ifdef __cplusplus
}
#endif

#endif /*AI_LOG_H_*/
@@ -1,301 +0,0 @@
/**
 ******************************************************************************
 * @file ai_math_helpers.h
 * @author AST Embedded Analytics Research Platform
 * @date 01-May-2017
 * @brief Math helpers routines header file.
 ******************************************************************************
 * @attention
 *
 * <h2><center>© Copyright (c) 2017 STMicroelectronics.
 * All rights reserved.</center></h2>
 *
 * This software component is licensed by ST under Ultimate Liberty license
 * SLA0044, the "License"; You may not use this file except in compliance with
 * the License. You may obtain a copy of the License at:
 * www.st.com/SLA0044
 *
 ******************************************************************************
 */
#ifndef __AI_MATH_HELPERS_H_
#define __AI_MATH_HELPERS_H_
#include <math.h>

#include "ai_platform.h"
#include "ai_platform_interface.h"

#define STM32_DOT_INLINE_OPTIM

#define AI_FLOAT_TOLERANCE    (6.19209290e-5F)  /* Used for small calculation
                                                   noise issues */
#define AI_FLOAT_EPSILON      (1.19209290e-7F)
#define AI_I8_EPSILON         (0.00787401F)     /* 1/(2^7 - 1)  */
#define AI_I16_EPSILON        (3.051851e-5F)    /* 1/(2^15 - 1) */

#define AI_FLT_MAX            (3.40282346638528859812e+38f)

#define AI_MIN(x,y)           ( ((x)<(y)) ? (x) : (y) )
#define AI_MAX(x,y)           ( ((x)>(y)) ? (x) : (y) )
#define AI_SIGN(x)            (((x)>0) ? 1 : -1)
#define AI_CLAMP(x, min, max) AI_MIN(AI_MAX(x,min), max)
#define AI_ABS(x)             fabsf(x)
#define AI_ABS_DIFF(x, y)     ( ((x)>(y)) ? ((x)-(y)) : ((y)-(x)) )
#define AI_NEG(x)             ( -1 * (x) )
#define AI_RECIPROCAL(x)      ( 1.0f / (x) )
#define AI_CEIL(x)            ceilf(x)
#define AI_FLOOR(x)           floorf(x)
#define AI_FLOOR_DIV(x, y)    AI_FLOOR((x)/(y))  /* floor division: x // y */
#define AI_FLOOR_MOD(x, y)    fmodf(x, y)
#define AI_ROUND(x)           roundf(x)

#if defined(STM32_DOT_INLINE_OPTIM)

AI_DECLARE_STATIC
void __ai_math_dot_array(
  ai_float* out,
  const ai_float* data0,
  const ai_float* data1,
  ai_size data_size)
{
  ai_float sum = 0.0f;  /* Temporary result storage */

  /* Run the below code for Cortex-M4 and Cortex-M3 */

  /* First part of the processing with loop unrolling. Compute 4 outputs at a time.
  ** A second loop below computes the remaining 1 to 3 samples. */
  while (data_size >= 4u)
  {
    /* C = A[0]*B[0] + A[1]*B[1] + A[2]*B[2] + ... + A[blockSize-1]*B[blockSize-1] */
    /* Calculate dot product and then store the result in a temporary buffer */
    sum += (*data0++) * (*data1++);
    sum += (*data0++) * (*data1++);
    sum += (*data0++) * (*data1++);
    sum += (*data0++) * (*data1++);

    /* Decrement the loop counter */
    data_size -= 4u;
  }

  while (data_size > 0u)
  {
    /* C = A[0]*B[0] + A[1]*B[1] + A[2]*B[2] + ... + A[blockSize-1]*B[blockSize-1] */
    /* Calculate dot product and then store the result in a temporary buffer. */
    sum += (*data0++) * (*data1++);

    /* Decrement the loop counter */
    data_size--;
  }

  /* Directly accumulate the result back in the destination buffer */
  *out += sum;
}

#undef AI_MATH_DOT_ARRAY
#define AI_MATH_DOT_ARRAY(dst, src0, src1, size) \
  __ai_math_dot_array(dst, src0, src1, size)

#else

#define AI_MATH_DOT_ARRAY(dst, src0, src1, size) \
  ai_math_dot_array(dst, src0, src1, size)

#endif
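/* Usage sketch (illustrative; the arrays are hypothetical):
 *   ai_float a[4] = {1.0f, 2.0f, 3.0f, 4.0f};
 *   ai_float b[4] = {1.0f, 1.0f, 1.0f, 1.0f};
 *   ai_float acc = 0.0f;
 *   AI_MATH_DOT_ARRAY(&acc, a, b, 4);   // acc += 1+2+3+4 = 10.0f
 */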

#define AI_MATH_ACOS(x)       acosf(x)
#define AI_MATH_ACOSH(x)      acoshf(x)
#define AI_MATH_ASIN(x)       asinf(x)
#define AI_MATH_ASINH(x)      asinhf(x)
#define AI_MATH_ATAN(x)       atanf(x)
#define AI_MATH_ATANH(x)      atanhf(x)
#define AI_MATH_COS(x)        cosf(x)
#define AI_MATH_COSH(x)       coshf(x)
#define AI_MATH_ERF(x)        erff(x)
#define AI_MATH_EXP(x)        expf(x)
#define AI_MATH_LOG(x)        logf(x)
#define AI_MATH_POW(x, e)     powf((x), (e))
#define AI_MATH_RSQRT(x)      (1.0f / AI_MATH_SQRT(x))
#define AI_MATH_SIN(x)        sinf(x)
#define AI_MATH_SINH(x)       sinhf(x)
#define AI_MATH_SQRT(x)       ai_math_sqrt(x)
#define AI_MATH_TAN(x)        tanf(x)
#define AI_MATH_TANH(x)       tanhf(x)

#define AI_MATH_RELU_TEST(x, thr, min, max) \
  ( ((x)<(thr)) ? (min) : (max) )

#define AI_MATH_RELU_GENERIC(x, thr, alpha, max) \
  AI_MATH_RELU_TEST(x, max, AI_MATH_RELU_GENERIC_NO_MAX(x, thr, alpha), max)

#define AI_MATH_RELU_GENERIC_NO_MAX(x, thr, alpha) \
  AI_MATH_RELU_TEST(x, thr, ((alpha)*((x)-(thr))), x)

#define AI_MATH_RELU_THRESHOLDED(x, thr) \
  AI_MATH_RELU_TEST(x, thr, 0, (x))

#define AI_MATH_LEAKY_RELU(x, neg_slope, pos_slope) \
  AI_MATH_RELU_TEST(x, 0, (x)*(neg_slope), (x)*(pos_slope))
  // ( ((x)>0) ? (x)*(pos_slope) : (x)*(neg_slope) )

#define AI_MATH_PRELU(x, slope) \
  AI_MATH_RELU_TEST(x, 0, (x)*(slope), (x))
  // AI_MATH_LEAKY_RELU(x, slope, 1)

#define AI_MATH_RELU(x) \
  AI_MATH_RELU_TEST(x, 0, 0, x)
  // AI_MAX(x, 0)

#define AI_MATH_ELU(x, alpha) \
  (AI_MAX(0.0f, (x)) + AI_MIN(0.0f, (alpha) * (AI_MATH_EXP(x)-1.0f)))

#define AI_MATH_SELU(x, alpha, scale) \
  ((scale)*AI_MATH_ELU(x, alpha))

#define AI_MATH_SCALED_TANH(x, alpha, beta) \
  ((alpha)*AI_MATH_TANH((beta)*(x)))

#define AI_MATH_SIGMOID(x) \
  (1.0f / (1.0f + AI_MATH_EXP(-(x))))

#define AI_MATH_HARD_SIGMOID(x, alpha, beta) \
  (AI_MAX(0.0f, AI_MIN(1.0f, (x) * (alpha) + (beta))))

#define AI_MATH_SOFT_PLUS(x) \
  AI_MATH_LOG(AI_MATH_EXP(x)+1.0f)

#define AI_MATH_SOFT_SIGN(x) \
  ((x)/(AI_ABS(x)+1.0f))
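/* Worked example (illustrative): evaluating a few of the activation macros
 * above at sample points:
 *   AI_MATH_RELU(-2.0f)      -> 0.0f,  AI_MATH_RELU(3.0f) -> 3.0f
 *   AI_MATH_SIGMOID(0.0f)    -> 1.0f / (1.0f + expf(0.0f)) = 0.5f
 *   AI_MATH_SOFT_SIGN(1.0f)  -> 1.0f / (fabsf(1.0f) + 1.0f) = 0.5f
 */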

AI_API_DECLARE_BEGIN

/*!
 * @defgroup math_helpers Math helpers
 * @brief Common math functions
 *
 * Math functions are mapped to the underlying platform through these utility
 * functions. On x86 and ARM v7 they are mapped to the float math functions in
 * the C99 standard library; on MCUs they are mapped to the ARM DSP functions.
 */

/*!
 * @brief platform optimized dot product of float vectors
 *
 * Computes the dot product between vectors and adds the result to out.
 * @ingroup math_helpers
 * @param out scalar result of the dot product
 * @param data0 the first float vector
 * @param data1 the second float vector
 * @param data_size the size of both vectors
 */
AI_INTERFACE_ENTRY
void ai_math_dot_array(
  ai_float* out,
  const ai_float* data0,
  const ai_float* data1,
  const ai_size data_size);

/*!
 * @brief platform optimized square root on a float value
 * @ingroup math_helpers
 * @param x input value
 * @return square root of the value
 */
AI_INTERFACE_ENTRY
ai_float ai_math_sqrt(const ai_float x);

/*!
 * @brief platform optimized exponential on a float value
 * @ingroup math_helpers
 * @param x input value
 * @return exponential of the value
 */
AI_INTERFACE_ENTRY
ai_float ai_math_exp(const ai_float x);

/*!
 * @brief platform optimized pow on a float value
 * @ingroup math_helpers
 * @param x input value
 * @param e exponent value
 * @return x raised to the power of e
 */
AI_INTERFACE_ENTRY
ai_float ai_math_pow(const ai_float x, const ai_float e);

/*!
 * @brief platform optimized hyperbolic tangent on a float value
 * @ingroup math_helpers
 * @param x input value
 * @return hyperbolic tangent of the value
 */
AI_INTERFACE_ENTRY
ai_float ai_math_tanh(const ai_float x);

/*!
 * @brief platform optimized relu on a float value
 * @ingroup math_helpers
 * @param x input value
 * @return relu of the value ( x if x>0 else 0)
 */
AI_INTERFACE_ENTRY
ai_float ai_math_relu(const ai_float x);

/*!
 * @brief platform optimized parametric relu on a float value
 * @ingroup math_helpers
 * @param x input value
 * @param slope the slope parameter
 * @return parametric relu of the value
 */
AI_INTERFACE_ENTRY
ai_float ai_math_prelu(const ai_float x, const ai_float slope);

/*!
 * @brief platform optimized sigmoid on a float value
 * @ingroup math_helpers
 * @param x input value
 * @return sigmoid of the value
 */
AI_INTERFACE_ENTRY
ai_float ai_math_sigmoid(const ai_float x);

/*!
 * @brief platform optimized hard sigmoid on a float value
 * @ingroup math_helpers
 * @param x input value
 * @return hard sigmoid of the value
 */
AI_INTERFACE_ENTRY
ai_float ai_math_hard_sigmoid(const ai_float x);

/*!
 * @brief platform optimized sign function on a float value
 * @ingroup math_helpers
 * @param x input value
 * @return sign of the value
 */
AI_INTERFACE_ENTRY
ai_float ai_math_sign(const ai_float x);

/*!
 * @brief optimized parametric rectified linear unit on a float value
 * @ingroup math_helpers
 * @param x input value
 * @param slope parameter value
 * @return x if x is positive and x*slope otherwise
 */
AI_INTERFACE_ENTRY
ai_float ai_fast_prelu(const ai_float x, const ai_float slope);

AI_INTERFACE_ENTRY ai_float ai_div(const ai_float a, const ai_float b);
AI_INTERFACE_ENTRY ai_float ai_floor_div(const ai_float a, const ai_float b);
AI_INTERFACE_ENTRY ai_float ai_floor_mod(const ai_float a, const ai_float b);
AI_INTERFACE_ENTRY ai_float ai_max(const ai_float a, const ai_float b);
AI_INTERFACE_ENTRY ai_float ai_min(const ai_float a, const ai_float b);
AI_INTERFACE_ENTRY ai_float ai_mul(const ai_float a, const ai_float b);
AI_INTERFACE_ENTRY ai_float ai_sub(const ai_float a, const ai_float b);
AI_INTERFACE_ENTRY ai_float ai_sum(const ai_float a, const ai_float b);

AI_API_DECLARE_END

#endif /*__AI_MATH_HELPERS_H_*/
@ -1,176 +0,0 @@
|
||||
/**
|
||||
******************************************************************************
|
||||
* @file ai_network_inspector.h
|
||||
* @author AST Embedded Analytics Research Platform
|
||||
* @date 6-Aug-2018
|
||||
* @brief header file of the network inspector wrapper plugin
|
||||
******************************************************************************
|
||||
* @attention
|
||||
*
|
||||
* <h2><center>© Copyright (c) 2018 STMicroelectronics.
|
||||
* All rights reserved.</center></h2>
|
||||
*
|
||||
* This software component is licensed by ST under Ultimate Liberty license
|
||||
* SLA0044, the "License"; You may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at:
|
||||
* www.st.com/SLA0044
|
||||
*
|
||||
******************************************************************************
|
||||
*/
|
||||
|
||||
#ifndef __AI_NETWORK_INSPECTOR_H_
|
||||
#define __AI_NETWORK_INSPECTOR_H_
|
||||
#pragma once
|
||||
|
||||
#include "ai_platform.h"
|
||||
#include "ai_platform_interface.h"
|
||||
#include "core_net_inspect_interface.h"
|
||||
|
||||
#define AI_INSPECTOR_API_MAJOR 1
|
||||
#define AI_INSPECTOR_API_MINOR 1
|
||||
#define AI_INSPECTOR_API_MICRO 0
|
||||
|
||||
/*!
|
||||
* @defgroup ai_network_inspector AI Network Inspector Module Tool
|
||||
* @brief header with datatypes and APIs for inspector module
|
||||
*/
|
||||
|
||||
#define AI_INSPECTOR_NETWORK_BIND_FAILED (0x0)
|
||||
|
||||
|
||||
AI_API_DECLARE_BEGIN
|
||||
|
||||
/*!
|
||||
* @typedef ai_inspector_entry_id
|
||||
* @ingroup ai_network_inspector
|
||||
* @brief network inspector bind network index id number
|
||||
* (actually the inspector could bind up to 65534 networks)
|
||||
*/
|
||||
typedef ai_u16 ai_inspector_entry_id;
|
||||
|
||||
/*!
|
||||
* @typedef ai_inspector_net_info
|
||||
* @ingroup ai_network_inspector
|
||||
* @brief wrapper struct for @ref ai_network_report
|
||||
*/
|
||||
typedef ai_network_report ai_inspector_net_info;
|
||||
|
||||
/*!
|
||||
* @typedef ai_inspector_config
|
||||
* @ingroup ai_network_inspector
|
||||
* @brief wrapper struct for @ref ai_inspect_config
|
||||
*/
|
||||
typedef ai_inspect_config ai_inspector_config;
|
||||
|
||||
/*!
|
||||
* @typedef ai_inspector_node_info
|
||||
* @ingroup ai_network_inspector
|
||||
* @brief wrapper struct for @ref ai_inspect_node_info
|
||||
*/
|
||||
typedef ai_inspect_node_info ai_inspector_node_info;
|
||||
|
||||
/*!
|
||||
* @typedef ai_inspector_net_report
|
||||
* @ingroup ai_network_inspector
|
||||
* @brief wrapper struct for @ref ai_inspect_net_report
|
||||
*/
|
||||
typedef ai_inspect_net_report ai_inspector_net_report;
|
||||
|
||||
/*!
|
||||
* @struct ai_inspector_net_entry
|
||||
* @ingroup ai_network_inspector
|
||||
* @brief struct with info related to the bound network. It has the network
|
||||
* handle, the network params, and the error (see @ref ai_error definition)
|
||||
*/
|
||||
typedef struct ai_inspector_net_entry_ {
|
||||
ai_handle handle; /*!< bound network context handle */
|
||||
ai_network_params params; /*!< bound network context params */
|
||||
ai_error error; /*!< bound network context error */
|
||||
} ai_inspector_net_entry;
|
||||
|
||||
/*!
|
||||
* @brief Return default context config.
|
||||
* @ingroup ai_network_inspector
|
||||
* @param[out] inspector config datastructure
|
||||
* @return the default inspector configuration
|
||||
*/
|
||||
AI_API_ENTRY
|
||||
ai_inspector_config ai_inspector_default_config(void);
|
||||
|
||||
/*!
|
||||
* @brief Create a network inspector plugin module.
|
||||
* @ingroup ai_network_inspector
|
||||
* @param[out] handle a pointer to an opaque handle that points to the inspector
|
||||
* context created
|
||||
* @param[in] cfg a pointer to the inspector config. if NULL a default config is
|
||||
* used by the inspector instance
|
||||
* @return true if initialization was fine, false otherwise
|
||||
*/
|
||||
AI_API_ENTRY
|
||||
ai_bool ai_inspector_create(
|
||||
ai_handle* handle, const ai_inspector_config* cfg);
|
||||
|
||||
/*!
|
||||
* @brief Destroy a network inspector plugin module.
|
||||
* @ingroup ai_network_inspector
|
||||
* @param[in/out] handle an opaque handle to the inspector context to destroy
|
||||
* context
|
||||
* @return true if destroy was fine, false otherwise
|
||||
*/
|
||||
AI_API_ENTRY
|
||||
ai_bool ai_inspector_destroy(ai_handle handle);
|
||||
|
||||
/*!
|
||||
* @brief Bind a network instance with the inspector plugin
|
||||
* @ingroup ai_network_inspector
|
||||
* @param[in/out] handle an opaque handle to the inspector context
|
||||
* @param[in] entry a pointer to the info about the network to be bound
|
||||
* @return a network id > 0 that is an index used to refer to the bound
|
||||
* network instance. If the returned index is 0, an error occurred during binding
|
||||
*/
|
||||
AI_API_ENTRY
|
||||
ai_inspector_entry_id ai_inspector_bind_network(
|
||||
ai_handle handle, const ai_inspector_net_entry* entry);
|
||||
|
||||
/*!
|
||||
* @brief Unbind a network instance from the inspector plugin
|
||||
* @ingroup ai_network_inspector
|
||||
* @param[in/out] handle an opaque handle to the inspector context
|
||||
* @param[in] net_id: a network id provided by @ref ai_inspector_bind_network API
|
||||
* @return true if the unbind was successful, false otherwise
|
||||
*/
|
||||
AI_API_ENTRY
|
||||
ai_bool ai_inspector_unbind_network(
|
||||
ai_handle handle, const ai_inspector_entry_id net_id);
|
||||
|
||||
/*!
|
||||
* @brief Get inspection report on a bound network
|
||||
* @ingroup ai_network_inspector
|
||||
* @param[in/out] handle an opaque handle to the inspector context
|
||||
* @param[in] net_id: a network id provided by @ref ai_inspector_bind_network API
|
||||
* @param[out] report a pointer to the required report @ref ai_inspector_net_report
|
||||
* data struct
|
||||
* @return true if the query was successful, false otherwise
|
||||
*/
|
||||
AI_API_ENTRY
|
||||
ai_bool ai_inspector_get_report(
|
||||
ai_handle handle, const ai_inspector_entry_id net_id,
|
||||
ai_inspector_net_report* report);
|
||||
|
||||
/*!
|
||||
* @brief Run a network instance bound to the inspector
|
||||
* @ingroup ai_network_inspector
|
||||
* @param[in/out] handle an opaque handle to the inspector context
|
||||
* @param[in] net_id: a network id provided by @ref ai_inspector_bind_network API
|
||||
* @param[in] input a pointer to the input data buffer
|
||||
* @param[out] output a pointer to the output data buffer
|
||||
* @return the number of batches processed. a value <=0 indicates an error
|
||||
*/
|
||||
AI_API_ENTRY
|
||||
ai_i32 ai_inspector_run(
|
||||
ai_handle handle, const ai_inspector_entry_id net_id,
|
||||
const ai_buffer* input, ai_buffer* output);
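
/*!
* Typical call sequence (illustrative sketch; the bound network handle, its
* params and the I/O buffers are assumed to come from the generated network
* code, so the names net, net_params, in_buf and out_buf are placeholders):
* @code
*   ai_handle ictx = AI_HANDLE_NULL;
*   if (ai_inspector_create(&ictx, NULL)) {            // NULL: default config
*     const ai_inspector_net_entry entry = {
*       .handle = net, .params = net_params, .error = AI_ERROR_INIT(NONE, NONE)
*     };
*     const ai_inspector_entry_id id = ai_inspector_bind_network(ictx, &entry);
*     if (id != AI_INSPECTOR_NETWORK_BIND_FAILED) {
*       ai_inspector_run(ictx, id, &in_buf, &out_buf);
*       ai_inspector_net_report report;
*       ai_inspector_get_report(ictx, id, &report);
*       ai_inspector_unbind_network(ictx, id);
*     }
*     ai_inspector_destroy(ictx);
*   }
* @endcode
*/
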
AI_API_DECLARE_END
|
||||
|
||||
#endif /* __AI_NETWORK_INSPECTOR_H_ */
|
||||
@ -1,508 +0,0 @@
|
||||
/**
|
||||
******************************************************************************
|
||||
* @file ai_platform.h
|
||||
* @author AST Embedded Analytics Research Platform
|
||||
* @date 01-May-2017
|
||||
* @brief Definitions of AI platform public APIs types
|
||||
******************************************************************************
|
||||
* @attention
|
||||
*
|
||||
* <h2><center>© Copyright (c) 2017 STMicroelectronics.
|
||||
* All rights reserved.</center></h2>
|
||||
*
|
||||
* This software component is licensed by ST under Ultimate Liberty license
|
||||
* SLA0044, the "License"; You may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at:
|
||||
* www.st.com/SLA0044
|
||||
*
|
||||
******************************************************************************
|
||||
*/
|
||||
|
||||
#ifndef __AI_PLATFORM_H__
|
||||
#define __AI_PLATFORM_H__
|
||||
#pragma once
|
||||
|
||||
#include <stdint.h>
|
||||
|
||||
#define AI_PLATFORM_API_MAJOR 1
|
||||
#define AI_PLATFORM_API_MINOR 1
|
||||
#define AI_PLATFORM_API_MICRO 0
|
||||
|
||||
/******************************************************************************/
|
||||
#ifdef __cplusplus
|
||||
#define AI_API_DECLARE_BEGIN extern "C" {
|
||||
#define AI_API_DECLARE_END }
|
||||
#else
|
||||
#include <stdbool.h>
|
||||
#define AI_API_DECLARE_BEGIN /* AI_API_DECLARE_BEGIN */
|
||||
#define AI_API_DECLARE_END /* AI_API_DECLARE_END */
|
||||
#endif
|
||||
|
||||
/******************************************************************************/
|
||||
#define AI_CONCAT_ARG(a, b) a ## b
|
||||
#define AI_CONCAT(a, b) AI_CONCAT_ARG(a, b)
|
||||
|
||||
/******************************************************************************/
|
||||
#if defined(_MSC_VER)
|
||||
#define AI_API_ENTRY __declspec(dllexport)
|
||||
#define AI_ALIGNED(x) /* AI_ALIGNED(x) */
|
||||
#elif defined(__ICCARM__) || defined (__IAR_SYSTEMS_ICC__)
|
||||
#define AI_API_ENTRY /* AI_API_ENTRY */
|
||||
#define AI_ALIGNED(x) AI_CONCAT(AI_ALIGNED_,x)
|
||||
#define AI_ALIGNED_1 _Pragma("data_alignment = 1")
|
||||
#define AI_ALIGNED_2 _Pragma("data_alignment = 2")
|
||||
#define AI_ALIGNED_4 _Pragma("data_alignment = 4")
|
||||
#define AI_ALIGNED_8 _Pragma("data_alignment = 8")
|
||||
#elif defined(__CC_ARM)
|
||||
#define AI_API_ENTRY __attribute__((visibility("default")))
|
||||
#define AI_ALIGNED(x) __attribute__((aligned (x)))
|
||||
/* Keil disallows anonymous union initialization by default */
|
||||
#pragma anon_unions
|
||||
#elif defined(__GNUC__)
|
||||
#define AI_API_ENTRY __attribute__((visibility("default")))
|
||||
#define AI_ALIGNED(x) __attribute__((aligned(x)))
|
||||
#else
|
||||
/* Dynamic libraries are not supported by the compiler */
|
||||
#define AI_API_ENTRY /* AI_API_ENTRY */
|
||||
#define AI_ALIGNED(x) /* AI_ALIGNED(x) */
|
||||
#endif
|
||||
|
||||
#define AI_HANDLE_PTR(ptr_) ((ai_handle)(ptr_))
|
||||
#define AI_HANDLE_NULL AI_HANDLE_PTR(0)
|
||||
|
||||
#define AI_HANDLE_FUNC_PTR(func) ((ai_handle_func)(func))
|
||||
|
||||
#define AI_UNUSED(x) (void)(x);
|
||||
|
||||
#define AI_DEPRECATED /* AI_DEPRECATED */
|
||||
|
||||
#define AI_LEGACY /* AI_LEGACY */
|
||||
|
||||
#ifndef __GNUC__
|
||||
#define AI_STRUCT_INIT {0}
|
||||
#else
|
||||
#define AI_STRUCT_INIT {}
|
||||
#endif
|
||||
|
||||
#define AI_ERROR_FMT AIU32_FMT
|
||||
|
||||
#define AI_IS_UNSIGNED(type) \
|
||||
((((type)0) - 1) > 0)
|
||||
|
||||
#define AI_CUSTOM_SIZE(type) \
|
||||
(ai_custom_type_signature)((AI_IS_UNSIGNED(type)) \
|
||||
? (0x80|(sizeof(type)&0x7f)) : (sizeof(type)&0x7f))
|
||||
|
||||
#define AI_NETWORK_PARAMS_INIT(params_, activations_) { \
|
||||
.params = params_, \
|
||||
.activations = activations_ }
|
||||
|
||||
/*! ai_intq_info struct handlers **********************************************/
|
||||
#define AI_INTQ_INFO_LIST_FLAGS(list_) \
|
||||
( (list_) ? (list_)->flags : 0 )
|
||||
|
||||
#define AI_INTQ_INFO_LIST_SCALE(list_, type_, pos_) \
|
||||
( ((list_) && (list_)->info && ((pos_)<(list_)->size)) \
|
||||
? ((type_*)((list_)->info->scale))[(pos_)] : 0 )
|
||||
|
||||
#define AI_INTQ_INFO_LIST_ZEROPOINT(list_, type_, pos_) \
|
||||
( ((list_) && (list_)->info && ((pos_)<(list_)->size)) \
|
||||
? ((type_*)((list_)->info->zeropoint))[(pos_)] : 0 )
|
||||
|
||||
/*! ai_buffer format handlers *************************************************/
|
||||
|
||||
/*!
|
||||
* @enum buffer format definition
|
||||
* @ingroup ai_platform
|
||||
*
|
||||
* 32 bit signed format list.
|
||||
*/
|
||||
typedef int32_t ai_buffer_format;
|
||||
|
||||
/*! ai_buffer_meta flags ******************************************************/
|
||||
#define AI_BUFFER_META_HAS_INTQ_INFO (0x1U << 0)
|
||||
#define AI_BUFFER_META_FLAG_SCALE_FLOAT (0x1U << 0)
|
||||
#define AI_BUFFER_META_FLAG_ZEROPOINT_U8 (0x1U << 1)
|
||||
#define AI_BUFFER_META_FLAG_ZEROPOINT_S8 (0x1U << 2)
|
||||
|
||||
/*! ai_buffer format variable flags *******************************************/
|
||||
#define AI_BUFFER_FMT_TYPE_NONE (0x0)
|
||||
#define AI_BUFFER_FMT_TYPE_FLOAT (0x1)
|
||||
#define AI_BUFFER_FMT_TYPE_Q (0x2)
|
||||
|
||||
#define AI_BUFFER_FMT_FLAG_CONST (0x1U<<30)
|
||||
#define AI_BUFFER_FMT_FLAG_STATIC (0x1U<<29)
|
||||
#define AI_BUFFER_FMT_FLAG_IS_IO (0x1U<<27)
|
||||
|
||||
#define AI_BUFFER_FMT_PACK(value_, mask_, bits_) \
|
||||
( ((value_) & (mask_)) << (bits_) )
|
||||
|
||||
#define AI_BUFFER_FMT_UNPACK(fmt_, mask_, bits_) \
|
||||
( (AI_BUFFER_FMT_OBJ(fmt_) >> (bits_)) & (mask_) )
|
||||
|
||||
#define AI_BUFFER_FMT_OBJ(fmt_) \
|
||||
((ai_buffer_format)(fmt_))
|
||||
|
||||
#define AI_BUFFER_FMT_GET_FLOAT(fmt_) \
|
||||
AI_BUFFER_FMT_UNPACK(fmt_, 0x1, 24)
|
||||
|
||||
#define AI_BUFFER_FMT_GET_SIGN(fmt_) \
|
||||
AI_BUFFER_FMT_UNPACK(fmt_, 0x1, 23)
|
||||
|
||||
#define AI_BUFFER_FMT_GET_TYPE(fmt_) \
|
||||
AI_BUFFER_FMT_UNPACK(fmt_, 0xF, 17)
|
||||
|
||||
#define AI_BUFFER_FMT_GET_BITS(fmt_) \
|
||||
AI_BUFFER_FMT_UNPACK(fmt_, 0x7F, 7)
|
||||
|
||||
#define AI_BUFFER_FMT_SET_BITS(bits_) \
|
||||
AI_BUFFER_FMT_PACK((bits_), 0x7F, 7)
|
||||
|
||||
#define AI_BUFFER_FMT_GET_FBITS(fmt_) \
|
||||
( (ai_i8)AI_BUFFER_FMT_UNPACK(fmt_, 0x7F, 0) - 64 )
|
||||
|
||||
#define AI_BUFFER_FMT_SET_FBITS(fbits_) \
|
||||
AI_BUFFER_FMT_PACK((fbits_)+64, 0x7F, 0)
|
||||
|
||||
#define AI_BUFFER_FMT_SET(type_id_, sign_bit_, float_bit_, bits_, fbits_) \
|
||||
AI_BUFFER_FMT_OBJ( \
|
||||
AI_BUFFER_FMT_PACK(float_bit_, 0x1, 24) | \
|
||||
AI_BUFFER_FMT_PACK(sign_bit_, 0x1, 23) | \
|
||||
AI_BUFFER_FMT_PACK(0, 0x3, 21) | \
|
||||
AI_BUFFER_FMT_PACK(type_id_, 0xF, 17) | \
|
||||
AI_BUFFER_FMT_PACK(0, 0x7, 14) | \
|
||||
AI_BUFFER_FMT_SET_BITS(bits_) | \
|
||||
AI_BUFFER_FMT_SET_FBITS(fbits_) \
|
||||
)
|
||||
|
||||
#define AI_BUFFER_FMT_SAME(fmt1_, fmt2_) \
|
||||
( AI_BUFFER_FMT_GET(fmt1_) == AI_BUFFER_FMT_GET(fmt2_) )
|
||||
|
||||
#define AI_BUFFER_FMT_GET(fmt_) \
|
||||
(AI_BUFFER_FMT_OBJ(fmt_) & 0x01FFFFFF)
|
||||
|
||||
#define AI_BUFFER_FORMAT(buf_) \
|
||||
AI_BUFFER_FMT_GET((buf_)->format)
|
||||
#define AI_BUFFER_WIDTH(buf_) \
|
||||
((buf_)->width)
|
||||
#define AI_BUFFER_HEIGHT(buf_) \
|
||||
((buf_)->height)
|
||||
#define AI_BUFFER_CHANNELS(buf_) \
|
||||
((buf_)->channels)
|
||||
#define AI_BUFFER_N_BATCHES(buf_) \
|
||||
((buf_)->n_batches)
|
||||
#define AI_BUFFER_DATA(buf_, type_) \
|
||||
((type_*)((buf_)->data))
|
||||
|
||||
#define AI_BUFFER_META_INFO(buf_) \
|
||||
((buf_)->meta_info)
|
||||
|
||||
#define AI_BUFFER_META_INFO_INTQ(meta_) \
|
||||
((meta_) && ((meta_)->flags & AI_BUFFER_META_HAS_INTQ_INFO)) \
|
||||
? ((meta_)->intq_info) : NULL
|
||||
|
||||
#define AI_BUFFER_META_INFO_INTQ_GET_SCALE(meta_, pos_) \
|
||||
( (AI_BUFFER_META_INFO_INTQ(meta_)) \
|
||||
? AI_INTQ_INFO_LIST_SCALE(AI_BUFFER_META_INFO_INTQ(meta_), ai_float, pos_) \
|
||||
: 0 )
|
||||
|
||||
#define AI_BUFFER_META_INFO_INTQ_GET_ZEROPOINT(meta_, pos_) \
|
||||
( (AI_BUFFER_META_INFO_INTQ(meta_)) \
|
||||
? ((AI_INTQ_INFO_LIST_FLAGS(AI_BUFFER_META_INFO_INTQ(meta_))&AI_BUFFER_META_FLAG_ZEROPOINT_U8) \
|
||||
? AI_INTQ_INFO_LIST_ZEROPOINT(AI_BUFFER_META_INFO_INTQ(meta_), ai_u8, pos_) \
|
||||
: AI_INTQ_INFO_LIST_ZEROPOINT(AI_BUFFER_META_INFO_INTQ(meta_), ai_i8, pos_) ) \
|
||||
: 0 )
|
||||
|
||||
#define AI_BUFFER_META_INFO_INIT(flags_, intq_info_) { \
|
||||
.flags = (flags_), \
|
||||
.intq_info = AI_PACK(intq_info_) \
|
||||
}
|
||||
|
||||
#define AI_BUFFER_SIZE(buf_) \
|
||||
(((buf_)->width) * ((buf_)->height) * ((buf_)->channels))
|
||||
|
||||
#define AI_BUFFER_BYTE_SIZE(count_, fmt_) \
|
||||
( (((count_) * AI_BUFFER_FMT_GET_BITS(fmt_))+4) >> 3 )
|
||||
|
||||
|
||||
#define AI_BUFFER_OBJ_INIT(format_, h_, w_, ch_, n_batches_, data_) \
|
||||
{ .format = (ai_buffer_format)(format_), \
|
||||
.n_batches = (n_batches_), \
|
||||
.height = (h_), \
|
||||
.width = (w_), \
|
||||
.channels = (ch_), \
|
||||
.data = (ai_handle)(data_), \
|
||||
.meta_info = NULL \
|
||||
}
|
||||
|
||||
#define AI_BUFFER_OBJ_INIT_STATIC(type_, format_, h_, w_, ch_, n_batches_, ...) \
|
||||
{ .format = (ai_buffer_format)(format_), \
|
||||
.n_batches = (n_batches_), \
|
||||
.height = (h_), \
|
||||
.width = (w_), \
|
||||
.channels = (ch_), \
|
||||
.data = (ai_handle)((type_[(h_)*(w_)*(ch_)*(n_batches_)]){__VA_ARGS__}), \
|
||||
.meta_info = NULL \
|
||||
}
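
/*!
* Illustrative sketch: describing a caller-owned float input of shape
* 1 x 8 x 8 x 3 (batches x height x width x channels) with the helper above.
* @code
*   ai_float in_data[8 * 8 * 3];
*   const ai_buffer in = AI_BUFFER_OBJ_INIT(
*     AI_BUFFER_FORMAT_FLOAT, 8, 8, 3, 1, in_data);
*   // AI_BUFFER_SIZE(&in) == 192 elements
* @endcode
*/
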
/*!
|
||||
* @enum buffer formats enum list
|
||||
* @ingroup ai_platform
|
||||
*
|
||||
* List of supported ai_buffer format types.
|
||||
*/
|
||||
enum {
|
||||
AI_BUFFER_FORMAT_NONE = AI_BUFFER_FMT_SET(AI_BUFFER_FMT_TYPE_NONE, 0, 0, 0, 0),
|
||||
AI_BUFFER_FORMAT_FLOAT = AI_BUFFER_FMT_SET(AI_BUFFER_FMT_TYPE_FLOAT, 1, 1, 32, 0),
|
||||
|
||||
AI_BUFFER_FORMAT_U8 = AI_BUFFER_FMT_SET(AI_BUFFER_FMT_TYPE_Q, 0, 0, 8, 0),
|
||||
AI_BUFFER_FORMAT_U16 = AI_BUFFER_FMT_SET(AI_BUFFER_FMT_TYPE_Q, 0, 0, 16, 0),
|
||||
AI_BUFFER_FORMAT_S8 = AI_BUFFER_FMT_SET(AI_BUFFER_FMT_TYPE_Q, 1, 0, 8, 0),
|
||||
AI_BUFFER_FORMAT_S16 = AI_BUFFER_FMT_SET(AI_BUFFER_FMT_TYPE_Q, 1, 0, 16, 0),
|
||||
|
||||
AI_BUFFER_FORMAT_Q = AI_BUFFER_FMT_SET(AI_BUFFER_FMT_TYPE_Q, 1, 0, 0, 0),
|
||||
AI_BUFFER_FORMAT_Q7 = AI_BUFFER_FMT_SET(AI_BUFFER_FMT_TYPE_Q, 1, 0, 8, 7),
|
||||
AI_BUFFER_FORMAT_Q15 = AI_BUFFER_FMT_SET(AI_BUFFER_FMT_TYPE_Q, 1, 0, 16, 15),
|
||||
|
||||
AI_BUFFER_FORMAT_UQ = AI_BUFFER_FMT_SET(AI_BUFFER_FMT_TYPE_Q, 0, 0, 0, 0),
|
||||
AI_BUFFER_FORMAT_UQ7 = AI_BUFFER_FMT_SET(AI_BUFFER_FMT_TYPE_Q, 0, 0, 8, 7),
|
||||
AI_BUFFER_FORMAT_UQ15 = AI_BUFFER_FMT_SET(AI_BUFFER_FMT_TYPE_Q, 0, 0, 16, 15),
|
||||
};
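
/*!
* The packed format fields can be read back with the accessors defined above;
* a small illustrative sketch:
* @code
*   const ai_buffer_format fmt = AI_BUFFER_FORMAT_Q7;
*   const ai_u32 bits  = AI_BUFFER_FMT_GET_BITS(fmt);    // 8
*   const ai_i8  fbits = AI_BUFFER_FMT_GET_FBITS(fmt);   // 7
*   const ai_u32 sign  = AI_BUFFER_FMT_GET_SIGN(fmt);    // 1 (signed)
* @endcode
*/
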
/******************************************************************************/
|
||||
#define AI_ERROR_INIT(type_, code_) { \
|
||||
.type = AI_ERROR_##type_, \
|
||||
.code = AI_ERROR_CODE_##code_ }
|
||||
|
||||
/* printf formats */
|
||||
#ifdef REISC
|
||||
#define SSIZET_FMT "%lu"
|
||||
#define AII32_FMT "%ld"
|
||||
#define AIU32_FMT "%lu"
|
||||
#else /* REISC */
|
||||
#define SSIZET_FMT "%u"
|
||||
#define AII32_FMT "%d"
|
||||
#define AIU32_FMT "%u"
|
||||
#endif /* REISC */
|
||||
|
||||
typedef uint8_t ai_custom_type_signature;
|
||||
|
||||
typedef void* ai_handle;
|
||||
|
||||
typedef void (*ai_handle_func)(void*);
|
||||
|
||||
typedef float ai_float;
|
||||
typedef double ai_double;
|
||||
|
||||
typedef bool ai_bool;
|
||||
|
||||
typedef uint32_t ai_size;
|
||||
|
||||
typedef uintptr_t ai_uptr;
|
||||
|
||||
typedef unsigned int ai_uint;
|
||||
typedef uint8_t ai_u8;
|
||||
typedef uint16_t ai_u16;
|
||||
typedef uint32_t ai_u32;
|
||||
typedef uint64_t ai_u64;
|
||||
|
||||
typedef int ai_int;
|
||||
typedef int8_t ai_i8;
|
||||
typedef int16_t ai_i16;
|
||||
typedef int32_t ai_i32;
|
||||
typedef int64_t ai_i64;
|
||||
|
||||
typedef uint32_t ai_signature;
|
||||
|
||||
/******************************************************************************/
|
||||
/*!
|
||||
* @struct ai_error
|
||||
* @ingroup ai_platform
|
||||
* @brief Structure encoding details about the last error.
|
||||
*/
|
||||
typedef struct ai_error_ {
|
||||
ai_u32 type : 8; /*!< Error type represented by @ref ai_error_type */
|
||||
ai_u32 code : 24; /*!< Error code represented by @ref ai_error_code */
|
||||
} ai_error;
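
/*!
* Illustrative sketch: checking and printing the error bit-fields with printf
* from <stdio.h>. The call returning the error is a placeholder for any API
* that returns an ai_error.
* @code
*   const ai_error err = some_ai_call_returning_error();   // placeholder
*   if (err.type != AI_ERROR_NONE) {
*     printf("error type=" AIU32_FMT " code=" AIU32_FMT "\n",
*            (ai_u32)err.type, (ai_u32)err.code);
*   }
* @endcode
*/
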
/******************************************************************************/
|
||||
/*!
|
||||
* @struct ai_intq_info
|
||||
* @ingroup ai_platform
|
||||
* @brief an element of the ai_intq_info_list entry. It reports an array for the
|
||||
* scale and zeropoint values for each buffer. Optional flags are also present
|
||||
*/
|
||||
typedef struct ai_intq_info_ {
|
||||
ai_float* scale;
|
||||
ai_handle zeropoint;
|
||||
} ai_intq_info;
|
||||
|
||||
/*!
|
||||
* @struct ai_intq_info_list
|
||||
* @ingroup ai_platform
|
||||
* @brief list reporting meta info for quantized networks integer support
|
||||
* when size > 1 it indicates a per-channel output quantization
|
||||
*/
|
||||
typedef struct ai_intq_info_list_ {
|
||||
ai_u16 flags; /*!< optional flags to store intq info attributes */
|
||||
ai_u16 size; /*!< number of elements in the intq_info list */
|
||||
ai_intq_info* info; /*!< pointer to an array of metainfo associated to the intq_info list */
|
||||
} ai_intq_info_list;
|
||||
|
||||
/******************************************************************************/
|
||||
/*!
|
||||
* @struct ai_buffer_meta_info
|
||||
* @ingroup ai_platform
|
||||
* @brief Optional meta attributes associated with the I/O buffer.
|
||||
* This datastruct is also used for network querying, where the data field
* may be NULL.
|
||||
*/
|
||||
typedef struct ai_buffer_meta_info_ {
|
||||
ai_u32 flags; /*!< meta info flags */
|
||||
ai_intq_info_list* intq_info; /*!< meta info related to integer format */
|
||||
} ai_buffer_meta_info;
|
||||
|
||||
/*!
|
||||
* @struct ai_buffer
|
||||
* @ingroup ai_platform
|
||||
* @brief Memory buffer storing data (optional) with a shape, size and type.
|
||||
* This datastruct is also used for network querying, where the data field
* may be NULL.
|
||||
*/
|
||||
typedef struct ai_buffer_ {
|
||||
ai_buffer_format format; /*!< buffer format */
|
||||
ai_u16 n_batches; /*!< number of batches in the buffer */
|
||||
ai_u16 height; /*!< buffer height dimension */
|
||||
ai_u16 width; /*!< buffer width dimension */
|
||||
ai_u32 channels; /*!< buffer number of channels */
|
||||
ai_handle data; /*!< pointer to buffer data */
|
||||
ai_buffer_meta_info* meta_info; /*!< pointer to buffer metadata info */
|
||||
} ai_buffer;
|
||||
|
||||
/* enums section */
|
||||
|
||||
/*!
|
||||
* @enum ai_error_type
|
||||
* @ingroup ai_platform
|
||||
*
|
||||
* Generic enum to list network error types.
|
||||
*/
|
||||
typedef enum {
|
||||
AI_ERROR_NONE = 0x00, /*!< No error */
|
||||
AI_ERROR_TOOL_PLATFORM_MISMATCH = 0x01,
|
||||
AI_ERROR_TYPES_MISMATCH = 0x02,
|
||||
AI_ERROR_INVALID_HANDLE = 0x10,
|
||||
AI_ERROR_INVALID_STATE = 0x11,
|
||||
AI_ERROR_INVALID_INPUT = 0x12,
|
||||
AI_ERROR_INVALID_OUTPUT = 0x13,
|
||||
AI_ERROR_INVALID_PARAM = 0x14,
|
||||
AI_ERROR_INVALID_SIGNATURE = 0x15,
|
||||
AI_ERROR_INIT_FAILED = 0x30,
|
||||
AI_ERROR_ALLOCATION_FAILED = 0x31,
|
||||
AI_ERROR_DEALLOCATION_FAILED = 0x32,
|
||||
} ai_error_type;
|
||||
|
||||
/*!
|
||||
* @enum ai_error_code
|
||||
* @ingroup ai_platform
|
||||
*
|
||||
* Generic enum to list network error codes.
|
||||
*/
|
||||
typedef enum {
|
||||
AI_ERROR_CODE_NONE = 0x0000, /*!< No error */
|
||||
AI_ERROR_CODE_NETWORK = 0x0010,
|
||||
AI_ERROR_CODE_NETWORK_PARAMS = 0x0011,
|
||||
AI_ERROR_CODE_NETWORK_WEIGHTS = 0x0012,
|
||||
AI_ERROR_CODE_NETWORK_ACTIVATIONS = 0x0013,
|
||||
AI_ERROR_CODE_LAYER = 0x0014,
|
||||
AI_ERROR_CODE_TENSOR = 0x0015,
|
||||
AI_ERROR_CODE_ARRAY = 0x0016,
|
||||
AI_ERROR_CODE_INVALID_PTR = 0x0017,
|
||||
AI_ERROR_CODE_INVALID_SIZE = 0x0018,
|
||||
AI_ERROR_CODE_INVALID_FORMAT = 0x0019,
|
||||
AI_ERROR_CODE_OUT_OF_RANGE = 0x0020,
|
||||
AI_ERROR_CODE_INVALID_BATCH = 0x0021,
|
||||
AI_ERROR_CODE_MISSED_INIT = 0x0030,
|
||||
} ai_error_code;
|
||||
|
||||
/*!
|
||||
* @struct ai_platform_version
|
||||
* @ingroup ai_platform
|
||||
* @brief Datastruct storing platform version info
|
||||
*/
|
||||
typedef struct ai_platform_version_ {
|
||||
ai_u8 major;
|
||||
ai_u8 minor;
|
||||
ai_u8 micro;
|
||||
ai_u8 reserved;
|
||||
} ai_platform_version;
|
||||
|
||||
|
||||
/*!
|
||||
* @struct ai_network_params
|
||||
* @ingroup ai_platform
|
||||
*
|
||||
* Datastructure to pass parameters to the network initialization.
|
||||
*/
|
||||
typedef struct ai_network_params_ {
|
||||
ai_buffer params; /*! info about params buffer (required!) */
|
||||
ai_buffer activations; /*! info about activations buffer (required!) */
|
||||
} ai_network_params;
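
/*!
* Illustrative sketch: wiring the generated weights blob and a caller-allocated
* activations arena into the init parameters. The AI_MYNET_* sizes and the
* weights getter are placeholders for names emitted by the code generator.
* @code
*   AI_ALIGNED(4) static ai_u8 activations[AI_MYNET_DATA_ACTIVATIONS_SIZE];
*
*   const ai_network_params params = AI_NETWORK_PARAMS_INIT(
*     AI_BUFFER_OBJ_INIT(AI_BUFFER_FORMAT_U8, 1, 1,
*                        AI_MYNET_DATA_WEIGHTS_SIZE, 1, ai_mynet_data_weights_get()),
*     AI_BUFFER_OBJ_INIT(AI_BUFFER_FORMAT_U8, 1, 1,
*                        AI_MYNET_DATA_ACTIVATIONS_SIZE, 1, activations));
* @endcode
*/
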
/*!
|
||||
* @struct ai_network_report
|
||||
* @ingroup ai_platform
|
||||
*
|
||||
* Datastructure to query a network report with some relevant network detail.
|
||||
*/
|
||||
typedef struct ai_network_report_ {
|
||||
const char* model_name;
|
||||
const char* model_signature;
|
||||
const char* model_datetime;
|
||||
|
||||
const char* compile_datetime;
|
||||
|
||||
const char* runtime_revision;
|
||||
ai_platform_version runtime_version;
|
||||
|
||||
const char* tool_revision;
|
||||
ai_platform_version tool_version;
|
||||
ai_platform_version tool_api_version;
|
||||
|
||||
ai_platform_version api_version;
|
||||
ai_platform_version interface_api_version;
|
||||
|
||||
ai_u32 n_macc;
|
||||
|
||||
ai_u16 n_inputs;
|
||||
ai_u16 n_outputs;
|
||||
ai_buffer* inputs;
|
||||
ai_buffer* outputs;
|
||||
|
||||
ai_buffer activations;
|
||||
ai_buffer params;
|
||||
|
||||
ai_u32 n_nodes;
|
||||
|
||||
ai_signature signature;
|
||||
} ai_network_report;
|
||||
|
||||
/*!
|
||||
* @enum ai_upsample_mode
|
||||
* @ingroup ai_platform
|
||||
* @brief allowed mode in upsample layer
|
||||
*/
|
||||
typedef enum {
|
||||
AI_UPSAMPLE_ZEROS = 0x0,
|
||||
AI_UPSAMPLE_NEAREST,
|
||||
AI_UPSAMPLE_BILINEAR,
|
||||
AI_UPSAMPLE_TRILINEAR
|
||||
} ai_upsample_mode;
|
||||
|
||||
typedef enum {
|
||||
AI_PAD_CONSTANT = 0x0,
|
||||
AI_PAD_REFLECT,
|
||||
AI_PAD_EDGE,
|
||||
} ai_pad_mode;
|
||||
|
||||
#endif /*__AI_PLATFORM_H__*/
|
||||
@ -1,786 +0,0 @@
|
||||
/**
|
||||
******************************************************************************
|
||||
* @file ai_platform_interface.h
|
||||
* @author AST Embedded Analytics Research Platform
|
||||
* @date 02-Aug-2018
|
||||
* @brief Definitions of AI platform interface APIs types
|
||||
******************************************************************************
|
||||
* @attention
|
||||
*
|
||||
* <h2><center>© Copyright (c) 2018 STMicroelectronics.
|
||||
* All rights reserved.</center></h2>
|
||||
*
|
||||
* This software component is licensed by ST under Ultimate Liberty license
|
||||
* SLA0044, the "License"; You may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at:
|
||||
* www.st.com/SLA0044
|
||||
*
|
||||
******************************************************************************
|
||||
*/
|
||||
|
||||
#ifndef __AI_PLATFORM_INTERFACE_H__
|
||||
#define __AI_PLATFORM_INTERFACE_H__
|
||||
#pragma once
|
||||
|
||||
#include "ai_platform.h"
|
||||
|
||||
#include "datatypes_network.h"
|
||||
#include "ai_datatypes_format.h"
|
||||
|
||||
/*!
|
||||
* @defgroup datatypes_interface Interface Datatypes
|
||||
* @brief Data structures and defines used to implement neural networks
|
||||
*/
|
||||
|
||||
#define AI_PLATFORM_INTERFACE_API_MAJOR 1
|
||||
#define AI_PLATFORM_INTERFACE_API_MINOR 3
|
||||
#define AI_PLATFORM_INTERFACE_API_MICRO 0
|
||||
|
||||
/******************************************************************************/
|
||||
#define AI_ERROR_TRAP(net_, type_, code_) \
|
||||
ai_platform_network_set_error((net_), AI_CONCAT(AI_ERROR_,type_), \
|
||||
AI_CONCAT(AI_ERROR_CODE_,code_))
|
||||
|
||||
/*! AI_PTR HANDLERS SECTION ************************************/
|
||||
#define AI_PTR(ptr_) ((ai_ptr)(ptr_))
|
||||
#define AI_PTR_CONST(ptr_) ((ai_ptr_const)(ptr_))
|
||||
|
||||
/*! STATIC ARRAYS ALLOCATOR SECTION ************************************/
|
||||
#define AI_PACK_STORAGE_ARRAY(type_, dim_, ...) \
|
||||
(type_[dim_]) { AI_PACK(__VA_ARGS__) }
|
||||
|
||||
/*! AI_STORAGE_KLASS SECTION ************************************/
|
||||
#define AI_STORAGE_KLASS_PACK(type_, dim_, ...) \
|
||||
AI_PACK_STORAGE_ARRAY(type_, dim_, __VA_ARGS__)
|
||||
|
||||
#define AI_STORAGE_KLASS_INIT(type_, size_, data_) \
|
||||
{ \
|
||||
.type = (type_), \
|
||||
.size = (size_), \
|
||||
.data = (ai_handle)(data_), \
|
||||
}
|
||||
|
||||
/*!
|
||||
* @enum ai_storage_klass_type
|
||||
* @ingroup ai_platform_interface
|
||||
* @brief @ref ai_storage_class types enum
|
||||
*/
|
||||
typedef enum {
|
||||
AI_STORAGE_KLASS_SHAPE = 0x0,
|
||||
AI_STORAGE_KLASS_STRIDE,
|
||||
AI_STORAGE_KLASS_FLOAT,
|
||||
AI_STORAGE_KLASS_U8,
|
||||
AI_STORAGE_KLASS_I8,
|
||||
AI_STORAGE_KLASS_U16,
|
||||
AI_STORAGE_KLASS_I16,
|
||||
AI_STORAGE_KLASS_U32,
|
||||
AI_STORAGE_KLASS_I32,
|
||||
AI_STORAGE_KLASS_U64,
|
||||
AI_STORAGE_KLASS_I64,
|
||||
} ai_storage_klass_type;
|
||||
|
||||
/*!
|
||||
* @struct ai_storage_klass
|
||||
* @ingroup ai_platform_interface
|
||||
* @brief Generic "Template" klass for generic storage arrays containers
|
||||
* from this klass several typed containers are derived (see e.g. @ref ai_shape)
|
||||
*/
|
||||
AI_PACKED_STRUCT_START
|
||||
typedef AI_ALIGNED_TYPE(struct, 4) AI_PACKED ai_storage_klass_s {
|
||||
ai_u32 type : 8;
|
||||
ai_u32 size : 24;
|
||||
ai_handle data;
|
||||
} ai_storage_klass;
|
||||
AI_PACKED_STRUCT_END
|
||||
|
||||
/*! AI_SHAPES SECTION ************************************/
|
||||
#define AI_SHAPE_2D_INIT(w_, h_) \
|
||||
{ .data = { (w_), (h_) } }
|
||||
|
||||
#define AI_SHAPE_INIT(dim_, ...) \
|
||||
AI_STORAGE_KLASS_INIT( \
|
||||
AI_STORAGE_KLASS_SHAPE, \
|
||||
dim_, \
|
||||
AI_STORAGE_KLASS_PACK(ai_shape_dimension, dim_, ## __VA_ARGS__))
|
||||
|
||||
#define AI_SHAPE_INIT_FROM_BUFFER(dim_, buffer_) \
|
||||
AI_STORAGE_KLASS_INIT( \
|
||||
AI_STORAGE_KLASS_SHAPE, \
|
||||
dim_, \
|
||||
buffer_)
|
||||
|
||||
/*!
|
||||
* @enum ai_shape_type
|
||||
* @ingroup ai_platform_interface
|
||||
* @brief Codes for the 4D tensor dimensions
|
||||
*/
|
||||
typedef enum {
|
||||
AI_SHAPE_MAX_DIMENSION = 0x4,
|
||||
AI_SHAPE_HEIGHT = 0x3,
|
||||
AI_SHAPE_WIDTH = 0x2,
|
||||
AI_SHAPE_CHANNEL = 0x1,
|
||||
AI_SHAPE_IN_CHANNEL = 0x0,
|
||||
// AI_SHAPE_BATCH_CHANNEL = 0x4,
|
||||
} ai_shape_type;
|
||||
|
||||
/*!
|
||||
* @struct ai_shape
|
||||
* @ingroup ai_platform_interface
|
||||
* @brief Dimensions for generic 4D tensors
|
||||
*/
|
||||
#if 1
|
||||
|
||||
#if 0
|
||||
AI_PACKED_STRUCT_START
|
||||
typedef AI_ALIGNED_TYPE(struct, 4) AI_PACKED ai_shape_s {
|
||||
ai_u32 type : 8;
|
||||
ai_u32 size : 24;
|
||||
ai_shape_dimension data[AI_SHAPE_MAX_DIMENSION]; /*!< 4D tensor shape */
|
||||
} ai_shape;
|
||||
AI_PACKED_STRUCT_END
|
||||
#else
|
||||
typedef ai_storage_klass ai_shape;
|
||||
#endif
|
||||
|
||||
#else
|
||||
AI_PACKED_STRUCT_START
|
||||
typedef AI_ALIGNED_TYPE(struct, 4) AI_PACKED ai_shape_s {
|
||||
ai_shape_dimension* dimension; /*!< ND tensor shape */
|
||||
} ai_shape;
|
||||
AI_PACKED_STRUCT_END
|
||||
#endif
|
||||
|
||||
/*! AI_STRIDES HANDLERS SECTION ************************************/
|
||||
#define AI_STRIDE_INIT(dim_, ...) \
|
||||
AI_STORAGE_KLASS_INIT( \
|
||||
AI_STORAGE_KLASS_STRIDE, \
|
||||
dim_, \
|
||||
AI_STORAGE_KLASS_PACK(ai_stride_dimension, dim_, ## __VA_ARGS__))
|
||||
|
||||
|
||||
#define AI_STRIDE_INIT_FROM_BUFFER(dim_, buffer_) \
|
||||
AI_STORAGE_KLASS_INIT( \
|
||||
AI_STORAGE_KLASS_STRIDE, \
|
||||
dim_, \
|
||||
buffer_)
|
||||
|
||||
/*!
|
||||
* @struct ai_stride
|
||||
* @ingroup ai_platform_interface
|
||||
* @brief Stride dimensions for generic 4D tensors (in number of elements)
|
||||
*/
|
||||
typedef ai_storage_klass ai_stride;
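
/*!
* Illustrative sketch (arbitrary values): a 4D shape and a stride descriptor
* built with the initializers above; entries follow the @ref ai_shape_type
* ordering (in_channels, channels, width, height).
* @code
*   ai_shape  shape  = AI_SHAPE_INIT(4, 1, 16, 8, 8);
*   ai_stride stride = AI_STRIDE_INIT(4, 1, 1, 16, 128);
* @endcode
*/
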
/*! BASIC_TYPES HANDLERS SECTION ************************************/
|
||||
#define AI_SIZE(value_) \
|
||||
((ai_size)(value_))
|
||||
|
||||
/*! AI_KLASS_OBJ HANDLERS SECTION ************************************/
|
||||
#define AI_KLASS_OBJ(obj_) \
|
||||
((ai_klass_obj)(obj_))
|
||||
|
||||
/*! GENERIC HANDLERS SECTION ************************************/
|
||||
#define AI_OBJ_DATA(obj_, type_) \
|
||||
((type_)(obj_)->data)
|
||||
|
||||
/*! AI_BUFFER HANDLERS SECTION ************************************/
|
||||
#define AI_BUFFER_OBJ(ptr) \
|
||||
((ai_buffer*)(ptr))
|
||||
|
||||
/*! AI_ARRAY HANDLERS SECTION ************************************/
|
||||
#define AI_ARRAY_OBJ(ptr) \
|
||||
((ai_array*)(ptr))
|
||||
|
||||
#define AI_ARRAY_OBJ_FMT(array_) \
|
||||
((ai_array_format)(AI_ARRAY_OBJ(array_)->format))
|
||||
|
||||
#define AI_ARRAY_OBJ_SIZE(array_) \
|
||||
(AI_ARRAY_OBJ(array_)->size)
|
||||
|
||||
#define AI_ARRAY_OBJ_BYTE_SIZE(array_) \
|
||||
AI_SIZE(AI_ARRAY_GET_BYTE_SIZE(AI_ARRAY_OBJ_FMT(array_), \
|
||||
AI_ARRAY_OBJ_SIZE(array_)))
|
||||
|
||||
#define AI_ARRAY_OBJ_DATA_SIZE(array_) \
|
||||
AI_ARRAY_GET_DATA_BYTE_SIZE(AI_ARRAY_OBJ_FMT(array_), \
|
||||
AI_ARRAY_OBJ_SIZE(array_))
|
||||
|
||||
#define AI_ARRAY_OBJ_DATA(array_, type_) \
|
||||
((type_*)(AI_ARRAY_OBJ(array_)->data))
|
||||
|
||||
#define AI_ARRAY_OBJ_DATA_START(array_, type_) \
|
||||
((type_*)(AI_ARRAY_OBJ(array_)->data_start))
|
||||
|
||||
#define AI_ARRAY_OBJ_ELEM(array_, type_, pos_) \
|
||||
AI_ARRAY_OBJ_DATA(array_, type_)[(pos_)]
|
||||
|
||||
#define AI_ARRAY_OBJ_INIT_STATIC(type_, format_, size_, ...) { \
|
||||
.format = AI_FMT_OBJ(format_), \
|
||||
.size = (ai_array_size)(size_), \
|
||||
.data = (ai_ptr)((type_[]){ __VA_ARGS__ }), \
|
||||
.data_start = AI_PTR(0), \
|
||||
}
|
||||
|
||||
#define AI_ARRAY_OBJ_INIT(format_, data_, data_start_, size_) { \
|
||||
.format = AI_FMT_OBJ(format_), \
|
||||
.size = (ai_array_size)(size_), \
|
||||
.data = AI_PTR(data_), \
|
||||
.data_start = AI_PTR(data_start_) }
|
||||
|
||||
#define AI_ARRAY_OBJ_DECLARE_STATIC(name_, type_, format_, attr_, size_, ...) \
|
||||
AI_ALIGNED(4) \
|
||||
attr_ ai_array name_ = AI_ARRAY_OBJ_INIT_STATIC(type_, format_, size_, __VA_ARGS__);
|
||||
|
||||
|
||||
#define AI_ARRAY_OBJ_DECLARE(name_, format_, data_, data_start_, size_, attr_) \
|
||||
AI_ALIGNED(4) \
|
||||
attr_ ai_array name_ = AI_ARRAY_OBJ_INIT(format_, data_, data_start_, size_);
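
/*!
* Illustrative sketch: a small constant float array declared with the static
* helper above and read back through the accessor macros. AI_ARRAY_FORMAT_FLOAT
* is assumed to be the float entry of the ai_array_format enum declared in
* ai_datatypes_format.h.
* @code
*   AI_ARRAY_OBJ_DECLARE_STATIC(bias, ai_float, AI_ARRAY_FORMAT_FLOAT,
*                               static const, 4, 0.0f, 0.1f, 0.2f, 0.3f)
*   // AI_ARRAY_OBJ_SIZE(&bias)              == 4
*   // AI_ARRAY_OBJ_ELEM(&bias, ai_float, 2) == 0.2f
* @endcode
*/
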
|
||||
/********************************* ai_array macros ***************************/
|
||||
#define AI_PACK_ARRAYS(...) \
|
||||
(ai_array[]) { AI_PACK(__VA_ARGS__) }
|
||||
|
||||
#define AI_ARRAY_LIST_OBJ_INIT(arrays_ptr_) \
|
||||
((ai_array*)(arrays_ptr_))
|
||||
|
||||
#define AI_ARRAY_LIST_FLAGS(list_) \
|
||||
( (list_) ? (list_)->flags : 0x0 )
|
||||
|
||||
#define AI_ARRAY_LIST_SIZE(list_) \
|
||||
( (list_) ? (list_)->size : 0 )
|
||||
|
||||
#define AI_ARRAY_LIST_DATA(list_, pos_) \
|
||||
( (list_) ? &((list_)->data[pos_]) : NULL )
|
||||
|
||||
|
||||
/********************************* ai_tensor macros **************************/
|
||||
#define AI_TENSOR_OBJ(obj_) \
|
||||
((ai_tensor*)(obj_))
|
||||
|
||||
#define AI_TENSOR_INFO_OBJ_INIT(id_, flags_, data_size_) { \
|
||||
.id = (id_), \
|
||||
.flags = (flags_), \
|
||||
.data_size = (data_size_) \
|
||||
}
|
||||
|
||||
#define AI_TENSOR_OBJ_INIT(id_, flags_, shape_, stride_, arrays_size_, arrays_ptr_, klass_obj_) { \
|
||||
.klass = (ai_klass_obj)(klass_obj_), \
|
||||
.info = AI_TENSOR_INFO_OBJ_INIT(id_, flags_, arrays_size_), \
|
||||
.shape = shape_, \
|
||||
.stride = stride_, \
|
||||
.data = AI_ARRAY_LIST_OBJ_INIT(AI_PACK(arrays_ptr_)), \
|
||||
}
|
||||
|
||||
#define AI_TENSOR_OBJ_DECLARE(name_, attr_, id_, flags_, shape_, stride_, \
|
||||
arrays_size_, arrays_ptr_, klass_obj_) \
|
||||
AI_ALIGNED(4) \
|
||||
attr_ ai_tensor name_ = AI_TENSOR_OBJ_INIT(id_, flags_, AI_PACK(shape_), AI_PACK(stride_), \
|
||||
arrays_size_, AI_PACK(arrays_ptr_), AI_PACK(klass_obj_));
|
||||
|
||||
/********************************* TENSOR STATE MACROS ***********************/
|
||||
#define AI_TENSOR_STATE_OBJ_INIT(end_ptr_ , curr_ptr_, stride_, size_) \
|
||||
{ (end_ptr_), (curr_ptr_), (stride_), (size_) }
|
||||
|
||||
/********************************* TENSOR LIST MACROS ************************/
|
||||
#define AI_TENSOR_LIST_EMPTY \
|
||||
{ .size = 0, .flags = AI_FLAG_NONE, \
|
||||
.tensor = (ai_tensor*[]) { NULL }, .info = NULL \
|
||||
}
|
||||
|
||||
#define AI_TENSOR_LIST_ENTRY(...) \
|
||||
{ .size = AI_NUMARGS(__VA_ARGS__), .flags = AI_FLAG_NONE, \
|
||||
.tensor = (ai_tensor*[]) { __VA_ARGS__ }, .info = NULL \
|
||||
}
|
||||
|
||||
#define AI_TENSOR_LIST_OBJ_DECLARE(name_, attr_, ...) \
|
||||
AI_ALIGNED(4) \
|
||||
attr_ ai_tensor_list name_ = AI_TENSOR_LIST_ENTRY(__VA_ARGS__);
|
||||
|
||||
/********************************* TENSOR LIST I/O MACROS ********************/
|
||||
#define AI_TENSOR_LIST_IO_ENTRY(flags_, size_, ...) \
|
||||
{ .size = (size_), .flags = (flags_), \
|
||||
.tensor = (ai_tensor*[]) { __VA_ARGS__ }, \
|
||||
.info = (ai_tensor_list_info[1]) { { \
|
||||
.buffer = (ai_buffer[size_])AI_STRUCT_INIT, \
|
||||
.state = (ai_tensor_state[size_])AI_STRUCT_INIT, \
|
||||
.meta = (ai_buffer_meta_info[size_])AI_STRUCT_INIT \
|
||||
} } \
|
||||
}
|
||||
|
||||
/********************************* TENSOR CHAIN MACROS ***********************/
|
||||
#define AI_TENSOR_CHAIN_OBJ_INIT(flags_, size_, ...) \
|
||||
{ .size = (size_), .flags = (flags_), \
|
||||
.chain = (ai_tensor_list[]){ __VA_ARGS__ } }
|
||||
|
||||
#define AI_TENSOR_CHAIN_OBJ_DECLARE(name_, attr_, size_, ...) \
|
||||
AI_ALIGNED(4) \
|
||||
attr_ ai_tensor_chain name_ = \
|
||||
AI_TENSOR_CHAIN_OBJ_INIT(AI_FLAG_NONE, size_, __VA_ARGS__);
|
||||
|
||||
|
||||
/********************************* TENSOR CHAIN I/O MACROS *******************/
|
||||
#define AI_TENSOR_CHAIN_IO_OBJ_INIT(flags_, in_tensor_list_, out_tensor_list_) \
|
||||
{ .chain = (ai_tensor_list[]){ in_tensor_list_, out_tensor_list_ }, \
|
||||
.size = 2, .flags = (flags_) }
|
||||
|
||||
#define AI_TENSOR_CHAIN_IO_OBJ_DECLARE( \
|
||||
name_, attr_, flags_, in_tensor_list_, out_tensor_list_) \
|
||||
AI_ALIGNED(4) \
|
||||
attr_ ai_tensor_chain_io name_ = \
|
||||
AI_TENSOR_CHAIN_IO_OBJ_INIT(flags_, in_tensor_list_, out_tensor_list_);
|
||||
|
||||
/******************************* NETWORK SECTION ****************************/
|
||||
#define AI_NETWORK_OBJ(obj_) \
|
||||
((ai_network*)(obj_))
|
||||
|
||||
|
||||
#define AI_NETWORK_OBJ_INIT( \
|
||||
weights_buffer_, activations_buffer_, \
|
||||
in_tensor_list_ptr_, out_tensor_list_ptr_, \
|
||||
in_node_ptr_, signature_, klass_obj_) { \
|
||||
.magic = 0x0, \
|
||||
.signature = signature_, \
|
||||
.klass = AI_KLASS_OBJ(klass_obj_), \
|
||||
.flags = AI_FLAG_NONE, \
|
||||
.error = AI_ERROR_INIT(NONE, NONE), \
|
||||
.n_batches = 0, \
|
||||
.batch_id = 0, \
|
||||
.params = weights_buffer_, \
|
||||
.activations = activations_buffer_, \
|
||||
.tensors = AI_TENSOR_CHAIN_IO_OBJ_INIT(AI_FLAG_NONE, \
|
||||
AI_PACK(in_tensor_list_ptr_), \
|
||||
AI_PACK(out_tensor_list_ptr_)), \
|
||||
.input_node = AI_NODE_OBJ(in_node_ptr_), \
|
||||
.current_node = AI_NODE_OBJ(NULL), \
|
||||
}
|
||||
|
||||
#define AI_NETWORK_OBJ_DECLARE( \
|
||||
name_, attr_, \
|
||||
weights_buffer_, activations_buffer_, \
|
||||
in_tensor_list_ptr_, out_tensor_list_ptr_, \
|
||||
in_node_ptr_, signature_, klass_obj_) \
|
||||
AI_ALIGNED(4) \
|
||||
attr_ ai_network name_ = AI_NETWORK_OBJ_INIT( \
|
||||
AI_PACK(weights_buffer_), \
|
||||
AI_PACK(activations_buffer_), \
|
||||
AI_PACK(in_tensor_list_ptr_), \
|
||||
AI_PACK(out_tensor_list_ptr_), \
|
||||
(in_node_ptr_), (signature_), (klass_obj_));
|
||||
|
||||
#define AI_NETWORK_ACQUIRE_CTX(handle_) \
|
||||
AI_NETWORK_OBJ(ai_platform_context_acquire(handle_))
|
||||
|
||||
|
||||
/******************************************************************************/
|
||||
AI_API_DECLARE_BEGIN
|
||||
|
||||
/*!
|
||||
* @typedef ai_klass_obj
|
||||
* @ingroup ai_platform_interface
|
||||
* @brief handler to (private) generic subclass derivatives implementation
|
||||
*/
|
||||
typedef void* ai_klass_obj;
|
||||
|
||||
/*!
|
||||
* @typedef ai_ptr
|
||||
* @ingroup ai_platform_interface
|
||||
* @brief Byte pointer data addressing
|
||||
*/
|
||||
typedef uint8_t* ai_ptr;
|
||||
|
||||
/*!
|
||||
* @typedef ai_ptr_const
|
||||
* @ingroup ai_platform_interface
|
||||
* @brief Constant byte pointer data addressing
|
||||
*/
|
||||
typedef const uint8_t* ai_ptr_const;
|
||||
|
||||
/*!
|
||||
* @typedef ai_ptr_offset
|
||||
* @ingroup ai_platform_interface
|
||||
* @brief byte offset for computing strides
|
||||
*/
|
||||
typedef int32_t ai_ptr_offset;
|
||||
|
||||
/*!
|
||||
* @typedef ai_flags
|
||||
* @ingroup ai_platform_interface
|
||||
* @brief bitmask for flags management
|
||||
*/
|
||||
typedef uint32_t ai_flags;
|
||||
|
||||
/*!
|
||||
* @typedef ai_magic
|
||||
* @ingroup ai_platform_interface
|
||||
* @brief magic field to mark internal datastructures
|
||||
*/
|
||||
typedef uint32_t ai_magic;
|
||||
|
||||
|
||||
#define AI_CONTEXT_FIELDS \
|
||||
ai_magic magic; /*!< magic word to mark valid contexts datastructs*/ \
|
||||
ai_signature signature; /*!< 32bit signature for network consistency checks */
|
||||
|
||||
#define AI_CONTEXT_OBJ(obj) ((ai_context*)(obj))
|
||||
|
||||
/*!
|
||||
* @typedef ai_context
|
||||
* @ingroup ai_platform_interface
|
||||
* @brief Abstract internal context header exposed to codegen interface
|
||||
*/
|
||||
AI_PACKED_STRUCT_START
|
||||
typedef AI_ALIGNED_TYPE(struct, 4) AI_PACKED ai_context_ {
|
||||
AI_CONTEXT_FIELDS
|
||||
} ai_context;
|
||||
AI_PACKED_STRUCT_END
|
||||
|
||||
/*!
|
||||
* @enum ai_shape_2d_type
|
||||
* @ingroup ai_platform_interface
|
||||
* @brief Codes for the 2D tensor dimensions
|
||||
*/
|
||||
typedef enum {
|
||||
AI_SHAPE_2D_MAX_DIMENSION = 0x2,
|
||||
AI_SHAPE_2D_HEIGHT = 0x1,
|
||||
AI_SHAPE_2D_WIDTH = 0x0,
|
||||
} ai_shape_2d_type;
|
||||
|
||||
|
||||
/*!
|
||||
* @struct ai_shape_2d
|
||||
* @ingroup ai_platform_interface
|
||||
* @brief Dimensions for generic 2D tensors
|
||||
*/
|
||||
AI_PACKED_STRUCT_START
|
||||
typedef AI_ALIGNED_TYPE(struct, 4) AI_PACKED ai_shape_2d_s {
|
||||
ai_shape_dimension data[AI_SHAPE_2D_MAX_DIMENSION]; /*!< 2D tensor dimensions */
|
||||
} ai_shape_2d;
|
||||
AI_PACKED_STRUCT_END
|
||||
|
||||
|
||||
/*!
|
||||
* @struct ai_array
|
||||
* @ingroup ai_platform_interface
|
||||
* @brief Generic flattened array with size
|
||||
* and (byte) stride of each item
|
||||
*/
|
||||
AI_PACKED_STRUCT_START
|
||||
typedef AI_ALIGNED_TYPE(struct, 4) AI_PACKED ai_array_s {
|
||||
// ai_u16 flags; /*!< optional flags to store array list attributes */
|
||||
// ai_u16 id; /*!< ID of the array object */
|
||||
ai_array_format format; /*!< array format (see @ref ai_array_format) */
|
||||
ai_array_size size; /*!< number of elements in the array (NOT number
|
||||
of bytes!). The byte size of the array can be
determined using the @ref AI_ARRAY_GET_BYTE_SIZE
|
||||
macro */
|
||||
ai_ptr data; /*!< pointer to data */
|
||||
ai_ptr data_start; /*!< pointer to parent's data start address */
|
||||
} ai_array;
|
||||
AI_PACKED_STRUCT_END
|
||||
|
||||
/*!
|
||||
* @struct ai_tensor_info
|
||||
* @ingroup ai_platform_interface
|
||||
* @brief ai_tensor_info info structure for storing size of the array list,
|
||||
* tensor dimensionality, etc.
|
||||
*
|
||||
*/
|
||||
AI_PACKED_STRUCT_START
|
||||
typedef AI_ALIGNED_TYPE(struct, 4) AI_PACKED ai_tensor_info_s {
|
||||
ai_u16 id;
|
||||
ai_u8 flags;
|
||||
ai_u8 data_size;
|
||||
} ai_tensor_info;
|
||||
AI_PACKED_STRUCT_END
|
||||
|
||||
/*!
|
||||
* @struct ai_tensor
|
||||
* @ingroup ai_platform_interface
|
||||
* @brief Generic tensor structure for storing parameters and activations
|
||||
*
|
||||
* The data is stored in a flattened array with an implicit order given by the
|
||||
* reverse order in @ref ai_shape_dimension:
|
||||
* in_channels, channels, width, height.
|
||||
*/
|
||||
AI_PACKED_STRUCT_START
|
||||
typedef AI_ALIGNED_TYPE(struct, 4) AI_PACKED ai_tensor_s {
|
||||
ai_klass_obj klass; /*!< opaque pointer to klass context */
|
||||
ai_tensor_info info; /*!< tensor info metadata see @ref ai_tensor_info)*/
|
||||
ai_shape shape; /*!< tensor shape see @ref ai_shape */
|
||||
ai_stride stride; /*!< tensor stride see @ref ai_stride */
|
||||
ai_array* data; /*!< flattened array pointer to tensor data */
|
||||
} ai_tensor;
|
||||
AI_PACKED_STRUCT_END
|
||||
|
||||
/*!
|
||||
* @struct ai_tensor_state
|
||||
* @ingroup ai_platform_interface
|
||||
* @brief state context for tensor management (used for I/O network tensors)
|
||||
*/
|
||||
AI_PACKED_STRUCT_START
|
||||
typedef AI_ALIGNED_TYPE(struct, 4) AI_PACKED ai_tensor_state_s {
|
||||
ai_ptr end_ptr; /*!< end address of the I/O tensor data buffer */
|
||||
ai_ptr curr_ptr; /*!< current address of the I/O tensor data buffer (for batching) */
|
||||
ai_ptr_offset stride; /*!< single batch buffer size (in bytes) */
|
||||
ai_size size; /*!< total size in bytes of the I/O tensor buffer */
|
||||
} ai_tensor_state;
|
||||
AI_PACKED_STRUCT_END
|
||||
|
||||
/*!
|
||||
* @struct ai_tensor_list_info
|
||||
* @ingroup ai_platform_interface
|
||||
* @brief info metadata for tensor list management (used for I/O network tensors)
|
||||
*/
|
||||
AI_PACKED_STRUCT_START
|
||||
typedef AI_ALIGNED_TYPE(struct, 4) AI_PACKED ai_tensor_list_info_s {
|
||||
ai_tensor_state* state; /*!< I/O buffer internal pointers state */
|
||||
ai_buffer* buffer; /*!< I/O buffer pointer */
|
||||
ai_buffer_meta_info* meta; /*!< I/O buffer meta information */
|
||||
} ai_tensor_list_info;
|
||||
AI_PACKED_STRUCT_END
|
||||
|
||||
/********************************* INTEGER QUANTIZATION DATATYPES ************/
|
||||
|
||||
#define AI_INTQ_INFO_OBJ_INIT(flags_, scale_ , zeropoint_) { \
|
||||
.scale = (scale_), \
|
||||
.zeropoint = (ai_handle)(zeropoint_), \
|
||||
.flags = (flags_), \
|
||||
}
|
||||
|
||||
|
||||
#define AI_PACK_INTQ_INFO_LIST(...) \
|
||||
(ai_intq_info_list[]) { AI_PACK(__VA_ARGS__) }
|
||||
|
||||
#define AI_PACK_INTQ_INFO(scale_, zp_) \
|
||||
(ai_intq_info[1]) { { .scale = AI_PACK(scale_), \
|
||||
.zeropoint = AI_PACK(zp_) } }
|
||||
|
||||
#define AI_PACK_INTQ_SCALE(...) \
|
||||
(ai_float[]) { AI_PACK(__VA_ARGS__) }
|
||||
|
||||
#define AI_PACK_INTQ_ZP(...) \
|
||||
(ai_i8[]) { AI_PACK(__VA_ARGS__) }
|
||||
|
||||
#define AI_PACK_UINTQ_ZP(...) \
|
||||
(ai_u8[]) { AI_PACK(__VA_ARGS__) }
|
||||
|
||||
|
||||
#define AI_INTQ_INFO_LIST_OBJ_EMPTY { 0 }
|
||||
|
||||
#define AI_INTQ_INFO_LIST_OBJ_INIT(flags_, size_, info_) \
|
||||
{ \
|
||||
.flags = (flags_), \
|
||||
.size = (size_), \
|
||||
.info = (info_), \
|
||||
}
|
||||
|
||||
#define AI_INTQ_INFO_LIST_OBJ_DECLARE(name_, attr_, ...) \
|
||||
AI_ALIGNED(4) \
|
||||
attr_ ai_intq_info_list name_ = \
|
||||
AI_INTQ_INFO_LIST_OBJ_INIT(AI_FLAG_NONE, __VA_ARGS__);
|
||||
|
||||
|
||||
/********************************* TENSOR CHAINS DATATYPES *******************/
|
||||
/*!
|
||||
* @enum ai_tensor_chain_type
|
||||
* @ingroup ai_platform_interface
|
||||
* @brief Enum for the different tensor chains supported in the library
|
||||
*/
|
||||
typedef enum {
|
||||
AI_TENSOR_CHAIN_INPUT = 0x0,
|
||||
AI_TENSOR_CHAIN_OUTPUT = 0x1,
|
||||
AI_TENSOR_CHAIN_WEIGHTS = 0x2,
|
||||
AI_TENSOR_CHAIN_SCRATCH = 0x3,
|
||||
AI_TENSOR_CHAIN_SIZE
|
||||
} ai_tensor_chain_type;
|
||||
|
||||
/*!
|
||||
* @struct ai_tensor_list
|
||||
* @ingroup ai_platform_interface
|
||||
* @brief list (in form of arrays) of internal nodes tensor pointers
|
||||
*/
|
||||
AI_PACKED_STRUCT_START
|
||||
typedef AI_ALIGNED_TYPE(struct, 4) AI_PACKED ai_tensor_list_s {
|
||||
ai_u16 size; /*!< number of elements in the tensor list */
|
||||
ai_u16 flags; /*!< optional flags to store tensor list attributes */
|
||||
ai_tensor** tensor; /*!< array of linked tensor pointer */
|
||||
ai_tensor_list_info* info; /*!< pointer to an array of metainfo associated to the tensors */
|
||||
} ai_tensor_list;
|
||||
AI_PACKED_STRUCT_END
|
||||
|
||||
|
||||
/*!
|
||||
* @struct ai_tensor_chain
|
||||
* @ingroup ai_platform_interface
|
||||
* @brief tensor chain datastruct for internal network nodes
|
||||
*/
|
||||
AI_PACKED_STRUCT_START
|
||||
typedef AI_ALIGNED_TYPE(struct, 4) AI_PACKED ai_tensor_chain_s {
|
||||
ai_u16 size;
|
||||
ai_u16 flags;
|
||||
ai_tensor_list* chain; /*!< pointer to a 4 sized array see @ref ai_tensor_chain_type */
|
||||
} ai_tensor_chain;
|
||||
AI_PACKED_STRUCT_END
|
||||
|
||||
/* forward function */
|
||||
struct ai_node_s;
|
||||
|
||||
/*!
|
||||
* @struct ai_network
|
||||
* @ingroup layers
|
||||
* @brief Structure encoding a sequential neural network
|
||||
*/
|
||||
AI_PACKED_STRUCT_START
|
||||
typedef AI_ALIGNED_TYPE(struct, 4) AI_PACKED ai_network_s {
|
||||
AI_CONTEXT_FIELDS
|
||||
ai_klass_obj klass; /*!< opaque handler to specific network implementations */
|
||||
ai_flags flags; /*!< bitflags mask to track some network state info */
|
||||
ai_error error; /*!< track 1st error code in the network */
|
||||
|
||||
ai_u16 n_batches; /*!< number of batches to process */
|
||||
ai_u16 batch_id; /*!< current batch to process, in [0, n_batches) */
|
||||
ai_buffer params; /*!< params buffer data */
|
||||
ai_buffer activations; /*!< activations buffer data */
|
||||
|
||||
ai_tensor_chain tensors; /*!< I/O tensor chain list see @ref ai_tensor_list */
|
||||
|
||||
struct ai_node_s* input_node; /*!< first node to execute */
|
||||
struct ai_node_s* current_node; /*!< current node to execute */
|
||||
} ai_network;
|
||||
AI_PACKED_STRUCT_END
|
||||
|
||||
/*!
|
||||
* @brief Get platform runtime lib revision version as string.
|
||||
* @ingroup ai_platform_interface
|
||||
* @return a string containing the revision of the runtime library
|
||||
*/
|
||||
AI_INTERFACE_TYPE
|
||||
const char* ai_platform_runtime_get_revision(void);
|
||||
|
||||
/*!
|
||||
* @brief Get platform runtime lib version as datastruct.
|
||||
* @ingroup ai_platform_interface
|
||||
* @return a datastruct containing the version of the runtime library
|
||||
*/
|
||||
AI_INTERFACE_TYPE
|
||||
ai_platform_version ai_platform_runtime_get_version(void);
|
||||
|
||||
/*!
|
||||
* @brief Get platform public APIs version as datastruct.
|
||||
* @ingroup ai_platform_interface
|
||||
* @return a datastruct containing the version of the public APIs
|
||||
*/
|
||||
AI_INTERFACE_TYPE
|
||||
ai_platform_version ai_platform_api_get_version(void);
|
||||
|
||||
/*!
|
||||
* @brief Get platform interface private APIs version as datastruct.
|
||||
* @ingroup ai_platform_interface
|
||||
* @return a datastruct containing the version of the interface private APIs
|
||||
*/
|
||||
AI_INTERFACE_TYPE
|
||||
ai_platform_version ai_platform_interface_api_get_version(void);
|
||||
|
||||
/*!
|
||||
* @brief Get platform context.
|
||||
* @ingroup ai_platform_interface
|
||||
* @return a valid context handle or NULL otherwise
|
||||
*/
|
||||
AI_INTERFACE_TYPE
|
||||
ai_context* ai_platform_context_acquire(const ai_handle handle);
|
||||
|
||||
/*!
|
||||
* @brief Release platform context.
|
||||
* @ingroup ai_platform_interface
|
||||
* @return an opaque handle to the released object
|
||||
*/
|
||||
AI_INTERFACE_TYPE
|
||||
ai_handle ai_platform_context_release(ai_context* ctx);
|
||||
|
||||
|
||||
/*!
|
||||
* @brief get **first** error tracked when using the network
|
||||
* @ingroup ai_platform_interface
|
||||
* @param network an opaque handler to the network context
|
||||
* @return ai_error the FIRST error generated during network processing
|
||||
*/
|
||||
AI_INTERFACE_TYPE
|
||||
ai_error ai_platform_network_get_error(ai_handle network);
|
||||
|
||||
|
||||
/*!
|
||||
* @brief Set specific error code of the network. if an error is already present
|
||||
* keep it
|
||||
* @ingroup ai_platform_interface
|
||||
* @param net_ctx a pointer to the network context
|
||||
* @param type error type as defined in @ref ai_error_type
|
||||
* @param code error code as defined in @ref ai_error_code
|
||||
* @return true if no previous errors were recorded, false if a previous error
|
||||
* is present or context is invalid
|
||||
*/
|
||||
AI_INTERFACE_TYPE
|
||||
ai_bool ai_platform_network_set_error(
|
||||
ai_network* net_ctx, const ai_error_type type, const ai_error_code code);
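
/*!
* Illustrative sketch: inside the runtime the AI_ERROR_TRAP() helper defined
* above is the usual shorthand for this call; an earlier recorded error is
* preserved (net_ctx and params are placeholders).
* @code
*   if (!params) {
*     AI_ERROR_TRAP(net_ctx, INVALID_PARAM, NETWORK_PARAMS);
*     return NULL;
*   }
* @endcode
*/
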
/*!
|
||||
* @brief Finalize network report datastruct with I/O buffer infos
|
||||
* @ingroup ai_platform_interface
|
||||
* @return true if the report has been finalized correctly, false otherwise
|
||||
*/
|
||||
AI_INTERFACE_TYPE
|
||||
ai_bool ai_platform_api_get_network_report(
|
||||
ai_handle network, ai_network_report* r);
|
||||
|
||||
/*!
|
||||
* @brief create a network context with some error check
|
||||
* @ingroup ai_platform_interface
|
||||
* @param network a pointer to an opaque handle of the network context
* @param network_config an (optional) pointer to the network config buffer info
|
||||
* @param net_ctx a pointer to the network context structure to initialize
|
||||
* @param tools_major major version id of the tool used to generate the network
|
||||
* @param tools_minor minor version id of the tool used to generate the network
|
||||
* @param tools_micro micro version id of the tool used to generate the network
|
||||
* @return the error during network creation or error none if ok
|
||||
*/
|
||||
AI_INTERFACE_TYPE
|
||||
ai_error ai_platform_network_create(
|
||||
ai_handle* network, const ai_buffer* network_config,
|
||||
ai_network* net_ctx,
|
||||
const ai_u8 tools_major, const ai_u8 tools_minor, const ai_u8 tools_micro);
|
||||
|
||||
/*!
|
||||
* @brief destroy a network context
|
||||
* @ingroup ai_platform_interface
|
||||
* @param network a pointer to an opaque handle of the network context
|
||||
* @return AI_HANDLE_NULL if deallocation OK, same network handle if failed
|
||||
*/
|
||||
AI_INTERFACE_TYPE
|
||||
ai_handle ai_platform_network_destroy(ai_handle network);
|
||||
|
||||
/*!
|
||||
* @brief initialize the network context
|
||||
* @ingroup ai_platform_interface
|
||||
* @param network a pointer to an opaque handle of the network context
|
||||
* @return a valid network context, NULL if initialization failed
|
||||
*/
|
||||
AI_INTERFACE_TYPE
|
||||
ai_network* ai_platform_network_init(
|
||||
ai_handle network, const ai_network_params* params);
|
||||
|
||||
/*!
|
||||
* @brief main platform runtime execute of a network
|
||||
* @ingroup ai_platform_interface
|
||||
* @param network an opaque handler to the network context
|
||||
* @param input a pointer to the input buffer data to process
|
||||
* @param output a pointer to the output buffer
|
||||
* @return the number of batches processed from the input. A result <=0 in case
|
||||
* of error
|
||||
*/
|
||||
AI_INTERFACE_TYPE
|
||||
ai_i32 ai_platform_network_process(
|
||||
ai_handle network, const ai_buffer* input, ai_buffer* output);
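
/*!
* Illustrative end-to-end sketch of the private platform calls above, roughly
* what the generated per-network wrappers do (net_ctx, params, in_buf and
* out_buf are placeholders; the tool version triplet is arbitrary):
* @code
*   ai_handle h = AI_HANDLE_NULL;
*   const ai_error e = ai_platform_network_create(&h, NULL, &net_ctx, 1, 1, 0);
*   if ((e.type == AI_ERROR_NONE) &&
*       (ai_platform_network_init(h, &params) != NULL)) {
*     const ai_i32 n = ai_platform_network_process(h, &in_buf, &out_buf);
*     // n <= 0 signals an error; ai_platform_network_get_error(h) reports it
*   }
*   ai_platform_network_destroy(h);
* @endcode
*/
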
AI_API_DECLARE_END
|
||||
|
||||
#endif /*__AI_PLATFORM_INTERFACE_H__*/
|
||||
@ -1,291 +0,0 @@
|
||||
/**
|
||||
******************************************************************************
|
||||
* @file core_common.h
|
||||
* @author AST Embedded Analytics Research Platform
|
||||
* @date 20-Jul-2018
|
||||
* @brief header file of common core datatypes
|
||||
******************************************************************************
|
||||
* @attention
|
||||
*
|
||||
* <h2><center>© Copyright (c) 2018 STMicroelectronics.
|
||||
* All rights reserved.</center></h2>
|
||||
*
|
||||
* This software component is licensed by ST under Ultimate Liberty license
|
||||
* SLA0044, the "License"; You may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at:
|
||||
* www.st.com/SLA0044
|
||||
*
|
||||
******************************************************************************
|
||||
*/
|
||||
|
||||
#ifndef __CORE_COMMON_H_
|
||||
#define __CORE_COMMON_H_
|
||||
#pragma once
|
||||
|
||||
#include "ai_platform.h"
|
||||
#include "ai_platform_interface.h"
|
||||
#include "ai_datatypes_internal.h"
|
||||
#include "core_datatypes.h"
|
||||
#include "core_log.h"
|
||||
|
||||
/*!
|
||||
* @defgroup core_common Common Core Library Routines
|
||||
* @brief Common macros, datatypes and routines of core common module
|
||||
* @details This module contains the definitions and handling of the @ref ai_node
* datastructures. An ai_node is a generic abstraction for a network node that
* could be either a fixed function layer or an operator. Ideally the platform
* interface defined in the api module should handle and process generic nodes in
* the network, without relying on the fact that they are layer or operator
* datastructs. Specific implementation details should be kept inside the layers
* and operators modules. The core module additionally implements common routines
* used in the layers and operators modules.
|
||||
*/
|
||||
|
||||
/******************************************************************************/
|
||||
#ifdef HAS_AI_ASSERT
|
||||
#define ASSERT_ARRAY_SANITY(a_) \
|
||||
AI_ASSERT((a_) && (a_)->size>0)
|
||||
|
||||
#define ASSERT_ARRAY_DATA_SANITY(a_) \
|
||||
ASSERT_ARRAY_SANITY(a_) \
|
||||
AI_ASSERT((a_)->data && (a_)->data_start)
|
||||
|
||||
#define ASSERT_TENSOR_SANITY(t_) \
|
||||
AI_ASSERT((t_) && (t_)->data) \
|
||||
AI_ASSERT(CORE_TENSOR_GET_SHAPE_SIZE(t_)>0) \
|
||||
ASSERT_ARRAY_SANITY((t_)->data)
|
||||
|
||||
#define ASSERT_TENSOR_LIST_SANITY(tlist_) \
|
||||
AI_ASSERT((tlist_) && (GET_TENSOR_LIST_SIZE(tlist_)>0)) \
|
||||
|
||||
#define ASSERT_TENSOR_DATA_SANITY(t_) \
|
||||
ASSERT_TENSOR_SANITY(t_) \
|
||||
ASSERT_ARRAY_DATA_SANITY((t_)->data)
|
||||
|
||||
#define ASSERT_NODE_SANITY(node_) \
|
||||
do { \
|
||||
AI_ASSERT(AI_NODE_OBJ(node_)->tensors && AI_NODE_OBJ(node_)->tensors->chain) \
|
||||
ASSERT_TENSOR_SANITY(GET_TENSOR_IN(AI_NODE_OBJ(node_)->tensors, 0)) \
|
||||
ASSERT_TENSOR_SANITY(GET_TENSOR_OUT(AI_NODE_OBJ(node_)->tensors, 0)) \
|
||||
} while (0);
|
||||
#else
|
||||
#define ASSERT_ARRAY_SANITY(a_) /* ASSERT_ARRAY_SANITY */
|
||||
#define ASSERT_ARRAY_DATA_SANITY(a_) /* ASSERT_ARRAY_DATA_SANITY */
|
||||
#define ASSERT_TENSOR_SANITY(t_) /* ASSERT_TENSOR_SANITY */
|
||||
#define ASSERT_TENSOR_LIST_SANITY(tlist_) /* ASSERT_TENSOR_LIST_SANITY */
|
||||
#define ASSERT_TENSOR_DATA_SANITY(t_) /* ASSERT_TENSOR_DATA_SANITY */
|
||||
#define ASSERT_NODE_SANITY(node_) /* ASSERT_NODE_SANITY */
|
||||
#endif /*HAS_AI_ASSERT*/
|
||||
|
||||
|
||||
#if defined(__GNUC__) || defined(__clang__)
|
||||
/* Suppress unused function warnings */
|
||||
#define AI_UNUSED_FUNCTION __attribute__((unused))
|
||||
/* Manage false positives in address sanitizer */
|
||||
#define AI_NO_SANITIZE_ADDRESS __attribute__((no_sanitize_address))
|
||||
#else
|
||||
#define AI_UNUSED_FUNCTION /* AI_UNUSED_FUNCTION */
|
||||
#define AI_NO_SANITIZE_ADDRESS /* AI_NO_SANITIZE_ADDRESS */
|
||||
#endif
|
||||
|
||||
|
||||
/******************************************************************************/
|
||||
#define AI_NODE_TYPE(type_) \
|
||||
( (ai_node_type)((ai_u32)(type_)&0xFFFF) )
|
||||
|
||||
#define AI_NODE_OBJ(obj_) \
|
||||
((ai_node*)(obj_))
|
||||
|
||||
#define AI_NODE_FORWARD_FUNC(func_) \
|
||||
((node_forward_func)(func_))
|
||||
|
||||
#define AI_NODE_IS_FIRST(node) \
|
||||
(AI_NODE_OBJ(node)==AI_NODE_OBJ(AI_NODE_OBJ(node)->network->input_node))
|
||||
|
||||
#define AI_NODE_IS_LAST(node_) \
|
||||
((AI_NODE_OBJ(node_)==AI_NODE_OBJ(node_)->next) || \
|
||||
(AI_NODE_OBJ(node_)->next==NULL))
|
||||
|
||||
#define AI_NODE_COMMON_FIELDS_DECLARE \
|
||||
ai_node_type type; /*!< node type id (see @ref ai_node_type) */ \
|
||||
ai_id_obj id; /*!< node object instance id (see @ref ai_id_obj) */ \
|
||||
ai_klass_obj klass; /*!< opaque handler to specific layer implementations */ \
|
||||
struct ai_network_s* network; /*!< handle to global network context */ \
|
||||
struct ai_node_s* next; /*!< the next node object in the sequence */ \
|
||||
node_forward_func forward; /*!< forward function for the node */ \
|
||||
AI_CONST ai_tensor_chain* tensors; /*!< pointer to node tensor chain */
|
||||
|
||||
#define AI_NODE_COMMON_INIT(type_, id_, forward_, next_, network_, klass_obj_) \
|
||||
.type = AI_NODE_TYPE(type_), \
|
||||
.id = AI_ID_OBJ(id_), \
|
||||
.klass = AI_KLASS_OBJ(klass_obj_), \
|
||||
.network = AI_NETWORK_OBJ(network_), \
|
||||
.next = AI_NODE_OBJ(next_), \
|
||||
.forward = AI_NODE_FORWARD_FUNC(forward_), \
|
||||
.tensors = NULL
|
||||
|
||||
#define AI_FOR_EACH_NODE_DO(node_, nodes_) \
|
||||
for ( ai_node* node_ = AI_NODE_OBJ(nodes_); (node_); \
|
||||
node_ = ((AI_NODE_IS_LAST(node_)) ? NULL : (node_)->next) )
|
||||
|
||||
|
||||
/** TENSOR CHAINS LOOP MACROS & GETTERS *************************************/
|
||||
#define AI_FOR_EACH_TENSOR_CHAIN_DO(tlist_ptr_, chain_) \
|
||||
ai_tensor_list* tlist_ptr_ = (chain_)->chain; \
|
||||
for ( ; tlist_ptr_<(((chain_)->chain)+((chain_)->size)); tlist_ptr_++ )
|
||||
|
||||
#define AI_FOR_EACH_TENSOR_LIST_DO(idx_, t_ptr_, tlist_ptr_) \
|
||||
ai_tensor* t_ptr_ = (GET_TENSOR_LIST_SIZE(tlist_ptr_)>0) \
|
||||
? GET_TENSOR_LIST_ITEM(tlist_ptr_, 0) : NULL; \
|
||||
for ( ai_size idx_ = 0; \
|
||||
idx_ < GET_TENSOR_LIST_SIZE(tlist_ptr_) && \
|
||||
(t_ptr_ = GET_TENSOR_LIST_ITEM(tlist_ptr_, idx_)) != 0; ++idx_)
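/* Illustrative traversal (sketch added for clarity): visit every tensor in
 * every list of a node's tensor chain by nesting the two loop macros above.
 *
 *   AI_FOR_EACH_TENSOR_CHAIN_DO(tlist, AI_NODE_OBJ(node)->tensors) {
 *     AI_FOR_EACH_TENSOR_LIST_DO(idx, t, tlist) {
 *       // inspect tensor t at position idx of list tlist
 *     }
 *   }
 */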
|
||||
|
||||
#define GET_TENSOR_LIST_INFO(list_) \
|
||||
( (list_)->info )
|
||||
|
||||
#define GET_TENSOR_LIST_META(list_, pos_) \
|
||||
( &(GET_TENSOR_LIST_INFO(list_)->meta[pos_]) )
|
||||
|
||||
#define GET_TENSOR_LIST_STATE(list_, pos_) \
|
||||
( &(GET_TENSOR_LIST_INFO(list_)->state[pos_]) )
|
||||
|
||||
#define GET_TENSOR_LIST_BUFFER(list_, pos_) \
|
||||
( &(GET_TENSOR_LIST_INFO(list_)->buffer[pos_]) )
|
||||
|
||||
#define GET_TENSOR_LIST_ITEM(list_, pos_) \
|
||||
( (NULL!=(list_)->tensor) \
|
||||
? (list_)->tensor[(pos_)] : NULL )
|
||||
|
||||
#define GET_TENSOR_LIST_ITEMS(list_) \
|
||||
( (list_)->tensor )
|
||||
|
||||
#define GET_TENSOR_LIST_SIZE(list_) \
|
||||
( (NULL!=(list_)) ? (list_)->size : 0 )
|
||||
|
||||
#define GET_TENSOR_CHAIN_SIZE(chain_) \
|
||||
( (NULL!=(chain_)) ? (chain_)->size : 0 )
|
||||
|
||||
#define GET_TENSOR_LIST(chain_, type_) \
|
||||
( (AI_CONCAT(AI_TENSOR_CHAIN_, type_)<(chain_)->size) \
|
||||
? &(chain_)->chain[AI_CONCAT(AI_TENSOR_CHAIN_, type_)] : NULL )
|
||||
|
||||
#define GET_TENSOR_LIST_IN(chain_) \
|
||||
( GET_TENSOR_LIST(chain_, INPUT) )
|
||||
|
||||
#define GET_TENSOR_LIST_OUT(chain_) \
|
||||
( GET_TENSOR_LIST(chain_, OUTPUT) )
|
||||
|
||||
#define GET_TENSOR_LIST_WEIGTHS(chain_) \
|
||||
( GET_TENSOR_LIST(chain_, WEIGHTS) )
|
||||
|
||||
#define GET_TENSOR_LIST_SCRATCH(chain_) \
|
||||
( GET_TENSOR_LIST(chain_, SCRATCH) )
|
||||
|
||||
#define GET_TENSOR_IN(chain_, pos_) \
|
||||
( GET_TENSOR_LIST_ITEM(GET_TENSOR_LIST_IN(chain_), (pos_)) )
|
||||
|
||||
#define GET_TENSOR_OUT(chain_, pos_) \
|
||||
( GET_TENSOR_LIST_ITEM(GET_TENSOR_LIST_OUT(chain_), (pos_)) )
|
||||
|
||||
#define SET_TENSOR_IN(chain_, pos_) \
|
||||
( GET_TENSOR_LIST_IN(chain_)->tensor[(pos_)] )
|
||||
|
||||
#define SET_TENSOR_OUT(chain_, pos_) \
|
||||
( GET_TENSOR_LIST_OUT(chain_)->tensor[(pos_)] )
|
||||
|
||||
#define GET_TENSOR_WEIGHTS(chain_, pos_) \
|
||||
( GET_TENSOR_LIST_ITEM(GET_TENSOR_LIST_WEIGTHS(chain_), (pos_)) )
|
||||
|
||||
#define GET_TENSOR_SCRATCH(chain_, pos_) \
|
||||
( GET_TENSOR_LIST_ITEM(GET_TENSOR_LIST_SCRATCH(chain_), (pos_)) )
|
||||
|
||||
#define AI_NODE_IO_GET(node_, in_, out_) \
|
||||
ASSERT_NODE_SANITY(node_) \
|
||||
ai_tensor* in_ = GET_TENSOR_IN((node_)->tensors, 0); \
|
||||
ai_tensor* out_ = GET_TENSOR_OUT((node_)->tensors, 0); \
|
||||
ASSERT_TENSOR_SANITY(in_) \
|
||||
ASSERT_TENSOR_SANITY(out_)
|
||||
|
||||
/******************************************************************************/
|
||||
|
||||
#if 1
|
||||
#define SECTION_SERIAL(expr) expr
|
||||
#define SECTION_PARALLEL(expr)
|
||||
#else
|
||||
#define SECTION_SERIAL(expr)
|
||||
#define SECTION_PARALLEL(expr) expr
|
||||
#endif
|
||||
|
||||
AI_API_DECLARE_BEGIN
|
||||
|
||||
/*!
|
||||
* @struct ai_node_type
|
||||
* @ingroup core_common
|
||||
* @brief generic network node numeric type ID
|
||||
*
|
||||
*/
|
||||
typedef uint16_t ai_node_type;
|
||||
|
||||
/*!
|
||||
* @typedef void (*node_forward_func)(struct ai_node_s* node)
|
||||
* @ingroup core_common
|
||||
* @brief Callback signatures for all forward functions
|
||||
*/
|
||||
typedef void (*node_forward_func)(struct ai_node_s* node);
|
||||
|
||||
/*!
|
||||
* @typedef ai_float (*func_nl_el)(const ai_float x)
|
||||
* @ingroup core_common
|
||||
* @brief Function pointer for generic elementwise transforms
|
||||
*
|
||||
* This function pointer abstracts a generic nonlinear function applied to a
|
||||
* single element. See @ref ai_math_sqrt in @ref math_helpers as examples.
|
||||
*/
|
||||
typedef ai_float (*func_nl_el)(const ai_float x);
|
||||
|
||||
/*!
|
||||
* @struct ai_node
|
||||
* @ingroup core_common
|
||||
* @brief Structure encoding a generic node of the network
|
||||
*
|
||||
* The node struct includes information about the network it belongs to, the
|
||||
* next node in a sequential network and the forward function. The forward
|
||||
* functions are implemented in the @ref layers module.
|
||||
*/
|
||||
typedef AI_ALIGNED_TYPE(struct, 4) ai_node_s {
|
||||
AI_NODE_COMMON_FIELDS_DECLARE
|
||||
} ai_node;
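/* Illustrative sketch (added for clarity, not part of the original module): a
 * minimal forward pass over a chain of ai_node objects, driven by the
 * AI_FOR_EACH_NODE_DO loop macro defined above. The first node is assumed to
 * be available to the caller (e.g. from the network input_node field). */
static AI_UNUSED_FUNCTION
void example_forward_all_nodes(ai_node* first)
{
  AI_FOR_EACH_NODE_DO(node, first) {
    if (node->forward)
      node->forward(node);   /* run the node-specific forward function */
  }
}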
|
||||
|
||||
/*!
|
||||
* @brief initialize core module
|
||||
* @ingroup core_common
|
||||
* @return false if initialization fails, true otherwise
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
ai_bool core_init(void);
|
||||
|
||||
/*!
|
||||
* @brief get the first error raised during processing
* @ingroup core_common
* @param[out] error the @ref ai_error recorded during processing
* @return the first error generated during processing; AI_ERROR_NONE if no error occurred
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
ai_error core_get_error(ai_error* error);
|
||||
|
||||
/*!
|
||||
* @brief set error recorded during processing
|
||||
* @ingroup core_common
|
||||
* @param[out] error the @ref ai_error to set
|
||||
* @param[in] type the specific error type to set
|
||||
* @param[in] code the specific error code to set
|
||||
* @return true if the error is set, false if a previous error was already recorded
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
ai_bool core_set_error(
|
||||
ai_error* error, const ai_error_type type, const ai_error_code code);
|
||||
|
||||
AI_API_DECLARE_END
|
||||
|
||||
#endif /*__CORE_COMMON_H_*/
|
||||
@ -1,72 +0,0 @@
|
||||
/**
|
||||
******************************************************************************
|
||||
* @file core_utils.h
|
||||
* @author AST Embedded Analytics Research Platform
|
||||
* @date 16-Aug-2018
|
||||
* @brief header file of core utils routines
|
||||
******************************************************************************
|
||||
* @attention
|
||||
*
|
||||
* <h2><center>© COPYRIGHT(c) 2018 STMicroelectronics</center></h2>
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without modification,
|
||||
* are permitted provided that the following conditions are met:
|
||||
* 1. Redistributions of source code must retain the above copyright notice,
|
||||
* this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
* this list of conditions and the following disclaimer in the documentation
|
||||
* and/or other materials provided with the distribution.
|
||||
* 3. Neither the name of STMicroelectronics nor the names of its contributors
|
||||
* may be used to endorse or promote products derived from this software
|
||||
* without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
******************************************************************************
|
||||
*/
|
||||
|
||||
#ifndef __CORE_CONVERT_H_
|
||||
#define __CORE_CONVERT_H_
|
||||
#pragma once
|
||||
|
||||
#include "ai_platform.h"
|
||||
#include "ai_platform_interface.h"
|
||||
|
||||
#include "core_common.h"
|
||||
|
||||
AI_API_DECLARE_BEGIN
|
||||
|
||||
/*!
|
||||
* @defgroup core_convert Core Convert Routines
|
||||
* @brief Implementation of core node format conversion routines (Q7 to float, etc.)
|
||||
*/
|
||||
|
||||
|
||||
/*!
|
||||
* @brief Convert input tensor array from input format to output format
|
||||
* @ingroup core_convert
|
||||
* @param[in] pNode a handle to the node (layer or operator) carrying the tensor information
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void node_convert(ai_node *pNode);
|
||||
|
||||
/*!
|
||||
* @brief Convert a shape struct into a stride struct
|
||||
* @ingroup core_convert
|
||||
* @param[out] out a pointer to the stride struct to be filled
* @param[in] in a pointer to the shape to convert
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void core_shape_to_stride(ai_stride* out, const ai_shape* in);
|
||||
|
||||
|
||||
#endif /*__CORE_CONVERT_H_*/
|
||||
@ -1,59 +0,0 @@
|
||||
/**
|
||||
******************************************************************************
|
||||
* @file core_datatypes.h
|
||||
* @author AST Embedded Analytics Research Platform
|
||||
* @date 22-Aug-2018
|
||||
* @brief header file of core module private defines and datatypes,
* not exposed to the public API nor to the codegen tool
|
||||
******************************************************************************
|
||||
* @attention
|
||||
*
|
||||
* <h2><center>© Copyright (c) 2018 STMicroelectronics.
|
||||
* All rights reserved.</center></h2>
|
||||
*
|
||||
* This software component is licensed by ST under Ultimate Liberty license
|
||||
* SLA0044, the "License"; You may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at:
|
||||
* www.st.com/SLA0044
|
||||
*
|
||||
******************************************************************************
|
||||
*/
|
||||
|
||||
#ifndef __AI_CORE_DATATYPES_H_
|
||||
#define __AI_CORE_DATATYPES_H_
|
||||
#pragma once
|
||||
#include <stdint.h>
|
||||
|
||||
/*!
|
||||
* @defgroup core_datatypes Core Module Datatypes
|
||||
* @brief Data structures and defines used by core module
|
||||
*/
|
||||
|
||||
/*!
|
||||
* @brief platform runtime core library version
|
||||
*/
|
||||
#define AI_PLATFORM_RUNTIME_MAJOR 4
|
||||
#define AI_PLATFORM_RUNTIME_MINOR 1
|
||||
#define AI_PLATFORM_RUNTIME_MICRO 0
|
||||
|
||||
#define AI_MAGIC_CONTEXT_TOKEN (0xA1C00100) /*!< AI Cool! Magic Token */
|
||||
|
||||
#define AI_MAGIC_INSPECTOR_TOKEN (0xA1C00101) /*!< AI Cool! Magic Token */
|
||||
|
||||
|
||||
#define AI_ID_OBJ(id) \
|
||||
((ai_id_obj)(id))
|
||||
|
||||
#define AI_C_ARRAY_COUNT(array_) \
|
||||
( sizeof(array_) / sizeof((array_)[0]) )
|
||||
|
||||
/*!
|
||||
* @typedef ai_id_obj
|
||||
* @ingroup core_datatypes
|
||||
* @brief numeric identifier for generic object instances (e.g. layers,
|
||||
* operators, etc.). It is used by the codegen tool to keep track of the
* specific instances created
|
||||
*/
|
||||
typedef uint16_t ai_id_obj;
|
||||
|
||||
#endif /*__AI_CORE_DATATYPES_H_*/
|
||||
@ -1,117 +0,0 @@
|
||||
/**
|
||||
******************************************************************************
|
||||
* @file core_log.h
|
||||
* @author AST Embedded Analytics Research Platform
|
||||
* @date 14-Aug-2018
|
||||
* @brief header file of core log interfaces
|
||||
******************************************************************************
|
||||
* @attention
|
||||
*
|
||||
* <h2><center>© Copyright (c) 2018 STMicroelectronics.
|
||||
* All rights reserved.</center></h2>
|
||||
*
|
||||
* This software component is licensed by ST under Ultimate Liberty license
|
||||
* SLA0044, the "License"; You may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at:
|
||||
* www.st.com/SLA0044
|
||||
*
|
||||
******************************************************************************
|
||||
*/
|
||||
|
||||
#ifndef __CORE_LOG_H_
|
||||
#define __CORE_LOG_H_
|
||||
#pragma once
|
||||
|
||||
#include "ai_platform.h"
|
||||
|
||||
/*!
* @defgroup core_log Logger core routines wrapper interface
* @brief Common macros, datatypes and routines of the ai logger module
* @details This header defines the wrapping macro interfaces used to handle the
* global logger module. These macros are defined when the macro HAS_LOG is
* defined, otherwise they are all set to NOP routines and no logger code is
* compiled at all. When the macro HAS_LOG is defined, only the log messages
* whose level id is less than or equal to the value of the macro are compiled.
* Thus, to compile in only the log messages up to the error level, the value of
* HAS_LOG must be equal to the enum value of the LOG_ERROR macro (i.e. 3). A
* value of 6 includes all log messages up to the lower-severity LOG_TRACE level.
*/
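/* Illustrative configuration (added for clarity; HAS_LOG is typically passed
 * from the build system rather than defined in source):
 *
 *   CFLAGS += -DHAS_LOG=3              // LOG_ERROR level: errors and fatals only
 *
 *   AI_LOG_ERROR("allocation failed")  // compiled in
 *   AI_LOG_DEBUG("tensor dump")        // compiled out (expands to a no-op)
 */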
|
||||
|
||||
#if defined HAS_LOG && (HAS_LOG>=0)
|
||||
#include "ai_log.h"
|
||||
#define AI_LOG_SECTION(...) { __VA_ARGS__ }
|
||||
|
||||
#define AI_LOG_ACQUIRE() \
|
||||
ai_log_acquire()
|
||||
#define AI_LOG_SET_LEVEL(level_) \
|
||||
AI_WRAP_FUNC(ai_log_set_level(level_);)
|
||||
#define AI_LOG_SET_QUIET(onoff_) \
|
||||
AI_WRAP_FUNC(ai_log_set_quiet(onoff_);)
|
||||
#define AI_LOG_SET_LOCK_FN(fn_, udata_) \
|
||||
AI_WRAP_FUNC(ai_log_set_lock(fn_, udata_);)
|
||||
#define AI_LOG_CHANNEL_PUSH(level_, fn_, udata_) \
|
||||
AI_WRAP_FUNC(ai_log_channel_push(level_, fn_, udata_);)
|
||||
#define AI_LOG_CHANNEL_POP(fn_, udata_) \
|
||||
AI_WRAP_FUNC(ai_log_channel_pop(fn_, udata_);)
|
||||
#ifdef LOG_USE_FILE
|
||||
#define AI_LOG_SET_FILE_POINTER(fp_) \
|
||||
AI_WRAP_FUNC(ai_log_set_fp(fp_);)
|
||||
#else
|
||||
#define AI_LOG_SET_FILE_POINTER(fp_) \
|
||||
/*AI_LOG_SET_FILE_POINTER()*/
|
||||
#endif
|
||||
#else
|
||||
#define AI_LOG_SECTION(...) /*AI_LOG_SECTION()*/
|
||||
|
||||
#define AI_LOG_ACQUIRE() (NULL)
|
||||
#define AI_LOG_SET_LEVEL(level_) /*AI_LOG_SET_LEVEL()*/
|
||||
#define AI_LOG_SET_QUIET(onoff_) /*AI_LOG_SET_QUIET()*/
|
||||
#define AI_LOG_SET_LOCK_FN(fn_, udata_) /*AI_LOG_SET_LOCK_FN()*/
|
||||
#define AI_LOG_CHANNEL_PUSH(level_, fn_, udata_) /*AI_LOG_CHANNEL_PUSH()*/
|
||||
#define AI_LOG_CHANNEL_POP(fn_, udata_) /*AI_LOG_CHANNEL_POP()*/
|
||||
#define AI_LOG_SET_FILE_POINTER(fp_) /*AI_LOG_SET_FILE_POINTER()*/
|
||||
#endif
|
||||
|
||||
#if defined HAS_LOG && (HAS_LOG>=LOG_SUDO)
|
||||
#define AI_LOG_SUDO(...) AI_WRAP_FUNC(ai_log_log(LOG_SUDO, __FILE__, __LINE__, __VA_ARGS__);)
|
||||
#else
|
||||
#define AI_LOG_SUDO(...) /*AI_LOG_SUDO()*/
|
||||
#endif
|
||||
|
||||
#if defined HAS_LOG && (HAS_LOG>=LOG_TRACE)
|
||||
#define AI_LOG_TRACE(...) AI_WRAP_FUNC(ai_log_log(LOG_TRACE, __FILE__, __LINE__, __VA_ARGS__);)
|
||||
#else
|
||||
#define AI_LOG_TRACE(...) /*AI_LOG_TRACE()*/
|
||||
#endif
|
||||
|
||||
#if defined HAS_LOG && (HAS_LOG>=LOG_DEBUG)
|
||||
#define AI_LOG_DEBUG(...) AI_WRAP_FUNC(ai_log_log(LOG_DEBUG, __FILE__, __LINE__, __VA_ARGS__);)
|
||||
#else
|
||||
#define AI_LOG_DEBUG(...) /*AI_LOG_DEBUG()*/
|
||||
#endif
|
||||
|
||||
#if defined HAS_LOG && (HAS_LOG>=LOG_INFO)
|
||||
#define AI_LOG_INFO(...) AI_WRAP_FUNC(ai_log_log(LOG_INFO, __FILE__, __LINE__, __VA_ARGS__);)
|
||||
#else
|
||||
#define AI_LOG_INFO(...) /*AI_LOG_INFO()*/
|
||||
#endif
|
||||
|
||||
#if defined HAS_LOG && (HAS_LOG>=LOG_WARN)
|
||||
#define AI_LOG_WARN(...) AI_WRAP_FUNC(ai_log_log(LOG_WARN, __FILE__, __LINE__, __VA_ARGS__);)
|
||||
#else
|
||||
#define AI_LOG_WARN(...) /*AI_LOG_WARN()*/
|
||||
#endif
|
||||
|
||||
#if defined HAS_LOG && (HAS_LOG>=LOG_ERROR)
|
||||
#define AI_LOG_ERROR(...) AI_WRAP_FUNC(ai_log_log(LOG_ERROR, __FILE__, __LINE__, __VA_ARGS__);)
|
||||
#else
|
||||
#define AI_LOG_ERROR(...) /*AI_LOG_ERROR()*/
|
||||
#endif
|
||||
|
||||
#if defined HAS_LOG && (HAS_LOG>=LOG_FATAL)
|
||||
#define AI_LOG_FATAL(...) AI_WRAP_FUNC(ai_log_log(LOG_FATAL, __FILE__, __LINE__, __VA_ARGS__);)
|
||||
#else
|
||||
#define AI_LOG_FATAL(...) /*AI_LOG_FATAL()*/
|
||||
#endif
|
||||
|
||||
#endif /*__CORE_LOG_H_*/
|
||||
@ -1,96 +0,0 @@
|
||||
/**
|
||||
******************************************************************************
|
||||
* @file core_net_inspect.h
|
||||
* @author AST Embedded Analytics Research Platform
|
||||
* @date 20-Jul-2018
|
||||
* @brief header file of core network inspection APIs
|
||||
******************************************************************************
|
||||
* @attention
|
||||
*
|
||||
* <h2><center>© Copyright (c) 2018 STMicroelectronics.
|
||||
* All rights reserved.</center></h2>
|
||||
*
|
||||
* This software component is licensed by ST under Ultimate Liberty license
|
||||
* SLA0044, the "License"; You may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at:
|
||||
* www.st.com/SLA0044
|
||||
*
|
||||
******************************************************************************
|
||||
*/
|
||||
|
||||
#ifndef __CORE_NET_INSPECT_H_
|
||||
#define __CORE_NET_INSPECT_H_
|
||||
#pragma once
|
||||
|
||||
#include "core_net_inspect_interface.h"
|
||||
|
||||
#include "core_common.h"
|
||||
#include "layers_common.h"
|
||||
|
||||
/*!
|
||||
* @defgroup core_net_inspect Core Network Inspection routines
|
||||
* @brief Implementation of core network inspection routines that allow a
* generated network model to be inspected on a per-node basis
* @details A network context @ref ai_network basically contains a chained list
* of nodes @ref ai_node, each with an associated forward function.
* Each ai_network context and ai_node datastruct has, as a required member
* field, an opaque handler (i.e. a void pointer) to a klass object.
* This handler is intended to be used as a platform-specific node context
* that implements target-specific routines.
* The inspector module basically acts as a plugin that exploits these features
* by temporarily creating a hidden inspection context (see
* @ref ai_core_inspect_net_klass) associated to the network and
* linking it by re-routing the klass field to this inspection context. The
* inspection context saves as part of its state (by a stack push operation) the
* internal state of the network (all node / network klass pointers and actual
* forward functions).
* Thus, for each node it re-routes the node's forward function to a dedicated
* inspection forward function (see the @ref _forward_inspect_validate() routine).
* This routine is the core of the mechanism and it allows the network to be
* inspected node by node. Additional inspection can thus be done inside the
* _forward_inspect_validate() routine before and after the actual node
* forward function is called.
|
||||
*
|
||||
*/
|
||||
|
||||
AI_API_DECLARE_BEGIN
|
||||
|
||||
/*!
|
||||
* @defgroup core_net_inspect Network Inspection Core
|
||||
* @brief Implementation of the validation network routines
|
||||
*/
|
||||
|
||||
/*!
|
||||
* @brief Initialize the network inspection context on a given network
|
||||
* @ingroup core_net_inspect
|
||||
* @param network opaque handler to the network instance
|
||||
* @param cfg a pointer to the inspector configuration we want to use
|
||||
* @return true if execution of the API is fine, false otherwise
|
||||
*/
|
||||
AI_API_ENTRY
|
||||
ai_bool ai_network_inspect_init(
|
||||
ai_handle network, const ai_inspect_config* cfg);
|
||||
|
||||
/*!
|
||||
* @brief Get a summary report from the inspected network
|
||||
* @ingroup core_net_inspect
|
||||
* @param network opaque handler to the network instance
|
||||
* @param report a pointer to the report provided back by the inspection
|
||||
* @return true if execution of the API is fine, false otherwise
|
||||
*/
|
||||
AI_API_ENTRY
|
||||
ai_bool ai_network_inspect_get_report(
|
||||
ai_handle network, ai_inspect_net_report* report);
|
||||
|
||||
/*!
|
||||
* @brief Destroy the network inspection context on a given network
|
||||
* @ingroup core_net_inspect
|
||||
* @param network opaque handler to the network instance
|
||||
* @return true if execution of the API is fine, false otherwise
|
||||
*/
|
||||
AI_API_ENTRY
|
||||
ai_bool ai_network_inspect_destroy(ai_handle network);
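/* Illustrative usage sketch (added for clarity, not part of the original API):
 * wrap one or more inferences with the inspection calls declared above. The
 * network handle is assumed to have been created and initialized elsewhere;
 * the callback fields of the configuration are left unset for brevity. */
static AI_UNUSED_FUNCTION
void example_inspect_network(ai_handle network)
{
  ai_inspect_config cfg = { 0 };
  cfg.validation_mode = VALIDATION_INSPECT;   /* see ai_inspect_mode */

  if (!ai_network_inspect_init(network, &cfg))
    return;

  /* ... run the forward pass(es) to be inspected here ... */

  ai_inspect_net_report report;
  if (ai_network_inspect_get_report(network, &report)) {
    (void)report;  /* e.g. report.n_nodes and report.elapsed_ms are now valid */
  }

  ai_network_inspect_destroy(network);
}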
|
||||
|
||||
AI_API_DECLARE_END
|
||||
|
||||
#endif /*__CORE_NET_INSPECT_H_*/
|
||||
@ -1,121 +0,0 @@
|
||||
/**
|
||||
******************************************************************************
|
||||
* @file core_net_inspect_interface.h
|
||||
* @author AST Embedded Analytics Research Platform
|
||||
* @date 20-Jul-2018
|
||||
* @brief header file of core network inspection interface APIs
|
||||
******************************************************************************
|
||||
* @attention
|
||||
*
|
||||
* <h2><center>© Copyright (c) 2018 STMicroelectronics.
|
||||
* All rights reserved.</center></h2>
|
||||
*
|
||||
* This software component is licensed by ST under Ultimate Liberty license
|
||||
* SLA0044, the "License"; You may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at:
|
||||
* www.st.com/SLA0044
|
||||
*
|
||||
******************************************************************************
|
||||
*/
|
||||
|
||||
#ifndef __CORE_NET_INSPECT_INTERFACE_H_
|
||||
#define __CORE_NET_INSPECT_INTERFACE_H_
|
||||
#pragma once
|
||||
|
||||
#include "ai_platform.h"
|
||||
|
||||
AI_API_DECLARE_BEGIN
|
||||
|
||||
/*!
|
||||
* @defgroup core_validation Validation Core
|
||||
* @brief Implementation of the validation network interface headers
|
||||
*/
|
||||
|
||||
|
||||
/*!
|
||||
* @struct ai_inspect_node_info
|
||||
* @brief network node inspection context: there is one of this datastruct
|
||||
* for each node of the network
|
||||
*/
|
||||
typedef struct ai_inspect_node_info_s {
|
||||
ai_u16 type; /*!< node type info @see ai_node datastruct */
|
||||
ai_u16 id; /*!< node id assigned by codegen tool to identify
|
||||
the specific node instance */
|
||||
ai_u16 batch_id; /*!< current node batch processed */
|
||||
ai_u16 n_batches; /*!< total number of node batches to process */
|
||||
ai_float elapsed_ms; /*!< node performance analysis: time in
|
||||
milliseconds to execute the node forward
|
||||
function */
|
||||
ai_u16 in_size; /*!< number of node's input activation buffers */
|
||||
ai_u16 out_size; /*!< number of node's output activation buffers */
|
||||
ai_buffer* in; /*!< input node activation buffer see @ref ai_buffer */
|
||||
ai_buffer* out; /*!< output node activation buffer see @ref ai_buffer */
|
||||
} ai_inspect_node_info;
|
||||
|
||||
/*!
|
||||
* @struct ai_inspect_net_report
|
||||
* @brief network inspection report context
|
||||
*/
|
||||
typedef struct ai_inspect_net_report_s {
|
||||
ai_u32 id; /*!< id of the report */
|
||||
ai_signature signature; /*!< network identification checksum */
|
||||
ai_u32 num_inferences; /*!< total number of inferences processed
|
||||
during the inspection */
|
||||
ai_u32 n_nodes; /*!< number of nodes in the network */
|
||||
ai_float elapsed_ms; /*!< network total time (in ms) for processing
|
||||
num_inferences inferences */
|
||||
ai_inspect_node_info* node; /*!< pointer to the array of size n_nodes where
|
||||
a single node report is reported. see @ref
|
||||
ai_inspect_node_info datastruct */
|
||||
} ai_inspect_net_report;
|
||||
|
||||
/*!
|
||||
* @enum ai_inspect_mode
|
||||
* @brief configuration flags to set net inspection mode
|
||||
*/
|
||||
typedef enum {
|
||||
VALIDATION_INSPECT = (0x1<<0), /**< Network validation inspection mode */
|
||||
STORE_ALL_IO_ACTIVATIONS = (0x1<<7), /**< Store all I/O activations on snapshot datastruct */
|
||||
} ai_inspect_mode;
|
||||
|
||||
typedef enum {
|
||||
AI_NODE_EXEC_PRE_FORWARD_STAGE = 0x0,
|
||||
AI_NODE_EXEC_POST_FORWARD_STAGE = 0x1,
|
||||
} ai_node_exec_stage;
|
||||
|
||||
/*!
|
||||
* @brief function pointer to callback report
|
||||
*/
|
||||
typedef void (*ai_inspect_report_cb_func)(
|
||||
const ai_handle cookie,
|
||||
const ai_inspect_net_report* report);
|
||||
|
||||
/*!
|
||||
* @brief function pointer to node execute
|
||||
*/
|
||||
typedef void (*ai_inspect_exec_node_cb_func)(
|
||||
const ai_handle cookie,
|
||||
const ai_inspect_node_info* node_info,
|
||||
const ai_node_exec_stage stage);
|
||||
|
||||
/*!
|
||||
* @struct ai_inspect_config
|
||||
* @brief inspection config datastruct
|
||||
*/
|
||||
typedef struct ai_inspect_config_s {
|
||||
ai_u8 validation_mode; /*!< validation mode flags
|
||||
see @ref ai_inspect_mode */
|
||||
ai_u8 log_level; /*!< log class level see @ref LOG_SUDO */
|
||||
ai_bool log_quiet; /*!< log class quiet mode */
|
||||
ai_inspect_report_cb_func on_report_destroy; /*!< callback function
|
||||
called when a report datastruct
|
||||
is released from memory */
|
||||
ai_inspect_exec_node_cb_func on_exec_node; /*!< callback function
|
||||
called when a node is executed (pre & post) */
|
||||
ai_handle cookie;
|
||||
} ai_inspect_config;
|
||||
|
||||
|
||||
AI_API_DECLARE_END
|
||||
|
||||
#endif /*__CORE_NET_INSPECT_INTERFACE_H_*/
|
||||
@ -1,58 +0,0 @@
|
||||
/**
|
||||
******************************************************************************
|
||||
* @file datatypes_network.h
|
||||
* @author AST Embedded Analytics Research Platform
|
||||
* @date 30-Aug-2017
|
||||
* @brief Definitions of code generated network types
|
||||
******************************************************************************
|
||||
* @attention
|
||||
*
|
||||
* <h2><center>© Copyright (c) 2017 STMicroelectronics.
|
||||
* All rights reserved.</center></h2>
|
||||
*
|
||||
* This software component is licensed by ST under Ultimate Liberty license
|
||||
* SLA0044, the "License"; You may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at:
|
||||
* www.st.com/SLA0044
|
||||
*
|
||||
******************************************************************************
|
||||
*/
|
||||
|
||||
#ifndef __DATATYPES_NETWORK_H__
|
||||
#define __DATATYPES_NETWORK_H__
|
||||
#pragma once
|
||||
|
||||
/*
|
||||
* Header intended to be overridden by the generated version.
* When included with <>, the include directories are searched in the order
* specified to the compiler.
* To enable the override, put the generated path before the API path.
|
||||
*/
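/*
 * Example (illustrative): with the compiler include paths ordered as
 *   -I<generated_network_dir> -I<ai_api_dir>
 * the code-generated datatypes_network.h is found first and overrides this
 * default header.
 */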
|
||||
|
||||
#include "ai_platform.h"
|
||||
|
||||
AI_API_DECLARE_BEGIN
|
||||
|
||||
#ifdef AI_OVERRIDE_CUSTOM_TYPES
|
||||
#warning "Warning: Custom Types have been already defined!\n"
|
||||
#endif
|
||||
|
||||
#define AI_CUSTOM_TYPES_COUNT (3)
|
||||
|
||||
#define AI_CUSTOM_TYPES_SIGNATURE_DECLARE(name) \
|
||||
const ai_custom_type_signature name[AI_CUSTOM_TYPES_COUNT+1] = { \
|
||||
AI_CUSTOM_TYPES_COUNT, \
|
||||
AI_CUSTOM_SIZE(ai_shape_dimension), \
|
||||
AI_CUSTOM_SIZE(ai_stride_dimension), \
|
||||
AI_CUSTOM_SIZE(ai_array_size), \
|
||||
};
|
||||
|
||||
|
||||
typedef ai_u32 ai_shape_dimension;
|
||||
typedef ai_i32 ai_stride_dimension;
|
||||
typedef ai_u32 ai_array_size;
|
||||
|
||||
|
||||
AI_API_DECLARE_END
|
||||
|
||||
#endif /*__DATATYPES_NETWORK_H__*/
|
||||
@ -1,69 +0,0 @@
|
||||
|
||||
/* FMT_ENTRY( exp_(0/1 only), name_, type_id_,
|
||||
* sign_bit_, float_bit_, pbits_, bits_, fbits_, ldiv_bits_)
|
||||
* Specifications (in order of the bit fields, little endian):
|
||||
- name_ : it is the enum used to define both the ai_array_format and
|
||||
ai_buffer_format.
|
||||
- exp_ (1bit) : it is a boolean flag (0 or 1) indicating whether the format
is available as a public API ai_buffer format. In this case the field
exp_name_ indicates the enum name of the ai_buffer format
|
||||
- (7 bits): reserved for flags
|
||||
- sign_bit_ (1bit) : codes whether or not the format is of a signed type
|
||||
- float_bit_ (1bit) : codes if the format is float
|
||||
- ldiv_bits (2 bits) : right shift value for computing the byte size of the
|
||||
format
|
||||
- type_id_ (4bits) : it is used to define the "family" of the format:
|
||||
see @ref AI_FMT_Q as an example. Currently supported types are:
|
||||
AI_FMT_Q (fixed point types), AI_FMT_FLOAT (floating point values),
|
||||
AI_FMT_LUT4 or AI_FMT_LUT8 (compressed formats)
|
||||
- pbits_ (3bits) : number of padding bits for the format
|
||||
- bits_ (7bits) : size in bits of the format (NB: integer+fractional bits)
|
||||
- fbits_ (7bits) : number of fractional bits for the format (for AI_FMT_Q only)
|
||||
|
||||
*/
|
||||
|
||||
/* Macro tricks are here:
|
||||
* https://github.com/pfultz2/Cloak/wiki/C-Preprocessor-tricks,-tips,-and-idioms
|
||||
*/
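/* Illustrative X-macro usage (added for clarity; the including file name and
 * the generated enum are assumptions, not part of the original sources). The
 * includer defines FMT_ENTRY to expand each row below, includes this list,
 * and relies on the trailing #undef for cleanup:
 *
 *   #define FMT_ENTRY(exp_, name_, type_id_, sign_bit_, float_bit_, \
 *                     pbits_, bits_, fbits_, ldiv_bits_) \
 *     EXAMPLE_FMT_ ## name_,
 *   enum example_fmt {
 *   #include "formats_list.h"   // assumed name of this list file
 *   };
 */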
|
||||
|
||||
/* Format none entry */
|
||||
FMT_ENTRY(1, NONE, AI_FMT_NONE, 0, 0, 0, 0, 0, 0)
|
||||
|
||||
/* Floating point formats */
|
||||
FMT_ENTRY(1, FLOAT, AI_FMT_FLOAT, 1, 1, 0, 32, 0, 0)
|
||||
FMT_ENTRY(0, FLOAT64, AI_FMT_FLOAT, 1, 1, 0, 64, 0, 0)
|
||||
FMT_ENTRY(0, FLOAT16, AI_FMT_FLOAT, 1, 1, 0, 16, 0, 0)
|
||||
|
||||
/* Integer formats (i.e. fractional bits = 0!) */
|
||||
FMT_ENTRY(1, U8, AI_FMT_Q, 0, 0, 0, 8, 0, 0)
|
||||
FMT_ENTRY(1, U16, AI_FMT_Q, 0, 0, 0, 16, 0, 0)
|
||||
FMT_ENTRY(0, U32, AI_FMT_Q, 0, 0, 0, 32, 0, 0)
|
||||
FMT_ENTRY(0, U64, AI_FMT_Q, 0, 0, 0, 64, 0, 0)
|
||||
FMT_ENTRY(0, U4, AI_FMT_Q, 0, 0, 0, 4, 0, 0)
|
||||
|
||||
FMT_ENTRY(1, S8, AI_FMT_Q, 1, 0, 0, 8, 0, 0)
|
||||
FMT_ENTRY(1, S16, AI_FMT_Q, 1, 0, 0, 16, 0, 0)
|
||||
FMT_ENTRY(0, S32, AI_FMT_Q, 1, 0, 0, 32, 0, 0)
|
||||
FMT_ENTRY(0, S64, AI_FMT_Q, 1, 0, 0, 64, 0, 0)
|
||||
FMT_ENTRY(0, S4, AI_FMT_Q, 1, 0, 0, 4, 0, 0)
|
||||
|
||||
/* Fixed-point formats including ARM CMSIS Q7, Q15, Q31 ones */
|
||||
FMT_ENTRY(1, Q, AI_FMT_Q, 1, 0, 0, 0, 0, 0)
|
||||
FMT_ENTRY(1, Q7, AI_FMT_Q, 1, 0, 0, 8, 7, 0)
|
||||
FMT_ENTRY(1, Q15, AI_FMT_Q, 1, 0, 0, 16, 15, 0)
|
||||
FMT_ENTRY(0, Q31, AI_FMT_Q, 1, 0, 0, 32, 31, 0)
|
||||
|
||||
FMT_ENTRY(1, UQ, AI_FMT_Q, 0, 0, 0, 0, 0, 0)
|
||||
FMT_ENTRY(1, UQ7, AI_FMT_Q, 0, 0, 0, 8, 7, 0)
|
||||
FMT_ENTRY(1, UQ15, AI_FMT_Q, 0, 0, 0, 16, 15, 0)
|
||||
FMT_ENTRY(0, UQ31, AI_FMT_Q, 0, 0, 0, 32, 31, 0)
|
||||
|
||||
/* Compressed formats */
|
||||
FMT_ENTRY(0, LUT4_FLOAT, AI_FMT_LUT4, 1, 1, 0, 32, 0, 3)
|
||||
FMT_ENTRY(0, LUT8_FLOAT, AI_FMT_LUT8, 1, 1, 0, 32, 0, 2)
|
||||
FMT_ENTRY(0, LUT4_Q15, AI_FMT_LUT4, 1, 0, 0, 16, 15, 2)
|
||||
FMT_ENTRY(0, LUT8_Q15, AI_FMT_LUT8, 1, 0, 0, 16, 15, 1)
|
||||
FMT_ENTRY(0, LUT4_UQ15, AI_FMT_LUT4, 0, 0, 0, 16, 15, 2)
|
||||
FMT_ENTRY(0, LUT8_UQ15, AI_FMT_LUT8, 0, 0, 0, 16, 15, 1)
|
||||
|
||||
#undef FMT_ENTRY
|
||||
@ -1,88 +0,0 @@
|
||||
/**
|
||||
******************************************************************************
|
||||
* @file layers.h
|
||||
* @author AST Embedded Analytics Research Platform
|
||||
* @date 01-May-2017
|
||||
* @brief header file of AI platform layers datatypes
|
||||
******************************************************************************
|
||||
* @attention
|
||||
*
|
||||
* <h2><center>© Copyright (c) 2017 STMicroelectronics.
|
||||
* All rights reserved.</center></h2>
|
||||
*
|
||||
* This software component is licensed by ST under Ultimate Liberty license
|
||||
* SLA0044, the "License"; You may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at:
|
||||
* www.st.com/SLA0044
|
||||
*
|
||||
******************************************************************************
|
||||
*/
|
||||
|
||||
#ifndef __LAYERS_H_
|
||||
#define __LAYERS_H_
|
||||
#pragma once
|
||||
|
||||
#include "layers_common.h"
|
||||
#include "layers_conv2d.h"
|
||||
#include "layers_generic.h"
|
||||
#include "layers_nl.h"
|
||||
#include "layers_norm.h"
|
||||
#include "layers_pool.h"
|
||||
#include "layers_rnn.h"
|
||||
#include "layers_dense.h"
|
||||
#include "layers_sm.h"
|
||||
|
||||
#ifdef USE_OPERATORS
|
||||
#include "layers_lambda.h"
|
||||
#endif /* USE_OPERATORS */
|
||||
|
||||
|
||||
AI_API_DECLARE_BEGIN
|
||||
|
||||
/*!
|
||||
* @defgroup layers Layers
|
||||
* @brief Definition of the forward functions for the layers and the general
|
||||
* ai_layer datastructure used to abstract specific layer implementation in the
|
||||
* generic forward function definition
|
||||
*
|
||||
* The forward function for a layer computes the layer activations given the
|
||||
* activations of the previous layer. They are added to the layer as function
|
||||
* pointer and called implicitly by the @ref ai_layers_forward_all function.
|
||||
* The input activations are read from layer → in and the computed
|
||||
* activations stored in layer → out. The layer type needs to be compatible
|
||||
* with the forward function, but layers with the same layout (e.g. `mp` and
|
||||
* `ap`) can share the same structure.
|
||||
*/
|
||||
|
||||
/******************************************************************************/
|
||||
/* Forward Functions Section */
|
||||
/******************************************************************************/
|
||||
|
||||
/*!
|
||||
* @brief Executes a single layer in the network.
|
||||
* @ingroup layers
|
||||
* @param layer the layer to process
|
||||
* @return pointer to the next layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
ai_layer* ai_layers_forward_layer(ai_layer* layer);
|
||||
|
||||
|
||||
/*!
|
||||
* @brief Computes the output of the network given the input.
|
||||
* @ingroup layers
|
||||
*
|
||||
* Given a network with the input pre-loaded in the net → in tensor,
|
||||
* computes the output by calling the forward functions of each layer and
|
||||
* selecting the next layer. When the layer has no successor or it's in a
|
||||
* loop-back configuration (layer → next is again layer), the function
|
||||
* stops. The result is stored in net → out.
|
||||
*
|
||||
* @param net the network to evaluate
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void ai_layers_forward_all(ai_network* net);
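/* Illustrative call sequence (sketch added for clarity; the in/out field names
 * follow the description above):
 *
 *   // net->in has been pre-loaded with the input activations
 *   ai_layers_forward_all(net);
 *   // net->out now holds the computed network output
 */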
|
||||
|
||||
AI_API_DECLARE_END
|
||||
|
||||
#endif /* __LAYERS_H_ */
|
||||
@ -1,192 +0,0 @@
|
||||
/**
|
||||
******************************************************************************
|
||||
* @file layers_common.h
|
||||
* @author AST Embedded Analytics Research Platform
|
||||
* @date 17-Nov-2017
|
||||
* @brief header file of AI platform layers datatypes
|
||||
******************************************************************************
|
||||
* @attention
|
||||
*
|
||||
* <h2><center>© Copyright (c) 2017 STMicroelectronics.
|
||||
* All rights reserved.</center></h2>
|
||||
*
|
||||
* This software component is licensed by ST under Ultimate Liberty license
|
||||
* SLA0044, the "License"; You may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at:
|
||||
* www.st.com/SLA0044
|
||||
*
|
||||
******************************************************************************
|
||||
*/
|
||||
|
||||
#ifndef __LAYERS_COMMON_H_
|
||||
#define __LAYERS_COMMON_H_
|
||||
#pragma once
|
||||
|
||||
#include <stdlib.h>
|
||||
|
||||
#ifdef USE_CYCLE_MEASUREMENTS
|
||||
#include "layers_cycles_estimation.h"
|
||||
#endif
|
||||
#include "ai_platform.h"
|
||||
#include "ai_common_config.h"
|
||||
|
||||
#include "core_common.h"
|
||||
#include "core_convert.h"
|
||||
|
||||
/* optimizations */
|
||||
#define AI_OPTIM_DICT8_DOT_ARRAY_F32 (1)
|
||||
#define AI_OPTIM_DICT8_DTCM (1)
|
||||
#define AI_OPTIM_FUNC_MP_ARRAY_F32 (0)
|
||||
|
||||
|
||||
#define AI_LAYER_OBJ(obj_) \
|
||||
((ai_layer*)(obj_))
|
||||
|
||||
#define AI_LAYER_FORWARD_FUNC(func_) \
|
||||
((layer_forward_func)(func_))
|
||||
|
||||
#define AI_LAYER_TYPE(type_) \
|
||||
( (ai_layer_type)((ai_u32)(type_)&0xFFFF) )
|
||||
|
||||
#define AI_LAYER_TYPE_ENTRY(type_) \
|
||||
AI_CONCAT(AI_CONCAT(AI_LAYER_, type_), _TYPE)
|
||||
|
||||
#define AI_LAYER_TYPE_NAME(type_) \
|
||||
ai_layer_type_name(AI_LAYER_TYPE(type_))
|
||||
|
||||
#define AI_LAYER_TYPE_IS_VALID(type_) \
|
||||
ai_layer_type_is_valid(AI_LAYER_TYPE(type_))
|
||||
|
||||
#define AI_LAYER_COMMON_INIT(type_, id_, forward_, next_, network_, klass_) \
|
||||
.type = AI_NODE_TYPE(type_), \
|
||||
.id = AI_ID_OBJ(id_), \
|
||||
.network = AI_NETWORK_OBJ(network_), \
|
||||
.next = AI_LAYER_OBJ(next_), \
|
||||
.forward = AI_LAYER_FORWARD_FUNC(forward_), \
|
||||
.klass = AI_KLASS_OBJ(klass_)
|
||||
|
||||
#define AI_LAYER_OBJ_INIT(type_, id_, network_, next_, forward_, ...) { \
|
||||
AI_LAYER_COMMON_INIT(AI_CONCAT(AI_LAYER_, type_), id_, forward_, next_, network_, NULL), \
|
||||
## __VA_ARGS__ }
|
||||
|
||||
#define AI_LAYER_OBJ_DECLARE(varname_, id_, type_, struct_, forward_func_, \
|
||||
network_, next_, attr_, ...) \
|
||||
AI_ALIGNED(4) \
|
||||
attr_ AI_CONCAT(ai_layer_, struct_) varname_ = \
|
||||
AI_LAYER_OBJ_INIT( type_, id_, network_, \
|
||||
next_, forward_func_, \
|
||||
## __VA_ARGS__ );
|
||||
|
||||
#define AI_LAYER_IO_GET(layer_, in_, out_) \
|
||||
ASSERT_LAYER_SANITY(layer_) \
|
||||
const ai_tensor* in_ = GET_TENSOR_IN((layer_)->tensors, 0); \
|
||||
ai_tensor* out_ = GET_TENSOR_OUT((layer_)->tensors, 0); \
|
||||
ASSERT_TENSOR_DATA_SANITY(in_) \
|
||||
ASSERT_TENSOR_DATA_SANITY(out_)
|
||||
|
||||
#define AI_LAYER_LIST_IO_GET(layer_, in_, out_) \
|
||||
ASSERT_LAYER_SANITY(layer_) \
|
||||
const ai_tensor_list* in_ = GET_TENSOR_LIST_IN((layer_)->tensors); \
|
||||
ai_tensor_list* out_ = GET_TENSOR_LIST_OUT((layer_)->tensors); \
|
||||
ASSERT_TENSOR_LIST_SANITY(in_) \
|
||||
ASSERT_TENSOR_LIST_SANITY(out_)
|
||||
|
||||
#ifdef HAS_AI_ASSERT
|
||||
#define AI_LAYER_WEIGHTS_GET(layer_, weights_, bias_) \
|
||||
const ai_tensor* weights_ = GET_TENSOR_WEIGHTS((layer_)->tensors, 0); \
|
||||
const ai_tensor* bias_ = (GET_TENSOR_LIST_SIZE(GET_TENSOR_LIST_WEIGTHS((layer_)->tensors))>1) \
|
||||
? GET_TENSOR_WEIGHTS((layer_)->tensors, 1) \
|
||||
: NULL; \
|
||||
ASSERT_TENSOR_DATA_SANITY(weights_) \
|
||||
if (bias_) { ASSERT_TENSOR_DATA_SANITY(bias_) }
|
||||
#else
|
||||
#define AI_LAYER_WEIGHTS_GET(layer_, weights_, bias_) \
|
||||
const ai_tensor* weights_ = GET_TENSOR_WEIGHTS((layer_)->tensors, 0); \
|
||||
const ai_tensor* bias_ = (GET_TENSOR_LIST_SIZE(GET_TENSOR_LIST_WEIGTHS((layer_)->tensors))>1) \
|
||||
? GET_TENSOR_WEIGHTS((layer_)->tensors, 1) \
|
||||
: NULL; \
|
||||
|
||||
#endif /*HAS_AI_ASSERT*/
|
||||
|
||||
|
||||
AI_API_DECLARE_BEGIN
|
||||
|
||||
/*!
|
||||
* @defgroup layers_common Layers Common
|
||||
* @brief Implementation of the common layers datastructures
|
||||
* This header enumerates the layer-specific definitions implemented in the
* library together with the macros and datatypes used to manipulate them.
|
||||
*/
|
||||
|
||||
/*!
|
||||
* @enum ai_layer_type
|
||||
* @ingroup layers
|
||||
* @brief ai_tools supported layers type id
|
||||
*/
|
||||
typedef enum {
|
||||
#define LAYER_ENTRY(type_, id_, struct_, forward_func_) \
|
||||
AI_LAYER_TYPE_ENTRY(type_) = id_,
|
||||
#include "layers_list.h"
|
||||
} ai_layer_type;
|
||||
|
||||
#define AI_LAYER_COMMON_FIELDS_DECLARE \
|
||||
AI_NODE_COMMON_FIELDS_DECLARE
|
||||
|
||||
/*!
|
||||
* @typedef void (*layer_forward_func)(struct ai_layer_* layer)
|
||||
* @ingroup layers_common
|
||||
* @brief Callback signatures for all layers forward functions
|
||||
*/
|
||||
typedef node_forward_func layer_forward_func;
|
||||
|
||||
/*!
|
||||
* @struct ai_layer
|
||||
* @ingroup layers_common
|
||||
* @brief Structure encoding a layer in the network
|
||||
*
|
||||
* The layer struct is an alias for a generic @ref ai_node datastructure
|
||||
*/
|
||||
typedef ai_node ai_layer;
|
||||
|
||||
/*!
|
||||
* @struct ai_layer_base
|
||||
* @ingroup layers_common
|
||||
* @brief Structure encoding a base layer in the network
|
||||
*
|
||||
* The layer_base struct is an alias for a generic @ref ai_layer datastructure
|
||||
*/
|
||||
typedef ai_layer ai_layer_base;
|
||||
|
||||
/*!
|
||||
* @brief Check the custom network types against the internally compiled ones
|
||||
* Helper function to check if the private APIs were compiled with a different
|
||||
* `datatypes_network.h` than the one provided to the caller.
|
||||
* @ingroup layers_common
|
||||
* @param signatures list of type sizes signatures (first element is the number of types)
|
||||
* @return false if there is a type size mismatch
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
ai_bool ai_check_custom_types(const ai_custom_type_signature* signatures);
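/* Illustrative check (added for clarity; assumes the caller declares its own
 * signature table with AI_CUSTOM_TYPES_SIGNATURE_DECLARE from
 * datatypes_network.h, as the code-generated network sources are expected to do): */
static AI_UNUSED_FUNCTION
ai_bool example_check_custom_types(void)
{
  AI_CUSTOM_TYPES_SIGNATURE_DECLARE(example_signatures)
  return ai_check_custom_types(example_signatures);
}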
|
||||
|
||||
/*!
|
||||
* @brief Helper API to retrieve a human readable layer type from enum
|
||||
* @ingroup layers_common
|
||||
* @param type in type of layer
|
||||
* @return string defining the type of the layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
const char* ai_layer_type_name(const ai_layer_type type);
|
||||
|
||||
/*!
|
||||
* @brief Helper API to check if a node is a valid layer type
|
||||
* @ingroup layers_common
|
||||
* @param type in type of layer
|
||||
* @return true if the layer is one of the ones listed in the enum,
|
||||
* false otherwise
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
ai_bool ai_layer_type_is_valid(const ai_layer_type type);
|
||||
|
||||
AI_API_DECLARE_END
|
||||
|
||||
#endif /* __LAYERS_COMMON_H_ */
|
||||
@ -1,196 +0,0 @@
|
||||
/**
|
||||
******************************************************************************
|
||||
* @file layers_conv2d.h
|
||||
* @author AST Embedded Analytics Research Platform
|
||||
* @date 18-Apr-2018
|
||||
* @brief header file of AI platform conv2d layers datatypes
|
||||
******************************************************************************
|
||||
* @attention
|
||||
*
|
||||
* <h2><center>© Copyright (c) 2018 STMicroelectronics.
|
||||
* All rights reserved.</center></h2>
|
||||
*
|
||||
* This software component is licensed by ST under Ultimate Liberty license
|
||||
* SLA0044, the "License"; You may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at:
|
||||
* www.st.com/SLA0044
|
||||
*
|
||||
******************************************************************************
|
||||
*/
|
||||
#ifndef __LAYERS_CONV2D_H_
|
||||
#define __LAYERS_CONV2D_H_
|
||||
#pragma once
|
||||
|
||||
#include "layers_nl.h"
|
||||
#include "layers_pool.h"
|
||||
|
||||
#define AI_LAYER_CONV2D_FIELDS_DECLARE \
|
||||
AI_LAYER_COMMON_FIELDS_DECLARE \
|
||||
ai_u32 groups; /*!< groups for separable convolution */ \
|
||||
AI_CONST ai_array* nl_params; /*!< array pointer to non linear parameters */ \
|
||||
func_nl nl_func; /*!< function pointer to non linear transform */ \
|
||||
ai_shape_2d filter_stride; /*!< filter stride, how much the filter moves */ \
|
||||
ai_shape_2d dilation; /*!< dilation value along axis of the filter */ \
|
||||
ai_shape filter_pad; /*!< filter pad 4d */
|
||||
|
||||
/*!
|
||||
* @defgroup layers_conv2d Convolutional Layers Definitions
|
||||
* @brief definition
|
||||
*
|
||||
*/
|
||||
|
||||
AI_API_DECLARE_BEGIN
|
||||
|
||||
/*!
|
||||
* @struct ai_layer_dense
|
||||
* @ingroup layers_conv2d
|
||||
* @brief Dense (fully connected) layer
|
||||
*/
|
||||
typedef ai_layer ai_layer_dense;
|
||||
|
||||
/*!
|
||||
* @struct ai_layer_gemm
|
||||
* @ingroup layers_conv2d
|
||||
* @brief layer for General Matrix Multiplication
|
||||
*
|
||||
* Layer for General Matrix Multiplication (GEMM):
|
||||
* \f{equation}{ Y = \alpha A \cdot B + \beta C \f}
|
||||
* \f$\alpha\f$ and \f$\beta\f$ are parameters, A and B are matrices,
|
||||
* C is a matrix or an array. Size checks for A, B, C, and Y are performed and
|
||||
* broadcast is applied on C if necessary.
|
||||
* This is a sequential layer (see @ref ai_layer).
|
||||
*/
|
||||
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_gemm_ {
|
||||
AI_LAYER_COMMON_FIELDS_DECLARE
|
||||
ai_float alpha; /*!< alpha coefficient */
|
||||
ai_float beta; /*!< beta coefficient */
|
||||
ai_u8 tA; /*!< transpose A flag */
|
||||
ai_u8 tB; /*!< transpose B flag */
|
||||
} ai_layer_gemm;
|
||||
|
||||
/*!
|
||||
* @struct ai_layer_conv2d
|
||||
* @ingroup layers_conv2d
|
||||
* @brief 2D convolutional layer with strides and pads
|
||||
*/
|
||||
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_conv2d_ {
|
||||
AI_LAYER_CONV2D_FIELDS_DECLARE
|
||||
} ai_layer_conv2d;
|
||||
|
||||
/*!
|
||||
* @struct ai_layer_conv2d_nl_pool
|
||||
* @ingroup layers_conv2d
|
||||
* @brief 2D convolutional layer + nl + pooling with strides and pads
|
||||
*/
|
||||
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_conv2d_nl_pool_ {
|
||||
AI_LAYER_CONV2D_FIELDS_DECLARE
|
||||
|
||||
ai_shape_2d pool_size; /*!< pooling size */
|
||||
ai_shape_2d pool_stride; /*!< pooling stride */
|
||||
ai_shape pool_pad; /*!< pooling pad */
|
||||
|
||||
func_pool pool_func; /*!< function pointer to pooling transform */
|
||||
} ai_layer_conv2d_nl_pool;
|
||||
|
||||
|
||||
AI_INTERNAL_API
|
||||
void ai_dict8_dot_array_f32(ai_handle out, ai_ptr_const data0, ai_ptr_const lut,
|
||||
const ai_float* data1, const ai_size data_size);
|
||||
|
||||
AI_INTERNAL_API
|
||||
void ai_dict4_dot_array_f32(ai_handle out, ai_ptr_const data0, ai_ptr_const lut,
|
||||
const ai_float* data1, const ai_size data_size);

/******************************************************************************/
|
||||
/* Forward Functions Section */
|
||||
/******************************************************************************/
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a 2D convolutional layer.
|
||||
* @ingroup layers_conv2d
|
||||
* @param layer the convolutional (conv) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_conv2d(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a @ref ai_layer_conv2d_nl_pool layer
|
||||
* The @ref ai_layer_conv2d_nl_pool is a fused conv2D + optional nonlinear
|
||||
* layer + optional pooling / nonlinearity (average, max, softmax)
|
||||
* @ingroup layers_conv2d
|
||||
* @param layer see @ref ai_layer_conv2d_nl_pool
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_conv2d_nl_pool(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a GEMM layer.
|
||||
* @ingroup layers
|
||||
* @param layer the layer including output and input tensors
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_gemm(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Computes matmul layer, intended as numpy.matmul(A,B).
|
||||
* @ingroup layers
|
||||
* @param layer the layer including output and input tensors
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_matmul(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a dense (fully connected) layer.
|
||||
* @ingroup layers_conv2d
|
||||
* @param layer the dense layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_dense(ai_layer* layer);
|
||||
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a fixed point 2D convolutional layer.
|
||||
* @ingroup layers_conv2d
|
||||
* @param layer the convolutional (conv) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_conv2d_fixed(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a fixed point @ref ai_layer_conv2d_nl_pool
|
||||
* layer.
|
||||
* The @ref ai_layer_conv2d_nl_pool is a fused conv2D + optional nonlinear
|
||||
* layer + optional pooling / nonlinearity (average, max)
|
||||
* @ingroup layers_conv2d
|
||||
* @param layer see @ref ai_layer_conv2d_nl_pool
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_conv2d_nl_pool_fixed(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of an integer-quantized 2D convolutional layer.
|
||||
* @ingroup layers_conv2d
|
||||
* @param layer the convolutional (conv) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_conv2d_integer(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of an integer @ref ai_layer_conv2d_nl_pool layer.
|
||||
* The @ref ai_layer_conv2d_nl_pool is a fused conv2D + optional nonlinear
|
||||
* layer + optional pooling / nonlinearity (average, max)
|
||||
* @ingroup layers_conv2d
|
||||
* @param layer see @ref ai_layer_conv2d_nl_pool
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_conv2d_nl_pool_integer(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of an integer dense (fully connected) layer.
|
||||
* @ingroup layers_dense
|
||||
* @param layer the dense layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_dense_integer(ai_layer *pLayer);
|
||||
|
||||
AI_API_DECLARE_END
|
||||
|
||||
#endif /*__LAYERS_CONV2D_H_*/
|
||||
@ -1,65 +0,0 @@
|
||||
/**
|
||||
******************************************************************************
|
||||
* @file layers_dense.h
|
||||
* @author AST Embedded Analytics Research Platform
|
||||
* @date 18-Apr-2018
|
||||
* @brief header file of AI platform dense layers datatypes
|
||||
******************************************************************************
|
||||
* @attention
|
||||
*
|
||||
* <h2><center>© COPYRIGHT(c) 2018 STMicroelectronics</center></h2>
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are met:
|
||||
* 1. Redistributions of source code must retain the above copyright notice,
|
||||
* this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
* 3. Neither the name of STMicroelectronics nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
|
||||
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
||||
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
||||
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
||||
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
||||
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
||||
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
||||
* POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
******************************************************************************
|
||||
*/
|
||||
|
||||
#ifndef __LAYERS_DENSE_H_
|
||||
#define __LAYERS_DENSE_H_
|
||||
#pragma once
|
||||
|
||||
|
||||
#include "layers_common.h"
|
||||
|
||||
|
||||
/*!
|
||||
* @defgroup layers_dense Dense Layers Definitions
|
||||
* @brief definition
|
||||
*
|
||||
*/
|
||||
|
||||
AI_API_DECLARE_BEGIN
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a fixed point dense (fully connected) layer.
|
||||
* @ingroup layers_dense
|
||||
* @param layer the dense layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_dense_fixed(ai_layer *pLayer);
|
||||
|
||||
AI_API_DECLARE_END
|
||||
|
||||
#endif /*__LAYERS_DENSE_H_*/
|
||||
|
||||
@ -1,371 +0,0 @@
|
||||
|
||||
/**
|
||||
******************************************************************************
|
||||
* @file layers_generic.h
|
||||
* @author AST Embedded Analytics Research Platform
|
||||
* @date 18-Apr-2018
|
||||
* @brief header file of AI platform generic layers datatypes
|
||||
******************************************************************************
|
||||
* @attention
|
||||
*
|
||||
* <h2><center>© Copyright (c) 2018 STMicroelectronics.
|
||||
* All rights reserved.</center></h2>
|
||||
*
|
||||
* This software component is licensed by ST under Ultimate Liberty license
|
||||
* SLA0044, the "License"; You may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at:
|
||||
* www.st.com/SLA0044
|
||||
*
|
||||
******************************************************************************
|
||||
*/
|
||||
#ifndef __LAYERS_GENERIC_H_
|
||||
#define __LAYERS_GENERIC_H_
|
||||
#pragma once
|
||||
|
||||
#include "layers_common.h"
|
||||
|
||||
/*!
|
||||
* @defgroup layers_generic Generic Layers Definitions
|
||||
* @brief definition
|
||||
*
|
||||
*/
|
||||
|
||||
AI_API_DECLARE_BEGIN
|
||||
|
||||
/*!
|
||||
* @struct ai_layer_time_delay
|
||||
* @ingroup layers_generic
|
||||
* @brief TimeDelay layer with sparse kernel
|
||||
*/
|
||||
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_time_delay_ {
|
||||
AI_LAYER_COMMON_FIELDS_DECLARE
|
||||
AI_CONST ai_array* mask; /*!< sparse filter mask */
|
||||
} ai_layer_time_delay;
|
||||
|
||||
/*!
|
||||
* @struct ai_layer_split
|
||||
* @ingroup layers_generic
|
||||
* @brief Split layer definition
|
||||
*
|
||||
 * This layer defines the params of a splitting layer. It is intended to be used
 * by its associated forward function @ref forward_split
|
||||
*/
|
||||
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_split_ {
|
||||
AI_LAYER_COMMON_FIELDS_DECLARE
|
||||
ai_u16 out_layers_count; /*!< number of output layers to split*/
|
||||
ai_u16 out_layer_curr; /*!< current layer to split */
|
||||
ai_layer** out_layers; /*!< output layers list */
|
||||
ai_tensor** out_tensors; /*!< output tensors list */
|
||||
ai_tensor* in_tensor; /*!< input tensor */
|
||||
func_copy_tensor copy_to_out_tensor; /*!< pointer to copy tensor func
|
||||
(NULL = no copy) */
|
||||
} ai_layer_split;
|
||||
|
||||
/*!
|
||||
* @struct ai_layer_topK
|
||||
* @ingroup layers_generic
|
||||
* @brief topK layer definition
|
||||
*/
|
||||
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_topK_{
|
||||
AI_LAYER_COMMON_FIELDS_DECLARE
|
||||
ai_i16 axis;
|
||||
ai_i32 k;
|
||||
} ai_layer_topK;
|
||||
|
||||
|
||||
/*!
|
||||
* @struct ai_layer_slice
|
||||
* @ingroup layers_generic
|
||||
* @brief Slice layer definition
|
||||
*
|
||||
 * This layer defines the params of a slicing layer. It is intended to be used
 * by its associated forward function @ref forward_slice
|
||||
*/
|
||||
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_slice_ {
|
||||
AI_LAYER_COMMON_FIELDS_DECLARE
|
||||
  AI_CONST ai_array* axes;    /*!< Axes that 'starts' and 'ends' apply to. It's optional */
  AI_CONST ai_array* starts;  /*!< Starting indices of the corresponding axis in axes */
  AI_CONST ai_array* ends;    /*!< Ending indices (exclusive) of the corresponding axis in axes */
|
||||
} ai_layer_slice;
|
||||
|
||||
|
||||
/*!
|
||||
 * @struct ai_layer_tile
 * @ingroup layers_generic
 * @brief Tile layer definition
 *
 * This layer defines the params of a tile layer. It constructs a tensor by tiling a
 * given tensor. It is intended to be used by its associated forward function
 * @ref forward_tile
 */
|
||||
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_tile_{
|
||||
AI_LAYER_COMMON_FIELDS_DECLARE
|
||||
AI_CONST ai_array* repeats; /*!< numbers of repeated copies along each dimension */
|
||||
} ai_layer_tile;
|
||||
|
||||
/*!
|
||||
 * @struct ai_layer_upsample
 * @ingroup layers_generic
 * @brief Upsample layer definition
 *
 * This layer defines the params of an upsampling layer. It overloads its params
 * to allow zero-insertion upsampling, helpful for transpose convolutions, for instance.
 * It is intended to be used by its associated forward function @ref forward_upsample
 */
|
||||
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_upsample_{
|
||||
AI_LAYER_COMMON_FIELDS_DECLARE
|
||||
ai_upsample_mode mode; /*!< upsample mode */
|
||||
AI_CONST ai_array* scales; /*!< scale array along each dimension */
|
||||
} ai_layer_upsample;
|
||||
|
||||
/*!
|
||||
 * @struct ai_layer_instanceNormalization
 * @ingroup layers_generic
|
||||
* @brief instance normalization layer definition
|
||||
*
|
||||
* This layer defines the params of an instance normalization layer.
|
||||
* It is intended to be used by its associated forward function @ref forward_instanceNormalization
|
||||
*/
|
||||
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_instanceNormalization_ {
  AI_LAYER_COMMON_FIELDS_DECLARE
  ai_float eps;            /*!< epsilon value, to avoid division by zero */
|
||||
AI_CONST ai_array* scale; /*!< scale array */
|
||||
AI_CONST ai_array* bias; /*!< bias array */
|
||||
} ai_layer_instanceNormalization;
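
/*
 * Illustration only: the common per-channel definition of instance
 * normalization that the scale, bias and eps fields above parameterize
 * (the actual kernel may differ in layout details):
 *
 *   y[i] = scale * (x[i] - mean(x)) / sqrt(var(x) + eps) + bias
 *
 * with mean() and var() taken over the spatial elements of one channel.
 */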
|
||||
/*!
 * @struct ai_layer_pad
 * @ingroup layers_generic
 * @brief Pad layer definition
 *
 * This layer defines the params of a pad layer. It pads a tensor.
 * It is intended to be used by its associated forward function @ref forward_pad
 */
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_pad_ {
  AI_LAYER_COMMON_FIELDS_DECLARE
  ai_pad_mode mode;   /*!< pad mode */
  ai_shape pads;      /*!< amount of padding to add or remove at the beginning and end of each axis */
  ai_float value;     /*!< value used to fill the padded elements */
} ai_layer_pad;
|
||||
/*!
|
||||
* @struct ai_layer_add
|
||||
* @ingroup layers_generic
|
||||
* @brief Add layer definition
|
||||
*
|
||||
* This layer defines the params of an add layer.
|
||||
*/
|
||||
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_add_ {
|
||||
AI_LAYER_COMMON_FIELDS_DECLARE
|
||||
ai_u16 in_layers_count; /*!< number of input layers to concat */
|
||||
ai_u16 in_layer_curr; /*!< current layer to concat */
|
||||
ai_tensor** in_tensors; /*!< input tensors list (if NULL==no copy) */
|
||||
ai_tensor* out_tensor; /*!< output tensor (if NULL==no copy) */
|
||||
func_copy_tensor copy_to_out_tensor; /*!< pointer to copy tensor func
|
||||
(NULL = no copy) */
|
||||
ai_layer* split_layer; /*!< pointer to associated split layer */
|
||||
ai_layer* next_layer; /*!< pointer to next layer to process */
|
||||
} ai_layer_add;
|
||||
|
||||
/*!
|
||||
* @struct ai_layer_transpose
|
||||
* @ingroup layers_generic
|
||||
 * @brief Transpose layer datastruct declaration. This defines the params of a
 * transpose layer. It is intended to be used by its associated forward function
 * @ref forward_transpose
 */
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_transpose_ {
  AI_LAYER_COMMON_FIELDS_DECLARE
  ai_shape out_mapping;  /*!< transpose output mapping order, i.e. it is a
                              permutation of the input tensor shape */
|
||||
} ai_layer_transpose;
|
||||
|
||||
|
||||
#define AI_TIME_DISTRIBUTED_AXIS (AI_SHAPE_HEIGHT)
|
||||
|
||||
/*!
|
||||
* @struct ai_layer_time_distributed
|
||||
* @ingroup layers_generic
|
||||
 * @brief Time distributed layer datastruct declaration. This defines the params
 * of a time distributed layer. It is intended to be used by its associated
 * forward function @ref forward_time_distributed
|
||||
*/
|
||||
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_time_distributed_ {
|
||||
AI_LAYER_COMMON_FIELDS_DECLARE
|
||||
ai_layer* inner_layer; /*!< inner layer to process */
|
||||
} ai_layer_time_distributed;
|
||||
|
||||
/*!
|
||||
* @struct ai_layer_concat
|
||||
* @ingroup layers_generic
|
||||
* @brief Concatenation layer
|
||||
*
|
||||
* Concat Layer.
|
||||
* It is a sequential layer. see @ref ai_layer_sequential
|
||||
*/
|
||||
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_concat_ {
|
||||
AI_LAYER_COMMON_FIELDS_DECLARE
|
||||
ai_shape_dimension axis; /*!< which axis to concatenate on */
|
||||
} ai_layer_concat;
|
||||
|
||||
typedef ai_float (*func_binary)(const ai_float a, const ai_float b);
|
||||
|
||||
/*!
|
||||
* @struct ai_layer_eltwise
|
||||
* @ingroup layers_generic
|
||||
* @brief General element-wise transformation layer
|
||||
*
|
||||
* Elementwise Layer.
|
||||
* It is a sequential layer. see @ref ai_layer_sequential
|
||||
*/
|
||||
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_eltwise_ {
|
||||
AI_LAYER_COMMON_FIELDS_DECLARE
|
||||
func_binary operation; /*!< operation to apply elementwise */
|
||||
} ai_layer_eltwise;
|
||||
|
||||
/*!
|
||||
* @struct ai_layer_reduce
|
||||
* @ingroup layers_generic
|
||||
* @brief General dimension reduction layer
|
||||
*
|
||||
* reduction Layer.
|
||||
* It is a sequential layer. see @ref ai_layer_sequential
|
||||
*/
|
||||
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_reduce_ {
|
||||
AI_LAYER_COMMON_FIELDS_DECLARE
|
||||
ai_float neutral_value; /*!< Initialization value for operation */
|
||||
func_binary operation; /*!< operation to apply elementwise */
|
||||
} ai_layer_reduce;
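
/*
 * Illustration only (not part of the original header): a reduce layer of
 * this shape folds its binary operation over the input, starting from
 * neutral_value. A minimal sketch on a plain float buffer (assuming
 * ai_float is a float typedef):
 *
 *   static float op_add(const float a, const float b) { return a + b; }
 *
 *   static float reduce_1d(const float *in, int n, float neutral,
 *                          float (*op)(const float, const float)) {
 *     float acc = neutral;            // e.g. 0.0f for a sum reduction
 *     for (int i = 0; i < n; ++i)
 *       acc = op(acc, in[i]);
 *     return acc;
 *   }
 */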
|
||||
|
||||
/******************************************************************************/
|
||||
/* Forward Functions Section */
|
||||
/******************************************************************************/
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a TimeDelay layer.
|
||||
* @ingroup layers_generic
|
||||
* @param layer the time delay layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_time_delay(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Split network computation in N parallel branches.
|
||||
* @ingroup layers_generic
|
||||
* @param layer the split layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_split(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Add network computation from N parallel branches.
|
||||
* @ingroup layers_generic
|
||||
* @param layer the add layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_add(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Transpose a tensor along a pivot and save transposed values into an output
|
||||
* tensor
|
||||
* @ingroup layers_generic
|
||||
* @param layer the transpose layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_transpose(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
 * @brief TimeDistributed forward layer function. This forward function
 * implements the time distributed layer.
|
||||
* @ingroup layers_generic
|
||||
* @param layer the time distributed layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_time_distributed(ai_layer* layer);
|
||||
|
||||
|
||||
/*!
|
||||
* @brief Concatenates a list of tensors into a single tensor.
|
||||
* @ingroup layers_generic
|
||||
* @param layer the concatenation layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_concat(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
 * @brief Slice an input tensor
 * @ingroup layers_generic
 * @param layer the slice layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_slice(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
 * @brief Tile an input tensor
 * @ingroup layers_generic
 * @param layer the tile layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_tile(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
 * @brief Compute the topK elements of an input tensor
 * @ingroup layers_generic
 * @param layer the topK layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_topK(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
 * @brief Pad an input tensor
|
||||
* @ingroup layers_generic
|
||||
* @param layer the pad layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_pad(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
 * @brief Upsample an input tensor
 * @ingroup layers_generic
 * @param layer the upsample layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_upsample(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
 * @brief Instance Normalization on an input tensor
|
||||
* @ingroup layers_generic
|
||||
* @param layer the instance normalization layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_instanceNormalization(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Apply an elementwise transformation to the input tensors
|
||||
* @ingroup layers_generic
|
||||
* @param layer the elementwise layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_eltwise(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Apply a reduce transformation to the input tensors
|
||||
* @ingroup layers_generic
|
||||
* @param layer the reduce layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_reduce(ai_layer* layer);
|
||||
|
||||
|
||||
/*!
|
||||
* @brief Apply an elementwise addition to the input tensors
|
||||
* @ingroup layers_generic
|
||||
* @param layer the elementwise layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_add_integer(ai_layer* layer);
|
||||
|
||||
|
||||
AI_API_DECLARE_END
|
||||
|
||||
#endif /*__LAYERS_GENERIC_H_*/
|
||||
@ -1,92 +0,0 @@
|
||||
/**
|
||||
******************************************************************************
|
||||
* @file layers_list.h
|
||||
* @author AST Embedded Analytics Research Platform
|
||||
* @date 20-Jul-2018
|
||||
* @brief header file of AI platform layers datatypes
|
||||
******************************************************************************
|
||||
* @attention
|
||||
*
|
||||
* <h2><center>© Copyright (c) 2018 STMicroelectronics.
|
||||
* All rights reserved.</center></h2>
|
||||
*
|
||||
* This software component is licensed by ST under Ultimate Liberty license
|
||||
* SLA0044, the "License"; You may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at:
|
||||
* www.st.com/SLA0044
|
||||
*
|
||||
******************************************************************************
|
||||
*/
|
||||
|
||||
|
||||
/* No sentry. This is deliberate!! */
|
||||
/* Template: LAYER_ENTRY(type_, id_, struct_, forward_func_)
|
||||
* Where:
|
||||
* - type_ is the (enum) type name of the layer. to have the complete enum
|
||||
* value you should use the macro @ref AI_LAYER_TYPE_ENTRY(type_) that adds
|
||||
* the specific prefix and postfix tokens to the type_
|
||||
* - id_ is the numeric id of the layer
|
||||
* - struct_ is the name of the datastruct of the layer
|
||||
* - forward_func_ is the forward function name of the routine implementing
|
||||
* actual layer processing
|
||||
*/
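
/*
 * Illustration only (not part of the original list): one common way a
 * consumer expands this X-macro file. The EXAMPLE_LAYER_ prefix below is
 * hypothetical; the platform's own ids go through AI_LAYER_TYPE_ENTRY()
 * as described above.
 *
 *   enum example_layer_type {
 *   #define LAYER_ENTRY(type_, id_, struct_, forward_func_) \
 *     EXAMPLE_LAYER_##type_ = (id_),
 *   #include "layers_list.h"
 *   };
 *
 * No #undef is needed by the consumer: this file undefines LAYER_ENTRY at
 * its end.
 */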
|
||||
/*!< Elementwise addition layer */
|
||||
LAYER_ENTRY(ADD, 10001, ai_layer_add, forward_add)
|
||||
/*!< Batch normalization layer */
|
||||
LAYER_ENTRY(BN, 10002, ai_layer_bn, forward_bn)
|
||||
/*!< 2D Convolutional layer */
|
||||
LAYER_ENTRY(CONV2D, 10004, ai_layer_conv2d, forward_conv2d)
|
||||
/*!< Dense layer */
|
||||
LAYER_ENTRY(DENSE, 10005, ai_layer_dense, forward_dense)
|
||||
/*!< Gated Recurrent Unit layer */
|
||||
LAYER_ENTRY(GRU, 10006, ai_layer_gru, forward_gru)
|
||||
/*!< Local Response Normalization layer */
|
||||
LAYER_ENTRY(LRN, 10007, ai_layer_lrn, forward_lrn)
|
||||
/*!< Long Short Time Memory layer */
|
||||
LAYER_ENTRY(LSTM, 10008, ai_layer_lstm, forward_lstm)
|
||||
/*!< Nonlinearity layer */
|
||||
LAYER_ENTRY(NL, 10009, ai_layer_nl, NULL)
|
||||
/*!< Normalization layer */
|
||||
LAYER_ENTRY(NORM, 10010, ai_layer_norm, forward_norm)
|
||||
/*!< Merged Conv2d / Pool layer */
|
||||
LAYER_ENTRY(OPTIMIZED_CONV2D, 10011, ai_layer_conv2d_nl_pool, forward_conv2d_nl_pool)
|
||||
/*!< Transpose Tensor layer */
|
||||
LAYER_ENTRY(TRANSPOSE, 10012, ai_layer_transpose, forward_transpose)
|
||||
/*!< Pooling layer */
|
||||
LAYER_ENTRY(POOL, 10013, ai_layer_pool, forward_pool)
|
||||
/*!< Softmax layer */
|
||||
LAYER_ENTRY(SM, 10014, ai_layer_nl, forward_sm)
|
||||
/*!< Split layer */
|
||||
LAYER_ENTRY(SPLIT, 10015, ai_layer_split, forward_split)
|
||||
/*!< TimeDelay layer */
|
||||
LAYER_ENTRY(TIME_DELAY, 10016, ai_layer_time_delay, forward_time_delay)
|
||||
/*!< TimeDistributed layer */
|
||||
LAYER_ENTRY(TIME_DISTRIBUTED, 10017, ai_layer_time_distributed, forward_time_distributed)
|
||||
/*!< Concat Tensor layer */
|
||||
LAYER_ENTRY(CONCAT, 10019, ai_layer_concat, forward_concat)
|
||||
/*!< GEMM layer */
|
||||
LAYER_ENTRY(GEMM, 10020, ai_layer_gemm, forward_gemm)
|
||||
/*!< Upsample layer */
|
||||
LAYER_ENTRY(UPSAMPLE, 10021, ai_layer_upsample, forward_upsample)
|
||||
/*!< Container layer for eltwise operations */
|
||||
LAYER_ENTRY(ELTWISE, 10022, ai_layer_eltwise, forward_eltwise)
|
||||
/*!< Generic layer */
|
||||
LAYER_ENTRY(GENERIC, 10023, ai_layer, NULL)
|
||||
/*!< InstanceNormalization layer */
|
||||
LAYER_ENTRY(INSTANCENORMALIZATION, 10024, ai_layer_instanceNormalization, forward_instanceNormalization)
|
||||
/*!< Pad layer */
|
||||
LAYER_ENTRY(PAD, 10025, ai_layer_pad, forward_pad)
|
||||
/*!< Slice layer */
|
||||
LAYER_ENTRY(SLICE, 10026, ai_layer_slice, forward_slice)
|
||||
/*!< Tile layer */
|
||||
LAYER_ENTRY(TILE, 10027, ai_layer_tile, forward_tile)
|
||||
/*!< Container layer for reduce operations */
|
||||
LAYER_ENTRY(REDUCE, 10028, ai_layer_reduce, forward_reduce)
|
||||
#ifdef USE_OPERATORS
|
||||
/*!< Container layer for operators */
|
||||
LAYER_ENTRY(CONTAINER, 10003, ai_layer_container, forward_container)
|
||||
/*!< Container layer for operators */
|
||||
LAYER_ENTRY(LAMBDA, 10018, ai_layer_lambda, forward_lambda)
|
||||
#endif
|
||||
#undef LAYER_ENTRY
|
||||
@ -1,925 +0,0 @@
|
||||
/**
|
||||
******************************************************************************
|
||||
* @file layers_nl.h
|
||||
* @author AST Embedded Analytics Research Platform
|
||||
* @date 18-Apr-2018
|
||||
* @brief header file of AI platform nonlinearity layers datatypes
|
||||
******************************************************************************
|
||||
* @attention
|
||||
*
|
||||
* <h2><center>© Copyright (c) 2018 STMicroelectronics.
|
||||
* All rights reserved.</center></h2>
|
||||
*
|
||||
* This software component is licensed by ST under Ultimate Liberty license
|
||||
* SLA0044, the "License"; You may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at:
|
||||
* www.st.com/SLA0044
|
||||
*
|
||||
******************************************************************************
|
||||
*/
|
||||
#ifndef __LAYERS_NL_H_
|
||||
#define __LAYERS_NL_H_
|
||||
#pragma once
|
||||
|
||||
#include "layers_common.h"
|
||||
|
||||
/*!
|
||||
 * @defgroup layers_nl Nonlinearity Layers Definitions
|
||||
* @brief definition
|
||||
*
|
||||
*/
|
||||
|
||||
AI_API_DECLARE_BEGIN
|
||||
|
||||
/*!
|
||||
* @struct ai_layer_nl
|
||||
* @ingroup layers_nl
|
||||
* @brief Generic Nonlinearity layer
|
||||
*
|
||||
* The type of nonlinearity is handled by the specific forward function.
|
||||
* It is a sequential layer. see @ref ai_layer
|
||||
*/
|
||||
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_nl_ {
|
||||
AI_LAYER_COMMON_FIELDS_DECLARE
|
||||
AI_CONST ai_array* nl_params; /*!< associated parameters array */
|
||||
} ai_layer_nl;
|
||||
|
||||
/*!
|
||||
* @typedef (*func_nl)
|
||||
* @ingroup layers_nl
|
||||
 * @brief Function pointer for a generic nonlinear transform
 * this function pointer abstracts a generic nonlinear layer.
 * see @ref nl_func_tanh_array_f32 and similar as examples.
|
||||
*/
|
||||
typedef void (*func_nl)(ai_array *out, const ai_array *in,
|
||||
const ai_size size, const ai_handle params);
|
||||
|
||||
/*!
|
||||
 * @brief Softmax normalization computed on a single float channel
|
||||
* @ingroup layers_nl
|
||||
* @param out opaque handler to float output channel
|
||||
* @param in opaque handler to float input channel
|
||||
* @param channel_size number of elements of the input channel
|
||||
* @param params opaque handler to optional nl parameters
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void nl_func_sm_channel_f32(ai_array *out, const ai_array *in,
|
||||
const ai_size channel_size, const ai_handle params);
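
/*
 * For reference (standard definition, illustration only): the softmax
 * routines above and below are expected to compute, per channel,
 *
 *   out[i] = exp(in[i]) / sum_j exp(in[j])
 *
 * robust implementations usually subtract max(in) before exponentiating
 * to avoid overflow.
 */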
|
||||
/*!
|
||||
* @brief Softmax normalization computed on an array of float channels
|
||||
* @ingroup layers_nl
|
||||
* @param out opaque handler to float output channel array
|
||||
* @param in opaque handler to float input channel array
|
||||
* @param in_size total size (number of elements) to process on the input
|
||||
* @param channel_size number of elements of the input channel
|
||||
* @param in_channel_step number of elements to move to next input element
|
||||
* @param out_channel_step number of elements to move to next output element
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void nl_func_sm_array_f32(ai_array *out, ai_array *in,
|
||||
const ai_size in_size,
|
||||
const ai_size channel_size,
|
||||
const ai_size in_channel_step,
|
||||
const ai_size out_channel_step);
|
||||
|
||||
/*!
|
||||
* @brief Computes the tanh function on a float data array
|
||||
* @ingroup layers_nl
|
||||
* @param in opaque handler to float, size should be 1
|
||||
* @param out opaque handler to float output elem
|
||||
* @param size number of elements in the input buffer
|
||||
* @param params opaque handler to optional nl parameters
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void nl_func_tanh_array_f32(ai_array *out, const ai_array *in,
|
||||
const ai_size size, const ai_handle params);
|
||||
|
||||
/*!
|
||||
* @brief Computes the tanh function on a fixed point data array
|
||||
* @ingroup layers_nl
|
||||
* @param in opaque handler to input elements to process
|
||||
* @param out opaque handler to output elements
|
||||
* @param size total size (number of elements) to process on the input
|
||||
* @param params opaque handler to optional nl parameters
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void nl_func_tanh_array_fixed(ai_array *out, const ai_array *in,
|
||||
const ai_size size, const ai_handle params);
|
||||
|
||||
|
||||
/*!
|
||||
* @brief Computes the sigmoid function on a float data array
|
||||
* @ingroup layers_nl
|
||||
* @param in opaque handler to float, size should be 1
|
||||
* @param out opaque handler to float output elem
|
||||
* @param size number of elements in the input buffer
|
||||
* @param params opaque handler to optional nl parameters
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void nl_func_sigmoid_array_f32(ai_array *out, const ai_array *in,
|
||||
const ai_size size, const ai_handle params);
|
||||
|
||||
/*!
|
||||
* @brief Computes the sigmoid function on a fixed point data array
|
||||
* @ingroup layers_nl
|
||||
* @param in opaque handler to input elements to process
|
||||
* @param out opaque handler to output elements
|
||||
* @param size total size (number of elements) to process on the input
|
||||
* @param params opaque handler to optional nl parameters
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void nl_func_sigmoid_array_fixed(ai_array *out, const ai_array *in,
|
||||
const ai_size size, const ai_handle params);
|
||||
|
||||
|
||||
/*!
|
||||
* @brief Computes the hard sigmoid function on a float data array
|
||||
* @ingroup layers_nl
|
||||
* @param in opaque handler to float, size should be 1
|
||||
* @param out opaque handler to float output elem
|
||||
* @param size number of elements in the input buffer
|
||||
* @param params opaque handler to optional nl parameters
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void nl_func_hard_sigmoid_array_f32(ai_array *out, const ai_array *in,
|
||||
const ai_size size, const ai_handle params);
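
/*
 * For reference (illustration only): the commonly used hard sigmoid
 * approximation; the 0.2 slope and 0.5 offset are the usual defaults and
 * may instead be carried in the params handle:
 *
 *   out[i] = max(0, min(1, 0.2 * in[i] + 0.5))
 */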
|
||||
/*!
|
||||
* @brief Computes the absolute value function on a float data array
|
||||
* @ingroup layers_nl
|
||||
* @param in opaque handler to float, size should be 1
|
||||
* @param out opaque handler to float output elem
|
||||
* @param size number of elements in the input buffer
|
||||
* @param params opaque handler to optional nl parameters
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void nl_func_abs_array_f32(ai_array *out, const ai_array *in,
|
||||
const ai_size size, const ai_handle params);
|
||||
|
||||
/*!
|
||||
* @brief Computes the cosine function on a float data array
|
||||
* @ingroup layers_nl
|
||||
* @param in opaque handler to float, size should be 1
|
||||
* @param out opaque handler to float output elem
|
||||
* @param size number of elements in the input buffer
|
||||
* @param params opaque handler to optional nl parameters
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void nl_func_cos_array_f32(ai_array *out, const ai_array *in,
|
||||
const ai_size size, const ai_handle params);
|
||||
|
||||
/*!
|
||||
* @brief Computes the inverse cosine function on a float data array
|
||||
* @ingroup layers_nl
|
||||
* @param in opaque handler to float, size should be 1
|
||||
* @param out opaque handler to float output elem
|
||||
* @param size number of elements in the input buffer
|
||||
* @param params opaque handler to optional nl parameters
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void nl_func_acos_array_f32(ai_array *out, const ai_array *in,
|
||||
const ai_size size, const ai_handle params);
|
||||
|
||||
/*!
|
||||
* @brief Computes the hyperbolic cosine function on a float data array
|
||||
* @ingroup layers_nl
|
||||
* @param in opaque handler to float, size should be 1
|
||||
* @param out opaque handler to float output elem
|
||||
* @param size number of elements in the input buffer
|
||||
* @param params opaque handler to optional nl parameters
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void nl_func_cosh_array_f32(ai_array *out, const ai_array *in,
|
||||
const ai_size size, const ai_handle params);
|
||||
|
||||
/*!
|
||||
* @brief Computes the inverse hyperbolic cosine function on a float data array
|
||||
* @ingroup layers_nl
|
||||
* @param in opaque handler to float, size should be 1
|
||||
* @param out opaque handler to float output elem
|
||||
* @param size number of elements in the input buffer
|
||||
* @param params opaque handler to optional nl parameters
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void nl_func_acosh_array_f32(ai_array *out, const ai_array *in,
|
||||
const ai_size size, const ai_handle params);
|
||||
|
||||
/*!
|
||||
* @brief Computes the sine function on a float data array
|
||||
* @ingroup layers_nl
|
||||
* @param in opaque handler to float, size should be 1
|
||||
* @param out opaque handler to float output elem
|
||||
* @param size number of elements in the input buffer
|
||||
* @param params opaque handler to optional nl parameters
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void nl_func_sin_array_f32(ai_array *out, const ai_array *in,
|
||||
const ai_size size, const ai_handle params);
|
||||
|
||||
/*!
|
||||
* @brief Computes the inverse sine function on a float data array
|
||||
* @ingroup layers_nl
|
||||
* @param in opaque handler to float, size should be 1
|
||||
* @param out opaque handler to float output elem
|
||||
* @param size number of elements in the input buffer
|
||||
* @param params opaque handler to optional nl parameters
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void nl_func_asin_array_f32(ai_array *out, const ai_array *in,
|
||||
const ai_size size, const ai_handle params);
|
||||
|
||||
/*!
|
||||
* @brief Computes the hyperbolic sine function on a float data array
|
||||
* @ingroup layers_nl
|
||||
* @param in opaque handler to float, size should be 1
|
||||
* @param out opaque handler to float output elem
|
||||
* @param size number of elements in the input buffer
|
||||
* @param params opaque handler to optional nl parameters
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void nl_func_sinh_array_f32(ai_array *out, const ai_array *in,
|
||||
const ai_size size, const ai_handle params);
|
||||
|
||||
/*!
|
||||
* @brief Computes the inverse hyperbolic sine function on a float data array
|
||||
* @ingroup layers_nl
|
||||
* @param in opaque handler to float, size should be 1
|
||||
* @param out opaque handler to float output elem
|
||||
* @param size number of elements in the input buffer
|
||||
* @param params opaque handler to optional nl parameters
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void nl_func_asinh_array_f32(ai_array *out, const ai_array *in,
|
||||
const ai_size size, const ai_handle params);
|
||||
|
||||
/*!
|
||||
* @brief Computes the tangent function on a float data array
|
||||
* @ingroup layers_nl
|
||||
* @param in opaque handler to float, size should be 1
|
||||
* @param out opaque handler to float output elem
|
||||
* @param size number of elements in the input buffer
|
||||
* @param params opaque handler to optional nl parameters
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void nl_func_tan_array_f32(ai_array *out, const ai_array *in,
|
||||
const ai_size size, const ai_handle params);
|
||||
|
||||
/*!
|
||||
* @brief Computes the inverse tangent function on a float data array
|
||||
* @ingroup layers_nl
|
||||
* @param in opaque handler to float, size should be 1
|
||||
* @param out opaque handler to float output elem
|
||||
* @param size number of elements in the input buffer
|
||||
* @param params opaque handler to optional nl parameters
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void nl_func_atan_array_f32(ai_array *out, const ai_array *in,
|
||||
const ai_size size, const ai_handle params);
|
||||
|
||||
/*!
|
||||
* @brief Computes the inverse hyperbolic tangent function on a float data array
|
||||
* @ingroup layers_nl
|
||||
* @param in opaque handler to float, size should be 1
|
||||
* @param out opaque handler to float output elem
|
||||
* @param size number of elements in the input buffer
|
||||
* @param params opaque handler to optional nl parameters
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void nl_func_atanh_array_f32(ai_array *out, const ai_array *in,
|
||||
const ai_size size, const ai_handle params);
|
||||
|
||||
/*!
|
||||
* @brief Computes the error function on a float data array
|
||||
* @ingroup layers_nl
|
||||
* @param in opaque handler to float, size should be 1
|
||||
* @param out opaque handler to float output elem
|
||||
* @param size number of elements in the input buffer
|
||||
* @param params opaque handler to optional nl parameters
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void nl_func_erf_array_f32(ai_array *out, const ai_array *in,
|
||||
const ai_size size, const ai_handle params);
|
||||
|
||||
/*!
|
||||
* @brief Computes the natural logarithm function on a float data array
|
||||
* @ingroup layers_nl
|
||||
* @param in opaque handler to float, size should be 1
|
||||
* @param out opaque handler to float output elem
|
||||
* @param size number of elements in the input buffer
|
||||
* @param params opaque handler to optional nl parameters
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void nl_func_log_array_f32(ai_array *out, const ai_array *in,
|
||||
const ai_size size, const ai_handle params);
|
||||
|
||||
/*!
|
||||
* @brief Computes the reciprocal square root function on a float data array
|
||||
* @ingroup layers_nl
|
||||
* @param in opaque handler to float, size should be 1
|
||||
* @param out opaque handler to float output elem
|
||||
* @param size number of elements in the input buffer
|
||||
* @param params opaque handler to optional nl parameters
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void nl_func_rsqrt_array_f32(ai_array *out, const ai_array *in,
|
||||
const ai_size size, const ai_handle params);
|
||||
|
||||
/*!
|
||||
* @brief Computes the floor function on a float data array
|
||||
* @ingroup layers_nl
|
||||
* @param in opaque handler to float, size should be 1
|
||||
* @param out opaque handler to float output elem
|
||||
* @param size number of elements in the input buffer
|
||||
* @param params opaque handler to optional nl parameters
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void nl_func_floor_array_f32(ai_array *out, const ai_array *in,
|
||||
const ai_size size, const ai_handle params);
|
||||
|
||||
/*!
|
||||
* @brief Computes the ceil function on a float data array
|
||||
* @ingroup layers_nl
|
||||
* @param in opaque handler to float, size should be 1
|
||||
* @param out opaque handler to float output elem
|
||||
* @param size number of elements in the input buffer
|
||||
* @param params opaque handler to optional nl parameters
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void nl_func_ceil_array_f32(ai_array *out, const ai_array *in,
|
||||
const ai_size size, const ai_handle params);
|
||||
|
||||
/*!
|
||||
* @brief Computes the rounding function on a float data array
|
||||
* @ingroup layers_nl
|
||||
* @param in opaque handler to float, size should be 1
|
||||
* @param out opaque handler to float output elem
|
||||
* @param size number of elements in the input buffer
|
||||
* @param params opaque handler to optional nl parameters
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void nl_func_round_array_f32(ai_array *out, const ai_array *in,
|
||||
const ai_size size, const ai_handle params);
|
||||
|
||||
/*!
|
||||
* @brief Computes the exponential function on a float data array
|
||||
* @ingroup layers_nl
|
||||
* @param in opaque handler to float, size should be 1
|
||||
* @param out opaque handler to float output elem
|
||||
* @param size number of elements in the input buffer
|
||||
* @param params opaque handler to optional nl parameters
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void nl_func_exp_array_f32(ai_array *out, const ai_array *in,
|
||||
const ai_size size, const ai_handle params);
|
||||
|
||||
/*!
|
||||
* @brief Computes the sign negation function on a float data array
|
||||
* @ingroup layers_nl
|
||||
* @param in opaque handler to float, size should be 1
|
||||
* @param out opaque handler to float output elem
|
||||
* @param size number of elements in the input buffer
|
||||
* @param params opaque handler to optional nl parameters
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void nl_func_neg_array_f32(ai_array *out, const ai_array *in,
|
||||
const ai_size size, const ai_handle params);
|
||||
|
||||
/*!
|
||||
* @brief Computes the reciprocal function on a float data array
|
||||
* @ingroup layers_nl
|
||||
* @param in opaque handler to float, size should be 1
|
||||
* @param out opaque handler to float output elem
|
||||
* @param size number of elements in the input buffer
|
||||
* @param params opaque handler to optional nl parameters
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void nl_func_reciprocal_array_f32(ai_array *out, const ai_array *in,
|
||||
const ai_size size, const ai_handle params);
|
||||
|
||||
/*!
|
||||
* @brief Computes the square root function on a float data array
|
||||
* @ingroup layers_nl
|
||||
* @param in opaque handler to float, size should be 1
|
||||
* @param out opaque handler to float output elem
|
||||
* @param size number of elements in the input buffer
|
||||
* @param params opaque handler to optional nl parameters
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void nl_func_sqrt_array_f32(ai_array *out, const ai_array *in,
|
||||
const ai_size size, const ai_handle params);
|
||||
|
||||
/*!
|
||||
* @brief Computes the soft plus function on a float data array
|
||||
* @ingroup layers_nl
|
||||
* @param in opaque handler to float, size should be 1
|
||||
* @param out opaque handler to float output elem
|
||||
* @param size number of elements in the input buffer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void nl_func_soft_plus_array_f32(ai_array *out, const ai_array *in,
|
||||
const ai_size size, const ai_handle params);
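
/*
 * For reference (illustration only): the standard soft plus definition,
 * a smooth approximation of relu:
 *
 *   out[i] = log(1 + exp(in[i]))
 */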
|
||||
/*!
|
||||
* @brief Computes the soft sign function on a float data array
|
||||
* @ingroup layers_nl
|
||||
* @param in opaque handler to float, size should be 1
|
||||
* @param out opaque handler to float output elem
|
||||
* @param size number of elements in the input buffer
|
||||
* @param params opaque handler to optional nl parameters
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void nl_func_soft_sign_array_f32(ai_array *out, const ai_array *in,
|
||||
const ai_size size, const ai_handle params);
|
||||
|
||||
/*!
|
||||
 * @brief Computes the sign function on a float data array
|
||||
* @ingroup layers_nl
|
||||
* @param in opaque handler to float, size should be 1
|
||||
* @param out opaque handler to float output elem
|
||||
* @param size number of elements in the input buffer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void nl_func_sign_array_f32(ai_array *out, const ai_array *in,
|
||||
const ai_size size, const ai_handle params);
|
||||
|
||||
/*!
|
||||
* @brief Computes the clip function on a float data array
|
||||
* @ingroup layers_nl
|
||||
* @param in opaque handler to float, size should be 1
|
||||
* @param out opaque handler to float output elem
|
||||
* @param size number of elements in the input buffer
|
||||
* @param params opaque handler to optional nl parameters
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void nl_func_clip_array_f32(ai_array *out, const ai_array *in,
|
||||
const ai_size size, const ai_handle params);
|
||||
|
||||
/*!
|
||||
* @brief Computes the hardmax function on a float data array
|
||||
* @ingroup layers_nl
|
||||
* @param in opaque handler to float, size should be 1
|
||||
* @param out opaque handler to float output elem
|
||||
* @param axis direction of the max index to be searched
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void nl_func_hardmax_array_f32(ai_array *out, const ai_array *in,
|
||||
const ai_shape *shape, const ai_handle params);
|
||||
|
||||
/*!
|
||||
* @brief Computes the generic relu function on a float data array
|
||||
* @ingroup layers_nl
|
||||
* @param in opaque handler to float, size should be 1
|
||||
* @param out opaque handler to float output elem
|
||||
* @param size number of elements in the input buffer
|
||||
* @param params opaque handler to optional nl parameters
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void nl_func_relu_generic_array_f32(ai_array *out, const ai_array *in,
|
||||
const ai_size size, const ai_handle params);
|
||||
|
||||
/*!
|
||||
* @brief Computes the thresholded relu function on a float data array
|
||||
* @ingroup layers_nl
|
||||
* @param in opaque handler to float, size should be 1
|
||||
* @param out opaque handler to float output elem
|
||||
* @param size number of elements in the input buffer
|
||||
* @param params opaque handler to optional nl parameters
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void nl_func_relu_thresholded_array_f32(ai_array *out, const ai_array *in,
|
||||
const ai_size size, const ai_handle params);
|
||||
|
||||
/*!
|
||||
* @brief Computes the relu function on a float data array
|
||||
* @ingroup layers_nl
|
||||
* @param in opaque handler to float, size should be 1
|
||||
* @param out opaque handler to float output elem
|
||||
* @param size number of elements in the input buffer
|
||||
* @param params opaque handler to optional nl parameters
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void nl_func_relu_array_f32(ai_array *out, const ai_array *in,
|
||||
const ai_size size, const ai_handle params);
|
||||
|
||||
/*!
|
||||
* @brief Computes the relu function on a fixed point data array
|
||||
* @ingroup layers_nl
|
||||
* @param in opaque handler to input elements to process
|
||||
* @param out opaque handler to output elements
|
||||
* @param size total size (number of elements) to process on the input
|
||||
* @param params opaque handler to optional nl parameters
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void nl_func_relu_array_fixed(ai_array *out, const ai_array *in,
|
||||
const ai_size size, const ai_handle params);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activation function on an integer-quantized data array
|
||||
* @ingroup layers_nl
|
||||
* @param in opaque handler to input elements to process
|
||||
* @param out opaque handler to output elements
|
||||
* @param size total size (number of elements) to process on the input
|
||||
* @param params opaque handler to generated and used LUT
|
||||
*/
|
||||
void nl_func_array_integer(ai_array *out, const ai_array *in,
|
||||
const ai_size size, const ai_handle params);
|
||||
|
||||
/*!
|
||||
* @brief Computes the elu function on a float data array
|
||||
* @ingroup layers_nl
|
||||
* @param in opaque handler to float, size should be 1
|
||||
* @param out opaque handler to float output elem
|
||||
* @param size number of elements in the input buffer
|
||||
* @param params opaque handler to optional nl parameters
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void nl_func_elu_array_f32(ai_array *out, const ai_array *in,
|
||||
const ai_size size, const ai_handle params);
|
||||
|
||||
/*!
|
||||
* @brief Computes the max relu function on a fixed point data array
|
||||
* @ingroup layers_nl
|
||||
* @param in opaque handler to input elements to process
|
||||
* @param out opaque handler to output elements
|
||||
* @param size total size (number of elements) to process on the input
|
||||
* @param params opaque handler to optional nl parameters
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void nl_func_relu_max_array_fixed(ai_array *out, const ai_array *in,
|
||||
const ai_size size, const ai_handle params);
|
||||
|
||||
/*!
|
||||
* @brief Computes the selu function on a float data array
|
||||
* @ingroup layers_nl
|
||||
* @param in opaque handler to float, size should be 1
|
||||
* @param out opaque handler to float output elem
|
||||
* @param size number of elements in the input buffer
|
||||
* @param params opaque handler to optional nl parameters
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void nl_func_selu_array_f32(ai_array *out, const ai_array *in,
|
||||
const ai_size size, const ai_handle params);
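
/*
 * For reference (illustration only): the standard SELU definition; the
 * constants below are the usual published values and may instead come
 * from the params handle:
 *
 *   out[i] = lambda * in[i]                       if in[i] > 0
 *   out[i] = lambda * alpha * (exp(in[i]) - 1)    otherwise
 *
 * with alpha ~= 1.6733 and lambda ~= 1.0507.
 */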
|
||||
/*!
|
||||
* @brief Computes the prelu function on a float data array
|
||||
* @ingroup layers_nl
|
||||
* @param in opaque handler to float, size should be 1
|
||||
* @param slope opaque handler to float, size should be 1
|
||||
* @param out opaque handler to float output elem
|
||||
 * @param size number of elements in the input buffer
|
||||
* @param params opaque handler to optional nl parameters
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void nl_func_prelu_array_f32(ai_array *out, const ai_array *in,
|
||||
const ai_size size, const ai_handle params);
|
||||
|
||||
|
||||
/******************************************************************************/
|
||||
/** Forward Functions Section **/
|
||||
/******************************************************************************/
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a ReLU nonlinear layer.
|
||||
* @ingroup layers_nl
|
||||
* @param layer the nonlinear (nl) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_relu(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a fixed point ReLU nonlinear layer.
|
||||
* @ingroup layers_nl
|
||||
* @param layer the nonlinear (nl) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_relu_fixed(ai_layer *pLayer);
|
||||
|
||||
#if 0
|
||||
/*!
|
||||
 * @brief Computes the activations of an integer-quantized ReLU nonlinear layer.
|
||||
* @ingroup layers_nl
|
||||
* @param layer the nonlinear (nl) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_relu_integer(ai_layer *pLayer);
|
||||
#endif
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a ReLU6 nonlinear layer.
|
||||
* @ingroup layers_nl
|
||||
* @param layer the nonlinear (nl) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_relu_thresholded(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a fixed point max ReLU layer.
|
||||
* @ingroup layers_nl
|
||||
* @param layer the nonlinear (nl) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_relu_max_fixed(ai_layer *pLayer);
|
||||
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a ELU nonlinear layer.
|
||||
* @ingroup layers_nl
|
||||
* @param layer the nonlinear (nl) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_elu(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a SELU nonlinear layer.
|
||||
* @ingroup layers_nl
|
||||
* @param layer the nonlinear (nl) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_selu(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a PRELU nonlinear layer.
|
||||
* @ingroup layers_nl
|
||||
* @param layer the nonlinear (nl) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_prelu(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a binary tanh (sign) nonlinear layer.
|
||||
 * @ingroup layers_nl
|
||||
* @param layer the nonlinear (nl) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_sign(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a clip nonlinear layer.
|
||||
 * @ingroup layers_nl
|
||||
* @param layer the nonlinear (nl) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_clip(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a sigmoid nonlinear layer.
|
||||
* @ingroup layers_nl
|
||||
* @param layer the nonlinear (nl) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_sigmoid(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a fixed point sigmoid nonlinear layer.
|
||||
* @ingroup layers_nl
|
||||
* @param layer the nonlinear (nl) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_sigmoid_fixed(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a hard sigmoid nonlinear layer.
|
||||
* @ingroup layers_nl
|
||||
* @param layer the nonlinear (nl) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_hard_sigmoid(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of an exponential nonlinear layer.
|
||||
* @ingroup layers_nl
|
||||
* @param layer the nonlinear (nl) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_exp(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
 * @brief Computes the activations of a square root nonlinear layer.
|
||||
* @ingroup layers_nl
|
||||
* @param layer the nonlinear (nl) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_sqrt(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a soft plus nonlinear layer.
|
||||
* @ingroup layers_nl
|
||||
* @param layer the nonlinear (nl) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_soft_plus(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a soft sign nonlinear layer.
|
||||
* @ingroup layers_nl
|
||||
* @param layer the nonlinear (nl) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_soft_sign(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a cosine (cos) layer.
|
||||
* @ingroup layers_nl
|
||||
* @param layer the nonlinear (nl) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_cos(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
 * @brief Computes the activations of an inverse cosine (acos) layer.
|
||||
* @ingroup layers_nl
|
||||
* @param layer the nonlinear (nl) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_acos(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a hyperbolic cosine (cosh) layer.
|
||||
* @ingroup layers_nl
|
||||
* @param layer the nonlinear (nl) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_cosh(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
 * @brief Computes the activations of an inverse hyperbolic cosine (acosh) layer.
|
||||
* @ingroup layers_nl
|
||||
* @param layer the nonlinear (nl) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_acosh(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a sine (sin) layer.
|
||||
* @ingroup layers_nl
|
||||
* @param layer the nonlinear (nl) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_sin(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
 * @brief Computes the activations of an inverse sine (asin) layer.
|
||||
* @ingroup layers_nl
|
||||
* @param layer the nonlinear (nl) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_asin(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a hyperbolic sine (sinh) layer.
|
||||
* @ingroup layers_nl
|
||||
* @param layer the nonlinear (nl) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_sinh(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
 * @brief Computes the activations of an inverse hyperbolic sine (asinh) layer.
|
||||
* @ingroup layers_nl
|
||||
* @param layer the nonlinear (nl) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_asinh(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a tangent (tan) layer.
|
||||
* @ingroup layers_nl
|
||||
* @param layer the nonlinear (nl) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_tan(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
 * @brief Computes the activations of an inverse tangent (atan) layer.
|
||||
* @ingroup layers_nl
|
||||
* @param layer the nonlinear (nl) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_atan(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a hyperbolic tangent (tanh) layer.
|
||||
* @ingroup layers_nl
|
||||
* @param layer the nonlinear (nl) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_tanh(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
 * @brief Computes the activations of an inverse hyperbolic tangent (atanh) layer.
|
||||
* @ingroup layers_nl
|
||||
* @param layer the nonlinear (nl) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_atanh(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a fixed point tanh nonlinear layer.
|
||||
* @ingroup layers_nl
|
||||
* @param layer the nonlinear (nl) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_tanh_fixed(ai_layer *pLayer);
|
||||
|
||||
/*!
|
||||
 * @brief Computes the activations of an error function (erf) layer.
|
||||
* @ingroup layers_nl
|
||||
* @param layer the nonlinear (nl) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_erf(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a natural logarithm (log) layer.
|
||||
* @ingroup layers_nl
|
||||
* @param layer the nonlinear (nl) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_log(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a reciprocal square root (rsqrt) layer.
|
||||
* @ingroup layers_nl
|
||||
* @param layer the nonlinear (nl) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_rsqrt(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of an absolute value (abs) layer.
|
||||
* @ingroup layers_nl
|
||||
* @param layer the nonlinear (nl) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_abs(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a ceil layer.
|
||||
* @ingroup layers_nl
|
||||
* @param layer the nonlinear (nl) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_ceil(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a floor layer.
|
||||
* @ingroup layers_nl
|
||||
* @param layer the nonlinear (nl) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_floor(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a rounding layer.
|
||||
* @ingroup layers_nl
|
||||
* @param layer the nonlinear (nl) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_round(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a sign negation (neg) layer.
|
||||
* @ingroup layers_nl
|
||||
* @param layer the nonlinear (nl) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_neg(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a reciprocal layer.
|
||||
* @ingroup layers_nl
|
||||
* @param layer the nonlinear (nl) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_reciprocal(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
 * @brief Hardmax on an input tensor
 * @ingroup layers_nl
|
||||
* @param layer the hardmax layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_hardmax(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a softmax nonlinear layer.
|
||||
* @ingroup layers_nl
|
||||
* @param layer the softmax (sm) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_sm(ai_layer* layer);
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of an integer quantized nonlinear layer.
|
||||
 * The nonlinear operation is a function of the LUT defined through
 * (pLayer->nl_params->data)
|
||||
* @ingroup layers_nl
|
||||
* @param layer the nonlinear (nl) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_nl_integer(ai_layer *pLayer);
|
||||
|
||||
AI_API_DECLARE_END
|
||||
|
||||
#endif /*__LAYERS_NL_H_*/
|
||||
@ -1,150 +0,0 @@
|
||||
/**
|
||||
******************************************************************************
|
||||
* @file layers_norm.h
|
||||
* @author AST Embedded Analytics Research Platform
|
||||
* @date 18-Apr-2018
|
||||
* @brief header file of AI platform normalization layers datatypes
|
||||
******************************************************************************
|
||||
* @attention
|
||||
*
|
||||
* <h2><center>© Copyright (c) 2018 STMicroelectronics.
|
||||
* All rights reserved.</center></h2>
|
||||
*
|
||||
* This software component is licensed by ST under Ultimate Liberty license
|
||||
* SLA0044, the "License"; You may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at:
|
||||
* www.st.com/SLA0044
|
||||
*
|
||||
******************************************************************************
|
||||
*/
|
||||
#ifndef __LAYERS_NORM_H_
|
||||
#define __LAYERS_NORM_H_
|
||||
#pragma once
|
||||
|
||||
#include "layers_common.h"
|
||||
|
||||
/*!
|
||||
* @defgroup layers_norm Normalization Layers Definitions
|
||||
* @brief definition
|
||||
*
|
||||
*/
|
||||
|
||||
AI_API_DECLARE_BEGIN
|
||||
|
||||
/*!
|
||||
* @struct ai_layer_bn
|
||||
* @ingroup layers_norm
|
||||
* @brief Batch normalization (scale with bias) layer
|
||||
*/
|
||||
typedef ai_layer ai_layer_bn;
|
||||
|
||||
/*!
|
||||
* @struct ai_layer_lrn
|
||||
* @ingroup layers_norm
|
||||
* @brief Local Response Normalization layer
|
||||
*
|
||||
 * Divides each element by a scale factor computed over a local window
 * of neighboring channels
 */
|
||||
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_lrn_ {
|
||||
AI_LAYER_COMMON_FIELDS_DECLARE
|
||||
ai_u32 local_size; /*!< size of the normalization window */
|
||||
ai_float k; /*!< bias term */
|
||||
ai_float alpha; /*!< input scale */
|
||||
ai_float beta; /*!< scale exponent */
|
||||
} ai_layer_lrn;
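
/*
 * For reference (illustration only): the usual across-channel LRN
 * formulation these fields map to,
 *
 *   out[c] = in[c] / (k + alpha * sum_{j in window(c)} in[j]^2)^beta
 *
 * where window(c) is the local_size-wide neighborhood of channel c;
 * whether alpha is pre-divided by local_size is an implementation choice.
 */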
|
||||
/*!
|
||||
* @struct ai_layer_norm
|
||||
* @ingroup layers_norm
|
||||
* @brief Lp Normalization layer
|
||||
*
|
||||
* Normalizes the tensor along the 'axis' direction using the Lp norm.
|
||||
* Optionally divides the result by the number of the elements.
|
||||
*/
|
||||
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_norm_ {
|
||||
AI_LAYER_COMMON_FIELDS_DECLARE
|
||||
  ai_shape_type axis;   /*!< normalization axis */
|
||||
ai_float exponent; /*!< normalization exponent p */
|
||||
ai_bool scale; /*!< multiplies by the pth root of the number of elements */
|
||||
} ai_layer_norm;
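
/*
 * For reference (illustration only): the Lp normalization along 'axis'
 * that this struct parameterizes,
 *
 *   out[i] = in[i] / (sum_j |in[j]|^exponent)^(1/exponent)
 *
 * with the sum taken along the normalization axis; when 'scale' is set,
 * the result is additionally multiplied by the pth root of the element
 * count, as noted in the field comment above.
 */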
|
||||
|
||||
/*!
|
||||
* @brief Local response normalization computed on a float array
|
||||
* @ingroup layers_norm
|
||||
* @param out opaque handler to float output channel
|
||||
* @param in opaque handler to float input channel
|
||||
* @param pad amount of padding for the channels
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void func_lrn_array_f32(ai_handle out, const ai_handle in,
|
||||
const ai_size in_size, const ai_size channel_size,
|
||||
const ai_i32 pad, const ai_float k,
|
||||
const ai_float alpha, const ai_float beta);
|
||||
|
||||
/*!
|
||||
* @brief Lp normalization computed on a float array
|
||||
* @ingroup layers_norm
|
||||
* @param out opaque handler to float output channel
|
||||
* @param in opaque handler to float input channel
|
||||
* @param exponent p exponent for the Lp normalization
|
||||
* @param axis_stride stride (in array elements) of the normalization axis
|
||||
* @param axis_size size of the normalization axis
|
||||
* @param outer_size number of tensor slices (including the normalization axis)
|
||||
 * on which to compute the normalization
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void func_norm_array_f32(ai_handle out, const ai_handle in,
|
||||
const ai_float exponent,
|
||||
const ai_float norm,
|
||||
const ai_size axis_stride,
|
||||
const ai_size axis_size,
|
||||
const ai_size outer_size);
|
||||
|
||||
/*!
|
||||
* @brief Fast L2 normalization computed on a float array
|
||||
* @ingroup layers_norm
|
||||
* @param out opaque handler to float output channel
|
||||
* @param in opaque handler to float input channel
|
||||
* @param axis_size size of the normalization axis
|
||||
* @param n_el total number of elements in the tensor
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void func_norm_l2_fast_array_f32(ai_handle out, const ai_handle in,
|
||||
const ai_float norm,
|
||||
const ai_size axis_size,
|
||||
const ai_size outer_size);
|
||||
|
||||
|
||||
/******************************************************************************/
|
||||
/* Forward Functions Section */
|
||||
/******************************************************************************/
|
||||
|
||||
/*!
|
||||
* @brief Computes the activations of a batchnorm (scale + bias) layer.
|
||||
* @ingroup layers_norm
|
||||
* @param layer the batch normalization (bn) layer
|
||||
*/
|
||||
AI_INTERNAL_API
|
||||
void forward_bn(ai_layer* layer);

/*!
 * @brief Computes the activations of a Local Response Normalization Layer.
 * @ingroup layers_norm
 * @param layer the local response normalization (lrn) layer
 */
AI_INTERNAL_API
void forward_lrn(ai_layer* layer);

/*!
 * @brief Computes the activations of a normalization layer.
 * @ingroup layers_norm
 * @param layer the normalization (norm) layer
 */
AI_INTERNAL_API
void forward_norm(ai_layer* layer);


AI_API_DECLARE_END

#endif /*__LAYERS_NORM_H_*/
@ -1,285 +0,0 @@
/**
 ******************************************************************************
 * @file    layers_pool.h
 * @author  AST Embedded Analytics Research Platform
 * @date    18-Apr-2018
 * @brief   header file of AI platform pooling layers datatypes
 ******************************************************************************
 * @attention
 *
 * <h2><center>&copy; Copyright (c) 2018 STMicroelectronics.
 * All rights reserved.</center></h2>
 *
 * This software component is licensed by ST under Ultimate Liberty license
 * SLA0044, the "License"; You may not use this file except in compliance with
 * the License. You may obtain a copy of the License at:
 *                             www.st.com/SLA0044
 *
 ******************************************************************************
 */
#ifndef __LAYERS_POOL_H_
#define __LAYERS_POOL_H_
#pragma once

#include "layers_common.h"

/*!
 * @defgroup layers_pool Pooling Layers Definitions
 * @brief definition
 *
 */

AI_API_DECLARE_BEGIN

/*!
 * @struct ai_layer_pool
 * @ingroup layers_pool
 * @brief Pooling layer
 *
 * The type of pooling function is handled by the specific forward function
 * @ref forward_pool
 */
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_pool_ {
  AI_LAYER_COMMON_FIELDS_DECLARE
  ai_shape_2d pool_size;     /*!< pooling size */
  ai_shape_2d pool_stride;   /*!< pooling stride */
  ai_shape pool_pad;         /*!< pooling pad, y,x border sizes */
  ai_u8 count_include_pad;   /*!< include pad flag */
} ai_layer_pool;
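
/*
 * Illustrative sketch (not part of the original header): the usual relation
 * between the ai_layer_pool fields and the output feature-map size,
 * out = (in + pad_before + pad_after - kernel) / stride + 1. The helper name
 * and the split of pool_pad into before/after amounts are assumptions made
 * for illustration only; the code generator computes these sizes itself.
 */
static ai_u16 pool_out_dim(ai_u16 in_dim, ai_u16 kernel,
                           ai_u16 pad_before, ai_u16 pad_after, ai_u16 stride)
{
  return (ai_u16)((in_dim + pad_before + pad_after - kernel) / stride + 1u);
}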


/*!
 * @typedef (*func_pool)
 * @ingroup layers_pool
 * @brief Function pointer for a generic pooling transform
 * This function pointer abstracts a generic pooling layer;
 * see @ref pool_func_ap_array_f32 as an example.
 */
typedef void (*func_pool)(ai_handle in,
                          const ai_u16 dim_im_in_x, const ai_u16 dim_im_in_y,
                          const ai_u16 ch_im_in,
                          const ai_u16 dim_kernel_x, const ai_u16 dim_kernel_y,
                          const ai_u16 padding_x, const ai_u16 padding_y,
                          const ai_u16 stride_x, const ai_u16 stride_y,
                          const ai_u16 dim_im_out_x, const ai_u16 dim_im_out_y,
                          ai_handle out);

/*!
 * @brief Max Pooling on a float data array
 * @ingroup layers_pool
 * @param in opaque handler to input data to process
 * @param dim_im_in_x input feature map width
 * @param dim_im_in_y input feature map height
 * @param ch_im_in number of input channels
 * @param dim_kernel_x kernel width
 * @param dim_kernel_y kernel height
 * @param padding_x right padding value
 * @param padding_y top padding value
 * @param stride_x stride value on x dimension
 * @param stride_y stride value on y dimension
 * @param dim_im_out_x output feature map width
 * @param dim_im_out_y output feature map height
 * @param out opaque handler to output data
 */
AI_INTERNAL_API
void pool_func_mp_array_f32(ai_handle in,
                            const ai_u16 dim_im_in_x, const ai_u16 dim_im_in_y,
                            const ai_u16 ch_im_in,
                            const ai_u16 dim_kernel_x, const ai_u16 dim_kernel_y,
                            const ai_u16 padding_x, const ai_u16 padding_y,
                            const ai_u16 stride_x, const ai_u16 stride_y,
                            const ai_u16 dim_im_out_x, const ai_u16 dim_im_out_y,
                            ai_handle out);

/*!
 * @brief Max Pooling on a 8/16 bits fixed point data array
 * @ingroup layers_pool
 * @param in opaque handler to input data to process
 * @param dim_im_in_x input feature map width
 * @param dim_im_in_y input feature map height
 * @param ch_im_in number of input channels
 * @param dim_kernel_x kernel width
 * @param dim_kernel_y kernel height
 * @param padding_x right padding value
 * @param padding_y top padding value
 * @param stride_x stride value on x dimension
 * @param stride_y stride value on y dimension
 * @param dim_im_out_x output feature map width
 * @param dim_im_out_y output feature map height
 * @param out opaque handler to output data
 */
AI_INTERNAL_API
void pool_func_mp_array_fixed(ai_handle in,
                              const ai_u16 dim_im_in_x, const ai_u16 dim_im_in_y,
                              const ai_u16 ch_im_in,
                              const ai_u16 dim_kernel_x, const ai_u16 dim_kernel_y,
                              const ai_u16 padding_x, const ai_u16 padding_y,
                              const ai_u16 stride_x, const ai_u16 stride_y,
                              const ai_u16 dim_im_out_x, const ai_u16 dim_im_out_y,
                              ai_handle out);

/*!
 * @brief Max Pooling on a 8-bits integer quantized data array
 * @ingroup layers_pool
 * @param in opaque handler to input data to process
 * @param dim_im_in_x input feature map width
 * @param dim_im_in_y input feature map height
 * @param ch_im_in number of input channels
 * @param dim_kernel_x kernel width
 * @param dim_kernel_y kernel height
 * @param padding_x right padding value
 * @param padding_y top padding value
 * @param stride_x stride value on x dimension
 * @param stride_y stride value on y dimension
 * @param dim_im_out_x output feature map width
 * @param dim_im_out_y output feature map height
 * @param out opaque handler to output data
 */
AI_INTERNAL_API
void pool_func_mp_array_integer(ai_handle in,
                                const ai_u16 dim_im_in_x, const ai_u16 dim_im_in_y,
                                const ai_u16 ch_im_in,
                                const ai_u16 dim_kernel_x, const ai_u16 dim_kernel_y,
                                const ai_u16 padding_x, const ai_u16 padding_y,
                                const ai_u16 stride_x, const ai_u16 stride_y,
                                const ai_u16 dim_im_out_x, const ai_u16 dim_im_out_y,
                                ai_handle out);

/*!
 * @brief Average Pooling on a float data array
 * @ingroup layers_pool
 * @param in opaque handler to input data to process
 * @param dim_im_in_x input feature map width
 * @param dim_im_in_y input feature map height
 * @param ch_im_in number of input channels
 * @param dim_kernel_x kernel width
 * @param dim_kernel_y kernel height
 * @param padding_x right padding value
 * @param padding_y top padding value
 * @param stride_x stride value on x dimension
 * @param stride_y stride value on y dimension
 * @param dim_im_out_x output feature map width
 * @param dim_im_out_y output feature map height
 * @param out opaque handler to output data
 */
AI_INTERNAL_API
void pool_func_ap_array_f32(ai_handle in,
                            const ai_u16 dim_im_in_x, const ai_u16 dim_im_in_y,
                            const ai_u16 ch_im_in,
                            const ai_u16 dim_kernel_x, const ai_u16 dim_kernel_y,
                            const ai_u16 padding_x, const ai_u16 padding_y,
                            const ai_u16 stride_x, const ai_u16 stride_y,
                            const ai_u16 dim_im_out_x, const ai_u16 dim_im_out_y,
                            ai_handle out);
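
/*
 * Illustrative sketch (not part of the original header): dispatching through
 * the func_pool pointer declared above, selecting max or average pooling at
 * run time. The function name, the variable names and the 4x4 -> 2x2 example
 * sizes are hypothetical.
 */
static void pool_dispatch_example(ai_handle in, ai_handle out, ai_bool use_max)
{
  func_pool pool = use_max ? pool_func_mp_array_f32 : pool_func_ap_array_f32;
  /* 4x4 input, 1 channel, 2x2 kernel, no padding, stride 2 -> 2x2 output */
  pool(in, 4, 4, 1, 2, 2, 0, 0, 2, 2, 2, 2, out);
}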

/*!
 * @brief Average Pooling on a 8/16 bits fixed point data array
 * @ingroup layers_pool
 * @param in opaque handler to input data to process
 * @param dim_im_in_x input feature map width
 * @param dim_im_in_y input feature map height
 * @param ch_im_in number of input channels
 * @param dim_kernel_x kernel width
 * @param dim_kernel_y kernel height
 * @param padding_x right padding value
 * @param padding_y top padding value
 * @param stride_x stride value on x dimension
 * @param stride_y stride value on y dimension
 * @param dim_im_out_x output feature map width
 * @param dim_im_out_y output feature map height
 * @param out opaque handler to output data
 */
AI_INTERNAL_API
void pool_func_ap_array_fixed(ai_handle in,
                              const ai_u16 dim_im_in_x, const ai_u16 dim_im_in_y,
                              const ai_u16 ch_im_in,
                              const ai_u16 dim_kernel_x, const ai_u16 dim_kernel_y,
                              const ai_u16 padding_x, const ai_u16 padding_y,
                              const ai_u16 stride_x, const ai_u16 stride_y,
                              const ai_u16 dim_im_out_x, const ai_u16 dim_im_out_y,
                              ai_handle out);

/*!
 * @brief Average Pooling on a 8-bits integer quantized data array
 * @ingroup layers_pool
 * @param in opaque handler to input data to process
 * @param dim_im_in_x input feature map width
 * @param dim_im_in_y input feature map height
 * @param ch_im_in number of input channels
 * @param dim_kernel_x kernel width
 * @param dim_kernel_y kernel height
 * @param padding_x right padding value
 * @param padding_y top padding value
 * @param stride_x stride value on x dimension
 * @param stride_y stride value on y dimension
 * @param dim_im_out_x output feature map width
 * @param dim_im_out_y output feature map height
 * @param out opaque handler to output data
 */
AI_INTERNAL_API
void pool_func_ap_array_integer(ai_handle in,
                                const ai_u16 dim_im_in_x, const ai_u16 dim_im_in_y,
                                const ai_u16 ch_im_in,
                                const ai_u16 dim_kernel_x, const ai_u16 dim_kernel_y,
                                const ai_u16 padding_x, const ai_u16 padding_y,
                                const ai_u16 stride_x, const ai_u16 stride_y,
                                const ai_u16 dim_im_out_x, const ai_u16 dim_im_out_y,
                                ai_handle out);


/******************************************************************************/
/* Forward Functions Section */
/******************************************************************************/

/*!
 * @brief Computes the activations of a max pooling layer.
 * @ingroup layers_pool
 * @param layer the pooling (pool) layer
 */
AI_INTERNAL_API
void forward_mp(ai_layer* layer);

/*!
 * @brief Computes the activations of a fixed point max pooling layer.
 * @ingroup layers_pool
 * @param layer the pooling (pool) layer
 */
AI_INTERNAL_API
void forward_mp_fixed(ai_layer *pLayer);

/*!
 * @brief Computes the activations of an integer-quantized max pooling layer.
 * @ingroup layers_pool
 * @param layer the pooling (pool) layer
 */
AI_INTERNAL_API
void forward_mp_integer(ai_layer *pLayer);

/*!
 * @brief Computes the activations of an average pooling layer.
 * @ingroup layers_pool
 * @param layer the pooling (pool) layer
 */
AI_INTERNAL_API
void forward_ap(ai_layer* layer);

/*!
 * @brief Computes the activations of a fixed point average pooling layer.
 * @ingroup layers_pool
 * @param layer the pooling (pool) layer
 */
AI_INTERNAL_API
void forward_ap_fixed(ai_layer *pLayer);

/*!
 * @brief Computes the activations of an integer-quantized average pooling layer.
 * @ingroup layers_pool
 * @param layer the pooling (pool) layer
 */
AI_INTERNAL_API
void forward_ap_integer(ai_layer *pLayer);


AI_API_DECLARE_END

#endif /*__LAYERS_POOL_H_*/
@ -1,110 +0,0 @@
/**
 ******************************************************************************
 * @file    layers_rnn.h
 * @author  AST Embedded Analytics Research Platform
 * @date    18-May-2018
 * @brief   header file of RNN layers
 ******************************************************************************
 * @attention
 *
 * <h2><center>&copy; Copyright (c) 2018 STMicroelectronics.
 * All rights reserved.</center></h2>
 *
 * This software component is licensed by ST under Ultimate Liberty license
 * SLA0044, the "License"; You may not use this file except in compliance with
 * the License. You may obtain a copy of the License at:
 *                             www.st.com/SLA0044
 *
 ******************************************************************************
 */
#ifndef __LAYERS_RNN_H_
#define __LAYERS_RNN_H_
#pragma once

#include "layers_common.h"

AI_API_DECLARE_BEGIN

/*!
 * @struct ai_layer_lstm
 * @ingroup layers
 * @brief LSTM layer with generic nonlinearities and peephole connections
 */
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_lstm_ {
  AI_LAYER_COMMON_FIELDS_DECLARE
  ai_size n_units;            /**< size of the hidden RNN state */
  func_nl_el activation_nl;   /**< activation nonlinearity (input to cell) */
  func_nl_el recurrent_nl;    /**< recurrent nonlinearity (hidden to cell) */
  func_nl_el out_nl;          /**< output nonlinearity (cell to hidden) */
  ai_bool go_backwards;       /**< process reversed input */
  ai_bool reverse_seq;        /**< reverse output sequence */
} ai_layer_lstm;


/*!
 * @struct ai_layer_gru
 * @ingroup layers
 * @brief Gated Recurrent Unit (GRU) layer with generic nonlinearities
 */
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_gru_ {
  AI_LAYER_COMMON_FIELDS_DECLARE
  ai_size n_units;            /**< size of the hidden RNN state */
  func_nl_el activation_nl;   /**< activation nonlinearity (input to cell) */
  func_nl_el recurrent_nl;    /**< recurrent nonlinearity (hidden to cell) */
  ai_bool reset_after;        /**< apply the reset gate after the recurrent matrix multiplication */
  ai_bool go_backwards;       /**< process reversed input */
  ai_bool reverse_seq;        /**< reverse output sequence */
} ai_layer_gru;

/*!
 * @brief Computes the activations of a Long-Short Term Memory (LSTM) layer.
 * @ingroup layers
 *
 * Implements a Long-Short Term Memory layer with peephole connections:
 * \f{eqnarray*}{
 * i_t &=& \sigma_a(x_t W_{xi} + h_{t-1} W_{hi}
 *         + w_{ci} \odot c_{t-1} + b_i)\\
 * f_t &=& \sigma_a(x_t W_{xf} + h_{t-1} W_{hf}
 *         + w_{cf} \odot c_{t-1} + b_f)\\
 * c_t &=& f_t \odot c_{t - 1}
 *       + i_t \odot \sigma_r(x_t W_{xc} + h_{t-1} W_{hc} + b_c)\\
 * o_t &=& \sigma_a(x_t W_{xo} + h_{t-1} W_{ho} + w_{co} \odot c_t + b_o)\\
 * h_t &=& o_t \odot \sigma_o(c_t)
 * \f}
 * where \f$\sigma_a\f$ is the activation nonlinearity, \f$\sigma_r\f$ is the
 * recurrent nonlinearity and \f$\sigma_o\f$ is the out nonlinearity. The
 * \f$W_x\f$, \f$W_h\f$ and \f$W_c\f$ weights are sliced from the kernel,
 * recurrent and peephole weights.
 *
 * @param layer the LSTM layer
 */
AI_INTERNAL_API
void forward_lstm(ai_layer * layer);
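
/*
 * Illustrative sketch (not part of the original header): one time step of the
 * peephole LSTM equations documented above, written as plain C for clarity.
 * Layout assumptions (hypothetical): x has n_in elements, h/c/gates have
 * n_units elements, W_x* are n_in x n_units row-major, W_h* are
 * n_units x n_units row-major, w_c* and biases have n_units elements, and
 * sigma_a / sigma_r / sigma_o are element-wise nonlinearities. All names are
 * illustration-only and unrelated to the generated network code.
 */
#include <stddef.h>

typedef float (*nl_fn)(float);

static void matvec_acc(float* acc, const float* x, const float* W,
                       size_t rows, size_t cols)
{
  /* acc[j] += sum_i x[i] * W[i][j], with W stored row-major, rows x cols */
  for (size_t i = 0; i < rows; ++i)
    for (size_t j = 0; j < cols; ++j)
      acc[j] += x[i] * W[i * cols + j];
}

static void lstm_step_reference(
  float* h, float* c,                       /* state, n_units each, updated in place */
  const float* x, size_t n_in, size_t n_units,
  const float* W_xi, const float* W_hi, const float* w_ci, const float* b_i,
  const float* W_xf, const float* W_hf, const float* w_cf, const float* b_f,
  const float* W_xc, const float* W_hc, const float* b_c,
  const float* W_xo, const float* W_ho, const float* w_co, const float* b_o,
  nl_fn sigma_a, nl_fn sigma_r, nl_fn sigma_o,
  float* i_g, float* f_g, float* g_g, float* o_g /* scratch, n_units each */)
{
  /* pre-activations of input gate, forget gate and candidate (peepholes use c_{t-1}) */
  for (size_t j = 0; j < n_units; ++j) {
    i_g[j] = b_i[j] + w_ci[j] * c[j];
    f_g[j] = b_f[j] + w_cf[j] * c[j];
    g_g[j] = b_c[j];
  }
  matvec_acc(i_g, x, W_xi, n_in, n_units);  matvec_acc(i_g, h, W_hi, n_units, n_units);
  matvec_acc(f_g, x, W_xf, n_in, n_units);  matvec_acc(f_g, h, W_hf, n_units, n_units);
  matvec_acc(g_g, x, W_xc, n_in, n_units);  matvec_acc(g_g, h, W_hc, n_units, n_units);
  for (size_t j = 0; j < n_units; ++j) {
    i_g[j] = sigma_a(i_g[j]);
    f_g[j] = sigma_a(f_g[j]);
    c[j]   = f_g[j] * c[j] + i_g[j] * sigma_r(g_g[j]);   /* c_t */
  }
  /* output gate uses the updated cell state c_t for its peephole */
  for (size_t j = 0; j < n_units; ++j)
    o_g[j] = b_o[j] + w_co[j] * c[j];
  matvec_acc(o_g, x, W_xo, n_in, n_units);  matvec_acc(o_g, h, W_ho, n_units, n_units);
  for (size_t j = 0; j < n_units; ++j)
    h[j] = sigma_a(o_g[j]) * sigma_o(c[j]);              /* h_t */
}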

/*!
 * @brief Computes the activations of a Gated Recurrent Unit (GRU) layer.
 * @ingroup layers
 *
 * Implements a Gated Recurrent Unit with the formula:
 * \f{eqnarray*}{
 * r_t &=& \sigma_a(x_t W_{xr} + h_{t - 1} W_{hr} + b_r) \\
 * z_t &=& \sigma_a(x_t W_{xz} + h_{t - 1} W_{hz} + b_z) \\
 * c_t &=& \sigma_r(x_t W_{xc} + r_t \odot (h_{t - 1} W_{hc} + b_{hc}) + b_c)
 *       \qquad \textnormal{when reset after is true} \\
 * c_t &=& \sigma_r(x_t W_{xc} + (r_t \odot h_{t - 1}) W_{hc} + b_{hc} + b_c)
 *       \qquad \textnormal{when reset after is false (default)} \\
 * h_t &=& (1 - z_t) \odot h_{t - 1} + z_t \odot c_t
 * \f}
 * where \f$\sigma_a\f$ is the activation nonlinearity and \f$\sigma_r\f$ is
 * the recurrent nonlinearity. The weights are sliced from the kernel and
 * recurrent weights.
 *
 * @param layer the GRU layer
 */
AI_INTERNAL_API
void forward_gru(ai_layer * layer);
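
/*
 * Illustrative sketch (not part of the original header): one time step of the
 * GRU equations documented above, "reset after" variant, in plain C. Layout
 * assumptions (hypothetical): x has n_in elements, h and the gates have
 * n_units elements, W_x* are n_in x n_units row-major, W_h* are
 * n_units x n_units row-major; matvec_acc and nl_fn are the helpers from the
 * LSTM sketch above. All names are illustration-only.
 */
static void gru_step_reset_after_reference(
  float* h,                                  /* state, n_units, updated in place */
  const float* x, size_t n_in, size_t n_units,
  const float* W_xr, const float* W_hr, const float* b_r,
  const float* W_xz, const float* W_hz, const float* b_z,
  const float* W_xc, const float* W_hc, const float* b_hc, const float* b_c,
  nl_fn sigma_a, nl_fn sigma_r,
  float* r_g, float* z_g, float* c_g, float* hh /* scratch, n_units each */)
{
  for (size_t j = 0; j < n_units; ++j) {
    r_g[j] = b_r[j];
    z_g[j] = b_z[j];
    hh[j]  = b_hc[j];
    c_g[j] = b_c[j];
  }
  matvec_acc(r_g, x, W_xr, n_in, n_units);  matvec_acc(r_g, h, W_hr, n_units, n_units);
  matvec_acc(z_g, x, W_xz, n_in, n_units);  matvec_acc(z_g, h, W_hz, n_units, n_units);
  matvec_acc(hh, h, W_hc, n_units, n_units);   /* h_{t-1} W_hc + b_hc */
  matvec_acc(c_g, x, W_xc, n_in, n_units);     /* x_t W_xc + b_c */
  for (size_t j = 0; j < n_units; ++j) {
    const float r = sigma_a(r_g[j]);
    const float z = sigma_a(z_g[j]);
    const float c = sigma_r(c_g[j] + r * hh[j]);          /* c_t, reset after */
    h[j] = (1.0f - z) * h[j] + z * c;                     /* h_t */
  }
}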


AI_API_DECLARE_END

#endif /* __LAYERS_RNN_H_ */
@ -1,80 +0,0 @@
/**
 ******************************************************************************
 * @file    layers_sm.h
 * @author  AST Embedded Analytics Research Platform
 * @date    18-Apr-2018
 * @brief   header file of AI platform softmax layer datatype
 ******************************************************************************
 * @attention
 *
 * <h2><center>&copy; COPYRIGHT(c) 2018 STMicroelectronics</center></h2>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *   1. Redistributions of source code must retain the above copyright notice,
 *      this list of conditions and the following disclaimer.
 *   2. Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in the
 *      documentation and/or other materials provided with the distribution.
 *   3. Neither the name of STMicroelectronics nor the names of its
 *      contributors may be used to endorse or promote products derived from
 *      this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 ******************************************************************************
 */

#ifndef __LAYERS_SM_H_
#define __LAYERS_SM_H_
#pragma once

#include "layers_common.h"

/*!
 * @defgroup layers_sm SoftMax Layer Definitions
 * @brief definition
 *
 */

AI_API_DECLARE_BEGIN

/*!
 * @brief Softmax normalization computed on an array of fixed point channels
 * @ingroup layers_sm
 * @param out opaque handler to output channel array
 * @param in opaque handler to input channel array
 * @param in_size total size (number of elements) to process on the input
 * @param channel_size number of elements of the input channel
 * @param in_channel_step number of elements to move to the next input element
 * @param out_channel_step number of elements to move to the next output element
 */
AI_INTERNAL_API
void sm_func_sm_array_fixed(ai_handle out, const ai_handle in,
                            const ai_size in_size,
                            const ai_size channel_size,
                            const ai_size in_channel_step,
                            const ai_size out_channel_step);

/*!
 * @brief Computes the activations of a fixed point softmax nonlinear layer.
 * @ingroup layers_sm
 * @param layer the softmax (sm) layer
 */
AI_INTERNAL_API
void forward_sm_fixed(ai_layer *pLayer);

AI_API_DECLARE_END

#endif /*__LAYERS_SM_H_*/

Binary file not shown.
@ -14,7 +14,7 @@ Starting from a trained network model, such as a *.h5 saved model* from Keras, C

> Note: It is recommended to use Linux to build the firmware. While it is still possible to do it on Windows, you may run into some issues, for example because of the symbolic links in the project. If you're on Windows, you can use [Windows Subsystem for Linux (WSL)](https://docs.microsoft.com/en-us/windows/wsl/install-win10), which provides a full Linux environment and makes the compilation painless.

- stm32ai command line to generate the optimized code. **Version 4.1.0** [Download from ST website]( https://www.st.com/en/embedded-software/x-cube-ai.html)
- stm32ai command line to generate the optimized code. [Download the latest version from ST website]( https://www.st.com/en/embedded-software/x-cube-ai.html)

If you have the extension already enabled in Cube.MX, just add
`<home>/STM32Cube/Repository/Packs/STMicroelectronics/X-CUBE-AI/<version>/Utilities/<operating-system>/`
@ -42,6 +42,31 @@ To start the shell, double-click `C:\MINGW\msys\1.0\msys.bat`
> Every subsequent command shown in this document should be run in the MinGW shell. Note that in MinGW the `C:\` directory is located at `/c/`
> **Important note for Windows users:** It is recommended to compile directly in the `C:\` directory, as the compilation might fail if the path to the object files is too long (`CreateProcess` has a limit of 32k characters). Moreover, be extra careful with the symbolic links present in the project.

## Step 0 - Install the necessary files

You need to install the headers and library from Cube.AI into the project.

Inside the stm32cubeai directory, run:

```bash
mkdir -p AI/{Inc,Lib}
mkdir data
```

Then copy (or symlink) the files from Cube.AI to the AI directory:

```bash
# If X-CUBE-AI has been installed from STM32Cube
cp ~/STM32Cube/Repository/Packs/STMicroelectronics/X-CUBE-AI/<version>/Middlewares/ST/AI/Inc/* ./AI/Inc/
cp ~/STM32Cube/Repository/Packs/STMicroelectronics/X-CUBE-AI/<version>/Middlewares/ST/AI/lib/ABI2.1/STM32H7/NetworkRuntime410_CM7_IAR.a ./AI/Lib/NetworkRuntime_CM7_GCC.a

# If X-CUBE-AI has been downloaded from ST website
cp <cube-ai-path>/Middlewares/ST/AI/Inc/* ./AI/Inc/
cp <cube-ai-path>/Middlewares/ST/AI/lib/ABI2.1/STM32H7/NetworkRuntime410_CM7_IAR.a ./AI/Lib/NetworkRuntime_CM7_GCC.a
```

> Note: On Windows, STM32Cube is usually installed in `C:\Users\name\`, on Linux it is in the `home` directory

## Step 1 - Generate the code for the network

### Code generation

@ -24,4 +24,4 @@ FIRM_OBJ += $(addprefix $(BUILD)/stm32cubeai/,\
py_st_nn.o \
)

LIBS += -l:NetworkRuntime410_CM7_GCC.a -Lstm32cubeai/AI/Lib -lc -lm
LIBS += -l:NetworkRuntime_CM7_GCC.a -Lstm32cubeai/AI/Lib -lc -lm