Merge pull request #2681 from openmv/add_stedge_tools

tools: Add STEdge AI tools download.
Commit 0d0d5cfac0 by Ibrahim Abdelkader, 2025-05-03 21:53:44 +03:00, committed via GitHub.
10 changed files with 406 additions and 207 deletions


@@ -68,13 +68,14 @@ jobs:
~/cache/gcc
~/cache/llvm
~/cache/make
key: 'toolchain_gcc-13.2.rel1_llvm-18.1.3_make_4.4.1'
~/cache/stedgeai
key: 'gcc-13.2.rel1_llvm-18.1.3_make-4.4.1_stedgeai-2.1'
- name: '🐍 Install Python'
uses: actions/setup-python@v5
with:
cache: 'pip'
python-version: "3.12.0"
python-version: "3.12.4"
- name: '🛠 Install dependencies'
run: |
@@ -95,6 +96,10 @@ jobs:
if: steps.cache.outputs.cache-hit != 'true'
run: source tools/ci.sh && ci_install_arm_llvm
- name: '🛠 Install STEdge AI tools '
if: steps.cache.outputs.cache-hit != 'true'
run: source tools/ci.sh && ci_install_stedgeai ${HOME}/cache/stedgeai
- name: '🏗 Build firmware'
run: source tools/ci.sh && ci_build_target ${{ matrix.target }}
@@ -124,7 +129,8 @@ jobs:
~/cache/gcc
~/cache/llvm
~/cache/make
key: 'toolchain_gcc-13.2.rel1_llvm-18.1.3_make_4.4.1'
~/cache/stedgeai
key: 'gcc-13.2.rel1_llvm-18.1.3_make-4.4.1_stedgeai-2.1'
- name: '🐍 Install Python'
uses: actions/setup-python@v5
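For a local build without the CI cache, the same installers the workflow calls can be sourced from tools/ci.sh. The sketch below mirrors the workflow steps above; the cache path and target placeholder are illustrative, not values mandated by this commit.

    # Mirror the CI toolchain setup locally (sketch; adjust paths/targets as needed).
    source tools/ci.sh
    ci_install_arm_gcc && ci_install_arm_llvm && ci_install_gnu_make
    ci_install_stedgeai "${HOME}/cache/stedgeai"
    ci_build_target ${TARGET}   # e.g. one of the matrix targets built by the workflow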


@@ -3,3 +3,5 @@ pytest==7.4.0
ethos-u-vela==4.2.0
tabulate==0.9.0
cryptography==44.0.1
pyelftools==0.27
colorama==0.4.6

.gitignore vendored

@@ -36,3 +36,4 @@ tools/alif/**/*.ds
tools/alif/**/*.bin
tools/alif/**/*.bin.sign
tools/alif/build/*-map.txt
tools/st/stedgeai


@@ -5,12 +5,6 @@
GCC_TOOLCHAIN_PATH=${HOME}/cache/gcc
GCC_TOOLCHAIN_URL="https://developer.arm.com/-/media/Files/downloads/gnu/13.2.rel1/binrel/arm-gnu-toolchain-13.2.rel1-x86_64-arm-none-eabi.tar.xz"
LLVM_TOOLCHAIN_PATH=${HOME}/cache/llvm
LLVM_TOOLCHAIN_URL="https://github.com/ARM-software/LLVM-embedded-toolchain-for-Arm/releases/download/release-18.1.3/LLVM-ET-Arm-18.1.3-Linux-x86_64.tar.xz"
GNU_MAKE_PATH=${HOME}/cache/make
GNU_MAKE_URL="https://ftp.gnu.org/gnu/make/make-4.4.1.tar.gz"
ci_install_arm_gcc() {
mkdir -p ${GCC_TOOLCHAIN_PATH}
wget --no-check-certificate -O - ${GCC_TOOLCHAIN_URL} | tar --strip-components=1 -Jx -C ${GCC_TOOLCHAIN_PATH}
@@ -18,6 +12,11 @@ ci_install_arm_gcc() {
arm-none-eabi-gcc --version
}
########################################################################################
# Install ARM LLVM.
LLVM_TOOLCHAIN_PATH=${HOME}/cache/llvm
LLVM_TOOLCHAIN_URL="https://github.com/ARM-software/LLVM-embedded-toolchain-for-Arm/releases/download/release-18.1.3/LLVM-ET-Arm-18.1.3-Linux-x86_64.tar.xz"
ci_install_arm_llvm() {
mkdir -p ${LLVM_TOOLCHAIN_PATH}
wget --no-check-certificate -O - ${LLVM_TOOLCHAIN_URL} | tar --strip-components=1 -Jx -C ${LLVM_TOOLCHAIN_PATH}
@@ -25,6 +24,11 @@ ci_install_arm_llvm() {
clang --version
}
########################################################################################
# Install GNU Make.
GNU_MAKE_PATH=${HOME}/cache/make
GNU_MAKE_URL="https://ftp.gnu.org/gnu/make/make-4.4.1.tar.gz"
ci_install_gnu_make() {
mkdir -p ${GNU_MAKE_PATH}
wget --no-check-certificate -O - ${GNU_MAKE_URL} | tar --strip-components=1 -zx -C ${GNU_MAKE_PATH}
@@ -35,7 +39,6 @@ ci_install_gnu_make() {
########################################################################################
# Update Submodules.
ci_update_submodules() {
git submodule update --init --depth=1 --no-single-branch
git -C lib/micropython/ submodule update --init --depth=1
@@ -43,7 +46,6 @@ ci_update_submodules() {
########################################################################################
# Build Targets.
ci_build_target() {
export LLVM_PATH=${LLVM_TOOLCHAIN_PATH}/bin
export PATH=${GNU_MAKE_PATH}:${GCC_TOOLCHAIN_PATH}/bin:${PATH}
@@ -59,7 +61,6 @@ ci_build_target() {
########################################################################################
# Prepare Firmware Packages.
ci_package_firmware_release() {
# Add WiFi firmware blobs
cp -rf drivers/cyw4343/firmware firmware/CYW4343
@@ -93,7 +94,6 @@ ci_install_code_format_deps() {
########################################################################################
# Run code formatter
ci_run_code_format_check() {
export PATH=${CODEFORMAT_PATH}/bin:${PATH}
UNCRUSTIFY_CONFIG=tools/uncrustify.cfg
@@ -110,3 +110,52 @@ ci_run_code_format_check() {
done
exit $exit_code
}
########################################################################################
# Install STEdgeAI tools
STEDGEAI_URL="https://upload.openmv.io/stedgeai/STEdgeAI-2.1.0.tar.gz"
STEDGEAI_SHA256="888e71715127ff6384e38fcde96eea28f53f8370b2bb9cf0d2f6f939001b350c"
STEDGEAI_CACHE="${HOME}/cache/stedgeai"
ci_install_stedgeai() {
STEDGEAI_PATH="${1}"
# If cached in CI, copy from cache to build.
if [ -d "${STEDGEAI_CACHE}" ]; then
mkdir -p "${STEDGEAI_PATH}"
cp -r "${STEDGEAI_CACHE}/." "${STEDGEAI_PATH}"
touch "${STEDGEAI_PATH}/stedgeai.stamp"
return 0
fi
# Download and install to STEDGEAI_PATH
echo "Downloading STEdge AI tools..."
mkdir -p "${STEDGEAI_PATH}"
# Create temporary file
tmpfile=$(mktemp)
trap 'rm -f "$tmpfile"' EXIT
# Download and verify checksum
wget --no-check-certificate -O "$tmpfile" "$STEDGEAI_URL" || {
echo "Download failed!"
return 1
}
echo "${STEDGEAI_SHA256} ${tmpfile}" | sha256sum -c - || {
echo "Checksum failed!"
return 1
}
# Extract the tools
echo "Extracting to ${STEDGEAI_PATH}..."
tar -xzf "$tmpfile" -C "${STEDGEAI_PATH}" --strip-components=1 || {
echo "Extraction failed!"
return 1
}
touch "${STEDGEAI_PATH}/stedgeai.stamp"
echo "STEdgeAI installed successfully to ${STEDGEAI_PATH}"
return 0
}
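For reference, the workflow invokes this helper with the cache path shown in the build steps above; running the same line locally should reproduce the download, checksum verification, and extraction, assuming wget and sha256sum are available on the host.

    # Install the STEdge AI tools into the same path the CI cache uses.
    source tools/ci.sh && ci_install_stedgeai "${HOME}/cache/stedgeai"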


@@ -13,7 +13,8 @@ import os
import json
import argparse
import struct
from tflite2c import vela_compile
from modelc import vela_compile
from modelc import stedge_compile
from haar2c import cascade_binary_universal
CG = '\033[92m'
@@ -89,6 +90,10 @@ def romfs_build(romfs_cfg, p, args):
vela_args = args.vela_args + " --optimise " + entry["optimize"]
vela_compile(file_path, args.build_dir, vela_args.split())
file_path = os.path.join(args.build_dir, file_name + ".tflite")
if args.stedge_args:
# Compile the model using the STEdge AI tools.
stedge_compile(file_path, args.build_dir, entry["profile"], args.stedge_args.split())
file_path = os.path.join(args.build_dir, file_name + ".tflite")
# If tflite has a labels file add it as a new entry
labels_path = os.path.splitext(_file_path)[0] + ".txt"
if os.path.exists(labels_path):
@@ -146,6 +151,7 @@ def main():
parser.add_argument('--out-dir', action = 'store', help='Output directory', required=True)
parser.add_argument('--build-dir', action = 'store', help='Build directory', required=True)
parser.add_argument('--vela-args', action = 'store', help='Vela compiler args', default='')
parser.add_argument('--stedge-args', action = 'store', help='STEdgeAI compiler args', default='')
parser.add_argument('--partition', action = 'store', help = 'romfs partition to build. Default=all.', default=None)
# Parse arguments
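For a single model, the same STEdge AI conversion the ROMFS build performs per entry can be reproduced with the standalone converter; the model path, build directory, profile, and target flag below are illustrative values taken from the usage comments in tools/modelc.py.

    # Standalone STEdge AI conversion (sketch; paths and target are examples).
    python tools/modelc.py \
        --input lib/models/fomo_face_detection.tflite \
        --build-dir /tmp/build_st \
        --stedge-profile default \
        --stedge-args "--target stm32n6"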

tools/modelc.py Executable file

@@ -0,0 +1,147 @@
#!/usr/bin/env python3
# This file is part of the OpenMV project.
#
# Copyright (C) 2025 OpenMV, LLC.
#
# This work is licensed under the MIT license, see the file LICENSE for details.
#
# AI models converter.
import sys
import os
import csv
import glob
import argparse
import binascii
import subprocess
import re
C_GREEN = '\033[92m'
C_RED = '\033[91m'
C_BLUE = '\033[94m'
C_RESET = '\033[0m'
def vela_compile(model_path, build_dir, vela_args):
vela_ini = os.path.dirname(os.path.abspath(__file__))
model = os.path.basename(os.path.splitext(model_path)[0])
# Construct the command
command = [
'vela',
*vela_args,
'--output-dir', build_dir,
'--config', f'{vela_ini}/vela.ini',
model_path
]
# Call the command and capture the output
try:
result = subprocess.run(command, check=True, text=True, capture_output=True)
except subprocess.CalledProcessError as e:
print(e.stderr, file=sys.stderr)
print(vela_args, file=sys.stderr)
csv_file_path = glob.glob(f"{build_dir}/{model}_summary_*.csv")[0]
with open(csv_file_path, mode='r') as file:
row = next(csv.DictReader(file))
stoi = lambda x, d=1: str(int(float(x) / d))
color = lambda c,x: c + x + C_RESET
summary = {
C_BLUE + "Network:": C_BLUE + row["network"],
C_BLUE + "Accelerator Configuration:": C_GREEN + row["accelerator_configuration"],
C_BLUE + "System Configuration:": C_BLUE + row["system_config"],
C_BLUE + "Memory Mode:": C_BLUE + row["memory_mode"],
C_BLUE + "Compiler Mode: ": C_RED + vela_args[-1],
C_BLUE + "Accelerator Clock:": C_BLUE + stoi(row["core_clock"], 10**6) + " MHz",
C_BLUE + "SRAM Usage:": C_RED + stoi(row["sram_memory_used"]) + " KiB",
C_BLUE + "Flash Usage:": C_RED + stoi(row["off_chip_flash_memory_used"]) + " KiB",
C_BLUE + "Inference Time:": C_GREEN + "%.2f ms, %.2f inferences/s"%
(float(row["inference_time"]) * 1000, float(row["inferences_per_second"])),
}
for key, value in summary.items():
print(f"{key:<{35}} {value:<{50}}", file=sys.stderr)
print(C_RESET, file=sys.stderr)
os.rename(f"{build_dir}/{model}_vela.tflite", f"{build_dir}/{model}.tflite")
def stedge_compile(model_path, build_dir, profile, stedge_args=None):
core_dir = os.path.realpath("tools/st/stedgeai/2.1")
config = os.path.realpath("tools/st/scripts/neuralart.json")
model_name = os.path.basename(os.path.splitext(model_path)[0])
model_ext = os.path.splitext(model_path)[1]
output_dir = os.path.join(build_dir, model_name)
# Remove any Make-related variables that could leak
env = os.environ.copy()
env["STEDGEAI_CORE_DIR"] = core_dir
for var in ["RM", "CFLAGS", "CPPFLAGS", "CXXFLAGS", "LDFLAGS", 'MAKEFLAGS']:
env.pop(var, None)
print(f"{C_GREEN}Creating relocatable binary model {model_name}{C_RESET}")
# Step 1: stedgeai generate
generate_command = [
os.path.join(core_dir, "Utilities/linux/stedgeai"),
"generate",
*stedge_args,
"--model", model_path,
"--relocatable",
"--st-neural-art", f"{profile}@{config}",
"--no-workspace",
"--output", os.path.join(output_dir, "gen"),
"--verbosity", "1",
]
try:
result = subprocess.run(generate_command, check=True, text=True, env=env,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except subprocess.CalledProcessError as e:
print(f"stedgeai command failed with exit code {e.returncode}", file=sys.stderr)
print(" ".join(generate_command), file=sys.stderr)
raise(e)
# Step 2: Python relocation script
reloc_command = [
sys.executable, # Uses current Python interpreter
os.path.join(core_dir, "scripts/N6_reloc/npu_driver.py"),
"--input", os.path.join(output_dir, "gen", "network.c"),
"--output", output_dir,
"--verbosity", "1",
]
try:
result = subprocess.run(reloc_command, check=True, text=True, env=env,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except subprocess.CalledProcessError as e:
print(f"Relocation script failed with exit code {e.returncode}", file=sys.stderr)
print(" ".join(reloc_command), file=sys.stderr)
raise(e)
match = re.search(
r"([ \t]+XIP size.*?Table: mempool.*?\n)", result.stdout, re.DOTALL | re.MULTILINE
)
if match:
print(C_BLUE + match.group(1).rstrip() + C_RESET + "\n")
os.rename(f"{output_dir}/network_rel.bin", f"{build_dir}/{model_name}{model_ext}")
if __name__ == '__main__':
# python tools/modelc.py --input lib/models/fomo_face_detection.tflite --build-dir /tmp/build_st --stedge-args "--target stm32n6"
# python tools/modelc.py --input lib/models/fomo_face_detection.tflite --build-dir /tmp/build --vela-args "--system-config RTSS_HP_DTCM_MRAM --accelerator-config ethos-u55-256 --memory-mode Shared_Sram"
parser = argparse.ArgumentParser(description='AI models converter.')
parser.add_argument('--input', action = 'store', help = 'Input model.', required=True)
parser.add_argument('--build-dir', action = 'store', help='Build directory.', default='build')
parser.add_argument('--vela-args', action = 'store', help='Vela compiler args.')
parser.add_argument('--stedge-args', action = 'store', help='STEdge AI tools args.')
parser.add_argument('--stedge-profile', action = 'store', help='STEdge AI tools profile.', default="default")
args = parser.parse_args()
if args.vela_args:
# Compile the model using Vela.
vela_compile(args.input, args.build_dir, args.vela_args.split())
elif args.stedge_args:
# Compile the model using STEdge AI tools.
stedge_compile(args.input, args.build_dir, args.stedge_profile, args.stedge_args.split())
else:
parser.print_help(sys.stderr)
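Under the hood, stedge_compile runs two external commands: stedgeai generate to produce a relocatable network, then ST's relocation script to produce the final binary. The shell sketch below shows roughly equivalent invocations, assuming STEDGEAI_CORE_DIR points at tools/st/stedgeai/2.1 and using placeholder model and output paths.

    # Roughly what stedge_compile executes for the default profile (sketch).
    "${STEDGEAI_CORE_DIR}/Utilities/linux/stedgeai" generate --target stm32n6 \
        --model model.tflite --relocatable \
        --st-neural-art "default@tools/st/scripts/neuralart.json" \
        --no-workspace --output build/model/gen --verbosity 1
    python "${STEDGEAI_CORE_DIR}/scripts/N6_reloc/npu_driver.py" \
        --input build/model/gen/network.c --output build/model --verbosity 1
    # The resulting network_rel.bin is then renamed next to the original model name.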


@@ -0,0 +1,10 @@
{
"Globals": {
},
"Profiles": {
"default": {
"memory_pool": "stm32n6.mpool",
"options": "--native-float --mvei --cache-maintenance --Ocache-opt --enable-virtual-mem-pools --Os --optimization 3 --Oauto-sched --all-buffers-info --csv-file network.csv --enable-epoch-controller"
}
}
}

tools/st/scripts/stm32n6.mpool Executable file

@@ -0,0 +1,171 @@
{
"params": {
"param": [
{
"paramname": "max_onchip_sram_size",
"value": "1024",
"magnitude": "KBYTES"
}
]
},
"memory": {
"cacheinfo": [
{
"nlines": 512,
"linesize": 64,
"associativity": 8,
"bypass_enable": 1,
"prop": {
"rights": "ACC_WRITE",
"throughput": "MID",
"latency": "MID",
"byteWidth": 8,
"freqRatio": 2.50,
"read_power": 13.584,
"write_power": 12.645
}
}
],
"mem_file_prefix": "atonbuf",
"mempools": [
{
"fname": "AXISRAM3",
"name": "npuRAM3",
"fformat": "FORMAT_RAW",
"prop": {
"rights": "ACC_WRITE",
"throughput": "HIGH",
"latency": "LOW",
"byteWidth": 8,
"freqRatio": 1.25,
"read_power": 18.531,
"write_power": 16.201
},
"offset": {
"value": "0x34200000",
"magnitude": "BYTES"
},
"size": {
"value": "448",
"magnitude": "KBYTES"
}
},
{
"fname": "AXISRAM4",
"name": "npuRAM4",
"fformat": "FORMAT_RAW",
"prop": {
"rights": "ACC_WRITE",
"throughput": "HIGH",
"latency": "LOW",
"byteWidth": 8,
"freqRatio": 1.25,
"read_power": 18.531,
"write_power": 16.201
},
"offset": {
"value": "0x34270000",
"magnitude": "BYTES"
},
"size": {
"value": "448",
"magnitude": "KBYTES"
}
},
{
"fname": "AXISRAM5",
"name": "npuRAM5",
"fformat": "FORMAT_RAW",
"prop": {
"rights": "ACC_WRITE",
"throughput": "HIGH",
"latency": "LOW",
"byteWidth": 8,
"freqRatio": 1.25,
"read_power": 18.531,
"write_power": 16.201
},
"offset": {
"value": "0x342e0000",
"magnitude": "BYTES"
},
"size": {
"value": "448",
"magnitude": "KBYTES"
}
},
{
"fname": "AXISRAM6",
"name": "npuRAM6",
"fformat": "FORMAT_RAW",
"prop": {
"rights": "ACC_WRITE",
"throughput": "HIGH",
"latency": "LOW",
"byteWidth": 8,
"freqRatio": 1.25,
"read_power": 18.531,
"write_power": 16.201
},
"offset": {
"value": "0x34350000",
"magnitude": "BYTES"
},
"size": {
"value": "448",
"magnitude": "KBYTES"
}
},
{
"fname": "xSPI1",
"name": "hyperRAM",
"fformat": "FORMAT_RAW",
"prop": {
"rights": "ACC_WRITE",
"throughput": "MID",
"latency": "HIGH",
"byteWidth": 2,
"freqRatio": 5.00,
"cacheable": "CACHEABLE_ON",
"read_power": 380,
"write_power": 340.0,
"constants_preferred": "true"
},
"offset": {
"value": "0x92000000",
"magnitude": "BYTES"
},
"size": {
"value": "16",
"magnitude": "MBYTES"
},
"mode": "USEMODE_RELATIVE"
},
{
"fname": "xSPI2",
"name": "octoFlash",
"fformat": "FORMAT_RAW",
"prop": {
"rights": "ACC_READ",
"throughput": "MID",
"latency": "HIGH",
"byteWidth": 1,
"freqRatio": 6.00,
"cacheable": "CACHEABLE_ON",
"read_power": 110,
"write_power": 400.0,
"constants_preferred": "true"
},
"offset": {
"value": "0x71000000",
"magnitude": "BYTES"
},
"size": {
"value": "16",
"magnitude": "MBYTES"
},
"mode": "USEMODE_RELATIVE"
}
]
}
}


@@ -1,159 +0,0 @@
#!/usr/bin/env python3
# This file is part of the OpenMV project.
#
# Copyright (c) 2013-2022 Ibrahim Abdelkader <iabdalkader@openmv.io>
# Copyright (c) 2013-2022 Kwabena W. Agyeman <kwagyeman@openmv.io>
#
# This work is licensed under the MIT license, see the file LICENSE for details.
#
# This script converts tflite models and labels to C structs.
import sys
import os
import csv
import glob
import argparse
import binascii
import subprocess
def vela_compile(model_path, build_dir, vela_args):
vela_ini = os.path.dirname(os.path.abspath(__file__))
model = os.path.basename(os.path.splitext(model_path)[0])
# Construct the command
command = [
'vela',
*vela_args,
'--output-dir', build_dir,
'--config', f'{vela_ini}/vela.ini',
model_path
]
# Call the command and capture the output
try:
result = subprocess.run(command, check=True, text=True, capture_output=True)
except subprocess.CalledProcessError as e:
print(e.stderr, file=sys.stderr)
print(vela_args, file=sys.stderr)
C_GREEN = '\033[92m'
C_RED = '\033[91m'
C_BLUE = '\033[94m'
C_RESET = '\033[0m'
csv_file_path = glob.glob(f"{build_dir}/{model}_summary_*.csv")[0]
with open(csv_file_path, mode='r') as file:
row = next(csv.DictReader(file))
stoi = lambda x, d=1: str(int(float(x) / d))
color = lambda c,x: c + x + C_RESET
summary = {
C_BLUE + "Network:": C_BLUE + row["network"],
C_BLUE + "Accelerator Configuration:": C_GREEN + row["accelerator_configuration"],
C_BLUE + "System Configuration:": C_BLUE + row["system_config"],
C_BLUE + "Memory Mode:": C_BLUE + row["memory_mode"],
C_BLUE + "Compiler Mode: ": C_RED + vela_args[-1],
C_BLUE + "Accelerator Clock:": C_BLUE + stoi(row["core_clock"], 10**6) + " MHz",
C_BLUE + "SRAM Usage:": C_RED + stoi(row["sram_memory_used"]) + " KiB",
C_BLUE + "Flash Usage:": C_RED + stoi(row["off_chip_flash_memory_used"]) + " KiB",
C_BLUE + "Inference Time:": C_GREEN + "%.2f ms, %.2f inferences/s"%
(float(row["inference_time"]) * 1000, float(row["inferences_per_second"])),
}
for key, value in summary.items():
print(f"{key:<{35}} {value:<{50}}", file=sys.stderr)
print(C_RESET, file=sys.stderr)
os.rename(f"{build_dir}/{model}_vela.tflite", f"{build_dir}/{model}.tflite")
def main():
parser = argparse.ArgumentParser(description='Converts TFLite models to C file.')
parser.add_argument('--input', action = 'store', help = 'Input tflite models directory.', required=True)
parser.add_argument('--header', action = 'store_true', help = 'Generate header file.', required=False, default=False)
parser.add_argument('--build-dir', action = 'store', help='Build directory', default='build', )
parser.add_argument('--vela-args', action = 'store', help='Vela compiler args', default='')
args = parser.parse_args()
tflm_builtin_models = []
tflm_builtin_index = {}
print('/* NOTE: This file is auto-generated. */\n')
index_headers = ['model', 'optimise']
# Open the file and parse it using DictReader
with open(os.path.join(args.input, "index.csv"), 'r') as file:
for row in csv.reader((line for line in file if not line.startswith('#'))):
model = os.path.splitext(row[0])[0]
tflm_builtin_index[model] = dict(zip(index_headers[1:], row[1:]))
models_list = glob.glob(os.path.join(args.input, "*tflite"))
if (args.header):
# Generate the header file
print('// Built-in TFLite Models.')
print('typedef struct {')
print(' const char *name;')
print(' const unsigned int n_labels;')
print(' const char **labels;')
print(' const unsigned int size;')
print(' const unsigned char *data;')
print('}tflm_builtin_model_t;\n')
print('extern const tflm_builtin_model_t tflm_builtin_models[];')
else:
# Generate the C file
print('#include "imlib_config.h"')
print('#include "tflm_builtin_models.h"')
for model_path in models_list:
model_size = os.path.getsize(model_path)
model_name = os.path.basename(os.path.splitext(model_path)[0])
labels_file = os.path.splitext(model_path)[0]+'.txt'
if (args.vela_args):
# Add model-specific Vela args.
if model_name not in tflm_builtin_index:
args.vela_args += " --optimise Performance"
else:
args.vela_args += " --optimise " + tflm_builtin_index[model_name]["optimise"]
# Compile the model using Vela and switch path to the new model.
vela_compile(model_path, args.build_dir, args.vela_args.split())
model_path = os.path.join(args.build_dir, model_name + ".tflite")
model_size = os.path.getsize(model_path)
# Generate model labels.
labels = []
n_labels = 0
if os.path.exists(labels_file):
with open(labels_file, 'r') as f:
labels = ['"{:s}"'.format(l.strip()) for l in f.readlines()]
n_labels = len(labels)
print('static const char *tflm_{:s}_labels[] = {{{:s}}};'.format(model_name, ', '.join(labels)))
# Generate model data.
print('static const unsigned char tflm_{:s}_data[] __attribute__((aligned(16))) = {{'.format(model_name))
with open(model_path, 'rb') as f:
for chunk in iter(lambda: f.read(12), b''):
print(' ', end='')
print(' '.join(['0x{:02x},'.format(x) for x in chunk]))
print('};')
# Store model info in builtin models table.
tflm_builtin_models.append([
model_name,
n_labels,
'tflm_{:s}_labels'.format(model_name),
model_size,
'tflm_{:s}_data'.format(model_name)]
)
# Generate built-in models table.
print('const tflm_builtin_model_t tflm_builtin_models[] = {')
for model in tflm_builtin_models:
if model[0] in tflm_builtin_index:
print(' #if defined(IMLIB_ENABLE_TFLM_BUILTIN_{:s})'.format(model[0].upper()))
print(' {{ "{:s}", {:d}, {:s}, {:d}, {:s} }},'.format(*model))
if model[0] in tflm_builtin_index:
print(' #endif')
print(' {0, 0, 0, 0, 0}')
print('};')
if __name__ == '__main__':
main()


@@ -1,34 +0,0 @@
#!/usr/bin/env python3
# This file is part of the OpenMV project.
#
# Copyright (c) 2024 Ibrahim Abdelkader <iabdalkader@openmv.io>
# Copyright (c) 2024 Kwabena W. Agyeman <kwagyeman@openmv.io>
#
# This work is licensed under the MIT license, see the file LICENSE for details.
import tflite
import struct
import sys
with open(sys.argv[1], "rb") as f:
model = tflite.Model.GetRootAs(f.read())
opcodes = []
graph = model.Subgraphs(0)
for i in range(graph.OperatorsLength()):
op = graph.Operators(i)
opcode = model.OperatorCodes(op.OpcodeIndex()).BuiltinCode()
opcodes.append(opcode)
# print(tflite.opcode2name(opcode))
def hash_djb2(s):
hash = 5381
for x in s:
hash = ((hash << 5) + hash) + x
return hash & 0xFFFFFFFF
packed = struct.pack("I" * len(opcodes), *opcodes)
print(hex(hash_djb2(packed)))