
Merge pull request #17908 from aabadie/pr/pkg/tflite-micro

pkg/tflite-micro: add support and deprecate tensorflow-lite
Alexandre Abadie 2022-04-08 18:37:25 +02:00 committed by GitHub
commit 4c125db8ba
39 changed files with 371 additions and 67 deletions


@@ -1,6 +1,6 @@
PKG_NAME=flatbuffers
PKG_URL=https://github.com/google/flatbuffers
PKG_VERSION=v1.11.0
PKG_VERSION=a9a295fecf3fbd5a4f571f53b01f63202a3e2113 # v2.0.0
PKG_LICENSE=Apache2.0
include $(RIOTBASE)/pkg/pkg.mk


@@ -1,17 +1,17 @@
From 1f739fa4e49839c63ef2831ed454965746ed104d Mon Sep 17 00:00:00 2001
From: Marian Buschsieweke <marian.buschsieweke@ovgu.de>
Date: Sat, 13 Nov 2021 09:17:04 +0100
Subject: [PATCH] Silence -Wcast-align in public headers
From 6ede404cf13233bd75f6af4fc487649c7bfd27c7 Mon Sep 17 00:00:00 2001
From: Alexandre Abadie <alexandre.abadie@inria.fr>
Date: Thu, 7 Apr 2022 11:14:49 +0200
Subject: [PATCH 1/1] Silence Wcast-align in public headers
---
include/flatbuffers/flatbuffers.h | 30 ++++++++++++++++++++++++++++++
1 file changed, 30 insertions(+)
include/flatbuffers/flatbuffers.h | 42 +++++++++++++++++++++++++++++++
1 file changed, 42 insertions(+)
diff --git a/include/flatbuffers/flatbuffers.h b/include/flatbuffers/flatbuffers.h
index a1a95f00..8c50576e 100644
index ee34d54e..52cad4c6 100644
--- a/include/flatbuffers/flatbuffers.h
+++ b/include/flatbuffers/flatbuffers.h
@@ -83,7 +83,10 @@ template<typename T> struct IndirectHelper {
@@ -107,7 +107,10 @@ template<typename T> struct IndirectHelper {
typedef T mutable_return_type;
static const size_t element_stride = sizeof(T);
static return_type Read(const uint8_t *p, uoffset_t i) {
@@ -22,7 +22,7 @@ index a1a95f00..8c50576e 100644
}
};
template<typename T> struct IndirectHelper<Offset<T>> {
@@ -92,7 +95,10 @@ template<typename T> struct IndirectHelper<Offset<T>> {
@@ -116,7 +119,10 @@ template<typename T> struct IndirectHelper<Offset<T>> {
static const size_t element_stride = sizeof(uoffset_t);
static return_type Read(const uint8_t *p, uoffset_t i) {
p += i * sizeof(uoffset_t);
@@ -33,7 +33,7 @@ index a1a95f00..8c50576e 100644
}
};
template<typename T> struct IndirectHelper<const T *> {
@@ -807,12 +813,18 @@ class vector_downward {
@@ -1034,12 +1040,18 @@ class vector_downward {
// Specialized version of push() that avoids memcpy call for small data.
template<typename T> void push_small(const T &little_endian_t) {
make_space(sizeof(T));
@@ -52,7 +52,27 @@ index a1a95f00..8c50576e 100644
scratch_ += sizeof(T);
}
@@ -1217,7 +1229,10 @@ class FlatBufferBuilder {
@@ -1121,13 +1133,19 @@ const T *data(const std::vector<T, Alloc> &v) {
// Eventually the returned pointer gets passed down to memcpy, so
// we need it to be non-null to avoid undefined behavior.
static uint8_t t;
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-align"
return v.empty() ? reinterpret_cast<const T *>(&t) : &v.front();
+#pragma GCC diagnostic pop
}
template<typename T, typename Alloc> T *data(std::vector<T, Alloc> &v) {
// Eventually the returned pointer gets passed down to memcpy, so
// we need it to be non-null to avoid undefined behavior.
static uint8_t t;
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-align"
return v.empty() ? reinterpret_cast<T *>(&t) : &v.front();
+#pragma GCC diagnostic pop
}
/// @endcond
@@ -1465,7 +1483,10 @@ class FlatBufferBuilder {
// Write the offsets into the table
for (auto it = buf_.scratch_end() - num_field_loc * sizeof(FieldLoc);
it < buf_.scratch_end(); it += sizeof(FieldLoc)) {
@@ -63,7 +83,7 @@ index a1a95f00..8c50576e 100644
auto pos = static_cast<voffset_t>(vtableoffsetloc - field_location->off);
// If this asserts, it means you've set a field twice.
FLATBUFFERS_ASSERT(
@@ -1225,7 +1240,10 @@ class FlatBufferBuilder {
@@ -1473,7 +1494,10 @@ class FlatBufferBuilder {
WriteScalar<voffset_t>(buf_.data() + field_location->id, pos);
}
ClearOffsets();
@ -74,7 +94,7 @@ index a1a95f00..8c50576e 100644
auto vt1_size = ReadScalar<voffset_t>(vt1);
auto vt_use = GetSize();
// See if we already have generated a vtable with this exact same
@@ -1233,8 +1251,11 @@ class FlatBufferBuilder {
@@ -1481,8 +1505,11 @@ class FlatBufferBuilder {
if (dedup_vtables_) {
for (auto it = buf_.scratch_data(); it < buf_.scratch_end();
it += sizeof(uoffset_t)) {
@ -83,10 +103,10 @@ index a1a95f00..8c50576e 100644
auto vt_offset_ptr = reinterpret_cast<uoffset_t *>(it);
auto vt2 = reinterpret_cast<voffset_t *>(buf_.data_at(*vt_offset_ptr));
+#pragma GCC diagnostic pop
auto vt2_size = *vt2;
auto vt2_size = ReadScalar<voffset_t>(vt2);
if (vt1_size != vt2_size || 0 != memcmp(vt2, vt1, vt1_size)) continue;
vt_use = *vt_offset_ptr;
@@ -1889,8 +1910,11 @@ protected:
@@ -2184,8 +2211,11 @@ class FlatBufferBuilder {
struct StringOffsetCompare {
StringOffsetCompare(const vector_downward &buf) : buf_(&buf) {}
bool operator()(const Offset<String> &a, const Offset<String> &b) const {
@ -95,10 +115,10 @@ index a1a95f00..8c50576e 100644
auto stra = reinterpret_cast<const String *>(buf_->data_at(a.o));
auto strb = reinterpret_cast<const String *>(buf_->data_at(b.o));
+#pragma GCC diagnostic pop
return StringLessThan(stra->data(), stra->size(),
strb->data(), strb->size());
return StringLessThan(stra->data(), stra->size(), strb->data(),
strb->size());
}
@@ -2272,8 +2296,11 @@ class Table {
@@ -2578,8 +2608,11 @@ class Table {
template<typename P> P GetPointer(voffset_t field) {
auto field_offset = GetOptionalFieldOffset(field);
auto p = data_ + field_offset;
@ -110,7 +130,7 @@ index a1a95f00..8c50576e 100644
}
template<typename P> P GetPointer(voffset_t field) const {
return const_cast<Table *>(this)->GetPointer<P>(field);
@@ -2282,7 +2309,10 @@ class Table {
@@ -2588,7 +2621,10 @@ class Table {
template<typename P> P GetStruct(voffset_t field) const {
auto field_offset = GetOptionalFieldOffset(field);
auto p = const_cast<uint8_t *>(data_ + field_offset);
@ -120,7 +140,29 @@ index a1a95f00..8c50576e 100644
+#pragma GCC diagnostic pop
}
template<typename T> bool SetField(voffset_t field, T val, T def) {
template<typename Raw, typename Face>
@@ -2691,7 +2727,10 @@ inline flatbuffers::Optional<bool> Table::GetOptional<uint8_t, bool>(
template<typename T>
void FlatBufferBuilder::Required(Offset<T> table, voffset_t field) {
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-align"
auto table_ptr = reinterpret_cast<const Table *>(buf_.data_at(table.o));
+#pragma GCC diagnostic pop
bool ok = table_ptr->GetOptionalFieldOffset(field) != 0;
// If this fails, the caller will show what field needs to be set.
FLATBUFFERS_ASSERT(ok);
@@ -2703,7 +2742,10 @@ void FlatBufferBuilder::Required(Offset<T> table, voffset_t field) {
/// This may be useful if you want to pass on a root and have the recipient
/// delete the buffer afterwards.
inline const uint8_t *GetBufferStartFromRootPointer(const void *root) {
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-align"
auto table = reinterpret_cast<const Table *>(root);
+#pragma GCC diagnostic pop
auto vtable = table->GetVTable();
// Either the vtable is before the root or after the root.
auto start = (std::min)(vtable, reinterpret_cast<const uint8_t *>(root));
--
2.33.1
2.32.0

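Every hunk in this patch applies the same mechanism: a GCC diagnostic push/pop pair that disables -Wcast-align only around a reinterpret_cast the library guarantees to be safe. A minimal, self-contained sketch of that pattern (read_u32 is a hypothetical helper, not part of the patch):

#include <cstdint>

// Reads a 32-bit value through a byte pointer that the caller guarantees to be
// suitably aligned. GCC cannot prove the alignment, so -Wcast-align would warn;
// the pragmas silence the warning for this cast only and then restore the
// previous diagnostic state.
static inline uint32_t read_u32(const uint8_t *p)
{
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-align"
    return *reinterpret_cast<const uint32_t *>(p);
#pragma GCC diagnostic pop
}

Scoping the pragmas to single statements, as the patch does, keeps the warning active everywhere else in code that includes the header.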

@@ -1,6 +1,6 @@
PKG_NAME=gemmlowp
PKG_URL=https://github.com/google/gemmlowp
PKG_VERSION=dc69acdf61d7a64260ae0eb9c17421fef0488c02
PKG_VERSION=e844ffd17118c1e17d94e1ba4354c075a4577b88
PKG_LICENSE=Apache2.0
include $(RIOTBASE)/pkg/pkg.mk

pkg/ruy/Kconfig (new file, 9 lines)

@@ -0,0 +1,9 @@
# Copyright (c) 2022 Inria
#
# This file is subject to the terms and conditions of the GNU Lesser
# General Public License v2.1. See the file LICENSE in the top level
# directory for more details.
config PACKAGE_RUY
bool "The ruy matrix multiplication library"
depends on TEST_KCONFIG

pkg/ruy/Makefile (new file, 9 lines)

@@ -0,0 +1,9 @@
PKG_NAME=ruy
PKG_URL=https://github.com/google/ruy
PKG_VERSION=7ef39c5745a61f43071e699c6a96da41701ae59f
PKG_LICENSE=Apache 2.0
include $(RIOTBASE)/pkg/pkg.mk
all:
@:

pkg/ruy/Makefile.dep (new file, 3 lines)

@@ -0,0 +1,3 @@
FEATURES_REQUIRED += cpp
USEMODULE += cpp11-compat

pkg/ruy/Makefile.include (new file, 5 lines)

@@ -0,0 +1,5 @@
INCLUDES += -I$(PKGDIRBASE)/ruy
PSEUDOMODULES += ruy
CFLAGS += -DTHIRD_PARTY_RUY_RUY_GTEST_WRAPPER_H_

pkg/ruy/doc.txt (new file, 11 lines)

@@ -0,0 +1,11 @@
/**
* @defgroup pkg_ruy The ruy matrix multiplication library
* @ingroup pkg
* @brief ruy is a matrix multiplication library
*
* # License
*
* Licensed under Apache 2.0.
*
* @see https://github.com/google/ruy
*/
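For orientation, a minimal multiplication with ruy looks roughly like the sketch below. It follows the example shipped with ruy upstream; the identifiers (ruy::Context, ruy::Matrix, ruy::MakeSimpleLayout, ruy::MulParams, ruy::Mul) are taken from that example and may differ in other revisions of the library.

#include "ruy/ruy.h"

int main(void)
{
    // Multiply a 2x2 row-major matrix by a 2x2 column-major matrix.
    const float lhs_data[4] = {1, 2, 3, 4};
    const float rhs_data[4] = {1, 2, 3, 4};
    float dst_data[4] = {0};

    ruy::Context context;

    ruy::Matrix<float> lhs;
    ruy::MakeSimpleLayout(2, 2, ruy::Order::kRowMajor, lhs.mutable_layout());
    lhs.set_data(lhs_data);

    ruy::Matrix<float> rhs;
    ruy::MakeSimpleLayout(2, 2, ruy::Order::kColMajor, rhs.mutable_layout());
    rhs.set_data(rhs_data);

    ruy::Matrix<float> dst;
    ruy::MakeSimpleLayout(2, 2, ruy::Order::kColMajor, dst.mutable_layout());
    dst.set_data(dst_data);

    // Unquantized float path: default MulParams, dst = lhs * rhs.
    ruy::MulParams<float, float> mul_params;
    ruy::Mul(lhs, rhs, mul_params, &context, &dst);
    return 0;
}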

pkg/ruy/ruy.mk (new file, 9 lines)

@@ -0,0 +1,9 @@
MODULE = ruy
SRCXXEXT = cc
SRCXXEXCLUDE = $(wildcard *_test.$(SRCXXEXT))
SRCXXEXCLUDE += benchmark.cc pack_%.cc test_%.cc
CFLAGS += -Wno-unused-variable
include $(RIOTBASE)/Makefile.base


@@ -1,6 +1,6 @@
PKG_NAME=tensorflow-lite
PKG_URL=https://github.com/tensorflow/tensorflow
PKG_VERSION=1768c8f2fa155d4c6406e8ff7addf374c83de7ad
PKG_VERSION=d745ff2a48cebf18e847e8b602a744e97e058946 # 2.2.2
PKG_LICENSE=Apache2.0
include $(RIOTBASE)/pkg/pkg.mk
@@ -12,6 +12,7 @@ TF_USEMODULE = $(filter $(TF_MODULES),$(USEMODULE))
CFLAGS += -Wno-pedantic
CFLAGS += -Wno-cast-align
CXXEXFLAGS += -Wno-maybe-uninitialized
CFLAGS += -DTF_LITE_STATIC_MEMORY
CFLAGS += -DTF_LITE_USE_GLOBAL_ROUND


@@ -11,5 +11,5 @@ USEMODULE += tensorflow-lite-memory
USEMODULE += tensorflow-lite-memory
USEMODULE += tensorflow-lite-micro-kernels
# C++ support on ESP32 in RIOT doesn't work with TensorFlow-Lite for the moment
FEATURES_BLACKLIST += arch_esp32
# Tensorflow Lite doesn't build on riscv because of missing math functions
FEATURES_BLACKLIST += arch_riscv


@@ -3,5 +3,8 @@
* @ingroup pkg
* @brief Provides a RIOT support for TensorFlow Lite AI library
*
* @deprecated Use @ref pkg_tflite-micro instead. Tensorflow-lite package is outdated
* and will be removed after 2022.07 release.
*
* @see https://www.tensorflow.org/lite/microcontrollers
*/

pkg/tflite-micro/Makefile (new file, 43 lines)

@@ -0,0 +1,43 @@
PKG_NAME=tflite-micro
PKG_URL=https://github.com/tensorflow/tflite-micro
PKG_VERSION=1501b574b74fd7877aba30aa9d8b667f41b139c3 # sync from 2022.04.07
PKG_LICENSE=Apache 2.0
include $(RIOTBASE)/pkg/pkg.mk
CFLAGS += -Wno-cast-align
CFLAGS += -Wno-maybe-uninitialized
CFLAGS += -Wno-pedantic
CFLAGS += -Wno-unused-parameter
TFLITE_MODULES := \
tflite-c \
tflite-core-api \
tflite-kernels \
tflite-kernels-internal \
tflite-kernels-internal-reference \
tflite-micro \
tflite-micro-kernels \
tflite-micro-memory-planner \
tflite-schema \
#
DIR_tflite-c := tensorflow/lite/c
DIR_tflite-core-api := tensorflow/lite/core/api
DIR_tflite-kernels := tensorflow/lite/kernels
DIR_tflite-kernels-internal := tensorflow/lite/kernels/internal
DIR_tflite-kernels-internal-reference := tensorflow/lite/kernels/internal/reference
DIR_tflite-micro := tensorflow/lite/micro
DIR_tflite-micro-kernels := tensorflow/lite/micro/kernels
DIR_tflite-micro-memory-planner := tensorflow/lite/micro/memory_planner
DIR_tflite-schema := tensorflow/lite/schema
TFLITE_MODULES_USED := $(filter $(TFLITE_MODULES),$(USEMODULE))
all: $(TFLITE_MODULES_USED)
@:
.PHONY: tflite-%
tflite-%:
$(QQ)"$(MAKE)" -C $(PKG_SOURCE_DIR)/$(DIR_$@) -f $(CURDIR)/$@.mk


@@ -0,0 +1,19 @@
FEATURES_REQUIRED += cpp
USEMODULE += cpp11-compat
USEPKG += flatbuffers
USEPKG += gemmlowp
USEPKG += ruy
USEMODULE += tflite-c
USEMODULE += tflite-core-api
USEMODULE += tflite-kernels
USEMODULE += tflite-kernels-internal
USEMODULE += tflite-kernels-internal-reference
USEMODULE += tflite-micro
USEMODULE += tflite-micro-kernels
USEMODULE += tflite-micro-memory-planner
USEMODULE += tflite-schema
# This package doesn't work on riscv and mips
FEATURES_BLACKLIST += arch_mips32r2 arch_riscv


@@ -0,0 +1,8 @@
INCLUDES += -I$(PKGDIRBASE)/tflite-micro
CFLAGS += -DTF_LITE_STATIC_MEMORY
CFLAGS += -DTF_LITE_DISABLE_X86_NEON
CFLAGS += -DTF_LITE_USE_GLOBAL_CMATH_FUNCTIONS
CFLAGS += -DTF_LITE_USE_GLOBAL_MIN
CFLAGS += -DTF_LITE_USE_GLOBAL_MAX
CFLAGS += -DFLATBUFFERS_LOCALE_INDEPENDENT=0

pkg/tflite-micro/doc.txt (new file, 11 lines)

@@ -0,0 +1,11 @@
/**
* @defgroup pkg_tflite-micro Tensorflow Lite Micro
* @ingroup pkg
* @brief Portable C++ library for signal processing and machine learning inferencing
*
* # License
*
* Licensed under Apache 2.0.
*
* @see https://github.com/tensorflow/tflite-micro
*/
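The inference flow with this package is the one used by the updated tests/pkg_tflite-micro application later in this diff; a condensed sketch under the same assumptions (model_tflite, kTensorArenaSize and run_inference are placeholder names, and the arena size must be tuned to the model):

#include <cstddef>
#include <cstdint>
#include <cstring>

#include "tensorflow/lite/micro/all_ops_resolver.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"
#include "tensorflow/lite/micro/micro_interpreter.h"
#include "tensorflow/lite/micro/system_setup.h"
#include "tensorflow/lite/schema/schema_generated.h"

extern const unsigned char model_tflite[];     // model blob, e.g. a header generated from a .tflite file

constexpr int kTensorArenaSize = 10 * 1024;    // working memory for tensors, model dependent
static uint8_t tensor_arena[kTensorArenaSize];

int run_inference(const float *input, size_t len)
{
    tflite::InitializeTarget();

    static tflite::MicroErrorReporter error_reporter;
    const tflite::Model *model = tflite::GetModel(model_tflite);
    if (model->version() != TFLITE_SCHEMA_VERSION) {
        return -1;
    }

    // Pull in all operator implementations and build a static interpreter.
    static tflite::AllOpsResolver resolver;
    static tflite::MicroInterpreter interpreter(
        model, resolver, tensor_arena, kTensorArenaSize, &error_reporter);
    if (interpreter.AllocateTensors() != kTfLiteOk) {
        return -1;
    }

    // Copy the input, run the model and return the first output value.
    std::memcpy(interpreter.input(0)->data.f, input, len * sizeof(float));
    if (interpreter.Invoke() != kTfLiteOk) {
        return -1;
    }
    return static_cast<int>(interpreter.output(0)->data.f[0]);
}

On the build side this only requires USEPKG += tflite-micro; the modules and packages it depends on are pulled in by the Makefile.dep shown below.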


@@ -0,0 +1,40 @@
From 4e9723bbedf1317dec204397c6777f90ff76646d Mon Sep 17 00:00:00 2001
From: Alexandre Abadie <alexandre.abadie@inria.fr>
Date: Thu, 7 Apr 2022 17:07:58 +0200
Subject: [PATCH 1/1] fix build with private descriptor
---
tensorflow/lite/micro/memory_planner/greedy_memory_planner.h | 2 --
tensorflow/lite/micro/micro_error_reporter.h | 3 ---
2 files changed, 5 deletions(-)
diff --git a/tensorflow/lite/micro/memory_planner/greedy_memory_planner.h b/tensorflow/lite/micro/memory_planner/greedy_memory_planner.h
index a34f3c5..b8269be 100644
--- a/tensorflow/lite/micro/memory_planner/greedy_memory_planner.h
+++ b/tensorflow/lite/micro/memory_planner/greedy_memory_planner.h
@@ -158,8 +158,6 @@ class GreedyMemoryPlanner : public MicroMemoryPlanner {
// Whether buffers have been added since the last plan was calculated.
bool need_to_calculate_offsets_;
-
- TF_LITE_REMOVE_VIRTUAL_DELETE
};
} // namespace tflite
diff --git a/tensorflow/lite/micro/micro_error_reporter.h b/tensorflow/lite/micro/micro_error_reporter.h
index 0e3b0c3..18a0d89 100644
--- a/tensorflow/lite/micro/micro_error_reporter.h
+++ b/tensorflow/lite/micro/micro_error_reporter.h
@@ -46,9 +46,6 @@ class MicroErrorReporter : public ErrorReporter {
public:
~MicroErrorReporter() override {}
int Report(const char* format, va_list args) override;
-
- private:
- TF_LITE_REMOVE_VIRTUAL_DELETE
};
} // namespace tflite
--
2.32.0


@@ -0,0 +1,5 @@
MODULE = tflite-c
SRCXXEXT = cc
include $(RIOTBASE)/Makefile.base


@@ -0,0 +1,5 @@
MODULE = tflite-core-api
SRCXXEXT = cc
include $(RIOTBASE)/Makefile.base


@@ -0,0 +1,5 @@
MODULE = tflite-kernels-internal-reference
SRCXXEXT = cc
include $(RIOTBASE)/Makefile.base


@@ -0,0 +1,5 @@
MODULE = tflite-kernels-internal
SRCXXEXT = cc
include $(RIOTBASE)/Makefile.base


@@ -0,0 +1,5 @@
MODULE = tflite-kernels
SRCXXEXT = cc
include $(RIOTBASE)/Makefile.base


@@ -0,0 +1,6 @@
MODULE = tflite-micro-kernels
SRCXXEXT = cc
SRCXXEXCLUDE = $(wildcard *_test.$(SRCXXEXT))
include $(RIOTBASE)/Makefile.base


@@ -0,0 +1,6 @@
MODULE = tflite-micro-memory-planner
SRCXXEXT = cc
SRCXXEXCLUDE = $(wildcard *_test.$(SRCXXEXT))
include $(RIOTBASE)/Makefile.base


@@ -0,0 +1,6 @@
MODULE = tflite-micro
SRCXXEXT = cc
SRCXXEXCLUDE = $(wildcard *_test.$(SRCXXEXT))
include $(RIOTBASE)/Makefile.base


@@ -0,0 +1,5 @@
MODULE = tflite-schema
SRCXXEXT = cc
include $(RIOTBASE)/Makefile.base


@@ -1,23 +0,0 @@
# Ensure minimal size by default
DEVELHELP ?= 0
include ../Makefile.tests_common
# Other available example: hello_world
EXAMPLE ?= mnist
USEPKG += tensorflow-lite
# internal mnist example is available as an external module
ifeq (mnist,$(EXAMPLE))
# TensorFlow-Lite crashes on M4/M7 CPUs when FPU is enabled, so disable it by
# default for now
DISABLE_MODULE += cortexm_fpu
USEMODULE += $(EXAMPLE)
EXTERNAL_MODULE_DIRS += external_modules
else
# Use upstream example
USEMODULE += tensorflow-lite-$(EXAMPLE)
endif
include $(RIOTBASE)/Makefile.include


@@ -0,0 +1,14 @@
# Ensure minimal size by default
DEVELHELP ?= 0
include ../Makefile.tests_common
USEPKG += tflite-micro
# TensorFlow-Lite crashes on M4/M7 CPUs when FPU is enabled, so disable it by
# default for now
DISABLE_MODULE += cortexm_fpu
USEMODULE += mnist
EXTERNAL_MODULE_DIRS += external_modules
include $(RIOTBASE)/Makefile.include


@@ -4,15 +4,37 @@ BOARD_INSUFFICIENT_MEMORY := \
arduino-mkrfox1200 \
arduino-mkrwan1300 \
arduino-mkrzero \
arduino-nano-33-iot \
arduino-zero \
b-l072z-lrwan1 \
bastwan \
blackpill \
blackpill-128kib \
bluepill \
bluepill-128kib \
bluepill-stm32f030c8 \
calliope-mini \
cc1350-launchpad \
cc2650-launchpad \
cc2650stk \
e104-bt5010a-tb \
e104-bt5011a-tb \
e180-zg120b-tb \
esp8266-esp-12x \
esp8266-olimex-mod \
esp8266-sparkfun-thing \
feather-m0 \
feather-m0-lora \
feather-m0-wifi \
frdm-kl43z \
frdm-kw41z \
hamilton \
i-nucleo-lrwan1 \
ikea-tradfri \
im880b \
lobaro-lorabox \
lsn50 \
maple-mini \
microbit \
nrf51dk \
nrf51dongle \
@@ -23,16 +45,28 @@ BOARD_INSUFFICIENT_MEMORY := \
nucleo-f070rb \
nucleo-f072rb \
nucleo-f091rc \
nucleo-f103rb \
nucleo-f302r8 \
nucleo-f303k8 \
nucleo-f334r8 \
nucleo-f410rb \
nucleo-g070rb \
nucleo-g071rb \
nucleo-g431kb \
nucleo-g431rb \
nucleo-l011k4 \
nucleo-l031k6 \
nucleo-l053r8 \
nucleo-l073rz \
nucleo-l412kb \
nucleo-wl55jc \
olimexino-stm32 \
opencm904 \
openlabs-kw41z-mini-256kib \
pba-d-01-kw2x \
phynode-kw41z \
samd10-xmini \
samd20-xpro \
samd21-xpro \
saml10-xpro \
saml11-xpro \
@@ -40,19 +74,31 @@ BOARD_INSUFFICIENT_MEMORY := \
samr21-xpro \
samr30-xpro \
samr34-xpro \
seeeduino_xiao \
sensebox_samd21 \
serpente \
slstk3400a \
slstk3401a \
sltb001a \
slwstk6000b-slwrb4150a \
slwstk6220a \
sodaq-autonomo \
sodaq-explorer \
sodaq-one \
sodaq-sara-aff \
sodaq-sara-sff \
spark-core \
stk3200 \
stk3600 \
stm32f030f4-demo \
stm32f0discovery \
stm32g0316-disco \
stm32l0538-disco \
stm32mp157c-dk2 \
teensy31 \
usb-kw41z \
weact-f401cc \
wemos-zero \
yarm \
yunjia-nrf51822 \
#


@@ -16,12 +16,19 @@
*/
#include <stdio.h>
#include "kernel_defines.h"
#if IS_USED(MODULE_TENSORFLOW_LITE)
#include "tensorflow/lite/micro/kernels/all_ops_resolver.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"
#include "tensorflow/lite/micro/micro_interpreter.h"
#include "tensorflow/lite/micro/kernels/micro_ops.h"
#include "tensorflow/lite/micro/micro_mutable_op_resolver.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/version.h"
#else
#include "tensorflow/lite/micro/all_ops_resolver.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"
#include "tensorflow/lite/micro/micro_interpreter.h"
#include "tensorflow/lite/micro/system_setup.h"
#endif
#include "tensorflow/lite/schema/schema_generated.h"
#include "blob/digit.h"
#include "blob/model.tflite.h"
@@ -45,14 +52,20 @@ namespace {
// The name of this function is important for Arduino compatibility.
void setup()
{
#if IS_USED(MODULE_TFLITE_MICRO)
tflite::InitializeTarget();
#endif
// Set up logging. Google style is to avoid globals or statics because of
// lifetime uncertainty, but since this has a trivial destructor it's okay.
// NOLINTNEXTLINE(runtime-global-variables)
static tflite::MicroErrorReporter micro_error_reporter;
error_reporter = &micro_error_reporter;
// Map the model into a usable data structure. This doesn't involve any
// copying or parsing, it's a very lightweight operation.
model = tflite::GetModel(model_tflite);
if (model->version() != TFLITE_SCHEMA_VERSION) {
printf("Model provided is schema version %d not equal "
"to supported version %d.",
@@ -60,24 +73,17 @@ void setup()
return;
}
// Explicitly load required operators
static tflite::MicroMutableOpResolver micro_mutable_op_resolver;
micro_mutable_op_resolver.AddBuiltin(
tflite::BuiltinOperator_FULLY_CONNECTED,
tflite::ops::micro::Register_FULLY_CONNECTED(), 1, 4);
micro_mutable_op_resolver.AddBuiltin(
tflite::BuiltinOperator_SOFTMAX,
tflite::ops::micro::Register_SOFTMAX(), 1, 2);
micro_mutable_op_resolver.AddBuiltin(
tflite::BuiltinOperator_QUANTIZE,
tflite::ops::micro::Register_QUANTIZE());
micro_mutable_op_resolver.AddBuiltin(
tflite::BuiltinOperator_DEQUANTIZE,
tflite::ops::micro::Register_DEQUANTIZE(), 1, 2);
// This pulls in all the operation implementations we need.
// NOLINTNEXTLINE(runtime-global-variables)
#if IS_USED(MODULE_TFLITE_MICRO)
static tflite::AllOpsResolver resolver;
#else
static tflite::ops::micro::AllOpsResolver resolver;
#endif
// Build an interpreter to run the model with.
static tflite::MicroInterpreter static_interpreter(
model, micro_mutable_op_resolver, tensor_arena, kTensorArenaSize, error_reporter);
model, resolver, tensor_arena, kTensorArenaSize, error_reporter);
interpreter = &static_interpreter;
// Allocate memory from the tensor_arena for the model's tensors.