#include <TensorFlowLite_ESP32.h>
#include <tensorflow/lite/micro/all_ops_resolver.h>
#include <tensorflow/lite/micro/micro_error_reporter.h>
#include <tensorflow/lite/micro/micro_interpreter.h>
#include <tensorflow/lite/schema/schema_generated.h>
#include <tensorflow/lite/version.h>

// Header that defines the g_model byte array (the name "model.h" is an
// assumption; use whatever header your converted model was generated into)
#include "model.h"
namespace {
tflite::MicroErrorReporter micro_error_reporter;
tflite::ErrorReporter* error_reporter = &micro_error_reporter;
const tflite::Model* model = nullptr;
tflite::MicroInterpreter* interpreter = nullptr;
TfLiteTensor* input = nullptr;
TfLiteTensor* output = nullptr;
// Create an area of memory to use for input, output, and intermediate arrays.
constexpr int kTensorArenaSize = 10 * 1024;
uint8_t tensor_arena[kTensorArenaSize];
} // namespace
void setup() {
  Serial.begin(115200);

  // Load the model from the g_model byte array
  model = tflite::GetModel(g_model);
  if (model->version() != TFLITE_SCHEMA_VERSION) {
    Serial.println("Model schema version does not match");
    while (1);
  }

  // Resolve all TensorFlow Lite Micro operations. A production sketch would
  // save flash and RAM by using MicroMutableOpResolver and registering only
  // the operations the model actually uses
  static tflite::AllOpsResolver resolver;

  // Build an interpreter to run the model. It is declared static so that the
  // pointer stored in `interpreter` stays valid after setup() returns
  static tflite::MicroInterpreter static_interpreter(
      model, resolver, tensor_arena, kTensorArenaSize, error_reporter);
  interpreter = &static_interpreter;

  // Allocate memory from the tensor_arena for the model's tensors
  if (interpreter->AllocateTensors() != kTfLiteOk) {
    Serial.println("AllocateTensors() failed");
    while (1);
  }

  // Obtain pointers to the model's input and output tensors
  input = interpreter->input(0);
  output = interpreter->output(0);
}
void loop() {
  // Example input: fill the input tensor with your data. Zeros are used here
  // only as a compilable placeholder; replace them with real, appropriately
  // quantized values for your model
  for (size_t i = 0; i < input->bytes; i++) {
    input->data.uint8[i] = 0;
  }

  // Run the model on this input and make sure it succeeds
  if (interpreter->Invoke() != kTfLiteOk) {
    Serial.println("Invoke failed");
    while (1);
  }

  // Print every element of the output tensor (assumes a [1, N] output shape)
  for (int i = 0; i < output->dims->data[1]; i++) {
    Serial.print("Output[");
    Serial.print(i);
    Serial.print("]: ");
    Serial.println(output->data.uint8[i]);
  }

  delay(1000);
}
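
// ---------------------------------------------------------------------------
// Note on g_model: this sketch assumes the converted .tflite model is
// available as a C array named g_model, typically generated with:
//
//   xxd -i converted_model.tflite > model.h
//
// A minimal sketch of what such a header might look like (the file name and
// the alignas(8) choice are illustrative assumptions, not requirements):
//
//   // model.h
//   alignas(8) const unsigned char g_model[] = {
//     /* bytes emitted by xxd */
//   };
//   const unsigned int g_model_len = sizeof(g_model);
//
// xxd names the array after the input file, so rename the array (or the
// references in this sketch) so that the two match.
// ---------------------------------------------------------------------------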