#include <Arduino.h>
#include <TensorFlowLite.h>
#include "tensorflow/lite/micro/all_ops_resolver.h"
#include "tensorflow/lite/micro/micro_interpreter.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/version.h"
// Include your TFLite model
#include "tinyml_model.h"
// Tensor arena: scratch memory for the model's tensors. 10 KB is a starting
// point; tune it using the arena usage report printed in setup().
constexpr int tensor_arena_size = 10 * 1024;
// 16-byte alignment, as recommended in the TFLM examples
alignas(16) uint8_t tensor_arena[tensor_arena_size];
// Global interpreter and tensors
tflite::MicroInterpreter* interpreter;
TfLiteTensor* input;
TfLiteTensor* output;
void setup() {
  Serial.begin(115200);
  delay(1000);  // Give the serial monitor time to attach
  Serial.println("Initializing TinyML model...");

  // Map the model stored in tinyml_model.h (the byte array stays in flash)
  const tflite::Model* model = tflite::GetModel(tinyml_model_tflite);
  if (model->version() != TFLITE_SCHEMA_VERSION) {
    Serial.println("Model schema version mismatch!");
    while (true) {}  // Halt: the model was converted with an incompatible schema
  }
  // Register all built-in ops (simple, but costs flash; see the alternative below)
  static tflite::AllOpsResolver resolver;
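  // A leaner alternative: MicroMutableOpResolver registers only the ops your
  // model actually uses, saving flash. The ops listed here are illustrative
  // assumptions; list the ones your own model needs.
  //
  //   #include "tensorflow/lite/micro/micro_mutable_op_resolver.h"  // at top of file
  //   static tflite::MicroMutableOpResolver<3> resolver;
  //   resolver.AddFullyConnected();
  //   resolver.AddRelu();
  //   resolver.AddSoftmax();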
  // Build the interpreter. Note: older releases of the library also take an
  // ErrorReporter as a fifth constructor argument.
  static tflite::MicroInterpreter static_interpreter(
      model, resolver, tensor_arena, tensor_arena_size);
  interpreter = &static_interpreter;
  // Allocate memory from the arena for the model's tensors
  if (interpreter->AllocateTensors() != kTfLiteOk) {
    Serial.println("Tensor allocation failed! Try a larger tensor arena.");
    while (true) {}
  }
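  // Report how much of the arena the model actually needed, useful for
  // trimming tensor_arena_size. arena_used_bytes() exists in recent TFLM
  // releases; drop these lines if your version does not provide it.
  Serial.print("Arena used: ");
  Serial.print(interpreter->arena_used_bytes());
  Serial.println(" bytes");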
  // Get handles to the input and output tensors
  input = interpreter->input(0);
  output = interpreter->output(0);
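  // This sketch assumes a float32 input tensor of shape [1, N]; warn if the
  // model was quantized instead (see the int8 note in loop())
  if (input->type != kTfLiteFloat32) {
    Serial.println("Warning: input tensor is not float32");
  }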
Serial.println("Model ready!");
}
void loop() {
  // Example input: replace with readings from your actual sensor. These three
  // placeholder values must match your model's input size.
  float sample_input[] = {0.5f, 1.2f, 0.3f};

  // Copy the input into the tensor, bounded by both lengths so a mismatch
  // between sample_input and the model cannot read past the array
  const int input_len = input->dims->data[1];
  const int sample_len = (int)(sizeof(sample_input) / sizeof(sample_input[0]));
  for (int i = 0; i < input_len && i < sample_len; i++) {
    input->data.f[i] = sample_input[i];
  }
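  // If your model is int8-quantized (common for TinyML), inputs must be
  // quantized first. A minimal sketch, assuming an int8 input tensor
  // (rounding omitted for brevity):
  //
  //   for (int i = 0; i < input_len && i < sample_len; i++) {
  //     input->data.int8[i] = (int8_t)(sample_input[i] / input->params.scale
  //                                    + input->params.zero_point);
  //   }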
  // Run inference
  if (interpreter->Invoke() != kTfLiteOk) {
    Serial.println("Model invocation failed!");
    return;
  }
  // Read and print every output value
  Serial.print("Output: ");
  for (int i = 0; i < output->dims->data[1]; i++) {
    Serial.print(output->data.f[i], 6);  // 6 decimal places
    Serial.print(" ");
  }
  Serial.println();
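  // For a classifier, the prediction is the index of the largest output.
  // A minimal sketch, assuming float32 outputs:
  int best = 0;
  for (int i = 1; i < output->dims->data[1]; i++) {
    if (output->data.f[i] > output->data.f[best]) best = i;
  }
  Serial.print("Predicted class: ");
  Serial.println(best);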
  delay(1000);  // Run inference once per second
}