#include "TensorFlowLite.h"
#include "tensorflow/lite/micro/micro_mutable_op_resolver.h"
#include "tensorflow/lite/micro/tflite_bridge/micro_error_reporter.h"
#include "tensorflow/lite/micro/micro_log.h"
#include "tensorflow/lite/micro/micro_interpreter.h"
#include "tensorflow/lite/micro/system_setup.h"
#include "tensorflow/lite/schema/schema_generated.h"
// Our model
#include "imu_model.h"
#include <Arduino_LSM9DS1.h>
// Figure out what's going on in our model
// Figure out what's going on in our model
#define DEBUG 1
// Some settings
// NOTE(review): led_pin, pi, and period are not referenced anywhere in the
// visible code — presumably kept for a later blink/sine-timing demo; confirm
// before removing.
constexpr int led_pin = 2;
constexpr float pi = 3.14159265; // Some pi
constexpr float freq = 0.5; // Frequency (Hz) of sinewave
constexpr float period = (1 / freq) * (1000000); // Period (microseconds)
// TFLite globals, used for compatibility with Arduino-style sketches
namespace {
// TFLite Micro plumbing shared between setup() and loop(). Kept in an
// anonymous namespace so they are file-local (Arduino-sketch style globals).
tflite::ErrorReporter* error_reporter = nullptr;   // set in setup(); logs via Serial
const tflite::Model* model = nullptr;              // flatbuffer model mapped from g_imu_model
tflite::MicroInterpreter* interpreter = nullptr;   // points at a static interpreter built in setup()
TfLiteTensor* model_input = nullptr;               // interpreter->input(0)
TfLiteTensor* model_output = nullptr;              // interpreter->output(0)
// Create an area of memory to use for input, output, and other TensorFlow
// arrays. You'll need to adjust this by compiling, running, and looking
// for errors (AllocateTensors() fails when the arena is too small).
constexpr int kTensorArenaSize = 10 * 1024;
uint8_t tensor_arena[kTensorArenaSize];
} // namespace
void setup() {
// put your setup code here, to run once:
while(!Serial);
// Set up logging (will report to Serial, even within TFLite functions)
static tflite::MicroErrorReporter micro_error_reporter;
error_reporter = µ_error_reporter;
// Map the model into a usable data structure
model = tflite::GetModel(g_imu_model);
if (model->version() != TFLITE_SCHEMA_VERSION) {
error_reporter->Report(
"Model provided is schema version %d not equal "
"to supported version %d.",
model->version(), TFLITE_SCHEMA_VERSION);
return;
}
// Pull in only needed operations (should match NN layers)
// Available ops:
// https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/micro/kernels/micro_ops.h
static tflite::MicroMutableOpResolver<7> micro_op_resolver;
micro_op_resolver.AddFullyConnected();
micro_op_resolver.AddSoftmax();
micro_op_resolver.AddConv2D();
micro_op_resolver.AddMaxPool2D();
micro_op_resolver.AddReshape();
micro_op_resolver.AddQuantize();
micro_op_resolver.AddDequantize();
// Build an interpreter to run the model
static tflite::MicroInterpreter static_interpreter(
model, micro_op_resolver, tensor_arena, kTensorArenaSize);
interpreter = &static_interpreter;
// Allocate memory from the tensor_arena for the model's tensors
TfLiteStatus allocate_status = interpreter->AllocateTensors();
if (allocate_status != kTfLiteOk) {
Serial.print("AllocateTensors() failed");
error_reporter->Report("AllocateTensors() failed");
while(1);
}
// Assign model input and output buffers (tensors) to pointers
model_input = interpreter->input(0);
model_output = interpreter->output(0);
}
void loop() {
  // Poll the accelerometer once per second and echo the x/y/z reading to
  // Serial. The commented-out section below is scratch code for feeding the
  // model and invoking the interpreter — kept for future debugging.
  float x, y, z;
  if (IMU.accelerationAvailable()) {
    Serial.print("Available");
    IMU.readAcceleration(x, y, z);
    Serial.print(x);
    Serial.print('\t');
    Serial.print(y);
    Serial.print('\t');
    Serial.println(z);
  } else {
    // BUG FIX: corrected the misspelled status message ("availble").
    Serial.print("Not available ");
  }
  // Serial.print("Number of dimensions: ");
  // Serial.println(model_input->dims->size);
  // Serial.print("Dim 1 size: ");
  // Serial.println(model_input->dims->data[0]);
  // Serial.print("Dim 2 size: ");
  // Serial.println(model_input->dims->data[1]);
  // Serial.println(model_input->dims->data[2]);
  // Serial.print("Input type: ");
  // Serial.println(model_input->type);
  delay(1000);
  // int data[128*3];
  // for(int i=0;i<128;i++){
  //   for (int j=0;j<3;j++){
  //     model_input->data.int8[i*3+j]=i*10;
  //     data[i*3+j]=i*10;
  //   }
  // }
  // // model_input->data.=data;
  // TfLiteStatus invoke_status = interpreter->Invoke();
  // if (invoke_status != kTfLiteOk) {
  //   error_reporter->Report("Invoke failed on input: %d\n", data);
  // }
  // float y_val = model_output->data.f[0];
  // Serial.println(y_val);
}