-
Notifications
You must be signed in to change notification settings - Fork 57
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
support Cortex-M + ESP, support [email protected] on Cortex-M, support [email protected].…
…1 on ESP32
- Loading branch information
1 parent
1cfd8b6
commit 5fb8f3f
Showing
509 changed files
with
102,741 additions
and
4,338 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,5 +1,5 @@ | ||
name=EloquentTinyML | ||
version=0.0.10 | ||
version=2.4.0 | ||
author=Simone Salerno,[email protected] | ||
maintainer=Simone Salerno,[email protected] | ||
sentence=An eloquent interface to Tensorflow Lite for Microcontrollers | ||
|
Binary file not shown.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,237 +1,22 @@ | ||
#pragma once | ||
|
||
#include <Arduino.h> | ||
#include <math.h> | ||
|
||
// Arduino.h defines max()/min() as function-like macros, which break the
// C++ standard headers pulled in by the TensorFlow Lite sources below.
// Save the fact that they were defined, undefine them for the includes,
// then restore equivalent macros afterwards.
#ifdef max
#define REDEFINE_MAX
#undef max
#undef min
#endif

#include <math.h>
#include "tensorflow/lite/version.h"
#include "tensorflow/lite/micro/kernels/all_ops_resolver.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"
#include "tensorflow/lite/micro/micro_interpreter.h"

// pick the platform-specific TFLite backing implementation
#if defined(ESP32)
#include "TfLiteESP32.h"
#else
#include "TfLiteARM.h"
#endif

// restore Arduino-style max/min macros if we removed them above
#ifdef REDEFINE_MAX
#define max(a,b) ((a)>(b)?(a):(b))
#define min(a,b) ((a)<(b)?(a):(b))
#endif
|
||
|
||
namespace Eloquent { | ||
namespace TinyML { | ||
|
||
        // Status codes reported by TfLite::getError() / errorMessage().
        enum TfLiteError {
            OK,                        // last operation succeeded
            VERSION_MISMATCH,          // model schema != TFLITE_SCHEMA_VERSION
            CANNOT_ALLOCATE_TENSORS,   // AllocateTensors() failed (arena too small?)
            NOT_INITIALIZED,           // predict() called before a successful begin()
            INVOKE_ERROR               // interpreter->Invoke() returned an error
        };
|
||
/** | ||
* Eloquent interface to Tensorflow Lite for Microcontrollers | ||
* | ||
* @tparam inputSize | ||
* @tparam outputSize | ||
* @tparam tensorArenaSize how much memory to allocate to the tensors | ||
*/ | ||
template<size_t inputSize, size_t outputSize, size_t tensorArenaSize> | ||
class TfLite { | ||
public: | ||
/** | ||
* Contructor | ||
* @param modelData a model as exported by tinymlgen | ||
*/ | ||
TfLite() : | ||
failed(false) { | ||
} | ||
|
||
~TfLite() { | ||
delete reporter; | ||
delete interpreter; | ||
} | ||
|
||
/** | ||
* Inizialize NN | ||
* | ||
* @param modelData | ||
* @return | ||
*/ | ||
bool begin(const unsigned char *modelData) { | ||
tflite::ops::micro::AllOpsResolver resolver; | ||
reporter = new tflite::MicroErrorReporter(); | ||
|
||
model = tflite::GetModel(modelData); | ||
|
||
// assert model version and runtime version match | ||
if (model->version() != TFLITE_SCHEMA_VERSION) { | ||
failed = true; | ||
error = VERSION_MISMATCH; | ||
|
||
reporter->Report( | ||
"Model provided is schema version %d not equal " | ||
"to supported version %d.", | ||
model->version(), TFLITE_SCHEMA_VERSION); | ||
|
||
return false; | ||
} | ||
|
||
interpreter = new tflite::MicroInterpreter(model, resolver, tensorArena, tensorArenaSize, reporter); | ||
|
||
if (interpreter->AllocateTensors() != kTfLiteOk) { | ||
failed = true; | ||
error = CANNOT_ALLOCATE_TENSORS; | ||
|
||
return false; | ||
} | ||
|
||
input = interpreter->input(0); | ||
output = interpreter->output(0); | ||
error = OK; | ||
|
||
return true; | ||
} | ||
|
||
/** | ||
* Test if the initialization completed fine | ||
*/ | ||
bool initialized() { | ||
return !failed; | ||
} | ||
|
||
/** | ||
* | ||
* @param input | ||
* @param output | ||
* @return | ||
*/ | ||
uint8_t predict(uint8_t *input, uint8_t *output = NULL) { | ||
// abort if initialization failed | ||
if (!initialized()) | ||
return sqrt(-1); | ||
|
||
memcpy(this->input->data.uint8, input, sizeof(uint8_t) * inputSize); | ||
|
||
if (interpreter->Invoke() != kTfLiteOk) { | ||
reporter->Report("Inference failed"); | ||
|
||
return sqrt(-1); | ||
} | ||
|
||
// copy output | ||
if (output != NULL) { | ||
for (uint16_t i = 0; i < outputSize; i++) | ||
output[i] = this->output->data.uint8[i]; | ||
} | ||
|
||
return this->output->data.uint8[0]; | ||
} | ||
|
||
/** | ||
* Run inference | ||
* @return output[0], so you can use it directly if it's the only output | ||
*/ | ||
float predict(float *input, float *output = NULL) { | ||
// abort if initialization failed | ||
if (!initialized()) { | ||
error = NOT_INITIALIZED; | ||
|
||
return sqrt(-1); | ||
} | ||
|
||
// copy input | ||
for (size_t i = 0; i < inputSize; i++) | ||
this->input->data.f[i] = input[i]; | ||
|
||
if (interpreter->Invoke() != kTfLiteOk) { | ||
error = INVOKE_ERROR; | ||
reporter->Report("Inference failed"); | ||
|
||
return sqrt(-1); | ||
} | ||
|
||
// copy output | ||
if (output != NULL) { | ||
for (uint16_t i = 0; i < outputSize; i++) | ||
output[i] = this->output->data.f[i]; | ||
} | ||
|
||
return this->output->data.f[0]; | ||
} | ||
|
||
/** | ||
* Predict class | ||
* @param input | ||
* @return | ||
*/ | ||
uint8_t predictClass(float *input) { | ||
float output[outputSize]; | ||
|
||
predict(input, output); | ||
|
||
return probaToClass(output); | ||
} | ||
|
||
/** | ||
* Get class with highest probability | ||
* @param output | ||
* @return | ||
*/ | ||
uint8_t probaToClass(float *output) { | ||
uint8_t classIdx = 0; | ||
float maxProba = output[0]; | ||
|
||
for (uint8_t i = 1; i < outputSize; i++) { | ||
if (output[i] > maxProba) { | ||
classIdx = i; | ||
maxProba = output[i]; | ||
} | ||
} | ||
|
||
return classIdx; | ||
} | ||
|
||
/** | ||
* Get error | ||
* @return | ||
*/ | ||
TfLiteError getError() { | ||
return error; | ||
} | ||
|
||
/** | ||
* Get error message | ||
* @return | ||
*/ | ||
const char* errorMessage() { | ||
switch (error) { | ||
case OK: | ||
return "No error"; | ||
case VERSION_MISMATCH: | ||
return "Version mismatch"; | ||
case CANNOT_ALLOCATE_TENSORS: | ||
return "Cannot allocate tensors"; | ||
case NOT_INITIALIZED: | ||
return "Interpreter has not been initialized"; | ||
case INVOKE_ERROR: | ||
return "Interpreter invoke() returned an error"; | ||
default: | ||
return "Unknown error"; | ||
} | ||
} | ||
|
||
protected: | ||
bool failed; | ||
TfLiteError error; | ||
uint8_t tensorArena[tensorArenaSize]; | ||
tflite::ErrorReporter *reporter; | ||
tflite::MicroInterpreter *interpreter; | ||
TfLiteTensor *input; | ||
TfLiteTensor *output; | ||
const tflite::Model *model; | ||
}; | ||
} | ||
} |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,28 @@ | ||
// | ||
// Created by Simone on 28/10/2021. | ||
// | ||
|
||
#ifndef ELOQUENTTINYML_TFLITEARM_H
#define ELOQUENTTINYML_TFLITEARM_H

#include "tensorflow_arm/tensorflow/lite/version.h"
#include "tensorflow_arm/tensorflow/lite/schema/schema_generated.h"
#include "tensorflow_arm/tensorflow/lite/micro/all_ops_resolver.h"
#include "tensorflow_arm/tensorflow/lite/micro/micro_error_reporter.h"
#include "tensorflow_arm/tensorflow/lite/micro/micro_interpreter.h"
#include "TfLiteAbstract.h"


namespace Eloquent {
    namespace TinyML {

        /**
         * Run TensorFlow Lite models on ARM (Cortex-M).
         * All behavior comes from TfLiteAbstract; this class only binds the
         * ARM build's tflite::AllOpsResolver as the ops resolver.
         */
        template<size_t inputSize, size_t outputSize, size_t tensorArenaSize>
        class TfLite : public TfLiteAbstract<tflite::AllOpsResolver, inputSize, outputSize, tensorArenaSize> {
        };
    }
}

#endif //ELOQUENTTINYML_TFLITEARM_H
Oops, something went wrong.