#include <TensorFlowLite_ESP32.h>
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/


#include "tensorflow/lite/micro/all_ops_resolver.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"
#include "tensorflow/lite/micro/micro_interpreter.h"
#include "tensorflow/lite/micro/system_setup.h"
#include "tensorflow/lite/schema/schema_generated.h"

#include "main_functions.h"
#include "model.h"
#include "constants.h"
#include "output_handler.h"
#include <Arduino.h>

// Globals, used for compatibility with Arduino-style sketches.
// Kept in an anonymous namespace so they are file-local.
namespace {
tflite::ErrorReporter* error_reporter = nullptr;  // Sink for TFLM log/error messages; set in setup().
const tflite::Model* model = nullptr;             // Flatbuffer model mapped from g_model (no copy).
tflite::MicroInterpreter* interpreter = nullptr;  // Runs inference over the model.
TfLiteTensor* input = nullptr;                    // Model input tensor; written as int8 in loop().
TfLiteTensor* output = nullptr;                   // Model output tensor; read as int8 in loop().
int inference_count = 0;                          // Inferences performed in the current cycle.

// Fixed-size arena from which the interpreter allocates all tensors.
// AllocateTensors() in setup() fails if this is too small for the model.
constexpr int kTensorArenaSize = 2000;
uint8_t tensor_arena[kTensorArenaSize];
}  // namespace

// The name of this function is important for Arduino compatibility.
// 翻译：这个函数的名称对于Arduino兼容性很重要。
void setup() {

  // 翻译：设置日志记录。Google风格是避免全局或静态变量，因为生命周期不确定，但由于此处析构函数很简单，所以可以。
  static tflite::MicroErrorReporter micro_error_reporter; 
  error_reporter = &micro_error_reporter;

  // Map the model into a usable data structure. This doesn't involve any
  // copying or parsing, it's a very lightweight operation.
  // 翻译：将模型映射到可用的数据结构中。这不涉及任何复制或解析，这是一个非常轻量级的操作。
  model = tflite::GetModel(g_model);
  if (model->version() != TFLITE_SCHEMA_VERSION) {
    TF_LITE_REPORT_ERROR(error_reporter,
                         "Model provided is schema version %d not equal "
                         "to supported version %d.",
                         model->version(), TFLITE_SCHEMA_VERSION);
    return;
  }

  // This pulls in all the operation implementations we need.
  // NOLINTNEXTLINE(runtime-global-variables)
  // 翻译：此处包含所有所需的操作实现。
  static tflite::AllOpsResolver resolver;

  // Build an interpreter to run the model with.
  // 翻译：构建一个解释器来运行模型。
  static tflite::MicroInterpreter static_interpreter(
      model, resolver, tensor_arena, kTensorArenaSize, error_reporter);
  interpreter = &static_interpreter;

  // Allocate memory from the tensor_arena for the model's tensors.
  // 翻译：为模型的张量分配内存。
  TfLiteStatus allocate_status = interpreter->AllocateTensors();
  if (allocate_status != kTfLiteOk) {
    TF_LITE_REPORT_ERROR(error_reporter, "AllocateTensors() failed");
    return;
  }

  // 翻译：获取模型的输入和输出张量的指针。
  input = interpreter->input(0);
  output = interpreter->output(0);

  // 设置量化参数
  // input->params.scale = 2 * M_PI / 255; // 0.002464
  // input->params.zero_point = 0;

  // output->params.scale = 2.0 / 255; // 0.07843
  // output->params.zero_point = 128;

  // Keep track of how many inferences we have performed.
  // 翻译：跟踪我们已经进行了多少次推理。
  inference_count = 0;
}


// The name of this function is important for Arduino compatibility.
void loop() {
  // Calculate an x value to feed into the model. We compare the current
  // inference_count to the number of inferences per cycle to determine
  // our position within the range of possible x values the model was
  // trained on, and use this to calculate a value.
  // 翻译：计算一个x值，将其输入模型中。
  // 我们将当前的inference_count与每周期的推理次数进行比较，
  // 以确定模型训练时可能的x值的范围，并使用此值计算一个值。
  float position = static_cast<float>(inference_count) /
                   static_cast<float>(kInferencesPerCycle);
  float x = position * kXrange;

  // Place the input directly in the model's input tensor
  // input->data.f[0] = x;

  // Quantize the input from floating-point to integer
  int8_t x_quantized = x / input->params.scale + input->params.zero_point;
  // Place the quantized input in the model's input tensor
  input->data.int8[0] = x_quantized;

  // Run inference, and report any error
  // 翻译：运行推理，并报告任何错误
  TfLiteStatus invoke_status = interpreter->Invoke();
  if (invoke_status != kTfLiteOk) {
    TF_LITE_REPORT_ERROR(error_reporter, "Invoke failed on x: %f\n",
                         static_cast<double>(x));
    return;
  }

  // Obtain the output from model's output tensor
  // float y = output->data.f[0];

  // Obtain the quantized output from model's output tensor
  int8_t y_quantized = output->data.int8[0];
  // Dequantize the output from integer to floating-point
  float y = (y_quantized - output->params.zero_point) * output->params.scale;


  // 翻译：输出结果。对于支持的硬件目标，可以实现自定义HandleOutput函数。
  HandleOutput(error_reporter, x, y);

  // Increment the inference_counter, and reset it if we have reached
  // the total number per cycle 
  // 翻译：递增inference_counter，并在我们达到每周期的总次数时重置它
  inference_count += 1;
  if (inference_count >= kInferencesPerCycle) inference_count = 0;
}
