#include <sys/time.h>

#include <chrono>
#include <cstdio>
#include <cstdlib>
#include <iostream>

#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/kernels/register.h"
#include "tensorflow/lite/model.h"
#include "tensorflow/lite/optional_debug_tools.h"
#include "tensorflow/lite/delegates/gpu/gl_delegate.h"


using namespace tflite;

// Abort with a file:line diagnostic when the condition `x` is false.
// Wrapped in do { } while (0) so the macro expands to a single statement:
// the original bare `if { }` form breaks inside un-braced if/else chains
// (the caller's `else` would bind to the macro's hidden `if`).
#define TFLITE_MINIMAL_CHECK(x)                                \
  do {                                                         \
    if (!(x)) {                                                \
      fprintf(stderr, "Error at %s:%d\n", __FILE__, __LINE__); \
      exit(1);                                                 \
    }                                                          \
  } while (0)

// Returns a monotonic timestamp in microseconds, for measuring elapsed time.
//
// The original gettimeofday() version computed `tv_sec * 1000000` in `long`,
// which overflows on platforms where long is 32 bits, and it also tracked
// wall-clock time, which can jump under NTP adjustments. steady_clock is
// monotonic and the count() arithmetic is done in at least 64 bits.
long getTimeInUs()
{
    using namespace std::chrono;
    const auto now = steady_clock::now().time_since_epoch();
    return static_cast<long>(duration_cast<microseconds>(now).count());
}


int main(int argc, char *argv[])
{
    std::cout << "Hello, World!" << std::endl;
    if (argc != 2)
    {
        fprintf(stderr, "minimal <tflite model>\n");
        return 1;
    }
    const char *filename = argv[1];
    bool gpu = false;
    int run_times = 20;

    if (!gpu)
    {
        // Load model
        std::unique_ptr<tflite::FlatBufferModel> model = tflite::FlatBufferModel::BuildFromFile(filename);
        TFLITE_MINIMAL_CHECK(model != nullptr);

        // Build the interpreter
        tflite::ops::builtin::BuiltinOpResolver resolver;
        InterpreterBuilder builder(*model, resolver);
        std::unique_ptr<Interpreter> interpreter;
        builder(&interpreter);
        TFLITE_MINIMAL_CHECK(interpreter != nullptr);

        // Allocate tensor buffers.
        TFLITE_MINIMAL_CHECK(interpreter->AllocateTensors() == kTfLiteOk);
//        printf("=== Pre-invoke Interpreter State ===\n");
//        tflite::PrintInterpreterState(interpreter.get());
        interpreter->SetNumThreads(4);

        // Fill input buffers
        // TODO(user): Insert code to fill input tensors

        // Run inference
        for (int i = 0; i < run_times; ++i)
        {
            auto start = getTimeInUs();
            TFLITE_MINIMAL_CHECK(interpreter->Invoke() == kTfLiteOk);
            std::cout << (getTimeInUs() - start) / 1000 << "ms" << std::endl;
        }
        //    printf("\n\n=== Post-invoke Interpreter State ===\n");
        //    tflite::PrintInterpreterState(interpreter.get());

        // Read output buffers
        // TODO(user): Insert getting data out code.
    }
    else
    {
        auto model_path = filename;
        // Set up interpreter.
        auto model = FlatBufferModel::BuildFromFile(model_path);
        TFLITE_MINIMAL_CHECK(model != nullptr);
        ops::builtin::BuiltinOpResolver op_resolver;
        std::unique_ptr<Interpreter> interpreter;
        InterpreterBuilder(*model, op_resolver)(&interpreter);
        interpreter->SetNumThreads(4);

        // NEW: Prepare GPU delegate.
        const TfLiteGpuDelegateOptions options = {
                .metadata = NULL,
                .compile_options = {
                        .precision_loss_allowed = 1,  // FP16
                        .preferred_gl_object_type = TFLITE_GL_OBJECT_TYPE_FASTEST,
                        .dynamic_batch_enabled = 0,   // Not fully functional yet
                },
        };
        auto *delegate = TfLiteGpuDelegateCreate(&options);
        TFLITE_MINIMAL_CHECK (interpreter->ModifyGraphWithDelegate(delegate) == kTfLiteOk);

        // Run inference.
        //    WriteToInputTensor(interpreter->typed_input_tensor<float>(0));
        for (int i = 0; i < run_times; ++i)
        {
            auto start = getTimeInUs();
            TFLITE_MINIMAL_CHECK(interpreter->Invoke() == kTfLiteOk);
            std::cout << (getTimeInUs() - start) / 1000 << "ms" << std::endl;
        }
        //    ReadFromOutputTensor(interpreter->typed_output_tensor<float>(0));

        // NEW: Clean up.
        TfLiteGpuDelegateDelete(delegate);
    }

    return 0;
}
