//
// Created by wumingli on 2023/2/10.
//

#include "espcn.h"

#include <android/log.h>

#include "tensorflow/lite/core/c/c_api.h"
#include <tensorflow/lite/delegates/gpu/api.h>
#include <tensorflow/lite/delegates/gpu/delegate.h>
#include <tensorflow/lite/delegates/gpu/gl_delegate.h>

#define dlog(msg) __android_log_print(ANDROID_LOG_DEBUG, "", msg)
//#define dlog(tag, msg) __android_log_print(ANDROID_LOG_DEBUG, tag, msg)

namespace espcn {

    class ModelHolderGLImpl : public ModelHolder {
    public:
        ModelHolderGLImpl(const void *modelData, size_t modelBytes) :
                modelData(modelData),
                modelBytes(modelBytes),
                model(TfLiteModelCreate(modelData, modelBytes)),
                options(TfLiteInterpreterOptionsCreate()),
                delegateOptions(TfLiteGpuDelegateOptionsV2Default()) {

            delegateOptions.experimental_flags |= TFLITE_GPU_EXPERIMENTAL_FLAGS_GL_ONLY;

            delegate = TfLiteGpuDelegateV2Create(&delegateOptions);
            TfLiteInterpreterOptionsAddDelegate(options, delegate);
        }

        ~ModelHolderGLImpl() {
            modelData = nullptr;

            dlog("TfLiteInterpreterDelete");
            if (interpreter)
                TfLiteInterpreterDelete(interpreter);
            interpreter = nullptr;

            dlog("TfLiteInterpreterOptionsDelete");
            if (options)
                TfLiteInterpreterOptionsDelete(options);
            options = nullptr;

            dlog("TfLiteGpuDelegateDelete");
            if (delegate)
                TfLiteGpuDelegateDelete(delegate);
            delegate = nullptr;

            dlog("TfLiteModelDelete");
            if (model)
                TfLiteModelDelete(model);
            model = nullptr;
        }

        bool configure() {
            return true;
        }

        bool runModel(int hInSsbo, int hOutSsbo) {
//            TfLiteGpuDelegateBindBufferToTensor(delegate, hInSsbo, 0);
//            TfLiteGpuDelegateBindBufferToTensor(delegate, hOutSsbo, 1);

            interpreter = TfLiteInterpreterCreate(model, options);

            TfLiteInterpreterAllocateTensors(interpreter);

            TfLiteInterpreterGetInputTensor(interpreter, 0);
            TfLiteInterpreterGetOutputTensor(interpreter, 0);

            return TfLiteInterpreterInvoke(interpreter) == kTfLiteOk;
        }

    private:
        const void *modelData = nullptr;
        size_t modelBytes;
        TfLiteModel *model = nullptr;
        TfLiteGpuDelegateOptionsV2 delegateOptions;
        TfLiteDelegate *delegate = nullptr;
        TfLiteInterpreterOptions *options = nullptr;
        TfLiteInterpreter *interpreter = nullptr;
    };

#ifdef MODEL_HOLDER_GLV2_IMPL

    /// Factory: wraps raw flatbuffer model bytes in a GL-backed holder.
    /// The buffer is borrowed, not copied — it must outlive the holder.
    /// (Fixed parameter-name typo: modalData -> modelData.)
    ModelHolder *ModelHolder::createModelHolder(const void *modelData, size_t modelBytes) {
        return new ModelHolderGLImpl(modelData, modelBytes);
    }

    /// Destroys a holder created by createModelHolder. The downcast is kept
    /// because ModelHolder's destructor is not visibly virtual here, so
    /// deleting through the base pointer could skip the impl's cleanup —
    /// TODO confirm against espcn.h; switched the C-style cast to static_cast.
    void ModelHolder::deleteModelHolder(ModelHolder *holder) {
        delete static_cast<ModelHolderGLImpl *>(holder);
    }

#endif
}
