#include "evdeploy/runtime/trt/trt_common.h"
#include "evdeploy/utils/report/tool_report.h"
#include <chrono>
#include <numeric>
#include <vector>

namespace ev
{
    // Default constructor: only logs; real setup is deferred to Init().
    TrtModel::TrtModel() { EVLOG(INFO) << "this is a common engine"; }

    // Destructor releases all buffers and the CUDA stream via UnInit().
    TrtModel::~TrtModel() { UnInit(); }

    /// Release every buffer allocated by Init() and destroy the CUDA stream.
    ///
    /// Idempotent: returns EV_REPEATED_OP when the model is not initialized
    /// (also covers a second call), EV_SUCCESS otherwise.
    EVStatus TrtModel::UnInit()
    {
        if (!m_init_flag)
        {
            return EV_REPEATED_OP;
        }

        EVLOG(INFO) << "uninit TrtModel";

        // Inputs own only a device buffer (cudaMalloc in Init()).
        // Use find() instead of operator[] so we never default-insert.
        for (const auto &name : m_input_names)
        {
            auto it = m_name_2_gpu_buffer.find(name);
            if (it != m_name_2_gpu_buffer.end() && it->second)
            {
                cudaFree(it->second);
                it->second = nullptr;
            }
        }

        // Outputs own a device buffer (cudaMalloc) and a host staging
        // buffer (calloc); release each with its matching deallocator.
        for (const auto &name : m_output_names)
        {
            auto git = m_name_2_gpu_buffer.find(name);
            if (git != m_name_2_gpu_buffer.end() && git->second)
            {
                cudaFree(git->second);
                git->second = nullptr;
            }
            auto hit = m_name_2_host_buffer.find(name);
            if (hit != m_name_2_host_buffer.end() && hit->second)
            {
                free(hit->second);
                hit->second = nullptr;
            }
        }

        // NOTE(review): m_cuda_stream is not created in this file (presumably
        // in LoadTrt) — confirm it is always valid when m_init_flag is set.
        cudaStreamDestroy(m_cuda_stream);
        m_init_flag = false;
        return EV_SUCCESS;
    }

    // init model and infos
    //
    // Loads (or converts, then loads) the TensorRT engine referred to by
    // `net`: if a sibling ".trt" file exists it is loaded directly,
    // otherwise the source model is converted first.  For every name set
    // via SetIO() the binding index, dims and byte size are recorded and
    // the device buffers (plus host staging buffers for outputs) are
    // allocated.  `config` is currently unused.
    //
    // Returns EV_REPEATED_OP when already initialized, EV_SUCCESS otherwise;
    // exits the process on an unresolvable model path or binding name
    // (matching the file's existing fatal-error convention).
    EVStatus TrtModel::Init(const char *net, void **config)
    {
        if (m_init_flag)
        {
            EVLOG(WARNING) << "call init multiple times";
            return EV_REPEATED_OP;
        }

        ToolReport report;
        // report info
        int code =
            report.report(3, "ev_model_common-x86-GPU-Tensorrt8", "V1.0.0", "common model interface, tensorrt version");
        if (code == 0)
        {
            EVLOG(INFO) << "report done successfully";
        }

        std::string strTrtName = net;
        EVLOG(INFO) << "model path:" << net;
        // Prefer a previously converted ".trt" engine next to the model file.
        size_t sep_pos = strTrtName.find_last_of(".");
        strTrtName = strTrtName.substr(0, sep_pos) + ".trt";
        if (CheckFileType(strTrtName.c_str()) == EV_FILE)
        {
            LoadTrt(strTrtName);
        }
        else if (CheckFileType(net) == EV_FILE)
        {
            ConvertEngine(net);
            EVLOG(INFO) << "model convert done!";
            LoadTrt(strTrtName);
        }
        else
        {
            EVLOG(ERROR) << "invalid model or not exist:" << net;
            exit(-1);
        }

        if (m_ev_engine == nullptr)
        {
            EVLOG(INFO) << "wrong++";
        }

        for (size_t i = 0; i < m_input_names.size(); ++i)
        {
            EVLOG(INFO) << "Input names are given: " << m_input_names[i];
        }
        for (size_t i = 0; i < m_output_names.size(); ++i)
        {
            EVLOG(INFO) << "Output names are given: " << m_output_names[i];
        }

        // Byte width of one tensor element.  BUGFIX: the previous code used
        // sizeof(dt), i.e. the size of the DataType *enum* itself (4 bytes),
        // which is only coincidentally correct for FP32/INT32 tensors and
        // wrong for FP16/INT8/BOOL.
        auto element_size = [](DataType dt) -> size_t {
            switch (dt)
            {
            case DataType::kFLOAT:
            case DataType::kINT32:
                return 4;
            case DataType::kHALF:
                return 2;
            case DataType::kINT8:
            case DataType::kBOOL:
                return 1;
            default:
                return 4; // conservative fallback for unknown element types
            }
        };

        // Resolve one binding: record index/dims/byte size, allocate the
        // device buffer, and return the byte size.  Exits on a bad name.
        auto setup_binding = [&](const std::string &name, const char *kind, size_t ordinal) -> size_t {
            int index = m_ev_engine->getBindingIndex(name.c_str());
            if (index < 0)
            {
                EVLOG(ERROR) << "failed to find " << kind << " named: " << name;
                auto bindings_num = m_ev_engine->getNbBindings();
                EVLOG(INFO) << "binding num:" << bindings_num;
                for (int j = 0; j < bindings_num; ++j)
                {
                    EVLOG(INFO) << "layer:" << j << " name:" << m_ev_engine->getBindingName(j);
                }
                exit(-1);
            }
            m_name_2_index[name] = index;

            Dims dim = m_ev_engine->getBindingDimensions(index);
            m_name_2_dims[name] = dim;

            DataType dt = m_ev_engine->getBindingDataType(index);

            // NOTE(review): dynamic shapes (d[k] == -1) would make this count
            // negative; this path assumes fully static dims — confirm.
            int elem_count = 1;
            std::string dims_info = std::string("Model ") + kind + " " + std::to_string(ordinal) + " has shape : (";
            for (int k = 0; k < dim.nbDims; ++k)
            {
                dims_info += std::to_string(dim.d[k]) + ",";
                elem_count *= dim.d[k];
            }
            dims_info[dims_info.size() - 1] = ')';
            size_t byte_size = static_cast<size_t>(elem_count) * element_size(dt);
            dims_info = dims_info + " and data size " + std::to_string(byte_size) + " Bytes";
            EVLOG(INFO) << dims_info;

            m_name_2_size[name] = byte_size;

            void *gpu_buffer = nullptr;
            auto err = cudaMalloc(&gpu_buffer, byte_size);
            if (err != cudaSuccess)
            {
                // Previously ignored: a failed allocation left a nullptr
                // buffer that would crash later inside Infer().
                EVLOG(ERROR) << "cudaMalloc failed for " << name << ": " << cudaGetErrorString(err);
                exit(-1);
            }
            m_name_2_gpu_buffer[name] = gpu_buffer;
            return byte_size;
        };

        for (size_t i = 0; i < m_input_names.size(); ++i)
        {
            setup_binding(m_input_names[i], "input", i);
        }
        for (size_t i = 0; i < m_output_names.size(); ++i)
        {
            size_t byte_size = setup_binding(m_output_names[i], "output", i);
            // Outputs additionally get a zero-initialized host staging buffer
            // that Infer() copies results into (freed in UnInit()).
            m_name_2_host_buffer[m_output_names[i]] = calloc(byte_size, 1);
        }

        m_init_flag = true;
        return EV_SUCCESS;
    }

    // run inference
    //
    // Copies each host-side input to its device buffer, enqueues the engine
    // on m_cuda_stream, copies every output back to its host staging buffer,
    // then fills `out` after synchronizing the stream.
    //
    // NOTE(review): out->mat is allocated with new[] on every call and
    // out->mat[i].data aliases model-owned host buffers (freed in UnInit());
    // callers must delete[] out->mat but must NOT free the data pointers,
    // and each Infer() call overwrites the previous call's output data.
    EVStatus TrtModel::Infer(EVModelData *in, EVModelData *out)
    {
        REC_TIME(t0);
        if (in->mat_num != m_input_names.size())
        {
            EVLOG(ERROR) << "Model requires: " << m_input_names.size() << " inputs but " << in->mat_num
                         << " is provided.";
            exit(-1);
        }

        // BUGFIX: was a variable-length array (a non-standard compiler
        // extension in C++); std::vector is portable and zero-initialized.
        std::vector<void *> bindings(m_input_names.size() + m_output_names.size(), nullptr);

        for (size_t i = 0; i < m_input_names.size(); ++i)
        {
            const std::string &name = m_input_names[i];
            if (m_name_2_size[name] != in->mat[i].data_size)
            {
                EVLOG(ERROR) << i << " input size mismatch: " << in->mat[i].data_size << " vs "
                             << m_name_2_size[name];
                exit(-1);
            }
            auto cp = cudaMemcpyAsync(m_name_2_gpu_buffer[name], in->mat[i].data, m_name_2_size[name],
                                      cudaMemcpyHostToDevice, m_cuda_stream);
            if (cp != cudaSuccess)
            {
                // Previously ignored: a failed upload fed stale data to the engine.
                EVLOG(ERROR) << "H2D copy failed for " << name << ": " << cudaGetErrorString(cp);
                exit(-1);
            }
            bindings[m_name_2_index[name]] = m_name_2_gpu_buffer[name];
        }

        for (size_t i = 0; i < m_output_names.size(); ++i)
        {
            bindings[m_name_2_index[m_output_names[i]]] = m_name_2_gpu_buffer[m_output_names[i]];
        }

        // BUGFIX: enqueueV2 returns false on failure; the result was ignored,
        // silently producing garbage outputs.
        if (!m_ev_context->enqueueV2(bindings.data(), m_cuda_stream, nullptr))
        {
            EVLOG(ERROR) << "enqueueV2 failed";
            exit(-1);
        }

        for (size_t i = 0; i < m_output_names.size(); ++i)
        {
            const std::string &name = m_output_names[i];
            auto cp = cudaMemcpyAsync(m_name_2_host_buffer[name], m_name_2_gpu_buffer[name], m_name_2_size[name],
                                      cudaMemcpyDeviceToHost, m_cuda_stream);
            if (cp != cudaSuccess)
            {
                EVLOG(ERROR) << "D2H copy failed for " << name << ": " << cudaGetErrorString(cp);
                exit(-1);
            }
        }
        cudaStreamSynchronize(m_cuda_stream);

        out->mat_num = m_output_names.size();
        out->mat = new EVMatData[m_output_names.size()];

        for (size_t i = 0; i < m_output_names.size(); ++i)
        {
            const std::string &name = m_output_names[i];
            out->mat[i].loc = EV_DATA_HOST;
            out->mat[i].data_size = m_name_2_size[name];
            // Aliases the model-owned host buffer; valid until UnInit().
            out->mat[i].data = m_name_2_host_buffer[name];
            const Dims &dims = m_name_2_dims[name];
            for (int j = 0; j < dims.nbDims; ++j)
            {
                out->mat[i].dims.push_back(dims.d[j]);
            }
            out->mat[i].desc = name;
        }

        REC_TIME(t1);
        EVLOG(INFO) << "inference time(ms):" << RUN_TIME(t1 - t0);
        return EV_SUCCESS;
    }

    // No-op stub: always reports success; model_name is ignored.
    EVStatus TrtModel::OptimizeModel(const char *model_name) { return EV_SUCCESS; }

    /// Record the input/output tensor names to bind against the engine.
    ///
    /// Init() resolves these names to binding indices, so this must be
    /// called before Init(). Always returns EV_SUCCESS.
    EVStatus TrtModel::SetIO(const std::vector<std::string> &inputs, const std::vector<std::string> &outputs)
    {
        m_input_names.assign(inputs.begin(), inputs.end());
        m_output_names.assign(outputs.begin(), outputs.end());
        return EV_SUCCESS;
    }

    CREATE_INFER_INS(TrtModel)
    DESTROY_INFER_INS(TrtModel)

} // namespace ev