#include "lynxi_common.h"
#include "evdeploy/utils/report/tool_report.h"
#include <chrono>
#include <numeric>

namespace ev
{

    // Construction only announces itself; all real setup happens in Init().
    LynxiModel::LynxiModel()
    {
        EVLOG(INFO) << "this is a Lynxi engine";
    }

    // Tear down any live resources; UnInit() reports EV_REPEATED_OP (and does
    // nothing) when the instance was never initialized.
    LynxiModel::~LynxiModel()
    {
        UnInit();
    }

    // Release device buffers, the host output buffer, the loaded model, the
    // stream and the context, in that order.
    // @return EV_SUCCESS when resources were released, EV_REPEATED_OP when the
    //         instance is not (or no longer) initialized.
    EVStatus LynxiModel::UnInit()
    {
        if (!m_init_flag)
        {
            return EV_REPEATED_OP;
        }
        EVLOG(INFO) << "uninit LynxiModel";
        // Guard BOTH device pointers before lynFree: m_dev_outputs is only
        // allocated in EP run mode, and the original code freed m_dev_inputs
        // unconditionally — unsafe after a partial/failed Init.
        if (m_dev_inputs)
        {
            CHECK_LYNXI_API(lynFree(m_dev_inputs));
            m_dev_inputs = nullptr;
        }
        if (m_dev_outputs)
        {
            CHECK_LYNXI_API(lynFree(m_dev_outputs));
            m_dev_outputs = nullptr;
        }
        free(m_host_outputs); // free(nullptr) is a no-op, no guard needed
        m_host_outputs = nullptr;
        CHECK_LYNXI_API(lynUnloadModel(m_lynxi_model));
        CHECK_LYNXI_API(lynDestroyStream(m_stream));
        CHECK_LYNXI_API(lynDestroyContext(m_ctx));
        m_init_flag = false;
        return EV_SUCCESS;
    }

    // Load the model file, log the tensor layout, and allocate the device/host
    // buffers used by Infer().
    // @param net    path to the model file
    // @param config backend-specific configuration (unused here)
    // @return EV_SUCCESS on success, EV_REPEATED_OP if already initialized,
    //         EV_FAIL if the host output buffer cannot be allocated.
    EVStatus LynxiModel::Init(const char *net, void **config)
    {
        if (m_init_flag)
        {
            EVLOG(WARNING) << "call init multiple times";
            return EV_REPEATED_OP;
        }

        ToolReport report;
        // report usage info (best effort; failure is not fatal)
        int code = report.report(3, "ev_model_common-arm-lynxi", "V1.1.0", "common model interface, lynxi version");
        if (code == 0)
        {
            EVLOG(INFO) << "report done successfully";
        }
        std::string model_path(net);
        LoadLynxiModel(model_path);
        CHECK_LYNXI_API(lynGetRunMode(&m_run_mode));
        EVLOG(INFO) << "run mode:" << (m_run_mode == lynRunMode_t::EP ? "EP" : "RC");
        // Query tensor metadata from the loaded model.
        CHECK_LYNXI_API(lynModelGetDesc(m_lynxi_model, &m_model_desc));
        // Render a tensor shape such as "[1,3,224,224]". The original inline
        // loops never emitted the closing "]" when dimCount == 0 and compared
        // signed i against an unsigned count; both are fixed here.
        auto format_dims = [](const auto &attr) {
            std::string dims_info = "[";
            for (uint32_t j = 0; j < attr.dimCount; ++j)
            {
                dims_info += std::to_string(attr.dims[j]);
                if (j + 1 < attr.dimCount)
                {
                    dims_info += ",";
                }
            }
            dims_info += "]";
            return dims_info;
        };
        for (uint32_t i = 0; i < m_model_desc->inputTensorAttrArrayNum; i++)
        {
            EVLOG(INFO) << "input " << i << " shape:" << format_dims(m_model_desc->inputTensorAttrArray[i])
                        << " dataLen:" << m_model_desc->inputTensorAttrArray[i].dataLen;
        }
        for (uint32_t i = 0; i < m_model_desc->outputTensorAttrArrayNum; i++)
        {
            EVLOG(INFO) << "output " << i << " shape:" << format_dims(m_model_desc->outputTensorAttrArray[i])
                        << " dataLen:" << m_model_desc->outputTensorAttrArray[i].dataLen;
        }
        // Host-side staging buffer for all model outputs (packed back-to-back).
        m_host_outputs = malloc(m_model_desc->outputDataLen);
        if (m_host_outputs == nullptr)
        {
            EVLOG(ERROR) << "failed to allocate host output buffer, size:" << m_model_desc->outputDataLen;
            return EV_FAIL;
        }
        CHECK_LYNXI_API(lynMalloc((void **)&m_dev_inputs, m_model_desc->inputDataLen));
        // A device-side output buffer is only needed in EP mode; in RC mode the
        // model writes straight into host memory (see Infer()).
        if (m_run_mode == lynRunMode_t::EP)
        {
            CHECK_LYNXI_API(lynMalloc((void **)&m_dev_outputs, m_model_desc->outputDataLen));
        }
        CHECK_LYNXI_API(lynCreateContext(&m_ctx, 0));
        CHECK_LYNXI_API(lynCreateStream(&m_stream));
        m_init_flag = true;
        return EV_SUCCESS;
    }

    // Run one inference pass: copy inputs to the device input buffer, execute
    // the model, and expose the packed host-side outputs through `out`.
    // @param in  caller-owned inputs; mat_num/data_size must match the model.
    // @param out filled with one EVMatData per model output. The data pointers
    //            alias m_host_outputs and are only valid until the next
    //            Infer()/UnInit() call.
    // @return EV_SUCCESS, or EV_FAIL on validation failure / uninitialized model.
    EVStatus LynxiModel::Infer(EVModelData *in, EVModelData *out)
    {
        if (!m_init_flag)
        {
            EVLOG(ERROR) << "infer failed, model was not initialized";
            return EV_FAIL;
        }
        REC_TIME(t0);
        if (static_cast<size_t>(in->mat_num) != m_model_desc->inputTensorAttrArrayNum)
        {
            EVLOG(ERROR) << "Model requires: " << m_model_desc->inputTensorAttrArrayNum << " inputs but " << in->mat_num
                         << " is provided.";
            return EV_FAIL;
        }

        // Pack every input tensor back-to-back into the single device input
        // buffer allocated in Init().
        void *i_data = m_dev_inputs;
        for (size_t i = 0; i < m_model_desc->inputTensorAttrArrayNum; i++)
        {
            if (static_cast<size_t>(in->mat[i].data_size) != m_model_desc->inputTensorAttrArray[i].dataLen)
            {
                EVLOG(ERROR) << "input size mismatch, got " << i << "," << in->mat[i].data_size << " but requests "
                             << m_model_desc->inputTensorAttrArray[i].dataLen;
                return EV_FAIL;
            }
            if (m_run_mode == lynRunMode_t::EP)
            {
                // EP: inputs live on the device; copy asynchronously on our stream.
                CHECK_LYNXI_API(
                    lynMemcpyAsync(m_stream, i_data, in->mat[i].data, in->mat[i].data_size, ClientToServer));
            }
            else
            {
                // RC: device memory is directly addressable; plain memcpy suffices.
                memcpy(i_data, in->mat[i].data, in->mat[i].data_size);
            }
            i_data = (uint8_t *)i_data + m_model_desc->inputTensorAttrArray[i].dataLen;
        }

        // EP mode writes to a device buffer that is copied back afterwards;
        // RC mode lets the model write straight into the host buffer.
        void *o_data = m_host_outputs;
        if (m_run_mode == lynRunMode_t::EP)
        {
            o_data = m_dev_outputs;
        }
        // Execute the model (batch size 1), then drain the stream.
        CHECK_LYNXI_API(lynExecuteModelAsync(m_stream, m_lynxi_model, m_dev_inputs, o_data, 1));
        if (m_run_mode == lynRunMode_t::EP)
        {
            CHECK_LYNXI_API(
                lynMemcpyAsync(m_stream, m_host_outputs, m_dev_outputs, m_model_desc->outputDataLen, ServerToClient));
        }
        CHECK_LYNXI_API(lynSynchronizeStream(m_stream));
        // NOTE(review): out->mat is allocated with new[] on every call — the
        // caller is presumably responsible for delete[]; confirm the contract,
        // otherwise repeated Infer() calls leak.
        out->mat = new EVMatData[m_model_desc->outputTensorAttrArrayNum];
        out->mat_num = m_model_desc->outputTensorAttrArrayNum;
        uint8_t *buffer = (uint8_t *)m_host_outputs;
        for (size_t i = 0; i < m_model_desc->outputTensorAttrArrayNum; ++i)
        {
            out->mat[i].type = EVDataFormat::EV_FLOAT;
            out->mat[i].loc = EV_DATA_HOST;
            out->mat[i].data_size = m_model_desc->outputTensorAttrArray[i].dataLen;
            out->mat[i].data = buffer;
            // Bounds-guard the name lookup: the original indexed m_output_names
            // unconditionally, which is out-of-bounds (UB) when SetIO() was not
            // called or supplied fewer names than the model has outputs.
            if (i < m_output_names.size())
            {
                out->mat[i].desc = m_output_names[i];
            }
            out->mat[i].dims.assign(m_model_desc->outputTensorAttrArray[i].dims,
                                    m_model_desc->outputTensorAttrArray[i].dims +
                                        m_model_desc->outputTensorAttrArray[i].dimCount);
            buffer = buffer + m_model_desc->outputTensorAttrArray[i].dataLen;
        }
        REC_TIME(t1);
        EVLOG(INFO) << "inference time(ms):" << RUN_TIME(t1 - t0);
        return EV_SUCCESS;
    }

    // No-op stub: no runtime optimization is performed for this backend; the
    // model_name parameter is ignored. Always returns EV_SUCCESS.
    EVStatus LynxiModel::OptimizeModel(const char *model_name) { return EV_SUCCESS; }

    // Store the caller-supplied tensor names. Output names are later attached
    // to the EVMatData descriptors produced by Infer().
    // @param inputs  names for the model inputs (kept for reference)
    // @param outputs names for the model outputs
    // @return always EV_SUCCESS
    EVStatus LynxiModel::SetIO(const std::vector<std::string> &inputs, const std::vector<std::string> &outputs)
    {
        m_input_names.assign(inputs.begin(), inputs.end());
        m_output_names.assign(outputs.begin(), outputs.end());
        EVLOG(INFO) << "set IO done";
        return EV_SUCCESS;
    }

    // Project macros that expand to the exported create/destroy entry points
    // for this engine implementation.
    CREATE_INFER_INS(LynxiModel)
    DESTROY_INFER_INS(LynxiModel)

} // namespace ev
