#include "taiic_vsr.h"

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <limits>
#include <vector>

using namespace std;

int vsr_rknn_toolkit_config_init(VSR_TOOLKIT_MODEL_CTX_S *ctx)
{
    // Load the RKNN model from ctx->modelPath and cache the model's input /
    // output tensor attributes into ctx.
    // Returns 0 on success, -1 on any RKNN API failure.

    // Initialize the RKNN context from the model file path.
    int ret = rknn_init(&ctx->context, ctx->modelPath, 0, 0, NULL);
    RK_LOGI("===init model===\n");

    if (ret < 0)
    {
        RK_LOGE("rknn_init fail! ret=%d\n", ret);
        return -1;
    }

    // Query how many input / output tensors the model declares.
    ret = rknn_query(ctx->context, RKNN_QUERY_IN_OUT_NUM, &ctx->io_num, sizeof(rknn_input_output_num));
    if (ret != RKNN_SUCC)
    {
        RK_LOGE("rknn_query fail! ret=%d\n", ret);
        return -1;
    }
    RK_LOGI("model input num: %d, output num: %d\n", ctx->io_num.n_input, ctx->io_num.n_output);

    // Query each input tensor's attributes.
    RK_LOGI("input tensors:\n");
    for (uint32_t i = 0; i < ctx->io_num.n_input; i++)
    {
        ctx->input_attrs[i].index = i;
        ret = rknn_query(ctx->context, RKNN_QUERY_INPUT_ATTR, &(ctx->input_attrs[i]), sizeof(rknn_tensor_attr));
        if (ret < 0)
        {
            // BUGFIX: this message previously said "rknn_init error!" although
            // the failing call here is rknn_query.
            RK_LOGE("rknn_query fail! ret=%d\n", ret);
            return -1;
        }
        dump_tensor_attr(&ctx->input_attrs[i]);
    }

    // Query each output tensor's NATIVE attributes (the NPU-preferred layout,
    // which is what the zero-copy output buffers must match).
    RK_LOGI("output tensors:\n");
    for (uint32_t i = 0; i < ctx->io_num.n_output; i++)
    {
        ctx->output_attrs[i].index = i;
        ret = rknn_query(ctx->context, RKNN_QUERY_NATIVE_OUTPUT_ATTR, &(ctx->output_attrs[i]), sizeof(rknn_tensor_attr));
        if (ret != RKNN_SUCC)
        {
            RK_LOGE("rknn_query fail! ret=%d\n", ret);
            return -1;
        }
        dump_tensor_attr(&ctx->output_attrs[i]);
    }

    return 0;
}

int vsr_rknn_toolkit_io_init(VSR_TOOLKIT_MODEL_CTX_S *ctx)
{
    // Allocate zero-copy tensor memory for the model's input and outputs and
    // bind the output buffers to the RKNN context.
    // Returns 0 on success, -1 on allocation or binding failure.

    // Force the configured type/layout on the input attr before allocating.
    ctx->input_attrs[0].type = INPUT_TYPE;
    // default fmt is NHWC, npu only support NHWC in zero copy mode
    ctx->input_attrs[0].fmt = INPUT_LAYOUT;
    ctx->input_mems[0] = rknn_create_mem(ctx->context, ctx->input_attrs[0].size_with_stride);
    if (ctx->input_mems[0] == NULL)
    {
        // BUGFIX: allocation failure was previously unchecked; a NULL here
        // would be dereferenced later in the data-refresh path.
        RK_LOGE("rknn_create_mem fail for input!\n");
        return -1;
    }

    // Create output tensor memory.
    for (uint32_t i = 0; i < ctx->io_num.n_output; ++i)
    {
        ctx->output_mems[i] = rknn_create_mem(ctx->context, ctx->output_attrs[i].size_with_stride);
        if (ctx->output_mems[i] == NULL)
        {
            RK_LOGE("rknn_create_mem fail for output %u!\n", i);
            return -1;
        }
    }

    // Bind each output buffer to the context. (The input buffer is bound
    // per-frame, together with fresh data, elsewhere.)
    for (uint32_t i = 0; i < ctx->io_num.n_output; ++i)
    {
        int ret = rknn_set_io_mem(ctx->context, ctx->output_mems[i], &ctx->output_attrs[i]);
        if (ret < 0)
        {
            RK_LOGE("rknn_set_io_mem fail! ret=%d\n", ret);
            return -1;
        }
    }

    return 0;
}

int vsr_rknn_toolkit_data_refresh(VSR_TOOLKIT_MODEL_CTX_S *ctx, unsigned char *input_data)
{
    // Copy one frame of input data into the zero-copy input tensor memory,
    // honoring the NPU's row stride, then (re)bind the buffer to the context.
    // Returns 0 on success, -1 if rknn_set_io_mem fails.
    RK_LOGD("===\n");
    const int img_w = ctx->input_attrs[0].dims[2];      // width (NHWC dims: [N,H,W,C])
    const int row_stride = ctx->input_attrs[0].w_stride; // padded width required by the NPU

    RK_LOGD("===width is %d, stride is %d====\n", img_w, row_stride);

    if (img_w == row_stride)
    {
        // No padding: the whole frame is contiguous, one memcpy suffices.
        RK_LOGD("--==-\n");
        const int frame_bytes = img_w * ctx->input_attrs[0].dims[1] * ctx->input_attrs[0].dims[3];
        memcpy(ctx->input_mems[0]->virt_addr, input_data, frame_bytes);
        RK_LOGD("===input data len is %d===\n", frame_bytes);
    }
    else
    {
        // Padded rows: copy row by row, advancing the destination by the
        // stride-sized row while the source stays densely packed.
        RK_LOGD("--!=-\n");
        const int img_h = ctx->input_attrs[0].dims[1];
        const int img_c = ctx->input_attrs[0].dims[3];
        const int src_row_bytes = img_w * img_c;
        const int dst_row_bytes = row_stride * img_c;
        const uint8_t *src = input_data;
        uint8_t *dst = (uint8_t *)ctx->input_mems[0]->virt_addr;
        for (int row = 0; row < img_h; ++row)
        {
            memcpy(dst, src, src_row_bytes);
            src += src_row_bytes;
            dst += dst_row_bytes;
        }
    }

    // Bind the refreshed input buffer to the context.
    const int ret = rknn_set_io_mem(ctx->context, ctx->input_mems[0], &ctx->input_attrs[0]);
    if (ret < 0)
    {
        RK_LOGE("rknn_set_io_mem fail! ret=%d\n", ret);
        return -1;
    }

    return 0;
}

MODEL_RESULT_S vsr_rknn_toolkit_result_int8(VSR_TOOLKIT_MODEL_CTX_S *ctx)
{
    // Dequantize the int8 data of output tensor 0, run softmax over it, and
    // return the top-1 class label and probability.
    RK_LOGD("output origin tensors:\n");
    float sumTmpData = 0;

    int size = ctx->output_attrs[0].size_with_stride * sizeof(float);
    float *output_mems_nchw = (float *)malloc(size); // dequantized raw logits
    float *expTmpData = (float *)malloc(size);       // softmax probabilities

    if (ctx->output_attrs[0].fmt == RKNN_TENSOR_NC1HWC2)
    {
        RK_LOGD("==RKNN_TENSOR_NC1HWC2==\n");
        int channel = ctx->output_attrs[0].dims[1];
        int h = ctx->output_attrs[0].n_dims > 2 ? ctx->output_attrs[0].dims[2] : 1;
        int w = ctx->output_attrs[0].n_dims > 3 ? ctx->output_attrs[0].dims[3] : 1;
        int zp = ctx->output_attrs[0].zp;
        float scale = ctx->output_attrs[0].scale;
        NC1HWC2_int8_to_NCHW_float((int8_t *)ctx->output_mems[0]->virt_addr, output_mems_nchw, (int *)ctx->output_attrs[0].dims,
                                   channel, h, w, zp, scale);
    }
    else
    {
        RK_LOGD("==RKNN_TENSOR_NCHW==\n");
        int8_t *src = (int8_t *)ctx->output_mems[0]->virt_addr;

        // Dequantize: float = (q - zero_point) * scale.
        for (int index = 0; index < ctx->output_attrs[0].n_elems; index++)
        {
            output_mems_nchw[index] = (float)(src[index] - ctx->output_attrs[0].zp) * ctx->output_attrs[0].scale;
            RK_LOGD("===float num is %d, data is %f==\n", index, output_mems_nchw[index]);
        }
    }

    // Softmax over the dequantized logits.
    // BUGFIX: this was previously computed only in the NCHW branch, leaving
    // expTmpData uninitialized for NC1HWC2 outputs (rknn_GetTopN then read
    // garbage). It now runs for both layouts.
    for (int index = 0; index < ctx->output_attrs[0].n_elems; index++)
    {
        sumTmpData += exp(output_mems_nchw[index]);
    }
    for (int index = 0; index < ctx->output_attrs[0].n_elems; index++)
    {
        expTmpData[index] = exp(output_mems_nchw[index]) / sumTmpData;
        RK_LOGD("==softmax num %d,data is %8.6f", index, expTmpData[index]);
    }

    // Pick the top-N classes from the probabilities.
    uint32_t topNum = 3;
    uint32_t MaxClass[topNum];
    float fMaxProb[topNum];

    uint32_t sz = ctx->output_attrs[0].n_elems;
    uint32_t top_count = sz > topNum ? topNum : sz;

    rknn_GetTopN(expTmpData, fMaxProb, MaxClass, sz, top_count);

    RK_LOGD("---- Top%d ----\n", top_count);
    for (uint32_t j = 0; j < top_count; j++)
    {
        RK_LOGD("%8.6f - %d\n", fMaxProb[j], MaxClass[j]);
    }
    MODEL_RESULT_S rknn_result;
    rknn_result.label = MaxClass[0];
    rknn_result.prob = fMaxProb[0];

    // BUGFIX: both scratch buffers previously leaked on every call.
    free(output_mems_nchw);
    free(expTmpData);

    return rknn_result;
}

MODEL_RESULT_S vsr_rknn_toolkit_result_int8_opt(VSR_TOOLKIT_MODEL_CTX_S *ctx)
{
    // Numerically stable variant of vsr_rknn_toolkit_result_int8: subtracts
    // the maximum logit before exponentiation so exp() cannot overflow.
    // Returns the top-1 class label and probability.
    RK_LOGD("output origin tensors:\n");
    float sumTmpData = 0;

    int size = ctx->output_attrs[0].size_with_stride * sizeof(float);
    float *output_mems_nchw = (float *)malloc(size); // dequantized raw logits
    float *expTmpData = (float *)malloc(size);       // softmax probabilities

    // Running maximum, initialized to the smallest representable float.
    float maxValue = std::numeric_limits<float>::lowest();

    if (ctx->output_attrs[0].fmt == RKNN_TENSOR_NC1HWC2)
    {
        RK_LOGD("==RKNN_TENSOR_NC1HWC2==\n");
        int channel = ctx->output_attrs[0].dims[1];
        int h = ctx->output_attrs[0].n_dims > 2 ? ctx->output_attrs[0].dims[2] : 1;
        int w = ctx->output_attrs[0].n_dims > 3 ? ctx->output_attrs[0].dims[3] : 1;
        int zp = ctx->output_attrs[0].zp;
        float scale = ctx->output_attrs[0].scale;
        NC1HWC2_int8_to_NCHW_float((int8_t *)ctx->output_mems[0]->virt_addr, output_mems_nchw, (int *)ctx->output_attrs[0].dims,
                                   channel, h, w, zp, scale);

        // Find the max over the converted logits (needed for stabilization).
        for (int index = 0; index < ctx->output_attrs[0].n_elems; index++)
        {
            if (output_mems_nchw[index] > maxValue)
            {
                maxValue = output_mems_nchw[index];
            }
        }
    }
    else
    {
        RK_LOGD("==RKNN_TENSOR_NCHW==\n");
        int8_t *src = (int8_t *)ctx->output_mems[0]->virt_addr;

        // Dequantize and track the max in a single pass.
        // BUGFIX: the dequantization loop previously ran twice back-to-back,
        // recomputing identical values.
        for (int index = 0; index < ctx->output_attrs[0].n_elems; index++)
        {
            output_mems_nchw[index] = (float)(src[index] - ctx->output_attrs[0].zp) * ctx->output_attrs[0].scale;
            RK_LOGD("===float num is %d, data is %f==\n", index, output_mems_nchw[index]);
            if (output_mems_nchw[index] > maxValue)
            {
                maxValue = output_mems_nchw[index];
            }
        }
    }

    // Stable softmax: exp(x - max) / sum(exp(x - max)).
    // BUGFIX: this was previously computed only in the NCHW branch, leaving
    // expTmpData uninitialized for NC1HWC2 outputs.
    for (int index = 0; index < ctx->output_attrs[0].n_elems; index++)
    {
        sumTmpData += exp(output_mems_nchw[index] - maxValue);
    }
    for (int index = 0; index < ctx->output_attrs[0].n_elems; index++)
    {
        expTmpData[index] = exp(output_mems_nchw[index] - maxValue) / sumTmpData;
        RK_LOGD("==softmax num %d,data is %8.6f", index, expTmpData[index]);
    }

    // Pick the top-N classes from the probabilities.
    uint32_t topNum = 3;
    uint32_t MaxClass[topNum];
    float fMaxProb[topNum];

    uint32_t sz = ctx->output_attrs[0].n_elems;
    uint32_t top_count = sz > topNum ? topNum : sz;

    rknn_GetTopN(expTmpData, fMaxProb, MaxClass, sz, top_count);

    RK_LOGD("---- Top%d ----\n", top_count);
    for (uint32_t j = 0; j < top_count; j++)
    {
        RK_LOGD("%8.6f - %d\n", fMaxProb[j], MaxClass[j]);
    }
    MODEL_RESULT_S rknn_result;
    rknn_result.label = MaxClass[0];
    rknn_result.prob = fMaxProb[0];

    // BUGFIX: both scratch buffers previously leaked on every call.
    free(output_mems_nchw);
    free(expTmpData);

    return rknn_result;
}

MODEL_RESULT_S vsr_rknn_toolkit_result_float32(VSR_TOOLKIT_MODEL_CTX_S *ctx)
{
    // Run softmax over float32 data in output tensor 0 and return the top-1
    // class label and probability.
    RK_LOGD("output origin tensors:\n");
    float sumTmpData = 0;

    int size = ctx->output_attrs[0].size_with_stride * sizeof(float);
    float *output_mems_nchw = (float *)malloc(size); // layout-converted logits
    float *expTmpData = (float *)malloc(size);       // softmax probabilities

    if (ctx->output_attrs[0].fmt == RKNN_TENSOR_NC1HWC2)
    {
        RK_LOGD("==RKNN_TENSOR_NC1HWC2==\n");
        int channel = ctx->output_attrs[0].dims[1];
        int h = ctx->output_attrs[0].n_dims > 2 ? ctx->output_attrs[0].dims[2] : 1;
        int w = ctx->output_attrs[0].n_dims > 3 ? ctx->output_attrs[0].dims[3] : 1;
        int zp = ctx->output_attrs[0].zp;
        float scale = ctx->output_attrs[0].scale;
        // NOTE(review): this helper interprets the buffer as int8 even though
        // this is the float32 path — confirm NC1HWC2 can actually occur here.
        NC1HWC2_int8_to_NCHW_float((int8_t *)ctx->output_mems[0]->virt_addr, output_mems_nchw, (int *)ctx->output_attrs[0].dims,
                                   channel, h, w, zp, scale);

        // Softmax over the converted logits.
        // BUGFIX: this branch previously left expTmpData uninitialized, so
        // rknn_GetTopN read garbage for NC1HWC2 outputs.
        for (int index = 0; index < ctx->output_attrs[0].n_elems; index++)
        {
            sumTmpData += exp(output_mems_nchw[index]);
        }
        for (int index = 0; index < ctx->output_attrs[0].n_elems; index++)
        {
            expTmpData[index] = exp(output_mems_nchw[index]) / sumTmpData;
            RK_LOGD("==softmax num %d,data is %8.6f", index, expTmpData[index]);
        }
    }
    else
    {
        RK_LOGD("==RKNN_TENSOR_NCHW==\n");
        float *src = (float *)ctx->output_mems[0]->virt_addr;

        // Softmax directly over the float32 logits (no dequantization needed).
        for (int index = 0; index < ctx->output_attrs[0].n_elems; index++)
        {
            // BUGFIX: this log line previously had two format specifiers but
            // only one argument (undefined behavior in varargs formatting).
            RK_LOGD("===float num is %d, data is %f==\n", index, src[index]);
            sumTmpData += exp(src[index]);
        }
        for (int index = 0; index < ctx->output_attrs[0].n_elems; index++)
        {
            expTmpData[index] = exp(src[index]) / sumTmpData;
            RK_LOGD("==softmax num %d,data is %8.6f", index, expTmpData[index]);
        }
    }

    // Pick the top-N classes from the probabilities.
    uint32_t topNum = 3;
    uint32_t MaxClass[topNum];
    float fMaxProb[topNum];

    uint32_t sz = ctx->output_attrs[0].n_elems;
    uint32_t top_count = sz > topNum ? topNum : sz;

    rknn_GetTopN(expTmpData, fMaxProb, MaxClass, sz, top_count);

    RK_LOGD("---- Top%d ----\n", top_count);
    for (uint32_t j = 0; j < top_count; j++)
    {
        RK_LOGD("%8.6f - %d\n", fMaxProb[j], MaxClass[j]);
    }
    MODEL_RESULT_S rknn_result;
    rknn_result.label = MaxClass[0];
    rknn_result.prob = fMaxProb[0];

    // BUGFIX: both scratch buffers previously leaked on every call.
    free(output_mems_nchw);
    free(expTmpData);

    return rknn_result;
}

//2024.12.05 add
//2024.12.05 add
// Numerically stable softmax over int8-quantized logits.
// Dequantizes each element as (q - zp) * scale, subtracts the maximum before
// exponentiation (so exp() cannot overflow), and writes the normalized
// probabilities into `output` (caller-provided, at least dataSize floats).
// A non-positive dataSize is a no-op.
void softmax_stab(const int8_t* input, float* output, int dataSize, const int zp, const float scale)
{
    if (dataSize <= 0) {
        return;
    }

    // 1. Dequantize and find the maximum value (for numerical stabilization).
    // BUGFIX: maxVal was previously a function-local `static`, so its value
    // (and the skipped output[0] dequantization) persisted across calls,
    // producing wrong results from the second call onward.
    output[0] = (input[0] - zp) * scale;
    float maxVal = output[0];
    for (int i = 1; i < dataSize; ++i) {
        output[i] = (input[i] - zp) * scale;
        if (output[i] > maxVal) {
            maxVal = output[i];
        }
    }

    // 2. Compute e^(x_i - maxVal) for each element and accumulate the denominator.
    // BUGFIX: sumExp was also `static`, so it kept accumulating across calls.
    float sumExp = 0.0f;
    for (int i = 0; i < dataSize; ++i) {
        output[i] = std::exp(output[i] - maxVal);
        sumExp += output[i];
    }

    // 3. Normalize each element so the outputs sum to 1.
    for (int i = 0; i < dataSize; ++i) {
        output[i] /= sumExp;
    }
}