#include <float.h>
#include <math.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include "rknn_api.h"
#include "taiic_avsr.h"
#include "rk_defines.h"
#include "rk_debug.h"

/**
 * Initialize the RKNN runtime for the model described by @ctx and cache
 * the model's tensor metadata in the context.
 *
 * Loads the model from ctx->modelPath into ctx->context, then queries the
 * input/output tensor counts (ctx->io_num) and the per-tensor attributes
 * (ctx->input_attrs / ctx->output_attrs, which the caller must have sized
 * for at least n_input / n_output entries — TODO confirm against the
 * AVSR_TOOLKIT_MODEL_CTX_S declaration).
 *
 * @param ctx  model context; modelPath must be set before calling.
 * @return 1 on success, -1 on any rknn_init/rknn_query failure
 *         (matches this module's success-is-1 convention).
 */
int avsr_rknn_toolkit_config_init(AVSR_TOOLKIT_MODEL_CTX_S *ctx)
{
    // Load the RKNN model file into a new runtime context.
    int ret = rknn_init(&ctx->context, ctx->modelPath, 0, 0, NULL);
    if (ret < 0) {
        RK_LOGE("rknn_init fail! ret=%d\n", ret);
        return -1;
    }

    // Query how many input and output tensors the model exposes.
    ret = rknn_query(ctx->context, RKNN_QUERY_IN_OUT_NUM, &ctx->io_num, sizeof(ctx->io_num));
    if (ret != RKNN_SUCC) {
        RK_LOGE("rknn_query fail! ret=%d\n", ret);
        return -1;
    }
    RK_LOGI("model input num: %d, output num: %d\n", ctx->io_num.n_input, ctx->io_num.n_output);

    // Query and dump the attributes of every input tensor.
    RK_LOGI("input tensors:\n");
    memset(ctx->input_attrs, 0, ctx->io_num.n_input * sizeof(rknn_tensor_attr));
    for (uint32_t i = 0; i < ctx->io_num.n_input; i++) {
        ctx->input_attrs[i].index = i;
        ret = rknn_query(ctx->context, RKNN_QUERY_INPUT_ATTR, &(ctx->input_attrs[i]), sizeof(rknn_tensor_attr));
        if (ret < 0) {
            // Fixed: this used to log "rknn_init error!" although rknn_query failed.
            RK_LOGE("rknn_query fail! ret=%d\n", ret);
            return -1;
        }
        dump_tensor_attr(&ctx->input_attrs[i]);
    }

    // Query and dump the attributes of every output tensor (native/NPU layout,
    // so downstream code can handle NC1HWC2 outputs without a runtime convert).
    RK_LOGI("output tensors:\n");
    memset(ctx->output_attrs, 0, ctx->io_num.n_output * sizeof(rknn_tensor_attr));
    for (uint32_t i = 0; i < ctx->io_num.n_output; i++) {
        ctx->output_attrs[i].index = i;
        ret = rknn_query(ctx->context, RKNN_QUERY_NATIVE_OUTPUT_ATTR, &(ctx->output_attrs[i]), sizeof(rknn_tensor_attr));
        if (ret != RKNN_SUCC) {
            RK_LOGE("rknn_query fail! ret=%d\n", ret);
            return -1;
        }
        dump_tensor_attr(&ctx->output_attrs[i]);
    }
    return 1;
}

/**
 * Allocate the zero-copy tensor memories for the model's I/O.
 *
 * Forces every input attribute to uint8/NHWC (the format the raw frames are
 * fed in), allocates one rknn_tensor_mem per input, allocates the single
 * output memory and binds it to the context. Input memories are bound later,
 * per frame, in avsr_rknn_toolkit_data_refresh().
 *
 * @param ctx  context previously initialized by avsr_rknn_toolkit_config_init().
 * @return 1 on success, -1 on allocation or bind failure.
 */
int avsr_rknn_toolkit_io_init(AVSR_TOOLKIT_MODEL_CTX_S *ctx)
{
    // All inputs are fed as quantized uint8 NHWC data.
    rknn_tensor_type input_type = RKNN_TENSOR_UINT8;
    rknn_tensor_format input_layout = RKNN_TENSOR_NHWC;

    // Create one input tensor memory per model input.
    for (uint32_t i = 0; i < ctx->io_num.n_input; i++) {
        ctx->input_attrs[i].type = input_type;
        ctx->input_attrs[i].fmt = input_layout;
        // size_with_stride covers any NPU row padding (w_stride > width).
        ctx->input_mems[i] = rknn_create_mem(ctx->context, ctx->input_attrs[i].size_with_stride);
        if (ctx->input_mems[i] == NULL) {
            // Fixed: allocation result was previously used unchecked.
            RK_LOGE("rknn_create_mem fail for input %u!\n", i);
            return -1;
        }
    }

    // Create the output tensor memory (this module uses a single output).
    ctx->output_mems[0] = rknn_create_mem(ctx->context, ctx->output_attrs[0].size_with_stride);
    if (ctx->output_mems[0] == NULL) {
        RK_LOGE("rknn_create_mem fail for output 0!\n");
        return -1;
    }

    // Bind the output memory to the runtime context.
    if (rknn_set_io_mem(ctx->context, ctx->output_mems[0], &ctx->output_attrs[0]) < 0) {
        RK_LOGE("rknn_set_io_mem fail! ret=%d\n", -1);
        return -1;
    }
    return 1;
}

/**
 * Copy a new set of raw input frames into the model's input memories and
 * re-bind them to the runtime context.
 *
 * Assumes NHWC attributes as set by avsr_rknn_toolkit_io_init(), i.e.
 * dims = {N, H, W, C} — dims[1]=height, dims[2]=width, dims[3]=channels.
 * When the NPU row stride (w_stride) exceeds the image width, rows are
 * copied one at a time into the padded destination; otherwise the whole
 * frame is copied in a single memcpy.
 *
 * @param ctx         context with allocated input_mems.
 * @param input_data  one raw frame pointer per model input
 *                    (declared for 2 inputs; n_input is assumed <= 2 —
 *                    NOTE(review): confirm against the model).
 * @return 1 on success, -1 if binding an input memory fails.
 */
int avsr_rknn_toolkit_data_refresh(AVSR_TOOLKIT_MODEL_CTX_S *ctx,  unsigned char* input_data[2])
{
    for (uint32_t idx = 0; idx < ctx->io_num.n_input; idx++)
    {
        rknn_tensor_attr *attr = &ctx->input_attrs[idx];
        int img_w = attr->dims[2];
        int row_stride = attr->w_stride;
        RK_LOGD("====width is %d, stride is %d", img_w, row_stride);

        RK_U8 *src = input_data[idx];
        RK_U8 *dst = (RK_U8 *)ctx->input_mems[idx]->virt_addr;

        if (img_w == row_stride)
        {
            // No padding: the whole H*W*C frame is contiguous in both buffers.
            RK_LOGD("===== == ======\n");
            memcpy(dst, src, img_w * attr->dims[1] * attr->dims[3]);
            RK_LOGD("==data length is %d===\n", (img_w * attr->dims[1] * attr->dims[3]));
        }
        else
        {
            // Padded destination: copy row by row, skipping the stride gap.
            RK_LOGD("===== != ========\n");
            int img_h = attr->dims[1];
            int channels = attr->dims[3];
            int src_row_bytes = img_w * channels;      // packed source row
            int dst_row_bytes = row_stride * channels; // padded destination row
            for (int row = 0; row < img_h; ++row)
            {
                memcpy(dst + (size_t)row * dst_row_bytes,
                       src + (size_t)row * src_row_bytes,
                       src_row_bytes);
            }
        }

        // Re-bind this input memory so the runtime picks up the new data.
        if (rknn_set_io_mem(ctx->context, ctx->input_mems[idx],  &ctx->input_attrs[idx]) < 0 )
        {
            RK_LOGE("rknn_set_io_mem fail! ");
            return -1;
        }
    }
    return 1;
}

/**
 * Dequantize the model's INT8 output, apply softmax, and return the top-1
 * class and its probability.
 *
 * Handles both native NC1HWC2 output (converted via
 * NC1HWC2_int8_to_NCHW_float) and plain layouts (linear dequantization with
 * the tensor's zp/scale).
 *
 * Fixes over the previous revision:
 *  - both scratch buffers were leaked on every call; they are now freed;
 *  - on the NC1HWC2 path the softmax buffer was never filled, so
 *    rknn_GetTopN read uninitialized memory — softmax now runs for both
 *    layouts;
 *  - malloc results are checked.
 *
 * @param ctx  context after a completed rknn_run, output in output_mems[0].
 * @return top-1 result; on allocation failure label=-1, prob=0.
 */
MODEL_RESULT_S avsr_rknn_toolkit_result_int8(AVSR_TOOLKIT_MODEL_CTX_S *ctx)
{
    RK_LOGD("output origin tensors:\n");
    MODEL_RESULT_S rknn_result;
    rknn_result.label = -1;
    rknn_result.prob = 0.0f;

    uint32_t n_elems = ctx->output_attrs[0].n_elems;
    int size = ctx->output_attrs[0].size_with_stride * sizeof(float);

    float *dequant = (float *)malloc(size);   // dequantized output (NCHW order)
    float *softmaxed = (float *)malloc(size); // softmax probabilities
    if (dequant == NULL || softmaxed == NULL) {
        RK_LOGE("malloc fail!\n");
        free(dequant);
        free(softmaxed);
        return rknn_result;
    }

    if (ctx->output_attrs[0].fmt == RKNN_TENSOR_NC1HWC2)
    {
        // Native NPU layout: convert + dequantize in one helper call.
        RK_LOGD("==RKNN_TENSOR_NC1HWC2==\n");
        int channel = ctx->output_attrs[0].dims[1];
        int h = ctx->output_attrs[0].n_dims > 2 ? ctx->output_attrs[0].dims[2] : 1;
        int w = ctx->output_attrs[0].n_dims > 3 ? ctx->output_attrs[0].dims[3] : 1;
        int zp = ctx->output_attrs[0].zp;
        float scale = ctx->output_attrs[0].scale;
        NC1HWC2_int8_to_NCHW_float((int8_t *)ctx->output_mems[0]->virt_addr, dequant,
                                   (int *)ctx->output_attrs[0].dims, channel, h, w, zp, scale);
    }
    else
    {
        // Already linear: dequantize element by element.
        RK_LOGD("==RKNN_TENSOR_NCHW==\n");
        int8_t *src = (int8_t *)ctx->output_mems[0]->virt_addr;
        for (uint32_t index = 0; index < n_elems; index++)
        {
            dequant[index] = (float)(src[index] - ctx->output_attrs[0].zp) * ctx->output_attrs[0].scale;
            RK_LOGD("===float num is %d, data is %f==\n", index, dequant[index]);
        }
    }

    // Softmax over all elements — now applied to BOTH layouts (the NC1HWC2
    // path previously skipped it and sorted an uninitialized buffer).
    float expSum = 0.0f;
    for (uint32_t index = 0; index < n_elems; index++)
    {
        expSum += exp(dequant[index]);
    }
    for (uint32_t index = 0; index < n_elems; index++)
    {
        softmaxed[index] = exp(dequant[index]) / expSum;
        RK_LOGI("==softmax num %d,data is %8.6f", index, softmaxed[index]);
    }

    // Pick the top-N classes (N capped by the number of elements).
    uint32_t topNum = 3;
    uint32_t MaxClass[3];
    float fMaxProb[3];
    uint32_t top_count = n_elems > topNum ? topNum : n_elems;
    rknn_GetTopN(softmaxed, fMaxProb, MaxClass, n_elems, top_count);

    RK_LOGD("---- Top%d ----\n", top_count);
    for (uint32_t j = 0; j < top_count; j++)
    {
        RK_LOGD("%8.6f - %d\n", fMaxProb[j], MaxClass[j]);
    }

    rknn_result.label = MaxClass[0];
    rknn_result.prob = fMaxProb[0];

    // Fixed: these buffers were previously leaked on every call.
    free(dequant);
    free(softmaxed);
    return rknn_result;
}

/**
 * Numerically-stable variant of avsr_rknn_toolkit_result_int8(): subtracts
 * the max logit before exponentiating (softmax(x) == softmax(x - max(x)),
 * avoiding exp() overflow for large logits) and returns the top-1 result.
 *
 * Fixes over the previous revision:
 *  - both scratch buffers were leaked on every call; they are now freed;
 *  - the dequantization loop ran twice with identical results — the
 *    redundant second pass is removed;
 *  - the NC1HWC2 path never filled the softmax buffer, so rknn_GetTopN
 *    read uninitialized memory — max/softmax now run for both layouts;
 *  - C++-only std::numeric_limits<float>::lowest() replaced with the
 *    equivalent -FLT_MAX from <float.h>;
 *  - malloc results are checked.
 *
 * @param ctx  context after a completed rknn_run, output in output_mems[0].
 * @return top-1 result; on allocation failure label=-1, prob=0.
 */
MODEL_RESULT_S avsr_rknn_toolkit_result_int8_opt(AVSR_TOOLKIT_MODEL_CTX_S *ctx)
{
    RK_LOGD("output origin tensors:\n");
    MODEL_RESULT_S rknn_result;
    rknn_result.label = -1;
    rknn_result.prob = 0.0f;

    uint32_t n_elems = ctx->output_attrs[0].n_elems;
    int size = ctx->output_attrs[0].size_with_stride * sizeof(float);

    float *dequant = (float *)malloc(size);   // dequantized output (NCHW order)
    float *softmaxed = (float *)malloc(size); // softmax probabilities
    if (dequant == NULL || softmaxed == NULL) {
        RK_LOGE("malloc fail!\n");
        free(dequant);
        free(softmaxed);
        return rknn_result;
    }

    if (ctx->output_attrs[0].fmt == RKNN_TENSOR_NC1HWC2)
    {
        // Native NPU layout: convert + dequantize in one helper call.
        RK_LOGD("==RKNN_TENSOR_NC1HWC2==\n");
        int channel = ctx->output_attrs[0].dims[1];
        int h = ctx->output_attrs[0].n_dims > 2 ? ctx->output_attrs[0].dims[2] : 1;
        int w = ctx->output_attrs[0].n_dims > 3 ? ctx->output_attrs[0].dims[3] : 1;
        int zp = ctx->output_attrs[0].zp;
        float scale = ctx->output_attrs[0].scale;
        NC1HWC2_int8_to_NCHW_float((int8_t *)ctx->output_mems[0]->virt_addr, dequant,
                                   (int *)ctx->output_attrs[0].dims, channel, h, w, zp, scale);
    }
    else
    {
        // Already linear: dequantize element by element (single pass — the
        // old code repeated this loop verbatim before computing the sum).
        RK_LOGD("==RKNN_TENSOR_NCHW==\n");
        int8_t *src = (int8_t *)ctx->output_mems[0]->virt_addr;
        for (uint32_t index = 0; index < n_elems; index++)
        {
            dequant[index] = (float)(src[index] - ctx->output_attrs[0].zp) * ctx->output_attrs[0].scale;
            RK_LOGD("===float num is %d, data is %f==\n", index, dequant[index]);
        }
    }

    // Stable softmax: shift by the max logit so exp() cannot overflow.
    // Now applied to BOTH layouts (the NC1HWC2 path previously skipped it
    // and sorted an uninitialized buffer).
    float maxValue = -FLT_MAX;
    for (uint32_t index = 0; index < n_elems; index++)
    {
        if (dequant[index] > maxValue)
        {
            maxValue = dequant[index];
        }
    }
    float expSum = 0.0f;
    for (uint32_t index = 0; index < n_elems; index++)
    {
        expSum += exp(dequant[index] - maxValue);
    }
    for (uint32_t index = 0; index < n_elems; index++)
    {
        softmaxed[index] = exp(dequant[index] - maxValue) / expSum;
        RK_LOGI("==softmax num %d,data is %8.6f", index, softmaxed[index]);
    }

    // Pick the top-N classes (N capped by the number of elements).
    uint32_t topNum = 3;
    uint32_t MaxClass[3];
    float fMaxProb[3];
    uint32_t top_count = n_elems > topNum ? topNum : n_elems;
    rknn_GetTopN(softmaxed, fMaxProb, MaxClass, n_elems, top_count);

    RK_LOGD("---- Top%d ----\n", top_count);
    for (uint32_t j = 0; j < top_count; j++)
    {
        RK_LOGD("%8.6f - %d\n", fMaxProb[j], MaxClass[j]);
    }

    rknn_result.label = MaxClass[0];
    rknn_result.prob = fMaxProb[0];

    // Fixed: these buffers were previously leaked on every call.
    free(dequant);
    free(softmaxed);
    return rknn_result;
}
