
#include "taiic_rknn.h"
// #include "../librknn/rknn_api.h"
#include "stft.h"

#include <float.h>
#include <stdio.h>  /* printf, snprintf */
#include <stdlib.h> /* malloc, free */
#include <string.h> /* memset, strlen */

#include <cmath>
#include <algorithm>

// 获取sdk及驱动版本
// Query and log the RKNN runtime (API) version and NPU driver version.
// Logging-only helper: failures are reported but not propagated.
void get_sdk_driver_version(rknn_context ctx)
{
    rknn_sdk_version sdk_ver = {0};
    int ret = rknn_query(ctx, RKNN_QUERY_SDK_VERSION, &sdk_ver, sizeof(sdk_ver));
    if (ret != RKNN_SUCC)
    {
        // FIX: log the actual return code and bail out — the original fell
        // through and printed the (uninitialized) version strings anyway.
        RK_LOGE("get_sdk_driver_version rknn_query fail! ret=%d\n", ret);
        return;
    }
    RK_LOGD("rknn_api/rknnrt version: %s, driver version: %s\n", sdk_ver.api_version, sdk_ver.drv_version);
}

// 打印输入输出属性列表
// Pretty-print one input/output tensor attribute (dims, layout, quantization).
void dump_tensor_attr(rknn_tensor_attr *attr)
{
    // Render dims as "d0, d1, ..." into a bounded buffer.
    // FIX: the original used unbounded sprintf (possible overflow of dims[128]
    // for long dim lists) and re-ran strlen on every iteration.
    char dims[128] = {0};
    size_t idx = 0;
    for (uint32_t i = 0; i < attr->n_dims; ++i)
    {
        int n = snprintf(&dims[idx], sizeof(dims) - idx, "%d%s", attr->dims[i],
                         (i == attr->n_dims - 1) ? "" : ", ");
        if (n < 0 || (size_t)n >= sizeof(dims) - idx)
        {
            break; // truncated — keep what fits
        }
        idx += (size_t)n;
    }
    printf("  index=%d, name=%s, n_dims=%d, dims=[%s], n_elems=%d, size=%d, fmt=%s, type=%s, qnt_type=%s, "
            "zp=%d, scale=%f\n",
            attr->index, attr->name, attr->n_dims, dims, attr->n_elems, attr->size, get_format_string(attr->fmt),
            get_type_string(attr->type), get_qnt_type_string(attr->qnt_type), attr->zp, attr->scale);
}

// Initialize the RKNN runtime for this (single-input / single-output) model:
// load the model, query I/O attributes, and allocate zero-copy I/O buffers.
// Returns RK_SUCCESS on success, RK_FAILURE on any runtime error.
int rknn_toolkit_config_init(TOOLKIT_MODEL_CTX_S *ctx)
{
    ctx->modelPath = RKNN_MODEL_PATH; //"/oem/usr/model/lip.rknn"

    // FIX: the original ignored rknn_init's return value and kept using an
    // invalid context on failure.
    int ret = rknn_init(&ctx->context, ctx->modelPath, 0, 0, NULL);
    if (ret != RKNN_SUCC)
    {
        RK_LOGE("rknn_init fail! ret=%d\n", ret);
        return RK_FAILURE;
    }
    sleep(1); // NOTE(review): delay kept from original — presumably lets the NPU settle; confirm necessity
    get_sdk_driver_version(ctx->context);

    // Number of model inputs/outputs.
    ret = rknn_query(ctx->context, RKNN_QUERY_IN_OUT_NUM, &ctx->io_num, sizeof(ctx->io_num));
    if (ret != RKNN_SUCC)
    {
        RK_LOGE("get model io info rknn_query fail! ret=%d\n", ret);
        return RK_FAILURE;
    }
    RK_LOGD("=====model input num: %d, output num: %d======\n", ctx->io_num.n_input, ctx->io_num.n_output);

    // Input tensor attributes (index 0 only — assumes a single-input model).
    ctx->input_attrs[0].index = 0;
    ret = rknn_query(ctx->context, RKNN_QUERY_INPUT_ATTR, &(ctx->input_attrs[0]), sizeof(rknn_tensor_attr));
    if (ret != RKNN_SUCC)
    {
        RK_LOGE("input attr rknn_query fail! ret=%d\n", ret);
        return RK_FAILURE;
    }
    dump_tensor_attr(&ctx->input_attrs[0]);

    // Output tensor attributes (index 0 only).
    ctx->output_attrs[0].index = 0;
    ret = rknn_query(ctx->context, RKNN_QUERY_OUTPUT_ATTR, &(ctx->output_attrs[0]), sizeof(rknn_tensor_attr));
    if (ret != RKNN_SUCC)
    {
        RK_LOGE("output tensor attr rknn_query fail! ret=%d\n", ret);
        return RK_FAILURE;
    }
    dump_tensor_attr(&ctx->output_attrs[0]);

    // Optional custom string embedded in the model at conversion time.
    rknn_custom_string custom_string;
    ret = rknn_query(ctx->context, RKNN_QUERY_CUSTOM_STRING, &custom_string, sizeof(custom_string));
    if (ret != RKNN_SUCC)
    {
        RK_LOGE("get custom rknn_query fail! ret=%d\n", ret);
        return RK_FAILURE; // FIX: was a bare -1, inconsistent with RK_FAILURE used elsewhere
    }
    RK_LOGD("custom string: %s\n", custom_string.string);

    // The model is fed UINT8 NHWC data through zero-copy buffers.
    ctx->input_type = RKNN_TENSOR_UINT8;
    ctx->input_layout = RKNN_TENSOR_NHWC;
    ctx->input_attrs[0].type = ctx->input_type;
    ctx->input_attrs[0].fmt = ctx->input_layout;
    // FIX: rknn_create_mem results were never checked.
    ctx->input_mems[0] = rknn_create_mem(ctx->context, ctx->input_attrs[0].size_with_stride);
    if (ctx->input_mems[0] == NULL)
    {
        RK_LOGE("rknn_create_mem for input fail!\n");
        return RK_FAILURE;
    }
    // NOTE(review): the input memory is not bound via rknn_set_io_mem here —
    // presumably done at inference time by the caller; confirm.

    ctx->output_mems[0] = rknn_create_mem(ctx->context, ctx->output_attrs[0].size_with_stride);
    if (ctx->output_mems[0] == NULL)
    {
        RK_LOGE("rknn_create_mem for output fail!\n");
        return RK_FAILURE;
    }
    // Request FLOAT32 output from the runtime, then bind the output buffer.
    ctx->output_attrs[0].type = RKNN_TENSOR_FLOAT32;
    ret = rknn_set_io_mem(ctx->context, ctx->output_mems[0], &ctx->output_attrs[0]);
    if (ret < 0)
    {
        RK_LOGE("rknn_set_io_mem fail! ret=%d\n", ret);
        return RK_FAILURE;
    }
    return RK_SUCCESS;
}

// Select the top-N largest probabilities (selection-sort style).
//   pfProb      : [in]  outputCount class probabilities
//   pfMaxProb   : [out] top probabilities, descending (topNum entries)
//   pMaxClass   : [out] matching class indices (topNum entries)
//   outputCount : number of classes in pfProb
//   topNum      : capacity of the two output arrays
// Only the first min(outputCount, topNum) output slots are filled with real
// results; the rest stay at the -FLT_MAX / UINT32_MAX sentinels.
// Returns 1 (kept from original for caller compatibility).
int rknn_GetTopN(float *pfProb, float *pfMaxProb, uint32_t *pMaxClass, uint32_t outputCount, uint32_t topNum)
{
    uint32_t i, j, k;
    uint32_t top_count = outputCount > topNum ? topNum : outputCount;

    for (i = 0; i < topNum; ++i)
    {
        pfMaxProb[i] = -FLT_MAX;
        pMaxClass[i] = (uint32_t)-1; // sentinel: no class selected yet
    }

    for (j = 0; j < top_count; j++)
    {
        for (i = 0; i < outputCount; i++)
        {
            // FIX: the original hard-coded comparisons against pMaxClass[0..4]
            // (5 slots), reading past the caller's arrays whenever topNum < 5
            // (the caller in this file passes topNum = 3). Only slots filled in
            // earlier iterations (< j) can hold a selected class.
            int already_taken = 0;
            for (k = 0; k < j; ++k)
            {
                if (pMaxClass[k] == i)
                {
                    already_taken = 1;
                    break;
                }
            }
            if (already_taken)
            {
                continue;
            }

            float prob = pfProb[i];
            if (prob > pfMaxProb[j])
            {
                pfMaxProb[j] = prob;
                pMaxClass[j] = i;
            }
        }
    }

    return 1;
}
// Repack an int8 tensor from the NPU-native NC1HWC2 layout to NCHW while
// de-quantizing each element: dst = (src - zp) * scale.
//   dims    : source dims as {N, C1, H, W, C2} (channel split into C1 x C2)
//   channel : destination channel count (C, with C <= C1*C2)
//   h, w    : destination spatial size
// Returns 0.
int NC1HWC2_int8_to_NCHW_float(const int8_t *src, float *dst, int *dims, int channel, int h, int w, int zp, float scale)
{
    int batch = dims[0];
    int C1 = dims[1];
    int C2 = dims[4];
    int hw_src = dims[2] * dims[3];
    int hw_dst = h * w;
    for (int i = 0; i < batch; i++)
    {
        // FIX: the original advanced src/dst cumulatively each iteration
        // (src = src + i * stride on an already-advanced pointer), which
        // produced wrong offsets for batch > 2. Compute per-batch bases
        // from the untouched function arguments instead.
        const int8_t *src_b = src + (size_t)i * C1 * hw_src * C2;
        float *dst_b = dst + (size_t)i * channel * hw_dst;
        for (int c = 0; c < channel; ++c)
        {
            const int8_t *src_c = src_b + (c / C2) * hw_src * C2; // C1-plane base
            int offset = c % C2;                                  // lane within C2
            for (int cur_h = 0; cur_h < h; ++cur_h)
            {
                for (int cur_w = 0; cur_w < w; ++cur_w)
                {
                    int cur_hw = cur_h * w + cur_w;
                    dst_b[c * hw_dst + cur_hw] = (src_c[C2 * cur_hw + offset] - zp) * scale; // int8 --> float
                }
            }
        }
    }

    return 0;
}

// Repack an int8 tensor from the NPU-native NC1HWC2 layout to NCHW without
// de-quantization (raw int8 copy).
//   dims    : source dims as {N, C1, H, W, C2}
//   channel : destination channel count (C, with C <= C1*C2)
//   h, w    : destination spatial size
// Returns 0.
int NC1HWC2_int8_to_NCHW_int8(const int8_t *src, int8_t *dst, int *dims, int channel, int h, int w)
{
    int batch = dims[0];
    int C1 = dims[1];
    int C2 = dims[4];
    int hw_src = dims[2] * dims[3];
    int hw_dst = h * w;
    for (int i = 0; i < batch; i++)
    {
        // FIX: same cumulative-pointer bug as the float variant — the original
        // advanced src/dst by i * stride on already-advanced pointers, which is
        // wrong for batch > 2. Use per-batch bases off the original arguments.
        const int8_t *src_b = src + (size_t)i * C1 * hw_src * C2;
        int8_t *dst_b = dst + (size_t)i * channel * hw_dst;
        for (int c = 0; c < channel; ++c)
        {
            const int8_t *src_c = src_b + (c / C2) * hw_src * C2; // C1-plane base
            int offset = c % C2;                                  // lane within C2
            for (int cur_h = 0; cur_h < h; ++cur_h)
            {
                for (int cur_w = 0; cur_w < w; ++cur_w)
                {
                    int cur_hw = cur_h * w + cur_w;
                    dst_b[c * hw_dst + cur_hw] = src_c[C2 * cur_hw + offset];
                }
            }
        }
    }

    return 0;
}

// Post-process the DCCRN model output: de-quantize the raw NPU output into a
// newly-allocated float buffer.
// Ownership of result.result transfers to the caller (caller must free()).
// On failure, result.result is NULL and result.sz is 0.
DCCRN_RESULT_S dccrn_rknn_toolkit_result(TOOLKIT_MODEL_CTX_S *ctx)
{
    RK_LOGI("output origin tensors:\n");

    DCCRN_RESULT_S rknn_result;
    rknn_result.result = NULL;
    rknn_result.sz = 0;

    // Original (pre-runtime-conversion) output attributes.
    ctx->orig_output_attrs[0].index = 0;
    int ret = rknn_query(ctx->context, RKNN_QUERY_OUTPUT_ATTR, &(ctx->orig_output_attrs[0]), sizeof(rknn_tensor_attr));
    if (ret != RKNN_SUCC)
    {
        // FIX: the original logged the failure, then used the uninitialized
        // attrs anyway. Bail out with an empty result instead.
        RK_LOGE("rknn_query fail! ret=%d\n", ret);
        return rknn_result;
    }
    dump_tensor_attr(&ctx->orig_output_attrs[0]);

    int size = ctx->orig_output_attrs[0].size_with_stride * sizeof(float);
    float *out_nchw = (float *)malloc(size);
    if (out_nchw == NULL)
    {
        // FIX: the original never checked the allocation.
        RK_LOGE("malloc %d bytes fail!\n", size);
        return rknn_result;
    }

    if (ctx->output_attrs[0].fmt == RKNN_TENSOR_NC1HWC2)
    {
        RK_LOGI("===    nc1hwc2   ====\n");
        // NPU-native layout: de-quantize and repack to NCHW float.
        int channel = ctx->orig_output_attrs[0].dims[1];
        int h = ctx->orig_output_attrs[0].n_dims > 2 ? ctx->orig_output_attrs[0].dims[2] : 1;
        int w = ctx->orig_output_attrs[0].n_dims > 3 ? ctx->orig_output_attrs[0].dims[3] : 1;
        NC1HWC2_int8_to_NCHW_float((int8_t *)ctx->output_mems[0]->virt_addr, out_nchw,
                                   (int *)ctx->output_attrs[0].dims, channel, h, w,
                                   ctx->output_attrs[0].zp, ctx->output_attrs[0].scale);
    }
    else
    {
        RK_LOGI("===    nchw   ====\n");
        // Already NCHW: element-wise int8 -> float de-quantization.
        int8_t *src = (int8_t *)ctx->output_mems[0]->virt_addr;
        for (uint32_t index = 0; index < ctx->output_attrs[0].n_elems; index++)
        {
            out_nchw[index] = (src[index] - ctx->output_attrs[0].zp) * ctx->output_attrs[0].scale;
        }
    }

    rknn_result.result = out_nchw;
    rknn_result.sz = ctx->orig_output_attrs[0].n_elems;
    return rknn_result;
}

// Post-process a classification output: de-quantize if needed, apply softmax,
// then take the Top-3 classes. Returns the best class label and probability.
// On failure, returns a zeroed MODEL_RESULT_S.
MODEL_RESULT_S rknn_toolkit_result(TOOLKIT_MODEL_CTX_S *ctx)
{
    RK_LOGI("output origin tensors:\n");

    MODEL_RESULT_S rknn_result;
    memset(&rknn_result, 0, sizeof(rknn_result));

    // Original (pre-runtime-conversion) output attributes.
    ctx->orig_output_attrs[0].index = 0;
    int ret = rknn_query(ctx->context, RKNN_QUERY_OUTPUT_ATTR, &(ctx->orig_output_attrs[0]), sizeof(rknn_tensor_attr));
    if (ret != RKNN_SUCC)
    {
        // FIX: the original logged the failure, then used the uninitialized
        // attrs anyway. Bail out instead.
        RK_LOGE("rknn_query fail! ret=%d\n", ret);
        return rknn_result;
    }
    dump_tensor_attr(&ctx->orig_output_attrs[0]);

    int size = ctx->orig_output_attrs[0].size_with_stride * sizeof(float);
    float *nchw_buf = (float *)malloc(size);    // de-quantized NCHW output
    float *softmax_buf = (float *)malloc(size); // softmax probabilities
    if (nchw_buf == NULL || softmax_buf == NULL)
    {
        // FIX: the original never checked either allocation.
        RK_LOGE("malloc %d bytes fail!\n", size);
        free(nchw_buf);
        free(softmax_buf);
        return rknn_result;
    }

    float sumTmpData = 0;
    uint32_t n_elems = ctx->output_attrs[0].n_elems;
    if (ctx->output_attrs[0].fmt == RKNN_TENSOR_NC1HWC2)
    {
        RK_LOGI("==RKNN_TENSOR_NC1HWC2==\n");
        int channel = ctx->orig_output_attrs[0].dims[1];
        int h = ctx->orig_output_attrs[0].n_dims > 2 ? ctx->orig_output_attrs[0].dims[2] : 1;
        int w = ctx->orig_output_attrs[0].n_dims > 3 ? ctx->orig_output_attrs[0].dims[3] : 1;
        NC1HWC2_int8_to_NCHW_float((int8_t *)ctx->output_mems[0]->virt_addr, nchw_buf,
                                   (int *)ctx->output_attrs[0].dims, channel, h, w,
                                   ctx->output_attrs[0].zp, ctx->output_attrs[0].scale);
        // FIX: the original never ran softmax in this branch, then sorted the
        // uninitialized softmax buffer below.
        for (uint32_t index = 0; index < n_elems; index++)
        {
            sumTmpData += exp(nchw_buf[index]);
        }
        for (uint32_t index = 0; index < n_elems; index++)
        {
            softmax_buf[index] = exp(nchw_buf[index]) / sumTmpData;
        }
    }
    else
    {
        RK_LOGI("==RKNN_TENSOR_NCHW==\n");
        // NOTE(review): this branch reads the output as float32 while the
        // NC1HWC2 branch reads it as int8 — kept from the original; confirm
        // the actual output tensor type set at init.
        float *src = (float *)ctx->output_mems[0]->virt_addr;
        for (uint32_t index = 0; index < n_elems; index++)
        {
            sumTmpData += exp(src[index]);
        }
        for (uint32_t index = 0; index < n_elems; index++)
        {
            softmax_buf[index] = exp(src[index]) / sumTmpData;
        }
    }

    // Top-3 classes by softmax probability.
    // FIX: fixed-size arrays instead of VLAs sized by a runtime variable.
    enum { TOP_NUM = 3 };
    uint32_t MaxClass[TOP_NUM];
    float fMaxProb[TOP_NUM];

    uint32_t sz = ctx->orig_output_attrs[0].n_elems;
    uint32_t top_count = sz > (uint32_t)TOP_NUM ? (uint32_t)TOP_NUM : sz;
    rknn_GetTopN(softmax_buf, fMaxProb, MaxClass, sz, top_count);

    RK_LOGE("---- Top%d ----\n", top_count);
    for (uint32_t j = 0; j < top_count; j++)
    {
        RK_LOGE("%8.6f - %d\n", fMaxProb[j], MaxClass[j]);
    }

    rknn_result.label = MaxClass[0];
    rknn_result.prob = fMaxProb[0];
    // FIX: the original leaked both heap buffers on every call.
    free(nchw_buf);
    free(softmax_buf);
    return rknn_result;
}

/*static RK_S32 unit_test_mpi_ao(TEST_AO_CTX_S *ctx, RK_U32 pcm_label)
{
    // RK_LOGI("======into ao ========\n");
    TEST_AO_CTX_S params;
    pthread_t tidSend;
    char tmpPath[256];
    if (taiic_open_device_ao(ctx) != RK_SUCCESS)
    {
        return RK_FAILURE;
    }

    memcpy(&(params), ctx, sizeof(TEST_AO_CTX_S));
    params.s32ChnIndex = 0;

    // snprintf(tmpPath, sizeof(tmpPath), "/data/pcm/%d.pcm",pcm_label);
    snprintf(tmpPath, sizeof(tmpPath), "/oem/usr/pcm/long.pcm");
    params.srcFilePath = tmpPath;
    RK_RK_LOGI("======params path is %s============\n", params.srcFilePath);

    taiic_set_channel_params_ao(&params);
    taiic_init_mpi_ao(&params);
    pthread_create(&tidSend, RK_NULL, sendDataThread, reinterpret_cast<void *>(&params));

    pthread_join(tidSend, RK_NULL);
    taiic_deinit_mpi_ao(params.s32DevId, params.s32ChnIndex);

    taiic_close_device_ao(ctx);

    return RK_SUCCESS;
}*/
