// Copyright (c) 2021 by Rockchip Electronics Co., Ltd. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

/*-------------------------------------------
                Includes
-------------------------------------------*/
#include "rknn_api.h"

#include <float.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <vector>

#include "rk_debug.h"
#include "rk_defines.h"
#include <algorithm>
#include <cmath>

// Top-1 classification result: the winning class index and its probability
// (filled from the softmax-normalized output in main()).
typedef struct rkRESULTS_LABEL
{
    /* data */
    RK_U32 label; // predicted class index (top-1)
    float prob;   // confidence of that class, in [0, 1] after softmax

} MODEL_RESULT_S;
/*-------------------------------------------
                  Functions
-------------------------------------------*/
// Return the current wall-clock time in microseconds since the Unix epoch.
// The seconds value is widened to int64_t BEFORE the multiply: on platforms
// with a 32-bit time_t the expression `tv_sec * 1000000` would overflow.
static inline int64_t getCurrentTimeUs()
{
    struct timeval tv;
    gettimeofday(&tv, NULL);
    return (int64_t)tv.tv_sec * 1000000 + tv.tv_usec;
}

// Pretty-print one tensor attribute (index, name, dims, size, layout,
// element type and quantization parameters) to stdout.
static void dump_tensor_attr(rknn_tensor_attr *attr)
{
    // Build "d0, d1, ..." into a bounded buffer. The original used an
    // unbounded sprintf, which could overflow dims[] for many/large dims;
    // snprintf truncates safely instead.
    char dims[128] = {0};
    size_t pos = 0;
    for (uint32_t i = 0; i < attr->n_dims && pos < sizeof(dims); ++i)
    {
        int n = snprintf(&dims[pos], sizeof(dims) - pos, "%d%s", attr->dims[i],
                         (i == attr->n_dims - 1) ? "" : ", ");
        if (n < 0)
        {
            break; // encoding error: stop appending, print what we have
        }
        pos += (size_t)n;
    }
    printf("  index=%d, name=%s, n_dims=%d, dims=[%s], n_elems=%d, size=%d, fmt=%s, type=%s, qnt_type=%s, "
           "zp=%d, scale=%f\n",
           attr->index, attr->name, attr->n_dims, dims, attr->n_elems, attr->size, get_format_string(attr->fmt),
           get_type_string(attr->type), get_qnt_type_string(attr->qnt_type), attr->zp, attr->scale);
}

// Selection of the top-N classes by probability.
//   pfProb      : input probabilities, outputCount entries
//   pfMaxProb   : output, the topNum highest probabilities in descending order
//   pMaxClass   : output, the class indices matching pfMaxProb
//   outputCount : number of classes in pfProb
//   topNum      : capacity of pfMaxProb/pMaxClass
// Returns 1 (always succeeds).
//
// Fix: the original unconditionally compared i against pMaxClass[0..4],
// reading past the caller's arrays whenever topNum < 5 (main() passes
// 3-element arrays). Only the j entries selected so far are valid, so only
// those are checked now.
static int rknn_GetTopN(float *pfProb, float *pfMaxProb, uint32_t *pMaxClass, uint32_t outputCount, uint32_t topNum)
{
    uint32_t i, j, k;
    uint32_t top_count = outputCount > topNum ? topNum : outputCount;

    for (i = 0; i < topNum; ++i)
    {
        pfMaxProb[i] = -FLT_MAX;
        pMaxClass[i] = (uint32_t)-1; // sentinel: no class selected yet
    }

    for (j = 0; j < top_count; j++)
    {
        for (i = 0; i < outputCount; i++)
        {
            // Skip classes already picked in earlier rounds (indices < j).
            int already_selected = 0;
            for (k = 0; k < j; ++k)
            {
                if (i == pMaxClass[k])
                {
                    already_selected = 1;
                    break;
                }
            }
            if (already_selected)
            {
                continue;
            }

            float prob = pfProb[i];
            if (prob > pfMaxProb[j])
            {
                pfMaxProb[j] = prob;
                pMaxClass[j] = i;
            }
        }
    }

    return 1;
}

// Convert an int8 NC1HWC2 (channel-packed NPU native layout) tensor into an
// NCHW float tensor, dequantizing each element as (v - zp) * scale.
//   src     : int8 source, layout [N, C1, H, W, C2] described by dims[0..4]
//   dst     : float destination, layout [N, channel, h, w]
//   dims    : the 5 source dims (dims[2]*dims[3] is the source HW plane)
//   channel : logical channel count of the destination (<= C1*C2)
//   h, w    : destination spatial size
//   zp,scale: quantization zero-point and scale
// Returns 0.
//
// Fix: the original advanced `src`/`dst` themselves by i*stride every batch
// iteration, accumulating offsets of 0, 1, 3, 6... strides instead of
// 0, 1, 2, 3... — wrong for batch > 2. Per-batch base pointers are computed
// from the unmodified src/dst instead.
static int NC1HWC2_int8_to_NCHW_float(const int8_t *src, float *dst, int *dims, int channel, int h, int w, int zp, float scale)
{
    int batch = dims[0];
    int C1 = dims[1];
    int C2 = dims[4];
    int hw_src = dims[2] * dims[3];
    int hw_dst = h * w;
    for (int i = 0; i < batch; i++)
    {
        const int8_t *src_b = src + i * C1 * hw_src * C2;
        float *dst_b = dst + i * channel * hw_dst;
        for (int c = 0; c < channel; ++c)
        {
            int plane = c / C2;                              // which C2-packed plane holds channel c
            int offset = c % C2;                             // position of c inside the packed group
            const int8_t *src_c = src_b + plane * hw_src * C2;
            for (int cur_h = 0; cur_h < h; ++cur_h)
            {
                for (int cur_w = 0; cur_w < w; ++cur_w)
                {
                    int cur_hw = cur_h * w + cur_w;
                    // int8 --> dequantized float
                    dst_b[c * hw_dst + cur_hw] = (src_c[C2 * cur_hw + offset] - zp) * scale;
                }
            }
        }
    }

    return 0;
}
/*-------------------------------------------
                  Main Functions
-------------------------------------------*/
int main(int argc, char *argv[])
{
    if (argc < 3)
    {
        printf("Usage:%s model_path input_path [loop_count]\n", argv[0]);
        return -1;
    }

    char *model_path = argv[1];
    char *input_path = argv[2];

    int loop_count = 1;
    if (argc > 3)
    {
        loop_count = atoi(argv[3]);
    }

    rknn_context ctx = 0;

    // Load RKNN Model
    // Init rknn from model path
    int ret = rknn_init(&ctx, model_path, 0, 0, NULL); // 初始化上下文
    printf("===init model===\n");
    if (ret < 0)
    {
        printf("rknn_init fail! ret=%d\n", ret);
        return -1;
    }

    // Get sdk and driver version
    rknn_sdk_version sdk_ver; // 查询sdk版本号
    ret = rknn_query(ctx, RKNN_QUERY_SDK_VERSION, &sdk_ver, sizeof(sdk_ver));
    if (ret != RKNN_SUCC)
    {
        printf("rknn_query fail! ret=%d\n", ret);
        return -1;
    }
    printf("rknn_api/rknnrt version: %s, driver version: %s\n", sdk_ver.api_version, sdk_ver.drv_version);

    // Get Model Input Output Info
    rknn_input_output_num io_num; // 查询输入输出属性
    ret = rknn_query(ctx, RKNN_QUERY_IN_OUT_NUM, &io_num, sizeof(io_num));
    if (ret != RKNN_SUCC)
    {
        printf("rknn_query fail! ret=%d\n", ret);
        return -1;
    }
    printf("model input num: %d, output num: %d\n", io_num.n_input, io_num.n_output);

    printf("input tensors:\n");
    // 输入tensor的属性
    rknn_tensor_attr input_attrs[io_num.n_input];
    memset(input_attrs, 0, io_num.n_input * sizeof(rknn_tensor_attr));
    for (uint32_t i = 0; i < io_num.n_input; i++)
    {
        input_attrs[i].index = i;
        // query info
        ret = rknn_query(ctx, RKNN_QUERY_INPUT_ATTR, &(input_attrs[i]), sizeof(rknn_tensor_attr));
        if (ret < 0)
        {
            printf("rknn_init error! ret=%d\n", ret);
            return -1;
        }
        dump_tensor_attr(&input_attrs[i]);
    }

    printf("output tensors:\n");
    // 输出tensor的属性
    rknn_tensor_attr output_attrs[io_num.n_output];
    memset(output_attrs, 0, io_num.n_output * sizeof(rknn_tensor_attr));
    for (uint32_t i = 0; i < io_num.n_output; i++)
    {
        output_attrs[i].index = i;
        // query info
        ret = rknn_query(ctx, RKNN_QUERY_NATIVE_OUTPUT_ATTR, &(output_attrs[i]), sizeof(rknn_tensor_attr));
        if (ret != RKNN_SUCC)
        {
            printf("rknn_query fail! ret=%d\n", ret);
            return -1;
        }
        dump_tensor_attr(&output_attrs[i]);
    }

    // Get custom string
    // rknn_custom_string custom_string;
    // ret = rknn_query(ctx, RKNN_QUERY_CUSTOM_STRING, &custom_string, sizeof(custom_string));
    // if (ret != RKNN_SUCC)
    // {
    //   printf("rknn_query fail! ret=%d\n", ret);
    //   return -1;
    // }
    // printf("custom string: %s\n", custom_string.string);

    unsigned char *input_data = NULL;
    rknn_tensor_type input_type = RKNN_TENSOR_UINT8;
    rknn_tensor_format input_layout = RKNN_TENSOR_NHWC;

    // load test file
    input_data = new unsigned char[input_attrs[0].size];

    FILE *fp = fopen(input_path, "rb");
    printf("==load fiel is %s==\n", input_path);
    if (fp == NULL)
    {
        perror("open failed!");
        return -1;
    }
    fread(input_data, input_attrs[0].size, 1, fp); // 读取输入数据
    fclose(fp);

    if (!input_data)
    {
        return -1;
    }

    // Create input tensor memory
    rknn_tensor_mem *input_mems[1];
    // default input type is int8 (normalize and quantize need compute in outside)
    // if set uint8, will fuse normalize and quantize to npu
    input_attrs[0].type = input_type;
    // default fmt is NHWC, npu only support NHWC in zero copy mode
    input_attrs[0].fmt = input_layout;

    input_mems[0] = rknn_create_mem(ctx, input_attrs[0].size_with_stride);

    // Copy input data to input tensor memory
    int width = input_attrs[0].dims[2];
    int stride = input_attrs[0].w_stride;

    printf("===width is %d, stride is %d====\n", width, stride);
    if (width == stride)
    {
        memcpy(input_mems[0]->virt_addr, input_data, width * input_attrs[0].dims[1] * input_attrs[0].dims[3]);
        printf("===input data len is %d===\n");
    }
    else
    {
        int height = input_attrs[0].dims[1];
        int channel = input_attrs[0].dims[3];
        // copy from src to dst with stride
        uint8_t *src_ptr = input_data;
        uint8_t *dst_ptr = (uint8_t *)input_mems[0]->virt_addr;
        // width-channel elements
        int src_wc_elems = width * channel;
        int dst_wc_elems = stride * channel;
        for (int h = 0; h < height; ++h)
        {
            memcpy(dst_ptr, src_ptr, src_wc_elems);
            src_ptr += src_wc_elems;
            dst_ptr += dst_wc_elems;
        }
    }

    // Create output tensor memory
    rknn_tensor_mem *output_mems[io_num.n_output];
    for (uint32_t i = 0; i < io_num.n_output; ++i)
    {
        output_mems[i] = rknn_create_mem(ctx, output_attrs[i].size_with_stride);
    }

    // Set input tensor memory
    ret = rknn_set_io_mem(ctx, input_mems[0], &input_attrs[0]);
    if (ret < 0)
    {
        printf("rknn_set_io_mem fail! ret=%d\n", ret);
        return -1;
    }

    // Set output tensor memory
    for (uint32_t i = 0; i < io_num.n_output; ++i)
    {
        // set output memory and attribute
        ret = rknn_set_io_mem(ctx, output_mems[i], &output_attrs[i]);
        if (ret < 0)
        {
            printf("rknn_set_io_mem fail! ret=%d\n", ret);
            return -1;
        }
    }

    // Run
    printf("Begin perf ...\n");
    for (int i = 0; i < loop_count; ++i)
    {
        int64_t start_us = getCurrentTimeUs();
        printf("===start run===\n");
        ret = rknn_run(ctx, NULL);

        printf("====end run===\n");
        int64_t elapse_us = getCurrentTimeUs() - start_us;
        if (ret < 0)
        {
            printf("rknn run error %d\n", ret);
            return -1;
        }
        printf("%4d: Elapse Time = %.2fms, FPS = %.2f\n", i, elapse_us / 1000.f, 1000.f * 1000.f / elapse_us);
    }

    printf("output origin tensors:\n");

    float sumTmpData = 0;
    float *output_mems_nchw[io_num.n_output]; // 原始输出
    int size = output_attrs[0].size_with_stride * sizeof(float);
    output_mems_nchw[0] = (float *)malloc(size); // 赋予内存大小

    float *expTmpData[io_num.n_output]; // 进行softmax之后的数据
    expTmpData[0] = (float *)malloc(size);

    if (output_attrs[0].fmt == RKNN_TENSOR_NC1HWC2)
    {
        printf("==RKNN_TENSOR_NC1HWC2==\n");
        int channel = output_attrs[0].dims[1];
        int h = output_attrs[0].n_dims > 2 ? output_attrs[0].dims[2] : 1;
        int w = output_attrs[0].n_dims > 3 ? output_attrs[0].dims[3] : 1;
        int zp = output_attrs[0].zp;
        float scale = output_attrs[0].scale;
        NC1HWC2_int8_to_NCHW_float((int8_t *)output_mems[0]->virt_addr, (float *)output_mems_nchw[0], (int *)output_attrs[0].dims,
                                   channel, h, w, zp, scale);
    }
    else
    {
        printf("==RKNN_TENSOR_NCHW==\n");
        int8_t *src = (int8_t *)output_mems[0]->virt_addr;

        for (int index = 0; index < output_attrs[0].n_elems; index++)
        {
            output_mems_nchw[0][index] = (float)(src[index] - output_attrs[0].zp) * output_attrs[0].scale;
            printf("===float num is %d, data is %f==\n", index, output_mems_nchw[0][index]);
            sumTmpData += exp(output_mems_nchw[0][index]);
        }

        for (int index = 0; index < output_attrs[0].n_elems; index++)
        {
            expTmpData[0][index] = exp(output_mems_nchw[0][index]) / sumTmpData;
            printf("==softmax num %d,data is %8.6f==\n", index, expTmpData[0][index]);
        }
    }

    // Get top
    uint32_t topNum = 3;
    uint32_t MaxClass[topNum];
    float fMaxProb[topNum];

    uint32_t sz = output_attrs[0].n_elems;
    uint32_t top_count = sz > topNum ? topNum : sz;

    float *buffer = (float *)expTmpData[0]; // 送入softmax的数据，进行排序
    rknn_GetTopN(buffer, fMaxProb, MaxClass, sz, top_count);

    printf("---- Top%d ----\n", top_count);
    for (int j = 0; j < top_count; j++)
    {
        printf("%8.6f - %d\n", fMaxProb[j], MaxClass[j]);
    }
    MODEL_RESULT_S rknn_result;
    rknn_result.label = MaxClass[0];
    rknn_result.prob = fMaxProb[0];
    // Destroy rknn memory
    rknn_destroy_mem(ctx, input_mems[0]);
    for (uint32_t i = 0; i < io_num.n_output; ++i)
    {
        rknn_destroy_mem(ctx, output_mems[i]);
        // free(output_mems_nchw[i]);
    }

    // destroy
    rknn_destroy(ctx);

    free(input_data);

    return 0;
}
