// Copyright (c) 2021 by Rockchip Electronics Co., Ltd. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

/*-------------------------------------------
                Includes
-------------------------------------------*/

#include <float.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <vector>
#include "taiic_lm.h"

/*-------------------------------------------
                  Functions
-------------------------------------------*/
// Return the current wall-clock time in microseconds.
// Note: tv_sec is widened to int64_t *before* the multiply — on targets
// where time_t/long is 32 bits (e.g. 32-bit ARM), `tv.tv_sec * 1000000`
// would otherwise overflow long before the conversion to int64_t.
static inline int64_t getCurrentTimeUs()
{
    struct timeval tv;
    gettimeofday(&tv, NULL);
    return (int64_t)tv.tv_sec * 1000000 + tv.tv_usec;
}

// static void dump_tensor_attr(rknn_tensor_attr *attr)
// {
//     char dims[128] = {0};
//     for (int i = 0; i < attr->n_dims; ++i)
//     {
//         int idx = strlen(dims);
//         sprintf(&dims[idx], "%d%s", attr->dims[i], (i == attr->n_dims - 1) ? "" : ", ");
//     }
//     printf("  index=%d, name=%s, n_dims=%d, dims=[%s], n_elems=%d, size=%d, fmt=%s, type=%s, qnt_type=%s, "
//            "zp=%d, scale=%f\n",
//            attr->index, attr->name, attr->n_dims, dims, attr->n_elems, attr->size, get_format_string(attr->fmt),
//            get_type_string(attr->type), get_qnt_type_string(attr->qnt_type), attr->zp, attr->scale);
// }



// The NPU output of a quantized model is int8; post-processing must handle int8 data.
// The code below converts an int8 NC1HWC2 layout to an int8 NCHW layout.
// int NC1HWC2_int8_to_NCHW_int8(const int8_t *src, int8_t *dst, int *dims, int channel, int h, int w)
// {
//     int batch = dims[0];
//     int C1 = dims[1];
//     int C2 = dims[4];
//     int hw_src = dims[2] * dims[3];
//     int hw_dst = h * w;
//     for (int i = 0; i < batch; i++)
//     {
//         src = src + i * C1 * hw_src * C2;
//         dst = dst + i * channel * hw_dst;
//         for (int c = 0; c < channel; ++c)
//         {
//             int plane = c / C2;
//             const int8_t *src_c = plane * hw_src * C2 + src;
//             int offset = c % C2;
//             for (int cur_h = 0; cur_h < h; ++cur_h)
//                 for (int cur_w = 0; cur_w < w; ++cur_w)
//                 {
//                     int cur_hw = cur_h * w + cur_w;
//                     dst[c * hw_dst + cur_h * w + cur_w] = src_c[C2 * cur_hw + offset];
//                 }
//         }
//     }

//     return 0;
// }

// The NPU output of a quantized model is int8; post-processing must handle int8 data.
// The code below converts an int8 NC1HWC2 layout to a float NCHW layout.
// int NC1HWC2_int8_to_NCHW_float(const int8_t *src, float *dst, int *dims, int channel, int h, int w, int zp, float scale)
// {
//     int batch = dims[0];
//     int C1 = dims[1];
//     int C2 = dims[4];
//     int hw_src = dims[2] * dims[3];
//     int hw_dst = h * w;
//     for (int i = 0; i < batch; i++)
//     {
//         src = src + i * C1 * hw_src * C2;
//         dst = dst + i * channel * hw_dst;
//         for (int c = 0; c < channel; ++c)
//         {
//             int plane = c / C2;
//             const int8_t *src_c = plane * hw_src * C2 + src;
//             int offset = c % C2;
//             for (int cur_h = 0; cur_h < h; ++cur_h)
//                 for (int cur_w = 0; cur_w < w; ++cur_w)
//                 {
//                     int cur_hw = cur_h * w + cur_w;
//                     dst[c * hw_dst + cur_h * w + cur_w] = (src_c[C2 * cur_hw + offset] - zp) * scale; // int8-->float
//                 }
//         }
//     }

//     return 0;
// }

/*-------------------------------------------
                  Main Functions
-------------------------------------------*/
// Demo entry point: load an RKNN landmark model and a raw input file,
// run inference `loop_count` times while timing each run, then print
// the landmark result and release all resources.
//
// Usage: <prog> model_path input_path [loop_count]
int main(int argc, char *argv[])
{
    if (argc < 3)
    {
        printf("Usage:%s model_path input_path [loop_count]\n", argv[0]);
        return -1;
    }

    char *input_path = argv[2];

    int loop_count = 1;
    if (argc > 3)
    {
        loop_count = atoi(argv[3]);
    }

    // Model inference context. calloc zero-initializes (replacing the
    // original unchecked malloc + memset pair) and the result is checked
    // before first use.
    LM_TOOLKIT_MODEL_CTX_S *lm_ctx =
        reinterpret_cast<LM_TOOLKIT_MODEL_CTX_S *>(calloc(1, sizeof(LM_TOOLKIT_MODEL_CTX_S)));
    if (lm_ctx == NULL)
    {
        printf("allocate model context failed!\n");
        return -1;
    }

    lm_ctx->modelPath = argv[1];

    lm_rknn_toolkit_config_init(lm_ctx);
    lm_rknn_toolkit_io_init(lm_ctx);

    // Input buffer sized to the model's first input tensor.
    // Allocated with new[] and released with delete[] — the original code
    // mixed new[] with free(), which is undefined behavior.
    unsigned char *input_data = new unsigned char[lm_ctx->input_attrs[0].size];

    printf("==load file is %s==\n", input_path);
    FILE *fp = fopen(input_path, "rb");
    if (fp == NULL)
    {
        perror("open failed!");
        delete[] input_data;
        free(lm_ctx);
        return -1;
    }
    // Read exactly one record of the model's expected input size;
    // a short read means the file does not match the model input.
    if (fread(input_data, lm_ctx->input_attrs[0].size, 1, fp) != 1)
    {
        printf("read input data failed: %s is smaller than the model input size\n", input_path);
        fclose(fp);
        delete[] input_data;
        free(lm_ctx);
        return -1;
    }
    fclose(fp);

    // Copy the input into the model's zero-copy input memory.
    lm_rknn_toolkit_data_refresh(lm_ctx, input_data);

    // Run inference loop_count times, reporting per-iteration latency/FPS.
    printf("Begin perf ...\n");
    for (int i = 0; i < loop_count; ++i)
    {
        int64_t start_us = getCurrentTimeUs();
        printf("===start run===\n");
        int ret = rknn_run(lm_ctx->context, NULL);

        printf("====end run===\n");
        int64_t elapse_us = getCurrentTimeUs() - start_us;
        if (ret < 0)
        {
            printf("rknn run error %d\n", ret);
            delete[] input_data;
            free(lm_ctx);
            return -1;
        }
        printf("%4d: Elapse Time = %.2fms, FPS = %.2f\n", i, elapse_us / 1000.f, 1000.f * 1000.f / elapse_us);
    }

    printf("output origin tensors:\n");

    // Post-process the output tensors into landmark coordinates.
    LM_RESULT_S result = lm_toolkit_result(lm_ctx);
    printf("result is %d, %d, %d, %d\n", result.left_x, result.left_y, result.right_x, result.right_y);

    // Destroy rknn zero-copy memory.
    rknn_destroy_mem(lm_ctx->context, lm_ctx->input_mems[0]);
    for (uint32_t i = 0; i < lm_ctx->io_num.n_output; ++i)
    {
        rknn_destroy_mem(lm_ctx->context, lm_ctx->output_mems[i]);
    }

    // Destroy the rknn context, then release host-side allocations.
    rknn_destroy(lm_ctx->context);

    delete[] input_data;
    free(lm_ctx);

    return 0;
}
