#include "taiic_lm.h"
#include "rknn_api.h"
#include "rk_debug.h"
#include "rk_defines.h"
#include <limits> // for std::numeric_limits

int lm_rknn_toolkit_config_init(LM_TOOLKIT_MODEL_CTX_S *ctx)
{
    // Initialize the rknn context from ctx->modelPath and cache the model's
    // input/output tensor attributes into ctx.
    // Returns 1 on success, -1 on any failure (this file's convention).

    // load rknn model
    int ret = rknn_init(&ctx->context, ctx->modelPath, 0, 0, NULL);
    if (ret < 0)
    {
        RK_LOGE("rknn_init fail! ret=%d\n", ret);
        return -1;
    }
    // Get model input/output tensor counts.
    ret = rknn_query(ctx->context, RKNN_QUERY_IN_OUT_NUM, &ctx->io_num, sizeof(ctx->io_num));
    if (ret != RKNN_SUCC)
    {
        RK_LOGE("rknn_query fail! ret=%d\n", ret);
        return -1;
    }
    RK_LOGI("model input num: %d, output num: %d\n", ctx->io_num.n_input, ctx->io_num.n_output);

    // Query the attribute of every input tensor.
    RK_LOGI("input tensors:\n");
    memset(ctx->input_attrs, 0, ctx->io_num.n_input * sizeof(rknn_tensor_attr));
    for (uint32_t i = 0; i < ctx->io_num.n_input; i++)
    {
        ctx->input_attrs[i].index = i;
        ret = rknn_query(ctx->context, RKNN_QUERY_INPUT_ATTR, &(ctx->input_attrs[i]), sizeof(rknn_tensor_attr));
        if (ret < 0)
        {
            // Was logged as "rknn_init error" — the failing call is rknn_query.
            RK_LOGE("rknn_query fail! ret=%d\n", ret);
            return -1;
        }
        dump_tensor_attr(&ctx->input_attrs[i]);
    }

    // Query the NATIVE attribute of every output tensor (layout used for
    // zero-copy output buffers; see lm_rknn_toolkit_io_init).
    RK_LOGI("output tensors:\n");
    memset(ctx->output_attrs, 0, ctx->io_num.n_output * sizeof(rknn_tensor_attr));
    for (uint32_t i = 0; i < ctx->io_num.n_output; i++)
    {
        ctx->output_attrs[i].index = i;
        ret = rknn_query(ctx->context, RKNN_QUERY_NATIVE_OUTPUT_ATTR, &(ctx->output_attrs[i]), sizeof(rknn_tensor_attr));
        if (ret != RKNN_SUCC)
        {
            RK_LOGE("rknn_query fail! ret=%d\n", ret);
            return -1;
        }
        dump_tensor_attr(&ctx->output_attrs[i]);
    }

    return 1;
}

int lm_rknn_toolkit_io_init(LM_TOOLKIT_MODEL_CTX_S *ctx)
{
    // Allocate zero-copy tensor memory and bind output tensor 0 to the
    // context. Input tensors are bound per frame in lm_rknn_toolkit_data_refresh.
    // Returns 1 on success, -1 on failure.

    // Input data will be supplied as UINT8 / NHWC; override the queried attrs
    // so the runtime converts to the model's native type/layout.
    rknn_tensor_type input_type = RKNN_TENSOR_UINT8;
    rknn_tensor_format input_layout = RKNN_TENSOR_NHWC;
    // Create input tensor memory
    for (uint32_t i = 0; i < ctx->io_num.n_input; i++)
    {
        ctx->input_attrs[i].type = input_type; // attrs set
        ctx->input_attrs[i].fmt = input_layout;
        ctx->input_mems[i] = rknn_create_mem(ctx->context, ctx->input_attrs[i].size_with_stride); // size
        if (ctx->input_mems[i] == NULL)
        {
            // rknn_create_mem returns NULL on failure; the original code
            // would have dereferenced it later without checking.
            RK_LOGE("rknn_create_mem fail!\n");
            return -1;
        }
    }

    // Create output tensor memory (only output 0 is consumed by this model).
    ctx->output_mems[0] = rknn_create_mem(ctx->context, ctx->output_attrs[0].size_with_stride);
    if (ctx->output_mems[0] == NULL)
    {
        RK_LOGE("rknn_create_mem fail!\n");
        return -1;
    }

    //  Set output tensor memory
    if (rknn_set_io_mem(ctx->context, ctx->output_mems[0], &ctx->output_attrs[0]) < 0)
    {
        RK_LOGE("rknn_set_io_mem fail! ret=%d\n", -1);
        return -1;
    }
    return 1;
}

int lm_rknn_toolkit_data_refresh(LM_TOOLKIT_MODEL_CTX_S *ctx, unsigned char *input_data)
{
    // Copy one frame from input_data into input tensor 0's zero-copy buffer
    // (row by row when the hardware row stride differs from the image width)
    // and bind it to the rknn context.
    // Returns 1 on success, -1 if rknn_set_io_mem fails.
    // NOTE(review): only input tensor 0 is refreshed even though io_init
    // allocates memory for every input — confirm the model has a single input.

    // Copy input data to input tensor memory

    // Layout assumption (NHWC, as set in lm_rknn_toolkit_io_init):
    // dims[1]=height, dims[2]=width, dims[3]=channels — TODO confirm.
    int width = ctx->input_attrs[0].dims[2];
    int stride = ctx->input_attrs[0].w_stride;
    RK_LOGD("====width is %d, stride is %d", width, stride);
    RK_U8 *input_ptr = input_data;
    if (width == stride)
    {
        // No padding between rows: one bulk copy of height * width * channels bytes.
        RK_LOGD("===== == ======\n");
        memcpy(ctx->input_mems[0]->virt_addr, input_ptr, width * ctx->input_attrs[0].dims[1] * ctx->input_attrs[0].dims[3]);
        RK_LOGD("==data length is %d===\n", (width * ctx->input_attrs[0].dims[1] * ctx->input_attrs[0].dims[3]));
    }
    else
    {
        // Padded rows: copy width*channels bytes per row, advancing the
        // destination by the strided row size so the padding is skipped.
        RK_LOGD("===== != ========\n");
        int height = ctx->input_attrs[0].dims[1];
        int channel = ctx->input_attrs[0].dims[3];
        RK_U8 *src_ptr = input_ptr;
        RK_U8 *dst_ptr = (RK_U8 *)ctx->input_mems[0]->virt_addr;
        // width-channel elements
        int src_wc_elems = width * channel;
        int dst_wc_elems = stride * channel;
        for (int h = 0; h < height; ++h)
        {
            memcpy(dst_ptr, src_ptr, src_wc_elems);
            src_ptr += src_wc_elems;
            dst_ptr += dst_wc_elems;
        }
    }

    // Set input tensor memory
    if (rknn_set_io_mem(ctx->context, ctx->input_mems[0], &ctx->input_attrs[0]) < 0)
    {
        RK_LOGE("rknn_set_io_mem fail! ");
        return -1;
    }

    return 1;
}

LM_RESULT_S lm_toolkit_result(LM_TOOLKIT_MODEL_CTX_S *ctx)
{
    // Dequantize output tensor 0 into integer landmark coordinates clamped to
    // the model's [0, 112] coordinate space, and return the two corner points.
    RK_LOGD("output tensors:\n");

    LM_RESULT_S lm_result;
    memset(&lm_result, 0, sizeof(lm_result)); // defined result on early return

    // One int per output element; n_elems * sizeof(int) is the exact size
    // (the original over-allocated with size_with_stride * sizeof(int)).
    int *coords = (int *)malloc(ctx->output_attrs[0].n_elems * sizeof(int));
    if (coords == NULL)
    {
        RK_LOGE("malloc fail!\n");
        return lm_result;
    }

    int8_t *src = (int8_t *)ctx->output_mems[0]->virt_addr;

    for (uint32_t index = 0; index < ctx->output_attrs[0].n_elems; index++)
    {
        // Affine dequantization: (q - zero_point) * scale.
        float tmp = (float)(src[index] - ctx->output_attrs[0].zp) * ctx->output_attrs[0].scale;

        coords[index] = (int)tmp;
        // Clamp to the model's 112x112 coordinate space.
        if (coords[index] > 112)
        {
            coords[index] = 112;
        }
        if (coords[index] < 0)
        {
            coords[index] = 0;
        }
        // Was "%f" with an int argument (undefined behavior) — use %d.
        RK_LOGD("===float num is %u, data is %d, float data is %d==\n", index, src[index], coords[index]);
    }

    // Elements 0/1 hold the top-left point, 4/5 the bottom-right point.
    // NOTE(review): assumes at least 6 output elements — confirm with the model.
    lm_result.left_x = coords[0];
    lm_result.left_y = coords[1];

    lm_result.right_x = coords[4];
    lm_result.right_y = coords[5];

    free(coords); // the original leaked this buffer on every call
    return lm_result;
}

LM_RESULT_S lm_toolkit_result_parameter(LM_TOOLKIT_MODEL_CTX_S *ctx)
{
    // Dequantize output tensor 0, scale the coordinates from the model's 112
    // space to the 320x320 frame, then expand the detected box by 40px
    // (clamped to [0, 320]); falls back to the full frame when the box
    // geometry fails the plausibility check below.
    RK_LOGD("output tensors:\n");

    LM_RESULT_S lm_result;
    memset(&lm_result, 0, sizeof(lm_result)); // defined result on early return

    // One int per output element (the original over-allocated).
    int *coords = (int *)malloc(ctx->output_attrs[0].n_elems * sizeof(int));
    if (coords == NULL)
    {
        RK_LOGE("malloc fail!\n");
        return lm_result;
    }

    int8_t *src = (int8_t *)ctx->output_mems[0]->virt_addr;

    for (uint32_t index = 0; index < ctx->output_attrs[0].n_elems; index++)
    {
        float tmp = (float)(src[index] - ctx->output_attrs[0].zp) * ctx->output_attrs[0].scale; // float output [0, 7]

        // NOTE(review): the cast binds to tmp alone, so the fraction is
        // dropped BEFORE scaling; (int)(tmp * 320 / 112) may have been
        // intended — confirm before changing. Behavior kept as-is.
        coords[index] = (int)tmp * 320 / 112; // reduction 320 * 320 pixels

        // Was "%f" with an int argument (undefined behavior) — use %d.
        RK_LOGD("===float num is %u, ori output data is %d, int data is %d==\n", index, src[index], coords[index]);
    }

    // NOTE(review): the two clauses parenthesize "+ 320" differently (outside
    // abs() for x, inside for y) — likely a typo in one of them; kept as-is.
    if (abs(coords[0] + coords[4] - 320) < abs(coords[0] - coords[4]) + 320 || abs(coords[1] + coords[5] - 320) < abs(coords[1] - coords[5] + 320))
    {
        // Expand by 40px on each side, clamped to the frame.
        lm_result.left_x = (coords[0] - 40) < 0 ? 0 : (coords[0] - 40);
        lm_result.left_y = (coords[1] - 40) < 0 ? 0 : (coords[1] - 40);

        lm_result.right_x = (coords[4] + 40) > 320 ? 320 : (coords[4] + 40);
        lm_result.right_y = (coords[5] + 40) > 320 ? 320 : (coords[5] + 40);
    }
    else
    {
        // Fallback: use the whole 320x320 frame.
        lm_result.left_x = 0;
        lm_result.left_y = 0;
        lm_result.right_x = 320;
        lm_result.right_y = 320;
    }

    free(coords); // the original leaked this buffer on every call
    return lm_result;
}

// LM post-processing: expand the landmark bounding box outward by 25 pixels
LM_RESULT_S lm_toolkit_result_parameter_extend25(LM_TOOLKIT_MODEL_CTX_S *ctx)
{
    // Dequantize output tensor 0 into 320x320 pixel coordinates, take the
    // bounding box over all (x, y) landmark pairs, expand it by EXTEND_PIXEL
    // on each side and clamp to [0, 320]. Falls back to the full frame when
    // the box fails the plausibility check or is degenerate (side <= 50px).
    LM_RESULT_S lm_result;
    // Default result: the whole frame (also returned on allocation failure).
    lm_result.left_x = 0;
    lm_result.left_y = 0;
    lm_result.right_x = 320;
    lm_result.right_y = 320;

    // One int per output element (the original over-allocated).
    int *coords = (int *)malloc(ctx->output_attrs[0].n_elems * sizeof(int));
    if (coords == NULL)
    {
        RK_LOGE("malloc fail!\n");
        return lm_result;
    }

    int8_t *src = (int8_t *)ctx->output_mems[0]->virt_addr;

    for (uint32_t index = 0; index < ctx->output_attrs[0].n_elems; index++)
    {
        // Dequantize, then scale the normalized output to 320px.
        float tmp = (float)(src[index] - ctx->output_attrs[0].zp) * ctx->output_attrs[0].scale; // float output [0, 7]

        coords[index] = (int)(tmp * 320); // reduction 320 * 320 pixels

        RK_LOGD("=== %u, ori output data is %d, tmp is %f, int data is %d==\n", index, src[index], tmp, coords[index]);
    }

    // Bounding box over the landmarks: even indices are x, odd indices are y.
    int yMax = std::numeric_limits<int>::min();
    int yMin = std::numeric_limits<int>::max();
    int xMax = std::numeric_limits<int>::min();
    int xMin = std::numeric_limits<int>::max();
    for (uint32_t i = 0; i < ctx->output_attrs[0].n_elems; ++i)
    {
        if (i % 2 == 0)
        { // even index: x
            if (coords[i] > xMax) xMax = coords[i];
            if (coords[i] < xMin) xMin = coords[i];
        }
        else
        { // odd index: y
            if (coords[i] > yMax) yMax = coords[i];
            if (coords[i] < yMin) yMin = coords[i];
        }
    }
    free(coords); // the original leaked this buffer on every call
    RK_LOGD("x1, y1 is[%d, %d], x2 y2 is [%d, %d]", xMin, yMin, xMax, yMax);

    // NOTE(review): the two clauses parenthesize "+ 320" differently (outside
    // abs() for x, inside for y) — likely a typo in one of them; kept as-is.
    if (abs(xMin + xMax - 320) < abs(xMin - xMax) + 320 || abs(yMin + yMax - 320) < abs(yMin - yMax + 320))
    {
        lm_result.left_x = (xMin - EXTEND_PIXEL) < 0 ? 0 : (xMin - EXTEND_PIXEL);
        lm_result.left_y = (yMin - EXTEND_PIXEL) < 0 ? 0 : (yMin - EXTEND_PIXEL);

        // BUG FIX: the original tested "xMax > 320" BEFORE adding
        // EXTEND_PIXEL, so right_x could land past the frame edge
        // (e.g. xMax=310 -> 335). Clamp the expanded value like right_y does.
        lm_result.right_x = (xMax + EXTEND_PIXEL) > 320 ? 320 : (xMax + EXTEND_PIXEL);
        lm_result.right_y = (yMax + EXTEND_PIXEL) > 320 ? 320 : (yMax + EXTEND_PIXEL);

        // Reject degenerate boxes and use the full frame instead.
        int rec_w = lm_result.right_x - lm_result.left_x;
        int rec_h = lm_result.right_y - lm_result.left_y;
        if ((rec_w <= 50) || (rec_h <= 50))
        {
            lm_result.left_x = 0;
            lm_result.left_y = 0;
            lm_result.right_x = 320;
            lm_result.right_y = 320;
        }
    }
    // else: the full-frame defaults set above already apply.
    return lm_result;
}
