#include "../include/model/taiic_yolo.h"
#include "rk_defines.h"
#include "rk_debug.h"

// int NC1HWC2_int8_to_NCHW_int8(const int8_t *src, int8_t *dst, int *dims, int channel, int h, int w)
// {
//   int batch = dims[0];
//   int C1 = dims[1];
//   int C2 = dims[4];
//   int hw_src = dims[2] * dims[3];
//   int hw_dst = h * w;
//   for (int i = 0; i < batch; i++)
//   {
//     src = src + i * C1 * hw_src * C2;
//     dst = dst + i * channel * hw_dst;
//     for (int c = 0; c < channel; ++c)
//     {
//       int plane = c / C2;
//       const int8_t *src_c = plane * hw_src * C2 + src;
//       int offset = c % C2;
//       for (int cur_h = 0; cur_h < h; ++cur_h)
//         for (int cur_w = 0; cur_w < w; ++cur_w)
//         {
//           int cur_hw = cur_h * w + cur_w;
//           dst[c * hw_dst + cur_h * w + cur_w] = src_c[C2 * cur_hw + offset];
//         }
//     }
//   }
//   return 1;
// }

/**
 * @brief Initialize the RKNN context from ctx->modelPath and query model I/O attributes.
 *
 * Fills ctx->io_num, ctx->input_attrs (one per model input) and
 * ctx->output_attrs (queried in native/NPU layout so the zero-copy output
 * buffers allocated later match what the hardware writes).
 *
 * @param ctx model context; ctx->modelPath must be set by the caller, and the
 *            attr arrays must be large enough for the model's I/O count.
 * @return 1 on success, -1 on any failure.
 */
int yolo_rknn_toolkit_config_init(YOLO_TOOLKIT_MODEL_CTX_S *ctx)
{

  // Load RKNN Model
  int ret = rknn_init(&ctx->context, ctx->modelPath, 0, 0, NULL); // initialize the inference context
  RK_LOGI("===init model===\n");

  if (ret < 0)
  {
    RK_LOGE("rknn_init fail! ret=%d\n", ret);
    return -1;
  }

  // Get Model Input Output Info
  ret = rknn_query(ctx->context, RKNN_QUERY_IN_OUT_NUM, &ctx->io_num, sizeof(rknn_input_output_num));
  if (ret != RKNN_SUCC)
  {
    RK_LOGE("rknn_query fail! ret=%d\n", ret);
    return -1;
  }
  RK_LOGI("model input num: %d, output num: %d\n", ctx->io_num.n_input, ctx->io_num.n_output);

  // Get Input Tensor Attrs
  RK_LOGI("input tensors:\n");
  for (uint32_t i = 0; i < ctx->io_num.n_input; i++)
  {
    ctx->input_attrs[i].index = i;
    // query info
    ret = rknn_query(ctx->context, RKNN_QUERY_INPUT_ATTR, &(ctx->input_attrs[i]), sizeof(rknn_tensor_attr));
    if (ret < 0)
    {
      // Fixed: previous message said "rknn_init error!" but the failing call is rknn_query.
      RK_LOGE("rknn_query fail! ret=%d\n", ret);
      return -1;
    }
    dump_tensor_attr(&ctx->input_attrs[i]);
  }

  // Get Output Tensor Attrs (native layout, required for zero-copy output buffers)
  RK_LOGI("output tensors:\n");
  for (uint32_t i = 0; i < ctx->io_num.n_output; i++)
  {
    ctx->output_attrs[i].index = i;
    // query info
    ret = rknn_query(ctx->context, RKNN_QUERY_NATIVE_OUTPUT_ATTR, &(ctx->output_attrs[i]), sizeof(rknn_tensor_attr));
    if (ret != RKNN_SUCC)
    {
      RK_LOGE("rknn_query fail! ret=%d\n", ret);
      return -1;
    }
    dump_tensor_attr(&ctx->output_attrs[i]);
  }
  return 1;
}

/**
 * @brief Allocate zero-copy tensor memory for the model's input and outputs,
 *        and bind the output buffers to the RKNN context.
 *
 * The input buffer is allocated here but only bound later (in
 * yolo_rknn_toolkit_data_refresh) after the frame data has been copied in.
 *
 * @param ctx model context; config_init must have populated io_num and attrs.
 * @return 1 on success, -1 on allocation or bind failure.
 */
int yolo_rknn_toolkit_io_init(YOLO_TOOLKIT_MODEL_CTX_S *ctx)
{
  // Create input tensor memory
  ctx->input_attrs[0].type = INPUT_TYPE;
  // default fmt is NHWC, npu only support NHWC in zero copy mode
  ctx->input_attrs[0].fmt = INPUT_LAYOUT;
  ctx->input_mems[0] = rknn_create_mem(ctx->context, ctx->input_attrs[0].size_with_stride);
  // Fixed: rknn_create_mem can fail; a NULL buffer would be dereferenced later.
  if (ctx->input_mems[0] == NULL)
  {
    RK_LOGE("rknn_create_mem fail for input 0!\n");
    return -1;
  }

  // Create output tensor memory
  for (uint32_t i = 0; i < ctx->io_num.n_output; ++i)
  {
    ctx->output_mems[i] = rknn_create_mem(ctx->context, ctx->output_attrs[i].size_with_stride);
    if (ctx->output_mems[i] == NULL)
    {
      RK_LOGE("rknn_create_mem fail for output %u!\n", i);
      return -1;
    }
  }

  // Set output tensor memory
  for (uint32_t i = 0; i < ctx->io_num.n_output; ++i)
  {
    // set output memory and attribute
    int ret = rknn_set_io_mem(ctx->context, ctx->output_mems[i], &ctx->output_attrs[i]);
    if (ret < 0)
    {
      RK_LOGE("rknn_set_io_mem fail! ret=%d\n", ret);
      return -1;
    }
  }

  return 1;
}

/**
 * @brief Copy one frame into the zero-copy input buffer and bind it to the context.
 *
 * Assumes NHWC input with dims = {N, H, W, C} and 1 byte per element
 * (consistent with the uint8 pointer and byte-wise memcpy) — TODO confirm
 * against the dumped input attrs. When the NPU buffer has per-row padding
 * (w_stride != width) the copy is done row by row, skipping the padding.
 *
 * @param ctx        model context with input_attrs/input_mems initialized.
 * @param input_data tightly packed source frame, H * W * C bytes.
 * @return 1 on success, -1 if binding the buffer fails.
 */
int yolo_rknn_toolkit_data_refresh(YOLO_TOOLKIT_MODEL_CTX_S *ctx, unsigned char *input_data)
{
  const int img_w = ctx->input_attrs[0].dims[2];
  const int row_stride = ctx->input_attrs[0].w_stride;
  RK_LOGD("===width is %d, stride is %d====\n", img_w, row_stride);

  if (img_w == row_stride)
  {
    // Tightly packed destination: one bulk copy of H * W * C bytes.
    const int total_bytes = img_w * ctx->input_attrs[0].dims[1] * ctx->input_attrs[0].dims[3];
    memcpy(ctx->input_mems[0]->virt_addr, input_data, total_bytes);
    RK_LOGD("===input data len is %d===\n", total_bytes);
  }
  else
  {
    // Padded destination rows: advance source by the packed row size and
    // destination by the strided row size on every line.
    const int img_h = ctx->input_attrs[0].dims[1];
    const int chans = ctx->input_attrs[0].dims[3];
    const int src_row_bytes = img_w * chans;
    const int dst_row_bytes = row_stride * chans;
    const uint8_t *src = input_data;
    uint8_t *dst = (uint8_t *)ctx->input_mems[0]->virt_addr;
    for (int row = 0; row < img_h; ++row)
    {
      memcpy(dst, src, src_row_bytes);
      src += src_row_bytes;
      dst += dst_row_bytes;
    }
  }

  // Bind (or re-bind) the refreshed input buffer to the inference context.
  int ret = rknn_set_io_mem(ctx->context, ctx->input_mems[0], &ctx->input_attrs[0]);
  if (ret < 0)
  {
    RK_LOGE("rknn_set_io_mem fail! ret=%d\n", ret);
    return -1;
  }
  return 1;
}
/**
 * @brief Convert the NPU-native (NC1HWC2) int8 outputs to NCHW, run YOLO
 *        post-processing (dequantize + decode + NMS), and return the results.
 *
 * Temporary NCHW buffers are heap-allocated per output and freed before
 * returning. Quantization scale/zero-point are taken from the native output
 * attrs queried at init time.
 *
 * @param ctx model context with output_mems populated by a completed inference.
 * @return detection results; zero-initialized (count == 0) on allocation failure.
 */
detect_result_group_t yolo_rknn_toolkit_result(YOLO_TOOLKIT_MODEL_CTX_S *ctx)
{
  RK_LOGD("output origin tensors:\n");
  detect_result_group_t detect_result_group;
  // Zero-init so an early return cannot hand the caller uninitialized memory.
  memset(&detect_result_group, 0, sizeof(detect_result_group));

  // Scratch buffers for the de-tiled (NCHW) copies of each output tensor.
  int8_t *output_mems_nchw[ctx->io_num.n_output];
  for (uint32_t i = 0; i < ctx->io_num.n_output; ++i)
  {
    int size = ctx->output_attrs[i].size_with_stride;
    output_mems_nchw[i] = (int8_t *)malloc(size);
    // Fixed: malloc was unchecked; on failure release what was allocated and bail out.
    if (output_mems_nchw[i] == NULL)
    {
      RK_LOGE("malloc fail for output %u!\n", i);
      while (i > 0)
        free(output_mems_nchw[--i]);
      return detect_result_group;
    }
  }

  // NC1HWC2 -> NCHW for every output; tensors with fewer than 4 dims are
  // treated as having 1x1 spatial extent.
  for (uint32_t i = 0; i < ctx->io_num.n_output; i++)
  {
    int channel = ctx->output_attrs[i].dims[1];
    int h = ctx->output_attrs[i].n_dims > 2 ? ctx->output_attrs[i].dims[2] : 1;
    int w = ctx->output_attrs[i].n_dims > 3 ? ctx->output_attrs[i].dims[3] : 1;
    NC1HWC2_int8_to_NCHW_int8((int8_t *)ctx->output_mems[i]->virt_addr, (int8_t *)output_mems_nchw[i], (int *)ctx->output_attrs[i].dims,
                              channel, h, w);
  }

  // Per-output quantization parameters used by post_process to dequantize.
  std::vector<float> out_scales;
  std::vector<int32_t> out_zps;
  for (uint32_t i = 0; i < ctx->io_num.n_output; ++i)
  {
    out_scales.push_back(ctx->output_attrs[i].scale);
    out_zps.push_back(ctx->output_attrs[i].zp);
    // Fixed: scale is a float — logging it with %d is undefined behavior in
    // printf-style varargs; use %f.
    RK_LOGD("===%d : output_attr scale is %f, zp is %d===\n", i, ctx->output_attrs[i].scale, ctx->output_attrs[i].zp);
  }
  post_process((int8_t *)output_mems_nchw[0], (int8_t *)output_mems_nchw[1], (int8_t *)output_mems_nchw[2], YOLO_IMG_WIDTH, YOLO_IMG_HEIGHT,
               BOX_CONF_THRESHOLD, NMS_THRESHOLD, SCALE_W, SCALE_H, out_zps, out_scales, &detect_result_group);

  char text[256];
  RK_LOGD("=====detect count is %d===\n", detect_result_group.count);
  for (int i = 0; i < detect_result_group.count; i++)
  {
    detect_result_t *det_result = &(detect_result_group.results[i]);
    // snprintf guards against a long class name overflowing `text` (was sprintf).
    snprintf(text, sizeof(text), "%s %.1f%%", det_result->name, det_result->prop * 100);
  }
  for (uint32_t i = 0; i < ctx->io_num.n_output; ++i)
  {
    free(output_mems_nchw[i]);
  }
  return detect_result_group;
}