// Copyright (c) 2021 by Rockchip Electronics Co., Ltd. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

/*-------------------------------------------
                Includes
-------------------------------------------*/
#include <dlfcn.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <iostream>
#include <bits/stdc++.h>

#define _BASETSD_H

#include "RgaUtils.h"
#include "im2d.h"
#include "opencv2/opencv.hpp"
#include "opencv2/core/core.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/imgproc.hpp"
#include "rknn_api.h"
#include "nanotrack.hpp"

using namespace std;

/*-------------------------------------------
                  Functions
-------------------------------------------*/

// Convert a struct timeval to microseconds.
// The arithmetic is done in double (note the 1000000.0 literal) so that
// tv_sec * 1e6 cannot overflow a 32-bit time_t/long before the implicit
// conversion to double that the original relied on.
// NOTE(review): the leading-double-underscore name is reserved to the
// implementation in C++, but it is kept here because external callers use it.
double __get_us(struct timeval t) { return (t.tv_sec * 1000000.0 + t.tv_usec); }



// Pretty-print one rknn_tensor_attr to stdout: index, name, shape,
// element count, byte size, width stride, layout, dtype and quantization
// parameters. Debug aid only; no side effects beyond printf.
static void dump_tensor_attr(rknn_tensor_attr *attr)
{
    // Build "d0, d1, ..." from dims[]; empty string when n_dims == 0.
    // n_dims is unsigned, so iterate with an unsigned index to avoid the
    // signed/unsigned comparison the original int loop produced.
    std::string shape_str = attr->n_dims < 1 ? "" : std::to_string(attr->dims[0]);
    for (uint32_t i = 1; i < attr->n_dims; ++i)
    {
        shape_str += ", " + std::to_string(attr->dims[i]);
    }

    printf("  index=%d, name=%s, n_dims=%d, dims=[%s], n_elems=%d, size=%d, w_stride = %d, size_with_stride=%d, fmt=%s, "
           "type=%s, qnt_type=%s, "
           "zp=%d, scale=%f\n",
           attr->index, attr->name, attr->n_dims, shape_str.c_str(), attr->n_elems, attr->size, attr->w_stride,
           attr->size_with_stride, get_format_string(attr->fmt), get_type_string(attr->type),
           get_qnt_type_string(attr->qnt_type), attr->zp, attr->scale);
}

rknnModel::rknnModel(std::string model_name, char modelType){
  /* Create the neural network.
   * modelType selects the input convention:
   *   'T' : uint8 NHWC input; outputs later transposed NCHW->NHWC
   *   'X' : uint8 NHWC input, dynamic input shapes; outputs transposed
   *   'H' : float32 NHWC input, dynamic input shapes
   * Any other value is a fatal configuration error. */
  model_data_size = 0;
  model_data      = load_model(model_name.c_str(), &model_data_size);
  if (model_data == NULL)
  {
    // The original never checked the load result; rknn_init would then be
    // handed a NULL blob with no diagnostic.
    cerr << "load model failed : " << model_name << endl;
  }
  ret = rknn_init(&ctx, model_data, model_data_size, 0, NULL);
  if (ret < 0)
  {
    // The original stored but never inspected this return value.
    cerr << "rknn_init error! ret = " << ret << endl;
  }

  /* Query the input/output tensor counts and reserve per-tensor storage. */
  ret = rknn_query(ctx, RKNN_QUERY_IN_OUT_NUM, &io_num, sizeof(io_num));
  if (ret < 0)
  {
    cerr << "rknn _query input_output_num error! ret = " << ret << endl;
  }

  inputs.resize(io_num.n_input);
  input_attrs.resize(io_num.n_input);
  outputs.resize(io_num.n_output);
  output_attrs.resize(io_num.n_output);
  outputCvMat.resize(io_num.n_output);
  shape_range.resize(io_num.n_input);
  curr_input_attrs.resize(io_num.n_input);
  curr_output_attrs.resize(io_num.n_output);

  // Query each input's attributes; they seed the rknn_input descriptors below.
  for (uint32_t i = 0; i < io_num.n_input; i++){
    input_attrs[i].index = i;
    ret = rknn_query(ctx, RKNN_QUERY_INPUT_ATTR, &(input_attrs[i]), sizeof(rknn_tensor_attr));
    if (ret < 0)
    {
      cerr << " rknn_query error! ret =  " << ret << endl;
    }

    // Preset the static parts of the input descriptor.
    inputs[i].index        = i;
    inputs[i].fmt          = RKNN_TENSOR_NHWC; // inputs are always fed in NHWC order
    inputs[i].pass_through = 0;

    if (modelType == 'T'){
        inputs[i].type = RKNN_TENSOR_UINT8;
        transposeFlag = true;
    }
    else if (modelType == 'X'){
        inputs[i].type = RKNN_TENSOR_UINT8;
        dump_tensor_attr(&input_attrs[i]);
        transposeFlag = true;
        dynamicFlag = true;
    }
    else if (modelType == 'H'){
        inputs[i].type = RKNN_TENSOR_FLOAT32;
        dynamicFlag = true;
    }
    else{
        std::cout << "ERROR : check model type" << std::endl;
        exit(-1);
    }

    // Dynamic-shape models: fetch the supported shape presets for this input.
    if (dynamicFlag)
    {
      shape_range[i].index = i;
      ret = rknn_query(ctx, RKNN_QUERY_INPUT_DYNAMIC_RANGE, &(shape_range[i]), sizeof(rknn_input_range));
      if (ret < 0)
      {
        cerr << " rknn_query dynamic range error! ret = " << ret << endl;
      }
    }
  }

  // Query each output's attributes; request float results and let the
  // runtime own the output buffers (is_prealloc = 0).
  for (uint32_t i = 0; i < io_num.n_output; i++){
    outputs[i].want_float = 1;
    outputs[i].is_prealloc = 0;
    outputs[i].index = i;
    output_attrs[i].index = i;
    ret = rknn_query(ctx, RKNN_QUERY_OUTPUT_ATTR, &(output_attrs[i]), sizeof(rknn_tensor_attr));
    if (ret < 0)
    {
      cerr << "rknn query output attrs error! ret = " << ret << endl;
    }
  }
}
  
// Run one inference pass. `img` supplies one cv::Mat per model input (data
// pointers are borrowed, not copied); `mode` indexes the dynamic-shape
// preset in shape_range[i].dyn_range and is ignored for static models.
// NOTE(review): a default argument on an out-of-line member definition is
// only well-formed if the in-class declaration omits it — verify against
// nanotrack.hpp.
void rknnModel::operator()(std::vector<cv::Mat> img , unsigned int mode = 2){
 
 if (!dynamicFlag){
    // Static-shape model: sizes never change, so compute them once.
    if (!initFlag)
    {
      initFlag = true;
      for (int i=0; i<io_num.n_input; i++)
      {
        // Pull W/H/C out of dims[] according to the tensor layout.
        width   = input_attrs[i].fmt == RKNN_TENSOR_NHWC ? input_attrs[i].dims[2] : input_attrs[i].dims[3];
        height  = input_attrs[i].fmt == RKNN_TENSOR_NHWC ? input_attrs[i].dims[1] : input_attrs[i].dims[2];
        channel = input_attrs[i].fmt == RKNN_TENSOR_NHWC ? input_attrs[i].dims[3] : input_attrs[i].dims[1];
        // Size in bytes assumes 1 byte/element (uint8 input).
        inputs[i].size = width * height * channel;
        inputs[i].buf  = (void*) img[i].data;
      }
    }
 }
 else{
    // Dynamic-shape model: reconfigure only when the preset changes.
    if (!initFlag || (dyn_mode!=mode)){
      initFlag = true;
      dyn_mode = mode;
      // Overwrite each input's dims with the dims of preset `mode`.
      for (int i=0; i<io_num.n_input; i++)
        for (int j=0; j<input_attrs[i].n_dims; j++)
        {
          input_attrs[i].dims[j] = shape_range[i].dyn_range[mode][j];
          //cout << input_attrs[i].dims[j] << " " << endl;
        }

      ret = rknn_set_input_shapes(ctx, io_num.n_input, &input_attrs[0]);
      if (ret < 0)
      {
        cerr << " rknn set input shapes error! ret = " << ret << endl;
      }

      // Re-query the now-current input attrs to derive buffer sizes.
      for (int i=0; i<io_num.n_input; i++)
      {
        curr_input_attrs[i].index = i;
        ret = rknn_query(ctx, RKNN_QUERY_CURRENT_INPUT_ATTR, &(curr_input_attrs[i]), sizeof(rknn_tensor_attr));
        if (ret < 0)
        {
          cerr << "rknn_query current input attrs error! ret = " << ret << endl;
        }
        // dump_tensor_attr(&curr_input_attrs[i]);
       
        width   = curr_input_attrs[i].fmt == RKNN_TENSOR_NHWC ? curr_input_attrs[i].dims[2] : curr_input_attrs[i].dims[3];
        height  = curr_input_attrs[i].fmt == RKNN_TENSOR_NHWC ? curr_input_attrs[i].dims[1] : curr_input_attrs[i].dims[2];
        channel = curr_input_attrs[i].fmt == RKNN_TENSOR_NHWC ? curr_input_attrs[i].dims[3] : curr_input_attrs[i].dims[1];
        
        // The element size cannot be decided via curr_input_attrs[i].type ==
        // UINT8: the queried type is always FLOAT16, even when input_attrs'
        // type was set to UINT8. So the channel count is used instead:
        // 3 channels => uint8 image input, 48 channels => float feature map.
        // NOTE(review): dims[3]==3/48 are model-specific magic numbers —
        // confirm against the deployed NanoTrack models.
        if (curr_input_attrs[i].dims[3] == 3)
        {
          inputs[i].size = width * height * channel;
        }
        else if (curr_input_attrs[i].dims[3] == 48)
        {
          inputs[i].size = width * height * channel * sizeof(float);
        }
        
      }
    }

    // Input data pointers may change every call even when shapes do not.
    for (int i=0; i<io_num.n_input; i++)
    {
      inputs[i].buf = (void*)img[i].data;
    }

    // Keep curr_output_attrs in sync so getOutputMat() sees current dims.
    for (int i=0; i<io_num.n_output; i++)
    {
      ret = rknn_query(ctx, RKNN_QUERY_CURRENT_OUTPUT_ATTR, &(curr_output_attrs[i]), sizeof(rknn_tensor_attr));
      if (ret < 0)
      {
          cerr << "rknn query current output attrs error! ret = " << ret << endl;
      }
    }
    
    }

  // Configure the model inputs, run inference, and fetch the outputs.
  rknn_inputs_set(ctx, io_num.n_input, &inputs[0]);
  ret = rknn_run(ctx, NULL);
  ret = rknn_outputs_get(ctx, io_num.n_output, &outputs[0], NULL);
  if(transposeFlag){
    for(int i=0;i<io_num.n_output;i++)
      transposeMat(getOutputMat(i));  // convert NCHW to NHWC to match the 'H' model's input layout
  }
}

// Wrap the raw rknn output buffer of tensor `id` in a cv::Mat view. No copy
// is made: the Mat aliases outputs[id].buf, which remains owned by the rknn
// runtime (valid until rknn_outputs_release). The output layout from the
// generic API is NCHW, so rows=dims[2] (H) and cols=dims[3] (W).
cv::Mat& rknnModel::getOutputMat(int id){
    // Dynamic-shape models must use the attrs re-queried after the most
    // recent rknn_set_input_shapes(); static models use the load-time attrs.
    const rknn_tensor_attr& attr = dynamicFlag ? curr_output_attrs[id]
                                               : output_attrs[id];

    // dims[1] is the channel count: 3 channels => 8-bit image,
    // anything else => 48-channel float feature map.
    const int cvMatType = (attr.dims[1] == 3) ? CV_8UC3 : CV_32FC(48);

    outputCvMat[id] = cv::Mat(attr.dims[2], attr.dims[3], cvMatType,
                              (void*)outputs[id].buf);
    return outputCvMat[id];
}

// In-place re-layout of a float tensor from planar CHW (as produced by the
// generic rknn API) to interleaved HWC, so it can be consumed like an image.
// Only the bytes behind img.data are reordered; the Mat header is untouched.
// NOTE(review): assumes img.data really holds rows*cols*channels floats —
// confirm the caller always passes the float outputs from getOutputMat().
void rknnModel::transposeMat(cv::Mat& img){
    float* src = (float*) img.data;
    const int channels = img.channels();
    const int plane    = img.rows * img.cols;
    vector<float> dst((size_t)plane * channels);
    #pragma omp parallel for
    for (int c = 0; c < channels; c++) {
        for (int p = 0; p < plane; p++) {
            dst[(size_t)p * channels + c] = src[(size_t)c * plane + p];
        }
    }
    // Single bulk copy back. This replaces the original element-by-element
    // OMP loop, which also compared a signed int against dst.size().
    memcpy(src, dst.data(), dst.size() * sizeof(float));
}

rknnModel::~rknnModel(){
  // Release in reverse order of acquisition: the output buffers handed out
  // by rknn_outputs_get, then the rknn context, then the model blob that
  // load_model() malloc'ed (free(NULL) is a safe no-op if loading failed).
  // NOTE(review): if operator() never ran, outputs[] were never filled by
  // rknn_outputs_get — verify rknn_outputs_release tolerates that case.
  ret = rknn_outputs_release(ctx, io_num.n_output, &outputs[0]);
  ret = rknn_destroy(ctx);
  free(model_data);
}

/* Read `sz` bytes starting at offset `ofst` of `fp` into a freshly
 * malloc'ed buffer. Returns NULL on any failure (bad stream, seek error,
 * allocation failure, short read); on success the caller owns the buffer
 * and must free() it. */
unsigned char* rknnModel::load_data(FILE* fp, size_t ofst, size_t sz)
{
  unsigned char* data;
  data = NULL;
  if (NULL == fp) {
    return NULL;
  }
  // SEEK_SET: `ofst` is measured from the beginning of the file.
  if (fseek(fp, ofst, SEEK_SET) != 0) {
    printf("blob seek failure.\n");
    return NULL;
  }
  data = (unsigned char*)malloc(sz);
  if (data == NULL) {
    printf("buffer malloc failure.\n");
    return NULL;
  }
  // The original ignored fread's result; a short read would silently hand
  // back a partially filled model buffer.
  if (fread(data, 1, sz, fp) != sz) {
    printf("blob read failure.\n");
    free(data);
    return NULL;
  }
  return data;
}

/* Load the whole file `filename` into memory. On success returns a
 * malloc'ed buffer (caller frees) and stores its size in *model_size;
 * returns NULL on any failure, leaving *model_size untouched. */
unsigned char* rknnModel::load_model(const char* filename, int* model_size)
{
  FILE*          fp;
  unsigned char* data;
  fp = fopen(filename, "rb");
  if (NULL == fp) {
    printf("Open file %s failed.\n", filename);
    return NULL;
  }
  // File size = end-of-file position: seek to SEEK_END, then ftell reports
  // the offset from the beginning of the file in bytes.
  fseek(fp, 0, SEEK_END);
  long size = ftell(fp);
  if (size < 0) {
    // The original stored ftell's result unchecked; -1 on error would have
    // been passed to load_data as a huge size_t.
    printf("Get size of %s failed.\n", filename);
    fclose(fp);
    return NULL;
  }
  data = load_data(fp, 0, (size_t)size);
  fclose(fp);
  if (data == NULL) {
    // The original reported a size even when the read failed.
    return NULL;
  }
  *model_size = (int)size;
  return data;
}
