#ifndef NANOTRACK_H
#define NANOTRACK_H

#include <dlfcn.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <iostream>
#include <bits/stdc++.h>
#include <vector> 
#include <map>  
#include <thread>
#include "rknn_api.h"
#include "omp.h"

#include <opencv2/core/core.hpp> 
#include <opencv2/highgui/highgui.hpp> 
#include <opencv2/imgproc/imgproc.hpp> 
#include <opencv2/opencv.hpp>

#define PI 3.1415926 
#define _BASETSD_H
#define CONFIDENCE_HIGH 0.85
#define LOST_COUNT_THREAD 5


// Static tracker hyper-parameters (SiamFC-style correlation tracking).
// Values mirror the original NanoTrack Python config and are not
// modified at runtime except for `window`, which is filled lazily.
struct Config{

    // Score-map weighting scheme; "cosine" selects a Hanning window prior.
    std::string windowing = "cosine";
    std::vector<float> window;               // flattened window values, built by create_window()

    int stride = 16;                         // network output stride
    float penalty_k = 0.15f;                 // penalty coefficient for scale/aspect-ratio change
    float window_influence = 0.28f;          // blend factor: raw score vs. window prior
    float lr = 0.5f;                         // smoothing rate for target-size updates
    int exemplar_size = 127;                 // template (z) crop side, pixels
    int instance_size[3] = {768, 512, 255};  // search (x) crop sides, one per dynamic-shape mode
    int total_stride = 16;
    int score_size[3] = {48, 32, 16};        // score-map sides matching each instance_size
    float context_amount = 0.5f;             // context padding ratio around the target box
};

// Dynamic per-track state, updated on every frame.
// All members are default-initialized so a State read before init()
// no longer contains indeterminate values.
struct State {
    int im_h = 0;                        // source image height, pixels
    int im_w = 0;                        // source image width, pixels
    cv::Scalar channel_ave;              // per-channel mean used to pad out-of-image crops
    cv::Point target_pos = {0, 0};       // current target center, pixels
    cv::Point2f target_sz = {0.f, 0.f};  // current target width/height, pixels
    float cls_score_max = 0.f;           // best classification score from the last update
};

// Thin RAII wrapper around one RKNN model: loads the blob, owns the
// rknn_context, and exposes inference via operator().  Non-copyable:
// it owns a raw model buffer and a native context handle, so copy
// construction AND copy assignment are both deleted (the original
// deleted only the copy constructor, leaving the implicit copy
// assignment able to shallow-copy model_data/ctx and double-free).
class rknnModel{
public:
  std::vector<rknn_input> inputs;
  std::vector<rknn_tensor_attr> input_attrs;
  std::vector<rknn_output> outputs;
  std::vector<rknn_tensor_attr> output_attrs;
  std::vector<cv::Mat> outputCvMat;                  // per-output tensors converted to cv::Mat
  std::vector<rknn_input_range> shape_range;         // dynamic-shape ranges reported by the runtime
  std::vector<rknn_tensor_attr> curr_input_attrs;    // attrs for the currently selected dynamic shape
  std::vector<rknn_tensor_attr> curr_output_attrs;

  // Loads the model file and initializes the rknn context.
  // modelType distinguishes backbone/head handling in the .cpp.
  rknnModel(std::string model_name, char modelType);
  rknnModel(const rknnModel&) = delete;
  rknnModel& operator=(const rknnModel&) = delete;
  // Runs inference on the given input image(s); mode selects the
  // dynamic input shape when the model supports it.
  void operator()(std::vector<cv::Mat> img , unsigned int mode);
  cv::Mat& getOutputMat(int id);                     // non-owning reference into outputCvMat
  void transposeMat(cv::Mat& img);
  ~rknnModel();

private:
  unsigned char* model_data;                         // raw model blob, owned; freed in the destructor
  rknn_context ctx;                                  // native handle, owned
  rknn_input_output_num io_num;
  int channel = 3, width = 0, height = 0, ret = 0, model_data_size = 0;
  bool transposeFlag = false;
  bool initFlag = false;
  bool dynamicFlag = false;                          // true when the model reports dynamic input shapes
  unsigned int dyn_mode = 2;
  unsigned char* load_data(FILE* fp, size_t ofst, size_t sz);
  unsigned char* load_model(const char* filename, int* model_size);
};

// Single-object tracker combining three RKNN models: a template (T)
// backbone, a search-region (X) backbone, and a correlation head.
// Typical use: construct, call init() once with the first frame and
// box, then call track() per frame; results land in `state`.
class NanoTrack {

public: 
    NanoTrack(std::string T_backbone_model, std::string X_backbone_model, std::string head_model);
    ~NanoTrack(); 
    // Crops the template around bbox in img and runs the T backbone once.
    void init(cv::Mat img, cv::Rect bbox);
    // Core update step: scores x_crops against the stored template and
    // writes the refined position/size/score through the out-params.
    void update(const cv::Mat &x_crops, const cv::Mat &img, cv::Point &target_pos, cv::Point2f &target_sz, float scale_z, float &cls_score_max);
    // Per-frame entry point; returns a pair of floats (presumably a
    // score/confidence summary — confirm against the .cpp definition).
    std::pair<float, float> track(cv::Mat im);
    void load_model(std::string T_backbone_model, std::string X_backbone_model, std::string model_head);
    // RKNN model wrappers (added by xwd, 2022-07-20).
    rknnModel module_T127, module_X, net_head;
  
    unsigned int mode = 2;          // dynamic-shape mode index into cfg.instance_size/score_size
    int stride=16;
    // state  dynamic
    State state;
    // config static
    Config cfg; 
    // ImageNet per-channel normalization, pre-scaled to the 0..255 range.
    const float mean_vals[3] = { 0.485f*255.f, 0.456f*255.f, 0.406f*255.f };  
    const float norm_vals[3] = {1/0.229f/255.f, 1/0.224f/255.f, 1/0.225f/255.f};
private: 
    void create_grids();            // precomputes score-map cell -> image coordinates
    void create_window();           // builds the cosine/Hanning window prior
    // Extracts a model_sz x model_sz crop centered at pos, padding
    // out-of-image regions with channel_ave.
    cv::Mat get_subwindow_tracking(cv::Mat im, cv::Point2f pos, int model_sz, int original_sz,cv::Scalar channel_ave);
    std::vector<float> grid_to_search_x;
    std::vector<float> grid_to_search_y;
    std::vector<float> window;
    cv::Mat z_crop;                 // cached template crop from init()
    size_t lost_count = 0;          // consecutive low-confidence frames (see LOST_COUNT_THREAD)
    bool is_255 = true;
    // Scratch buffers reused across track() calls to avoid reallocation.
    std::vector<float> pred_x1, pred_y1, pred_x2, pred_y2, cls_score_sigmoid, hanning, w, h, pscore, penalty, s_c, r_c;
   
};

#endif 
