#include <iostream>
#include <cstdlib>
#include <string>
#include "nanotrack.hpp"
#include <cmath>


using namespace std;

// Fast approximation of exp(x) (Schraudolph's method): builds the IEEE-754
// bit pattern directly. 1/ln(2) = 1.4426950409 converts x to a base-2
// exponent; 126.93490512 is the bias/correction constant of this variant.
// Accuracy is a few percent relative error — fine for score shaping, not for
// precise math. Fix: use float literals throughout so the multiply does not
// silently promote to double.
inline float fast_exp(float x)
{
    union {
        uint32_t i;
        float f;
    } v{};
    v.i = static_cast<uint32_t>((1 << 23) * (1.4426950409f * x + 126.93490512f));
    return v.f;
}

// Two-class softmax: probability of class y given logits (x, y).
// Rewritten in the numerically stable logistic form
//   exp(y)/(exp(x)+exp(y)) == 1/(1+exp(x-y)),
// which avoids the inf/inf = NaN the naive form produces when y is large.
inline float softmax(float x, float y){
    return 1.0f / (1.0f + std::exp(x - y));
}

// Context-padded equivalent size of a (w, h) box:
// sqrt((w + p) * (h + p)) with p = (w + h) / 2.
static float sz_whFun(cv::Point2f wh)
{
    const float context = 0.5f * (wh.x + wh.y);
    return std::sqrt((wh.x + context) * (wh.y + context));
}

// Size-change penalty: for every candidate box (w[i], h[i]) compute its
// context-padded equivalent size relative to the reference size `sz`, folded
// so the ratio is always >= 1 (sz2[i] = max(t, 1/t)).
//
// Fixes vs the original:
//  * the `static std::vector<float> pad` scratch buffer was sized at the
//    FIRST call only (out-of-bounds if a later call is larger) and made the
//    function thread-unsafe — the per-element pad is now a local, so no
//    buffer is needed at all;
//  * the two passes used inconsistent indexing (i*cols+j vs i*rows+j) — a
//    single flat loop over the whole range is equivalent (rows*cols ==
//    w.size() for the square score maps used here) and unambiguous.
void sz_change_fun(const std::vector<float>& w, const std::vector<float>& h, float sz, std::vector<float>& sz2)
{
    const int n = static_cast<int>(w.size()); // w and h have identical length

    #pragma omp parallel for
    for (int i = 0; i < n; i++)
    {
        const float pad = (w[i] + h[i]) * 0.5f;
        const float t = std::sqrt((w[i] + pad) * (h[i] + pad)) / sz;
        sz2[i] = std::max(t, 1.0f / t);
    }
}

// Aspect-ratio penalty: sz2[i] = max(r, 1/r) with
// r = (target w/h) / (candidate w/h), so the value is always >= 1 and grows
// as the candidate's aspect ratio drifts from the target's.
void ratio_change_fun(const std::vector<float>& w, const std::vector<float>& h, const cv::Point2f& target_sz, std::vector<float>& sz2)
{
    const int side = int(std::sqrt(w.size()));
    const int total = side * side;
    const float target_ratio = target_sz.x / target_sz.y;

    #pragma omp parallel for
    for (int idx = 0; idx < total; idx++)
    {
        const float r = target_ratio / (w[idx] / h[idx]);
        sz2[idx] = std::max(r, 1.0f / r);
    }
}



// Construct the tracker: load the three network modules (template backbone
// 'T', search backbone 'X', correlation head 'H') and pre-allocate every
// per-frame scratch buffer at the maximum score-map size (48x48) so no
// reallocation happens during tracking.
NanoTrack::NanoTrack(std::string T_backbone_model, std::string X_backbone_model, std::string head_model):
                    module_T127(T_backbone_model, 'T'),
                    module_X(X_backbone_model, 'X'),
                    net_head(head_model, 'H'),
                    hanning(std::vector<float>(48)),  // sized 48 so the largest search region never indexes out of range
                    pred_x1(std::vector<float>(48*48)), 
                    pred_y1(std::vector<float>(48*48)),  
                    pred_x2(std::vector<float>(48*48)),  
                    pred_y2(std::vector<float>(48*48)),  
                    cls_score_sigmoid(std::vector<float>(48*48)),
                    w(std::vector<float>(48*48)),  
                    h(std::vector<float>(48*48)),
                    pscore(std::vector<float>(48*48)),
                    penalty(std::vector<float>(48*48)),
                    s_c(std::vector<float>(48*48)),
                    r_c(std::vector<float>(48*48))
{   
    
}

// Destructor: nothing to release explicitly — every member cleans up via its
// own destructor.
NanoTrack::~NanoTrack()
{  
    
}

// Initialize tracking on the first frame: build the penalty window and
// coordinate grids, extract the context-padded template (exemplar) crop
// around `bbox`, run the template backbone once, and record the initial
// tracker state (position, size, image dims, mean color for padding).
void NanoTrack::init(cv::Mat img, cv::Rect bbox) 
{
    create_window(); 
    create_grids(); 

    cv::Point target_pos; // target centre (cx, cy)
    cv::Point2f target_sz = {0.f, 0.f}; // target width, height

    target_pos.x = bbox.x + bbox.width / 2;  
    target_pos.y = bbox.y + bbox.height / 2; 
    target_sz.x=bbox.width;
    target_sz.y=bbox.height;
    
    // Context-padded template size: add context_amount * (w + h) to each side,
    // then take the geometric mean to get a square crop side s_z.
    float wc_z = target_sz.x + cfg.context_amount * (target_sz.x + target_sz.y);
    float hc_z = target_sz.y + cfg.context_amount * (target_sz.x + target_sz.y);
    float s_z = round(sqrt(wc_z * hc_z));  

    // Mean image color is used as the padding value when the crop exceeds the frame.
    cv::Scalar avg_chans = cv::mean(img);
    
    z_crop = get_subwindow_tracking(img, target_pos, cfg.exemplar_size, int(s_z), avg_chans); //cv::Mat BGR order 
    module_T127({z_crop},2); // forward the template branch; 2 presumably selects the mode — confirm against module API
    
    this->state.channel_ave=avg_chans;
    this->state.im_h=img.rows;
    this->state.im_w=img.cols;
    this->state.target_pos=target_pos;
    this->state.target_sz= target_sz;  
}

// One tracking step: forward the search crop through the search backbone and
// correlation head, decode per-cell boxes, apply size/ratio/cosine-window
// penalties, pick the best cell, and write the new target position/size back
// in image scale. `cls_score_max` receives the raw classification score of
// the selected cell; on high confidence the template crop is refreshed with
// an exponential moving average.
void NanoTrack::update(const cv::Mat &x_crops, const cv::Mat &img, cv::Point &target_pos, cv::Point2f &target_sz,  float scale_z, float &cls_score_max)
{
    module_X({x_crops}, mode);
    net_head({module_T127.getOutputMat(0), module_X.getOutputMat(0)}, mode);

    int rows = net_head.getOutputMat(0).rows , cols = net_head.getOutputMat(0).cols;

    // BUGFIX: the original called cls_score_sigmoid.clear() and then wrote
    // through operator[], which is out-of-bounds on a size-0 vector (UB that
    // only "worked" because capacity was retained). resize() guarantees
    // rows*cols valid elements.
    cls_score_sigmoid.resize(rows * cols);

    // The head's first output stores two class planes back to back; the Mat
    // data is contiguous, so treat it as one flat float array. Hoisting the
    // pointer out of the parallel loop avoids a getOutputMat call per element.
    const float* cls_data = (const float*)net_head.getOutputMat(0).data;
    #pragma omp parallel for
    for (int i = 0; i < rows * cols; i++){
        cls_score_sigmoid[i] = softmax(cls_data[i], cls_data[rows * cols + i]);
    }

    // Second output: four regression planes — distances from each grid point
    // to the predicted box's left / top / right / bottom edges.
    float* bbox_pred_data1 = &((float*)net_head.getOutputMat(1).data)[rows*cols*0];
    float* bbox_pred_data2 = &((float*)net_head.getOutputMat(1).data)[rows*cols*1];
    float* bbox_pred_data3 = &((float*)net_head.getOutputMat(1).data)[rows*cols*2];
    float* bbox_pred_data4 = &((float*)net_head.getOutputMat(1).data)[rows*cols*3];

    #pragma omp parallel for
    for (int i=0; i<rows; i++)
    {
        for (int j=0; j<cols; j++)
        {
            pred_x1[i*cols + j] = this->grid_to_search_x[i*cols + j] - bbox_pred_data1[i*cols + j];
            pred_y1[i*cols + j] = this->grid_to_search_y[i*cols + j] - bbox_pred_data2[i*cols + j];
            pred_x2[i*cols + j] = this->grid_to_search_x[i*cols + j] + bbox_pred_data3[i*cols + j];
            pred_y2[i*cols + j] = this->grid_to_search_y[i*cols + j] + bbox_pred_data4[i*cols + j];
        }
    }

    // Candidate widths/heights (search-crop scale) for the size penalty.
    #pragma omp parallel for
    for (int i=0; i<rows; i++)
    {
        for (int j=0; j<cols; j++) 
        {
            w[i*cols + j] = pred_x2[i*cols + j] - pred_x1[i*cols + j];
            h[i*cols + j] = pred_y2[i*cols + j] - pred_y1[i*cols + j];
        }
    }

    float sz_wh = sz_whFun(target_sz);
    sz_change_fun(w, h, sz_wh, s_c);
    ratio_change_fun(w, h, target_sz, r_c);

    // Penalty decays exponentially with combined size/ratio deviation.
    #pragma omp parallel for
    for (int i = 0; i < rows * cols; i++){
        penalty[i] = std::exp(-1 * (s_c[i] * r_c[i]-1) * cfg.penalty_k);
    }

    // Blend with the cosine window and pick the best-scoring cell.
    int r_max = 0, c_max = 0; 
    float maxScore = 0; 

    for (int i = 0; i < rows * cols; i++)
    {
        pscore[i] = (penalty[i] * cls_score_sigmoid[i]) * (1 - cfg.window_influence) + this->window[i] * cfg.window_influence; 
        if (pscore[i] > maxScore) 
        {
            maxScore = pscore[i]; 
            // BUGFIX: the original derived row/col with float math divided by
            // `rows`; integer division/modulo by `cols` is exact and also
            // correct for non-square score maps.
            r_max = i / cols; 
            c_max = i % cols;  
        }
    }
    
    // Decode the winning box (still in search-crop scale).
    float pred_x1_real = pred_x1[r_max * cols + c_max]; // pred_x1[r_max, c_max]
    float pred_y1_real = pred_y1[r_max * cols + c_max];
    float pred_x2_real = pred_x2[r_max * cols + c_max];
    float pred_y2_real = pred_y2[r_max * cols + c_max];

    float pred_xs = (pred_x1_real + pred_x2_real) / 2;
    float pred_ys = (pred_y1_real + pred_y2_real) / 2;
    float pred_w = pred_x2_real - pred_x1_real;
    float pred_h = pred_y2_real - pred_y1_real;

    // The prediction is in search-crop coordinates (instance_size); subtract
    // the crop centre to get the displacement relative to the old target
    // centre, then divide by scale_z to map everything back to image scale.
    float diff_xs = pred_xs - cfg.instance_size[mode] / 2;
    float diff_ys = pred_ys - cfg.instance_size[mode] / 2;

    diff_xs /= scale_z;  // to image scale
    diff_ys /= scale_z;
    pred_w /= scale_z;
    pred_h /= scale_z;

    // target_sz was scaled to the search-branch scale before update(); undo that.
    target_sz.x = target_sz.x / scale_z;
    target_sz.y = target_sz.y / scale_z;

    // Size learning rate: blend old and predicted size, weighted by confidence.
    float lr = penalty[r_max * cols + c_max] * cls_score_sigmoid[r_max * cols + c_max] * cfg.lr;

    // Final result in image coordinates.
    auto res_xs = float (target_pos.x + diff_xs);
    auto res_ys = float (target_pos.y + diff_ys);
    float res_w = pred_w * lr + (1 - lr) * target_sz.x;
    float res_h = pred_h * lr + (1 - lr) * target_sz.y;

    target_pos.x = int(res_xs);
    target_pos.y = int(res_ys);

    target_sz.x = res_w;
    target_sz.y = res_h;

    cls_score_max = cls_score_sigmoid[r_max * cols + c_max];
    
    // Template update: on high confidence, refresh the exemplar crop with a
    // slow exponential moving average (99% old, 1% new).
    if (cls_score_max > CONFIDENCE_HIGH)
    {
        float wc_z = target_sz.x + cfg.context_amount * (target_sz.x + target_sz.y);
        float hc_z = target_sz.y + cfg.context_amount * (target_sz.x + target_sz.y);
        float s_z = round(sqrt(wc_z * hc_z));  
        cv::Scalar avg_chans = cv::mean(img);
        z_crop = z_crop * 0.99 + get_subwindow_tracking(img, target_pos, cfg.exemplar_size, int(s_z), avg_chans) * 0.01; //cv::Mat BGR order 
        // module_T127({z_crop},2);  // NOTE: re-forwarding the template is intentionally disabled
    }
    
}

// Changed: also returns cls_score_max (paired with the search-region size s_x)
// Track the target in frame `im`: crop a scaled search region around the last
// known position, run one update() step, and clamp the result to the image.
// Returns {cls_score_max, s_x}: the best classification score and the
// image-scale side length of the search region that was used.
pair<float, float> NanoTrack::track(cv::Mat im) 
{
    
    cv::Point target_pos = this->state.target_pos;
    cv::Point2f target_sz = this->state.target_sz;
    
    // Context-padded target size (same scheme as init()) gives the template
    // crop side s_z; scale_z maps image scale to the exemplar scale.
    float hc_z = target_sz.y + cfg.context_amount * (target_sz.x + target_sz.y);
    float wc_z = target_sz.x + cfg.context_amount * (target_sz.x + target_sz.y);
    float s_z = sqrt(wc_z * hc_z);  
    float scale_z = cfg.exemplar_size / s_z;  

    // Grow the crop so that, after resizing to instance_size, the template
    // region occupies exemplar_size pixels at its centre.
    float d_search = (cfg.instance_size[mode] - cfg.exemplar_size) / 2; 
    float pad = d_search / scale_z; 
    float s_x = s_z + 2*pad;

    cv::Mat x_crop;  
    x_crop = get_subwindow_tracking(im, target_pos, cfg.instance_size[mode], int(s_x),state.channel_ave);

    // Convert target_sz to the search-branch scale for update(), which
    // converts it back to image scale before writing the result.
    target_sz.x = target_sz.x * scale_z;
    target_sz.y = target_sz.y * scale_z;
      

    float cls_score_max; // always written by update()
    
    this->update(x_crop, im, target_pos, target_sz, scale_z, cls_score_max);
    
    // Confidence gate. NOTE(review): mode indexes cfg.instance_size /
    // cfg.score_size; mode 2 appears to be the normal search config and
    // mode 1 the recovery one, with is_255 flagging which grids/window are
    // currently built — confirm against the header.
    if (cls_score_max >= CONFIDENCE_HIGH)
    {
        mode = 2;
        lost_count = 0;
        cout << " succ score : " << cls_score_max << endl;
        if (!is_255)
        {
            create_grids();
            create_window();
        }
    }
    else{
        // Low confidence: keep the previous state and count consecutive losses.
        ++lost_count;
        cout << "lost score : " << cls_score_max << endl;
        target_pos = state.target_pos;
        target_sz = state.target_sz;
    }

    // After enough consecutive losses, switch to mode 1 and rebuild the
    // grids/window for that score-map size.
    if (lost_count >= LOST_COUNT_THREAD)
    {
        mode = 1;
        create_grids();
        create_window();
        is_255 = false;
    }

    // Clamp position into the frame and size to [30, frame side] pixels.
    target_pos.x = std::max(0, min(state.im_w, target_pos.x));
    target_pos.y = std::max(0, min(state.im_h, target_pos.y));
    target_sz.x = float(std::max(30, min(state.im_w, int(target_sz.x))));
    target_sz.y = float(std::max(30, min(state.im_h, int(target_sz.y))));

    state.target_pos = target_pos;
    state.target_sz = target_sz;
    return {cls_score_max, s_x};
}

// Build the Hanning (cosine) penalty window for the current score-map size
void NanoTrack::create_window()
{
    // Cosine penalty window: window[i][j] = hann(i) * hann(j). It damps
    // scores far from the score-map centre when blended into pscore.
    const int side = cfg.score_size[mode];
    this->window.resize(side * side, 0);

    // 1-D Hanning taper over one score-map axis.
    #pragma omp parallel for
    for (int k = 0; k < side; k++)
    {
        hanning[k] = 0.5f - 0.5f * std::cos(2 * 3.1415926535898f * k / (side - 1));
    }

    // Outer product of the 1-D taper with itself.
    #pragma omp parallel for
    for (int r = 0; r < side; r++)
    {
        for (int c = 0; c < side; c++)
        {
            this->window[r * side + c] = hanning[r] * hanning[c];
        }
    }
}

// Generate the search-image coordinates of every score-map grid point
void NanoTrack::create_grids()
{
    // Map every score-map cell (row, col) back to pixel coordinates on the
    // input search image: each cell covers `total_stride` input pixels.
    // Produces two sz*sz lookup tables (x and y per cell).
    const int side = cfg.score_size[mode];

    this->grid_to_search_x.resize(side * side, 0);
    this->grid_to_search_y.resize(side * side, 0);

    #pragma omp parallel for
    for (int row = 0; row < side; row++)
    {
        for (int col = 0; col < side; col++)
        {
            const int idx = row * side + col;
            this->grid_to_search_x[idx] = col * cfg.total_stride;
            this->grid_to_search_y[idx] = row * cfg.total_stride;
        }
    }
}

// Extract a square crop of side `original_sz` centred on `pos`, padding with
// `channel_ave` (the mean frame color) wherever the crop leaves the image,
// and resize it to model_sz x model_sz for the network.
// Fix: the original pre-allocated a zeroed Mat that cv::copyMakeBorder then
// reallocated/overwrote — that wasted allocation is removed.
cv::Mat NanoTrack::get_subwindow_tracking(cv::Mat im, cv::Point2f pos, int model_sz, int original_sz,cv::Scalar channel_ave)
{
    // Ideal crop bounds (may extend outside the image).
    float c = (float)(original_sz + 1) / 2;
    int context_xmin = std::round(pos.x - c);
    int context_xmax = context_xmin + original_sz - 1;
    int context_ymin = std::round(pos.y - c);
    int context_ymax = context_ymin + original_sz - 1;

    // Padding needed on each side to keep the crop inside the (padded) image.
    int left_pad = std::max(0, -context_xmin);
    int top_pad = std::max(0, -context_ymin);
    int right_pad = std::max(0, context_xmax - im.cols + 1);
    int bottom_pad = std::max(0, context_ymax - im.rows + 1);

    // Shift the crop rectangle into the padded image's coordinate frame.
    context_xmin += left_pad;
    context_xmax += left_pad;
    context_ymin += top_pad;
    context_ymax += top_pad;

    const cv::Rect roi(context_xmin, context_ymin,
                       context_xmax - context_xmin + 1, context_ymax - context_ymin + 1);

    cv::Mat im_patch_original;
    if (top_pad > 0 || left_pad > 0 || right_pad > 0 || bottom_pad > 0)
    {
        // copyMakeBorder allocates its own output; constant borders are
        // filled with the mean color.
        cv::Mat te_im;
        cv::copyMakeBorder(im, te_im, top_pad, bottom_pad, left_pad, right_pad, cv::BORDER_CONSTANT, channel_ave);
        im_patch_original = te_im(roi);
    }
    else
    {
        im_patch_original = im(roi);
    }

    cv::Mat im_patch;
    cv::resize(im_patch_original, im_patch, cv::Size(model_sz, model_sz));

    return im_patch; 
}
