//
// Created by xag on 2020/12/15.
//

#include "Cotton.h"
#include "common.hpp"
#include <chrono>
#include <ros/ros.h>

using namespace std;

Cotton2::Cotton2(const std::string &config_file) {
    // Load detector settings from an OpenCV FileStorage (YAML/XML) config file.
    // On failure the members keep their default values and an error is reported,
    // instead of silently reading garbage from an unopened storage.
    cv::FileStorage files_read(config_file, cv::FileStorage::READ);
    if (!files_read.isOpened()) {
        std::cerr << "Cotton2: failed to open config file: " << config_file << std::endl;
        return;
    }

    engine_file = (std::string)files_read["engine_file"];  // serialized TensorRT plan path
    BATCH_SIZE = (int)files_read["BATCH_SIZE"];
    INPUT_CHANNEL = (int)files_read["INPUT_CHANNEL"];
    IMAGE_WIDTH = (int)files_read["IMAGE_WIDTH"];
    IMAGE_HEIGHT = (int)files_read["IMAGE_HEIGHT"];
    obj_threshold = (float)files_read["obj_threshold"];    // heat-map score cutoff
    nms_threshold = (float)files_read["nms_threshold"];    // IOU cutoff used by NmsDetect2
    w_heat = (int)files_read["w_heat"];                    // heat-map grid width
    h_heat = (int)files_read["h_heat"];                    // heat-map grid height

    files_read.release();
}

// Default destructor: no manual cleanup here; members release their own resources.
Cotton2::~Cotton2() = default;

void Cotton2::LoadEngine() {
    // Deserialize the TensorRT engine if the plan file exists.
    // Open in binary (it is a serialized plan) and close the probe stream before
    // readTrtFile re-opens the same file; the original left it open.
    std::ifstream plan_file(engine_file, std::ios::in | std::ios::binary);
    if (plan_file.good()) {
        plan_file.close();
        readTrtFile(engine_file, engine, context);  // fills `engine` and `context`
        std::cout << "can load engine file." << std::endl;
        assert(engine != nullptr);
    } else {
        // report failure on stderr so it is visible even when stdout is redirected
        std::cerr << "Fail to find the tensorrt plan file !" << std::endl;
    }
}

void Cotton2::InferenceFolder_single2(cv::Mat& img_single, std::vector<cv::Point2f> &object_number) {
    // Allocate the engine's device buffers, run inference on one image, and
    // return the detected object centers through `object_number`.
    // Binding layout assumed: 0 = input image, 1 = regression output, 2 = heat map.
    // NOTE: kNumBindings is constexpr — the original used a runtime-sized array
    // (`void *buffers[target_binding]`), which is a non-standard VLA in C++.
    constexpr int kNumBindings = 3;
    assert(engine->getNbBindings() == kNumBindings);

    void *buffers[kNumBindings];
    std::vector<int64_t> bufferSize(kNumBindings);

    for (int i = 0; i < kNumBindings; ++i) {
        nvinfer1::Dims dims = engine->getBindingDimensions(i);
        nvinfer1::DataType dtype = engine->getBindingDataType(i);
        int64_t totalSize = volume(dims) * 1 * getElementSize(dtype);
        bufferSize[i] = totalSize;
        // device-side allocation; on failure release what was allocated and bail
        if (cudaMalloc(&buffers[i], totalSize) != cudaSuccess) {
            std::cerr << "cudaMalloc failed for binding " << i << std::endl;
            for (int j = 0; j < i; ++j) cudaFree(buffers[j]);
            return;
        }
    }

    cudaStream_t stream;
    cudaStreamCreate(&stream);

    // per-batch element counts of the two float outputs
    int outSize1 = bufferSize[1] / sizeof(float) / BATCH_SIZE;
    int outSize2 = bufferSize[2] / sizeof(float) / BATCH_SIZE;

    object_number = EngineInference_single2(img_single, outSize1, outSize2, buffers, bufferSize, stream);

    // release the stream and all device buffers
    cudaStreamDestroy(stream);
    for (int i = 0; i < kNumBindings; ++i) cudaFree(buffers[i]);
}

std::vector<cv::Point2f> Cotton2::EngineInference_single2(cv::Mat &src_img, const int &outSize1, const int &outSize2, void **buffers,
                             const std::vector<int64_t> &bufferSize, cudaStream_t stream) {
    // Runs one forward pass and decodes the two outputs:
    //   buffers[1] -> regression offsets, h_heat * w_heat * 2 floats ((x, y) pairs)
    //   buffers[2] -> heat-map scores,    h_heat * w_heat floats
    // Returns the pixel centers of the detected objects; also draws them on src_img.
    //
    // Fixes vs. the original:
    //   * cudaStreamSynchronize is now called BEFORE the host reads the output
    //     buffers (the original decoded async D2H copies before syncing — a race).
    //   * raw new[]/delete[] replaced with std::vector (no leak on early exit).
    //   * the ~170 KB of runtime-sized stack arrays (out1_num/out2_num VLAs) are
    //     replaced by direct indexing into the output buffers.
    //   * the O(h*w*k) peak-matching loop is replaced by a direct per-peak lookup
    //     that produces reg_value in the same order as heat_idx.

    float total_time = 0;

    std::cout << "Processing image: " << std::endl;
    cv::Mat src_dst;
    if (src_img.data)
    {
        cv::cvtColor(src_img, src_img, cv::COLOR_BGR2RGB);  // OpenCV loads BGR; network expects RGB
        src_dst = src_img.clone();
    }

    // preprocess: HWC -> CHW float buffer
    auto t_start_pre = std::chrono::high_resolution_clock::now();
    std::cout << "prepareImage" << std::endl;
    std::vector<float> curInput = prepareImage_single2(src_dst);
    auto t_end_pre = std::chrono::high_resolution_clock::now();
    float total_pre = std::chrono::duration<float, std::milli>(t_end_pre - t_start_pre).count();
    std::cout << "prepare image take: " << total_pre << " ms." << std::endl;
    total_time += total_pre;

    if (curInput.empty()) {
        std::cout << "prepare images ERROR!" << std::endl;
    }

    // DMA the input to the GPU and execute the batch
    std::cout << "host2device" << std::endl;
    cudaMemcpyAsync(buffers[0], curInput.data(), bufferSize[0], cudaMemcpyHostToDevice, stream);

    std::cout << "execute" << std::endl;
    auto t_start = std::chrono::high_resolution_clock::now();
    context->execute(BATCH_SIZE, &buffers[0]);  // engine inference
    auto t_end = std::chrono::high_resolution_clock::now();
    float total_inf = std::chrono::duration<float, std::milli>(t_end - t_start).count();
    std::cout << "Inference take: " << total_inf << " ms." << std::endl;
    total_time += total_inf;
    std::cout << "execute success!" << std::endl;

    // copy outputs back and synchronize before touching them on the host
    std::vector<float> out1(static_cast<size_t>(outSize1) * BATCH_SIZE);
    std::vector<float> out2(static_cast<size_t>(outSize2) * BATCH_SIZE);
    cudaMemcpyAsync(out1.data(), buffers[1], bufferSize[1], cudaMemcpyDeviceToHost, stream);
    cudaMemcpyAsync(out2.data(), buffers[2], bufferSize[2], cudaMemcpyDeviceToHost, stream);
    cudaStreamSynchronize(stream);

    // collect heat-map peaks above threshold; each Point2f stores (row i, col j)
    std::vector<cv::Point2f> heat_idx;
    std::vector<float> heat_value;  // scores of the kept peaks, same order as heat_idx
    for (int i = 0; i < h_heat; i++)
    {
        for (int j = 0; j < w_heat; j++)
        {
            float score = out2[i * w_heat + j];
            if (score > obj_threshold) {
                heat_idx.push_back(cv::Point2f(i, j));
                heat_value.push_back(score);
            }
        }
    }

    // per-peak regression offsets. NOTE(review): the row offset is divided by
    // IMAGE_WIDTH/8 and the column offset by IMAGE_HEIGHT/8 — axes look swapped,
    // but the pixel scaling at the bottom applies the same swap, so the factors
    // cancel; kept byte-for-byte from the original decode (integer division).
    std::vector<cv::Point2f> reg_value;
    reg_value.reserve(heat_idx.size());
    for (const auto &peak : heat_idx)
    {
        int i = static_cast<int>(peak.x);
        int j = static_cast<int>(peak.y);
        reg_value.push_back(cv::Point2f(out1[(i * w_heat + j) * 2] / (IMAGE_WIDTH / 8),
                                        out1[(i * w_heat + j) * 2 + 1] / (IMAGE_HEIGHT / 8)));
    }
    if (!heat_idx.empty()) {
        std::cout << "reg_value numbers:" << reg_value.size() << std::endl;
    }

    // final normalized coordinates: grid cell index + sub-cell offset
    std::vector<cv::Point2f> final_idx;
    final_idx.reserve(heat_idx.size());
    for (size_t i = 0; i < heat_idx.size(); i++)
    {
        final_idx.push_back(cv::Point2f((heat_idx[i].x + reg_value[i].x) / (IMAGE_WIDTH / 8),
                                        (heat_idx[i].y + reg_value[i].y) / (IMAGE_HEIGHT / 8)));
    }

    // build NMS candidates: fixed 80-pixel nominal box size around each center
    std::vector<DetectRes> result;
    for (size_t i = 0; i < final_idx.size(); i++)
    {
        DetectRes box;
        box.prob = heat_value[i];
        if (box.prob < obj_threshold) {
            continue;  // already filtered above; kept as a safety net
        }
        box.classes = 1;
        box.x = final_idx[i].x;
        box.y = final_idx[i].y;
        box.w = float(80) / IMAGE_WIDTH;
        box.h = float(80) / IMAGE_HEIGHT;
        result.push_back(box);
    }

    // suppress overlapping detections
    NmsDetect2(result);

    // map normalized (row, col) back to pixel coordinates; the axis swap here
    // mirrors the swapped divisors used in the decode above
    std::vector<cv::Point2f> result_object;
    for (size_t i = 0; i < result.size(); i++)
    {
        int center_x = int(result[i].y * IMAGE_HEIGHT);
        int center_y = int(result[i].x * IMAGE_WIDTH);
        result_object.push_back(cv::Point(center_x, center_y));
        cv::circle(src_img, cv::Point(center_x, center_y), 2, cv::Scalar(0, 0, 255), 3);
    }
    if (!result.empty()) {
        std::cout << "find object numbers:" << result.size() << std::endl;
    }

    return result_object;
}

// HWC to CHW
// HWC to CHW
std::vector<float> Cotton2::prepareImage_single2(cv::Mat &img) {
    // Convert an image into the network input layout: float32, channel-planar
    // (CHW), no mean subtraction or scaling (convertTo scale = 1.0).
    // Returns an empty vector for an empty input image.
    if (img.empty()) {
        return {};
    }

    // Guard: the per-channel memcpy below assumes exactly
    // IMAGE_HEIGHT x IMAGE_WIDTH pixels; the original read out of bounds when
    // the caller passed a differently-sized frame.
    cv::Mat sized = img;
    if (img.rows != IMAGE_HEIGHT || img.cols != IMAGE_WIDTH) {
        cv::resize(img, sized, cv::Size(IMAGE_WIDTH, IMAGE_HEIGHT));
    }

    cv::Mat img_float;
    sized.convertTo(img_float, CV_32FC3, 1.);

    // HWC -> CHW: split into per-channel planes, then copy each plane contiguously
    std::vector<cv::Mat> input_channels(INPUT_CHANNEL);
    cv::split(img_float, input_channels);

    std::vector<float> result(IMAGE_HEIGHT * IMAGE_WIDTH * INPUT_CHANNEL);
    auto data = result.data();
    const int channelLength = IMAGE_HEIGHT * IMAGE_WIDTH;

    for (int i = 0; i < INPUT_CHANNEL; ++i) {
        memcpy(data, input_channels[i].data, channelLength * sizeof(float));
        data += channelLength;
    }

    return result;
}

void Cotton2::NmsDetect2(std::vector<DetectRes> &detections) {
    // Greedy non-maximum suppression: order candidates by descending score,
    // zero the score of any same-class box that overlaps a better one too much,
    // then erase every zeroed box.
    std::sort(detections.begin(), detections.end(),
              [](const DetectRes &a, const DetectRes &b) { return a.prob > b.prob; });

    const int count = static_cast<int>(detections.size());
    for (int keep = 0; keep < count; ++keep) {
        for (int cand = keep + 1; cand < count; ++cand) {
            if (detections[keep].classes != detections[cand].classes)
                continue;
            // suppress the lower-scored box when the overlap exceeds the threshold
            if (IOUCalculate2(detections[keep], detections[cand]) > nms_threshold)
                detections[cand].prob = 0;
        }
    }

    auto suppressed = [](const DetectRes &det) { return det.prob == 0; };
    detections.erase(std::remove_if(detections.begin(), detections.end(), suppressed),
                     detections.end());
}

float Cotton2::IOUCalculate2(const Cotton2::DetectRes &det_a, const Cotton2::DetectRes &det_b) {
    // Distance-IoU between two center-format boxes: plain IoU minus the squared
    // center distance normalized by the squared diagonal of the smallest
    // enclosing box. The result can therefore be negative for distant boxes.
    const float a_l = det_a.x - det_a.w / 2, a_r = det_a.x + det_a.w / 2;
    const float a_t = det_a.y - det_a.h / 2, a_b = det_a.y + det_a.h / 2;
    const float b_l = det_b.x - det_b.w / 2, b_r = det_b.x + det_b.w / 2;
    const float b_t = det_b.y - det_b.h / 2, b_b = det_b.y + det_b.h / 2;

    // squared distance between the two box centers
    const float dx = det_a.x - det_b.x;
    const float dy = det_a.y - det_b.y;
    const float distance_d = dx * dx + dy * dy;

    // squared diagonal of the enclosing box (corner-to-corner)
    const float ex = std::min(a_l, b_l) - std::max(a_r, b_r);
    const float ey = std::min(a_t, b_t) - std::max(a_b, b_b);
    const float distance_c = ex * ex + ey * ey;

    // intersection rectangle
    const float inter_l = std::max(a_l, b_l);
    const float inter_t = std::max(a_t, b_t);
    const float inter_r = std::min(a_r, b_r);
    const float inter_b = std::min(a_b, b_b);
    if (inter_b < inter_t || inter_r < inter_l)
        return 0;

    const float inter_area = (inter_b - inter_t) * (inter_r - inter_l);
    const float union_area = det_a.w * det_a.h + det_b.w * det_b.h - inter_area;
    if (union_area == 0)
        return 0;
    return inter_area / union_area - distance_d / distance_c;
}

float Cotton2::sigmoid2(float in){
    // Logistic sigmoid: maps any real input into the open interval (0, 1).
    const float e = std::exp(-in);
    return 1.f / (1.f + e);
}
