#include "deepdetector/DeepDetector.hpp"

#include <iostream>
#include <stdexcept>

#ifdef BACKEND_TRT
#include "deepdetector/TRTBackEnd.hpp"
#endif
#ifdef BACKEND_OV
#include "deepdetector/OVBackEnd.hpp"
#endif


namespace wmj
{
    DeepDetector::DeepDetector()
    {
        // Deprecated: parameters used to be loaded from DEEP_CFG via
        // cv::FileStorage; use DeepDetector(const DetectorParams&) instead.
        // cv::FileStorage file_param(DEEP_CFG, cv::FileStorage::READ);
        // setParam(file_param);
        // init();
    }

    // Construct the detector: pick the compile-time inference backend,
    // query its input geometry, copy parameters and build decode tables.
    DeepDetector::DeepDetector(const DetectorParams &params)
    {
        #ifdef BACKEND_TRT
        m_backend = std::make_shared<TRTBackend>(params);
        #endif
        #ifdef BACKEND_OV
        m_backend = std::make_shared<OVBackend>(params);
        #endif

        // Fail loudly if the build enabled no backend: the pointer would
        // otherwise be dereferenced below and crash with no explanation.
        if (!m_backend)
            throw std::runtime_error("DeepDetector: no inference backend compiled in (define BACKEND_TRT or BACKEND_OV)");

        auto input_size = m_backend->getInputSize();
        input_width = input_size.width;
        input_height = input_size.height;
        objectLength = m_backend->objectLength;
        // Fixed log format: the original printed "<w>x<h>objectLength: <n>"
        // with no separator between the height and the label.
        std::cout << "DeepDetector input: " << input_width << "x" << input_height
                  << ", objectLength: " << objectLength << std::endl;

        setParam(params);
        this->m_params = params;
        init();
    }

    DeepDetector::~DeepDetector()
    {
        // Nothing to do: resources are owned by the backend via shared_ptr
        // (RAII). The manual CUDA/TensorRT cleanup below predates the
        // backend abstraction and is kept only for reference.
        // delete[] m_prob;
        // cudaStreamDestroy(m_stream);

        // cudaFree(m_buffers[outputIndex]);
        // cudaFree(m_buffers[inputIndex]);
        // cudaFree(m_gpu_img);

        // delete m_context;
        // delete m_engine;
        // // m_engine->destroy();
    }

    // DeepDetector::DeepDetector(const std::string xml, const std::string bin)
    // {
    //     init(xml, bin);
    // }

    // Deprecated: parameter loading from a cv::FileStorage YAML file,
    // superseded by setParam(const DetectorParams).
    // void DeepDetector::setParam(cv::FileStorage fs)
    // {
    //     fs["DeepDetector"]["USE_DEEP"] >> Use_Deep;
    //     fs["DeepDetector"]["DEVICE"] >> DEVICE;
    //     fs["DeepDetector"]["Lightratio"] >> m_Lightratio;
    //     fs["DeepDetector"]["nms_threshold"] >> m_nmsThreshold;
    //     fs["DeepDetector"]["conf_threshold"] >> m_confThreshold;

    //     fs["DeepDetector"]["model_name"] >> m_model_file;
    //     fs["DeepDetector"]["input_precision"] >> m_input_precision;
    //     fs["DeepDetector"]["model_type"] >> m_model_type;
    //     fs["DeepDetector"]["ncolor"] >> m_ncolor;
    //     fs["DeepDetector"]["ntype"] >> m_ntype;

    //     fs["DeepDetector"]["ntag"] >> m_ntag;
    //     fs["DeepDetector"]["model_format"] >> m_model_format;
    //     fs["DeepDetector"]["debug"] >> m_debug; 
    //     fs["DeepDetector"]["enemy_color"] >> m_enemy_color; 
    //     fs.release();

    // }

    void DeepDetector::setParam(const DetectorParams params)
    {
        m_enemy_color = params.enemyColor;
        DEVICE = params.DEVICE;
        m_Lightratio = params.lightRatio;
        m_nmsThreshold = params.nmsThreshold;
        m_confThreshold = params.confThreshold;
        m_model_file = params.modelName;
        m_model_format = params.modelFormat;
        m_model_type = params.modelType;
        m_input_precision = params.inputPrecision;
        m_ncolor = params.nColor;
        m_ntype = params.nType;
        m_ntag = params.nTag;
        m_debug = params.debug;
    }

    // Build decode-time lookup structures: the letterbox canvas, the
    // tag-id -> armor-id map, and the per-stride anchor grid.
    void DeepDetector::init()
    {
        // Letterbox background canvas filled with neutral gray (105,105,105).
        input_mat = cv::Mat(input_height, input_width, CV_8UC3, cv::Scalar(105, 105, 105));

        // Network tag id -> game armor id mapping.
        m_label2id = {{0, 7}, {1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}, {6, 11}, {7, 8}, {8, 8}};

        if (m_model_type == "x" || m_model_type == "v8")
        {
            // Each model family decodes against its own set of strides.
            if (m_model_type == "v8")
                strides = {8, 16};
            else // "x"
                strides = {16, 32};

            // Clear first: generate_grids_and_stride only appends, so a
            // repeated init() would otherwise accumulate duplicate cells.
            grid_strides.clear();
            generate_grids_and_stride(input_width, input_height, strides, grid_strides);
        }
        return;
    }

    // Network input resolution as (width, height).
    cv::Size2i DeepDetector::getInputSize()
    {
        return {input_width, input_height};
    }

    // bool DeepDetector::MultiThreadDetectTRT(cv::Mat &frame, cv::Rect2d &roi, cudaStream_t &stream, IExecutionContext* &context, NppStreamContext &nppCtx,void* buffers[], void* gpu_img_ptr, float* prob, Armors *result)
    // {
    //     double t1 = wmj::now();
    //     cv::Mat input_mat = input_preprocess(frame, roi);
    //     // float* blob= blobFromImage(input_mat);
    //     double t2 = wmj::now();
    //     // std::cout << "pre " << t2 - t1 << std::endl;
    //     doInference(*context, input_mat.data, prob, buffers, gpu_img_ptr, nppCtx, stream);
    //     std::vector<bbox_t> bboxes;
    //             double t3 = wmj::now();
    //     // std::cout << "infer " << t3 - t2 << std::endl;

    //     decode_YOLOX_outputs(prob, bboxes, input_width, input_height);
    //             double t4 = wmj::now();
    //     // std::cout << "post " << t4 - t3 << std::endl;

    //     *result = bbox2Armor(bboxes);
    //     // delete blob;
    //     return true;
    // }

    // bool DeepDetector::DeepDetectDouble(cv::Mat &left_frame, cv::Mat &right_frame, cv::Rect2d &left_roi, cv::Rect2d &right_roi)
    // {
    //     m_left_armors.clear();
    //     m_right_armors.clear();

    //     std::thread threadRight(
    //         std::bind(&DeepDetector::MultiThreadDetectTRT, this, right_frame, right_roi, m_stream_right, m_context_right, nppCtx_r, m_right_buffers, m_right_gpu_img, m_prob_right,&m_right_armors));
        
    //     MultiThreadDetectTRT(left_frame,left_roi,m_stream_left,m_context_left, nppCtx_l, m_left_buffers, m_left_gpu_img, m_prob_left,&m_left_armors);
    //     threadRight.join();

    //     bool m_true_detected = m_left_armors.size() || m_right_armors.size();
    //     // free(blob);
    //     return m_true_detected;

    // }


    // Run one detection pass on a single frame: preprocess the ROI, run
    // backend inference, decode the output into bboxes and convert them
    // to Armor objects. Returns false if the backend reports a failed
    // inference (the original ignored that flag and always returned true).
    bool DeepDetector::DeepDetectSingle(cv::Mat &frame, cv::Rect2d &roi)
    {
        m_src = frame;
        m_roi = roi;
        m_armors.clear();

        double t1 = wmj::now();
        input_mat = input_preprocess(frame, roi);
        double t2 = wmj::now();

        // Bail out on a failed inference: the output buffer would be
        // stale or garbage, so decoding it is meaningless.
        if (!m_backend->doInference(input_mat))
            return false;
        double t3 = wmj::now();

        std::vector<bbox_t> bboxes;
        // Decode raw network output (YOLOX / YOLOv8) into candidate boxes.
        decode_YOLO_outputs(m_backend->getOutput(), bboxes, input_width, input_height);
        double t4 = wmj::now();

        if (m_debug)
        {
            std::cout << "pre " << t2 - t1 << std::endl;
            std::cout << "infer " << t3 - t2 << std::endl;
            // (the original also printed a "get" line that duplicated the
            // "infer" value; removed as it measured nothing separately)
            std::cout << "post " << t4 - t3 << std::endl;
        }

        m_armors = bbox2Armor(bboxes);
        m_bboxes = bboxes;
        return true;
    }

    // Crop the ROI out of the frame, downscale it if it exceeds the
    // network input size, and paste it into the top-left corner of a
    // gray letterbox canvas. m_resize_scale records the applied scale so
    // the decoders can map boxes back to ROI coordinates.
    cv::Mat DeepDetector::input_preprocess(cv::Mat &frame, cv::Rect2d &ROI)
    {
        if (ROI.height != input_height || ROI.width != input_width)
        {
            // Take the SMALLER of the two axis ratios so BOTH dimensions
            // fit inside the canvas. The original took the larger one,
            // which for a non-uniformly oversized ROI left one dimension
            // bigger than the input and made the copyTo below throw.
            m_resize_scale = (input_height / ROI.height) < (input_width / ROI.width)
                                 ? (input_height / ROI.height)
                                 : (input_width / ROI.width);
        }
        else
            m_resize_scale = 1;

        cv::Mat roi_mat = frame(ROI);
        if (m_resize_scale < 1)
        {
            if (m_debug)
                std::cout << "resize:" << m_resize_scale << std::endl;
            cv::resize(roi_mat, roi_mat, {(int)round(roi_mat.cols * m_resize_scale), (int)round(roi_mat.rows * m_resize_scale)});
        }
        else
            m_resize_scale = 1; // ROI already fits; record identity scale for decoding.

        cv::Mat resize_img = cv::Mat(input_height, input_width, CV_8UC3, cv::Scalar(105, 105, 105));
        roi_mat.copyTo(resize_img(cv::Rect(0, 0, roi_mat.cols, roi_mat.rows)));
        return resize_img;
    }

    // Convert raw network detections into Armor objects in full-frame
    // coordinates (bbox points are ROI-relative; m_roi.tl() shifts them).
    Armors DeepDetector::bbox2Armor(std::vector<bbox_t> &bboxes)
    {
        Armors armors;
        armors.reserve(bboxes.size());
        for (size_t i = 0; i < bboxes.size(); i++)
        {
            Armor armor;

            armor.m_color = wmj::_COLOR(bboxes[i].color_id);
            armor.m_armor_type = wmj::ARMORTYPE(bboxes[i].type_id + 1);

            // Four quad corners, shifted from ROI space into frame space.
            armor.m_vertices.reserve(4);
            for (int j = 0; j < 4; j++)
            {
                armor.m_vertices.emplace_back(bboxes[i].pts[j] + cv::Point2f(m_roi.tl()));
            }

            // resize(2), not reserve(2): the original only reserved and then
            // wrote through operator[] on an empty vector, which is
            // undefined behavior (size stays 0 after reserve).
            armor.m_pairs.resize(2);

            //************ light-bar construction (currently unused) ************//
            Light left, right;
            left.m_center = cv::Point2f((armor.m_vertices[0].x + armor.m_vertices[1].x) / 2, (armor.m_vertices[0].y + armor.m_vertices[1].y) / 2);
            right.m_center = cv::Point2f((armor.m_vertices[2].x + armor.m_vertices[3].x) / 2, (armor.m_vertices[2].y + armor.m_vertices[3].y) / 2);
            left.m_length = getDistance(armor.m_vertices[1], armor.m_vertices[0]);
            right.m_length = getDistance(armor.m_vertices[2], armor.m_vertices[3]);
            left.m_width = left.m_length / m_Lightratio;
            right.m_width = right.m_length / m_Lightratio;
            left.m_ratio = m_Lightratio;
            right.m_ratio = m_Lightratio;
            left.m_angle = std::atan2((armor.m_vertices[1].y - armor.m_vertices[0].y), (armor.m_vertices[1].x - armor.m_vertices[0].x)) * 180 / PI - 90;
            right.m_angle = std::atan2((armor.m_vertices[2].y - armor.m_vertices[3].y), (armor.m_vertices[2].x - armor.m_vertices[3].x)) * 180 / PI - 90;

            left.m_rect = cv::RotatedRect(left.m_center, cv::Size(left.m_width * 1.4, left.m_length * 1.4), left.m_angle);
            right.m_rect = cv::RotatedRect(right.m_center, cv::Size(right.m_width * 1.4, right.m_length * 1.4), right.m_angle);

            left.regularRect(left.m_rect);
            right.regularRect(right.m_rect);

            armor.m_pairs[0] = left;
            armor.m_pairs[1] = right;
            //************ light-bar construction (currently unused) ************//

            // Armor center / size derived from the two light-bar centers.
            armor.m_center = cv::Point2f((left.m_center.x + right.m_center.x) / 2,
                                         (left.m_center.y + right.m_center.y) / 2);
            armor.m_width = getDistance(left.m_center, right.m_center);                   // horizontal extent
            armor.m_height = (left.m_length + right.m_length) / 2;                        // vertical extent
            armor.m_ratio = armor.m_width / armor.m_height;                               // aspect ratio

            // armor.m_armor_type = armor.m_ratio > 3 ? wmj::ARMOR_LARGE : wmj::ARMOR_SMALL; // size classification

            armor.m_id = m_label2id[bboxes[i].tag_id];
            armor.m_rect = cv::Rect2d(armor.m_vertices[0], cv::Size2d{armor.m_width, armor.m_height});
            // Clamp to image bounds. NOTE(review): 1280x1024 is hard-coded —
            // presumably the camera resolution; confirm against frame size.
            armor.m_rect &= cv::Rect2d(cv::Point2d(0, 0), cv::Point2d(1280, 1024));

            armors.emplace_back(armor);
        }
        return armors;
    }

    // For every stride, record each feature-map cell as (grid x, grid y,
    // stride) so the decoders can map anchor offsets back to pixel space.
    // Note: appends to grid_strides; callers should pass it empty.
    void DeepDetector::generate_grids_and_stride(const int target_w, const int target_h, std::vector<int> &strides, std::vector<GridAndStride> &grid_strides)
    {
        for (auto stride : strides)
        {
            const int num_grid_w = target_w / stride;
            const int num_grid_h = target_h / stride;
            for (int g1 = 0; g1 < num_grid_h; g1++)
            {
                for (int g0 = 0; g0 < num_grid_w; g0++)
                {
                    // Standard brace-init: the original used a C99 compound
                    // literal ((GridAndStride){...}), a GNU extension that
                    // is not valid standard C++ and fails on MSVC.
                    grid_strides.push_back({g0, g1, stride});
                }
            }
        }
    }

    void DeepDetector::decode_YOLO_outputs(const float *prob, std::vector<bbox_t> &objects, const int img_w, const int img_h)
    {
        std::vector<bbox_t> proposals;
        // std::cout << "m_params.modelType   " << m_params.modelType << std::endl;

        if(m_params.modelType == "v8")
        {
            generate_yolov8_proposals(grid_strides, prob, m_confThreshold, proposals);
        }
        else
        {
            generate_yolox_proposals(grid_strides, prob, m_confThreshold, proposals);
        }

        // std::cout <<"proposal: " << proposals.size() << std::endl;
        //proposals是传出来可能的anchor

        qsort_descent_inplace(proposals);
        //根据置信度排序
        std::vector<int> picked;

        nms_sorted_bboxes(proposals, picked, m_nmsThreshold);
        int count = picked.size();
        // std::cout <<"count: " << count << std::endl;

        //objects.resize(count);
        for (int i = 0; i < count; i++)
        {
            if (proposals[picked[i]].color_id == m_enemy_color || proposals[picked[i]].color_id == wmj::_COLOR::_WHITE)
            objects.push_back(proposals[picked[i]]);
        }

    }

    // Decode a YOLOX-style output tensor into candidate boxes.
    // Each anchor occupies objectLength consecutive floats in feat_ptr:
    // offsets 0-4 are center x/y, log-w/h and objectness; 5-12 the four
    // regressed corner points; from 13 on the color, type and tag class
    // scores (m_ncolor + m_ntype + m_ntag values) — layout inferred from
    // the indexing below; confirm against the model export.
    void DeepDetector::generate_yolox_proposals(std::vector<GridAndStride> grid_strides, const float *feat_ptr, float prob_threshold, std::vector<bbox_t> &objects)
    {
        const int num_anchors = grid_strides.size();
        // std::cout << "ObjLength" << objectLength << std::endl;
        // std::cout << "num_anchors " << num_anchors << std::endl;
        for (int anchor_idx = 0; anchor_idx < num_anchors; anchor_idx++)
        {
            const int grid0 = grid_strides[anchor_idx].grid0;
            const int grid1 = grid_strides[anchor_idx].grid1;
            const int stride = grid_strides[anchor_idx].stride;

            const int basic_pos =  anchor_idx * objectLength; // start offset of this anchor's record in the flat output
            
        // yolox/models/yolo_head.py decode logic
        //  outputs[..., :2] = (outputs[..., :2] + grids) * strides
        //  outputs[..., 2:4] = torch.exp(outputs[..., 2:4]) * strides
        float x_center = (feat_ptr[basic_pos + 0] + grid0) * stride;

        float y_center = (feat_ptr[basic_pos + 1] + grid1) * stride;

        cv::Point2f _pts[4]; // regressed corner points (offsets 5..12), decoded like the centers
        for (int i = 0; i < 4; i++)
        {
            _pts[i].x = (feat_ptr[basic_pos + i * 2 + 5] + grid0) * stride;
            _pts[i].y = (feat_ptr[basic_pos + i * 2 + 6] + grid1) * stride;
        }

        float w = exp(feat_ptr[basic_pos + 2]) * stride;
        float h = exp(feat_ptr[basic_pos + 3]) * stride;
        float x0 = x_center - w * 0.5f;
        float y0 = y_center - h * 0.5f;

        int tag_id;
        // Box objectness confidence.
        float box_objectness = feat_ptr[basic_pos + 4];
        // Best tag-class score, taken after the 13 geometry/objectness
        // values plus the color and type score groups.
        float box_cls_score = maxclass(&feat_ptr[basic_pos + 13 + m_ncolor + m_ntype], m_ntag, tag_id);

        float box_prob = box_objectness * box_cls_score;

        if (box_prob > prob_threshold)
        {
            bbox_t obj;
            obj.color_conf = maxclass(&feat_ptr[basic_pos + 13], m_ncolor, obj.color_id);
            obj.type_conf = maxclass(&feat_ptr[basic_pos + 13 + m_ncolor], m_ntype, obj.type_id);
            // obj.color_id = feat_ptr[basic_pos + 13] > feat_ptr[basic_pos + 14] ? 1 : 0;
            //    switch (obj.color_id)
            //    {
            //    case 0:
            //        obj.color_id = wmj::_COLOR::_BLUE;
            //        break;
            //    case 1:
            //        obj.color_id = wmj::_COLOR::_RED;
            //        break;
            //    case 2:
            //    default:
            //        obj.color_id = wmj::_COLOR::_WHITE;
            //        break;
            //    }
	    //if (obj.color_id != m_enemy_color && obj.color_id != wmj::_COLOR::_WHITE)
            //    continue;
            // Map everything back to pre-resize ROI coordinates.
            obj.rect.x = x0 / m_resize_scale;
            obj.rect.y = y0 / m_resize_scale;
            obj.rect.width = w / m_resize_scale;
            obj.rect.height = h / m_resize_scale;
            for (int i = 0; i < 4; i++)
                obj.pts[i] = _pts[i] / m_resize_scale;
            // Weighted blend of the three confidences (0.3 / 0.5 / 0.2).
            obj.conf = box_prob * 0.3 + obj.color_conf * 0.5 + obj.type_conf * 0.2;
            obj.tag_id = tag_id;
            objects.push_back(obj);
            }
        } // point anchor loop
    }

    // Decode a YOLOv8-style output tensor into candidate boxes.
    // Each anchor occupies objectLength consecutive floats: offsets 0-3
    // are x, y, w, h; color scores start at offset 4, type scores at 8,
    // tag scores at 10, and the four corner points at 18 — layout
    // inferred from the indexing below; confirm against the model export.
    // Unlike the YOLOX path, coordinates here are used as-is (no grid or
    // stride decode).
    void DeepDetector::generate_yolov8_proposals(std::vector<GridAndStride> grid_strides, const float *feat_ptr, float prob_threshold, std::vector<bbox_t> &objects)
    {
        const int num_anchors = grid_strides.size();
        // std::cout << "ObjLength " << objectLength << std::endl;
        // std::cout << "num_anchors " << num_anchors << std::endl;
        for (int anchor_idx = 0; anchor_idx < num_anchors; anchor_idx++)
        {
        const int basic_pos =  anchor_idx * objectLength; // start offset of this anchor's record in the flat output
            
        float x_center = feat_ptr[basic_pos + 0];
        
        float y_center = feat_ptr[basic_pos + 1];

        // std::cout << "The Center Is" << x_center << "+" << y_center << std::endl;

        int tag_id;

        // Early-reject on the class scores before decoding geometry.
        bbox_t obj;
        
        // Best color score among m_ncolor classes, read from offset 4.
        // (The original comment claimed offset 8 — the code reads 4.)
        obj.color_conf = maxclass(&feat_ptr[basic_pos + 4], m_ncolor, obj.color_id);

        // NOTE(review): hard-coded 0.5 color gate, independent of
        // prob_threshold — confirm this is intentional.
        if(obj.color_conf < 0.5)
            continue;
        if(m_debug)
            std::cout << "*******************   "<< obj.color_id << "and  "<<obj.color_conf <<std::endl;


        // Best armor-type score (m_ntype classes) from offset 8.
        obj.type_conf = maxclass(&feat_ptr[basic_pos + 8], m_ntype, obj.type_id);
        // Best number-tag score (m_ntag classes) from offset 10.
        float box_cls_score = maxclass(&feat_ptr[basic_pos + 10], m_ntag, tag_id);
        
        // std::cout << obj.type_conf << "and  "<< box_cls_score <<std::endl;
        if(box_cls_score < prob_threshold || obj.type_conf < prob_threshold)
            continue;

        cv::Point2f _pts[4]; // regressed corner points (offsets 18..25)
        for (int i = 0; i < 4; i++)
        {
            _pts[i].x = (feat_ptr[basic_pos + i * 2 + 18] ) ;
            _pts[i].y = (feat_ptr[basic_pos + i * 2 + 19] ) ;
        }
        // yolov8: first four values are x, y, w, h
        float w = feat_ptr[basic_pos + 2];
        float h = feat_ptr[basic_pos + 3];
        float x0 = x_center;
        float y0 = y_center;
        // Keep only enemy-colored or white detections.
        if (obj.color_id != m_enemy_color && obj.color_id != wmj::_COLOR::_WHITE)
            continue;

        // Map back to pre-resize ROI coordinates.
        obj.rect.x = x0 / m_resize_scale;
        obj.rect.y = y0 / m_resize_scale;
        obj.rect.width = w / m_resize_scale;
        obj.rect.height = h / m_resize_scale;
        for (int i = 0; i < 4; i++)
            obj.pts[i] = _pts[i] / m_resize_scale;
        // Weighted blend of the three confidences (0.1 / 0.3 / 0.6).
        // (The original comment said "average"; these are weights.)
        obj.conf = (obj.color_conf*0.1 + box_cls_score*0.3 + obj.type_conf*0.6);
        obj.tag_id = tag_id;
        objects.push_back(obj);
        // std::cout << "Start"<< std::endl;
        // for(int j = 0;j < 26;j++)
        // {
        //     std::cout << feat_ptr[basic_pos + j] << std::endl;
        // }
 
        } // point anchor loop
        
    }

    void DeepDetector::nms_sorted_bboxes(const std::vector<bbox_t> &faceobjects, std::vector<int> &picked, float nms_threshold)
    {
        //faceobjects 前面赛选出来的符合的结果
        picked.clear();

        const int n = faceobjects.size();

        std::vector<float> areas(n);
        for (int i = 0; i < n; i++)
        {
            areas[i] = faceobjects[i].rect.area();
        }

        for (int i = 0; i < n; i++)
        {
            const bbox_t &a = faceobjects[i];

            int keep = 1;
            for (int j = 0; j < (int)picked.size(); j++)
            {
                const bbox_t &b = faceobjects[picked[j]];

                // intersection over union
                float inter_area = intersection_area(a, b);
                float union_area = areas[i] + areas[picked[j]] - inter_area;
                // float IoU = inter_area / union_area
                if (inter_area / union_area > nms_threshold)
                    keep = 0;
            }

            if (keep)
            {
                picked.push_back(i);
            }
                
        }
    }
    // ---------------- Debug visualization ----------------
    void DeepDetector::DebugOutput()
    {
        std::cout << _yellow("***************DeepDetector Debug Output***************") << std::endl;
        cv::imshow("input_mat", input_mat);
        std::cout << _lightcyan("Detected Armor number: ") << m_bboxes.size() << std::endl;
        // draw roi
        cv::rectangle(m_src, m_roi, cv::Scalar(255), 2);

        // draw box
        for (auto bbox : m_bboxes)
        {
            cv::line(m_src, bbox.pts[0] + cv::Point2f(m_roi.tl()), bbox.pts[2] + cv::Point2f(m_roi.tl()), cv::Scalar(255, 255, 255), 2);
            cv::line(m_src, bbox.pts[1] + cv::Point2f(m_roi.tl()), bbox.pts[3] + cv::Point2f(m_roi.tl()), cv::Scalar(255, 255, 255), 2);
            cv::putText(m_src, "ID: " + std::to_string(m_label2id[bbox.tag_id]),
                        bbox.pts[1] + cv::Point2f(m_roi.tl()) + cv::Point2f(0, 30), cv::FONT_HERSHEY_COMPLEX, 1, cv::Scalar(0, 255, 255), 1);
            cv::putText(m_src, "conf: " + std::to_string(bbox.conf),
                        bbox.pts[1] + cv::Point2f(m_roi.tl()) + cv::Point2f(0, 60), cv::FONT_HERSHEY_COMPLEX, 1, cv::Scalar(0, 255, 255), 1);
            cv::putText(m_src, "color: " + std::to_string(bbox.color_id),
                        bbox.pts[1] + cv::Point2f(m_roi.tl()) + cv::Point2f(0, 90), cv::FONT_HERSHEY_COMPLEX, 1, cv::Scalar(0, 255, 255), 1);
            cv::putText(m_src, "color_conf: " + std::to_string(bbox.color_conf),
                        bbox.pts[1] + cv::Point2f(m_roi.tl()) + cv::Point2f(0, 120), cv::FONT_HERSHEY_COMPLEX, 1, cv::Scalar(0, 255, 255), 1);
            cv::putText(m_src,"type: " + std::to_string(bbox.type_id),
                        bbox.pts[1] + cv::Point2f(m_roi.tl()) + cv::Point2f(0,150), cv::FONT_HERSHEY_COMPLEX, 1, cv::Scalar(0, 255, 255), 1);
        }

        cv::imshow("result", m_src);
        cv::waitKey(1);
        //  std::cout << _yellow("***********************Debug END***********************") << std::endl;
        return;
    }

}
