#include "RosDetect.h"

// Default constructor: wires up ROS topics, loads the class labels,
// configures the network I/O, collects per-layer YOLO region params,
// and loads the network onto `device_name`.
RosDetect::RosDetect() : it(nh)
{
    pub_raw_image = it.advertise("/classifier/image_raw_copy", 1);                                 // raw image
    pub_colored_image = it.advertise("/classifier/image_colored", 1);                              // semantic image
    pub_depth_image = it.advertise("/classifier/image_depth_copy", 1);                             // depth image
    sub_raw_image = it.subscribe("/camera/rgb/image_color", 1, &RosDetect::InferenceAndPub, this); // frames from the Kinect

    { // Load the dataset labels and give each one a random display color.
        std::ifstream inputFile(class_name);
        std::string label;
        cv::RNG rng(time(0)); // RNG for the random colors
        while (std::getline(inputFile, label))
        {
            color_labels[label] = cv::Scalar(rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255));
            labels.push_back(label);
        }
        if (labels.empty())
            // Include the offending path so the failure is diagnosable.
            throw std::logic_error("File empty or not found: " + std::string(class_name));
    }

    // Fixed colors (BGR) for the classes of interest.
    color_labels["toy"] = cv::Scalar(0, 0, 255);     // toy is red
    color_labels["bicycle"] = cv::Scalar(255, 0, 0); // bicycle is blue

    network = core.ReadNetwork(net_work);         // read the network
    this->input_info = network.getInputsInfo();   // input info, configured below
    this->output_info = network.getOutputsInfo(); // output info, configured below
    input_name = (this->input_info).begin()->first;

    for (auto &item : input_info) // configure inputs
    {
        auto input_data = item.second;
        input_data->setPrecision(Precision::U8);
        input_data->setLayout(Layout::NCHW);
        input_data->getPreProcess().setResizeAlgorithm(RESIZE_BILINEAR);
        input_data->getPreProcess().setColorFormat(ColorFormat::RGB);
    }

    for (auto &item : output_info) // configure outputs
    {
        auto output_data = item.second;
        output_data->setPrecision(Precision::FP32);
        output_data->setLayout(Layout::NCHW);
    }

    // Collect the RegionYolo parameters for every YOLO output layer.
    if (auto ngraphFunction = network.getFunction())
    {
        for (const auto &op : ngraphFunction->get_ops()) // by reference: avoid copying shared_ptr per op
        {
            auto outputLayer = output_info.find(op->get_friendly_name());
            if (outputLayer != output_info.end())
            {
                auto regionYolo = std::dynamic_pointer_cast<ngraph::op::RegionYolo>(op);
                if (!regionYolo)
                {
                    // BUG FIX: the original dereferenced the null `regionYolo`
                    // here; report the actual op's type instead.
                    throw std::runtime_error("Invalid output type: " +
                                             std::string(op->get_type_info().name) + ". RegionYolo expected");
                }
                yoloParams[outputLayer->first] = YoloParams(regionYolo);
            }
        }
    }

    // executable_network = core.LoadNetwork(network, "MULTI", {{"MULTI_DEVICE_PRIORITIES", "HDDL,CPU"}}); // load the network onto the device
    executable_network = core.LoadNetwork(network, device_name); // load the network onto the device
}

// Parameterized constructor: same pipeline setup as the default one, but
// hard-codes the tiny-bike model paths and loads via the MULTI plugin
// (HDDL preferred, CPU fallback).
// NOTE(review): `para` is currently unused — presumably it was meant to
// select a configuration; confirm intent before removing it.
RosDetect::RosDetect(std::string para) : it(nh)
{
    this->class_name = "/home/user/Documents/yolov3_tiny_bike.txt";
    this->net_work = "/home/user/Desktop/frozen_darknet_yolov3_model_yolov3-big.xml";
    this->device_name = "HDDL";
    this->inference_num = 20;

    pub_raw_image = it.advertise("/classifier/image_raw_copy", 1);                                 // raw image
    pub_colored_image = it.advertise("/classifier/image_colored", 1);                              // semantic image
    pub_depth_image = it.advertise("/classifier/image_depth_copy", 1);                             // depth image
    sub_raw_image = it.subscribe("/camera/rgb/image_color", 1, &RosDetect::InferenceAndPub, this); // frames from the Kinect

    { // Load the dataset labels and give each one a random display color.
        std::ifstream inputFile(class_name);
        std::string label;
        cv::RNG rng(time(0)); // RNG for the random colors
        while (std::getline(inputFile, label))
        {
            color_labels[label] = cv::Scalar(rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255));
            labels.push_back(label);
        }
        if (labels.empty())
            // Include the offending path so the failure is diagnosable.
            throw std::logic_error("File empty or not found: " + std::string(class_name));
    }

    // Fixed colors (BGR) for the classes of interest.
    color_labels["toy"] = cv::Scalar(0, 0, 255);     // toy is red
    color_labels["bicycle"] = cv::Scalar(255, 0, 0); // bicycle is blue

    network = core.ReadNetwork(net_work);         // read the network
    this->input_info = network.getInputsInfo();   // input info, configured below
    this->output_info = network.getOutputsInfo(); // output info, configured below
    input_name = (this->input_info).begin()->first;

    for (auto &item : input_info) // configure inputs
    {
        auto input_data = item.second;
        input_data->setPrecision(Precision::U8);
        input_data->setLayout(Layout::NCHW);
        input_data->getPreProcess().setResizeAlgorithm(RESIZE_BILINEAR);
        input_data->getPreProcess().setColorFormat(ColorFormat::RGB);
    }

    for (auto &item : output_info) // configure outputs
    {
        auto output_data = item.second;
        output_data->setPrecision(Precision::FP32);
        output_data->setLayout(Layout::NCHW);
    }

    // Collect the RegionYolo parameters for every YOLO output layer.
    if (auto ngraphFunction = network.getFunction())
    {
        for (const auto &op : ngraphFunction->get_ops()) // by reference: avoid copying shared_ptr per op
        {
            auto outputLayer = output_info.find(op->get_friendly_name());
            if (outputLayer != output_info.end())
            {
                auto regionYolo = std::dynamic_pointer_cast<ngraph::op::RegionYolo>(op);
                if (!regionYolo)
                {
                    // BUG FIX: the original dereferenced the null `regionYolo`
                    // here; report the actual op's type instead.
                    throw std::runtime_error("Invalid output type: " +
                                             std::string(op->get_type_info().name) + ". RegionYolo expected");
                }
                yoloParams[outputLayer->first] = YoloParams(regionYolo);
            }
        }
    }

    // NOTE(review): `device_name` ("HDDL") is set above but the MULTI device
    // list below is what actually governs placement — confirm this is intended.
    executable_network = core.LoadNetwork(network, "MULTI", {{"MULTI_DEVICE_PRIORITIES", "HDDL,CPU"}}); // load the network onto the device
}

// Callback for each incoming RGB frame.  Implements an asynchronous
// inference pipeline: the first `inference_num` frames only start requests
// (warm-up); after that, each call starts inference on the newest frame,
// waits for the oldest in-flight request, parses its YOLO outputs, draws
// the detections, publishes raw + semantic images, and rotates the
// frame/header/request buffers left by one.
void RosDetect::InferenceAndPub(const sensor_msgs::ImageConstPtr &msg)
{

    if (this->inference_count < inference_num)
    {
        // Warm-up phase: copy the frame into a new slot and start an
        // async request, but do not wait for any result yet.
        image_vec.push_back(cv::Mat());
        cv_bridge::toCvShare(msg, "bgr8")->image.copyTo(image_vec[inference_count]);
        header_vector.push_back(msg->header);
        // image_vec[inference_count] = cv_bridge::toCvShare(msg, "bgr8")->image;
        request_queue.push_back(executable_network.CreateInferRequestPtr());
        imgBlob = wrapMat2Blob(image_vec[inference_count]);
        request_queue[inference_count]->SetBlob(input_name, imgBlob);
        request_queue[inference_count]->StartAsync();
        inference_count++;
        return;
    }
    else if (this->inference_count == inference_num)
    {
        // One-time transition: add an extra slot so the pipeline holds
        // inference_num + 1 entries in steady state (newest being filled
        // while the oldest is drained).
        image_vec.push_back(cv::Mat());
        header_vector.push_back(msg->header);
        request_queue.push_back(executable_network.CreateInferRequestPtr());
        inference_count++;
    }

    // Steady state: the back slot always receives the newest frame.
    cv_bridge::toCvShare(msg, "bgr8")->image.copyTo(image_vec.back());
    header_vector.back() = msg->header;

    // Blank white canvas for the semantic (colored) output image.
    // NOTE(review): 640x480 is hard-coded — assumed Kinect resolution;
    // confirm against the camera configuration.
    cv::Mat image_colored(cv::Size(640, 480), CV_8UC3, cv::Scalar(255, 255, 255));

    imgBlob = wrapMat2Blob(image_vec.back());
    request_queue.back()->SetBlob(input_name, imgBlob);

    request_queue.back()->StartAsync();                                 // kick off inference on the newest frame
    request_queue.front()->Wait(IInferRequest::WaitMode::RESULT_READY); // block until the oldest request completes

    // ---------------------------Processing output blobs--------------------------------------------------
    // Processing results of the CURRENT request
    const TensorDesc &inputDesc = input_info.begin()->second.get()->getTensorDesc();
    unsigned long resized_im_h = getTensorHeight(inputDesc);
    unsigned long resized_im_w = getTensorWidth(inputDesc);
    std::vector<DetectionObject> objects;
    // Parsing outputs
    for (auto &output : output_info)
    {
        auto output_name = output.first;
        Blob::Ptr blob = request_queue.front()->GetBlob(output_name);
        ParseYOLOV3Output(yoloParams[output_name], output_name, blob, resized_im_h, resized_im_w, 480, 640, THRESOLD, objects);
    }
    // Filtering overlapping boxes: greedy NMS — a suppressed box is marked
    // by zeroing its confidence rather than being erased.
    std::sort(objects.begin(), objects.end(), std::greater<DetectionObject>());
    for (size_t i = 0; i < objects.size(); ++i)
    {
        if (objects[i].confidence == 0)
            continue;
        for (size_t j = i + 1; j < objects.size(); ++j)
            if (IntersectionOverUnion(objects[i], objects[j]) >= IOU_THRE)
                objects[j].confidence = 0;
    }
    // Drawing boxes on the oldest frame (the one whose results just arrived).
    for (auto &object : objects)
    {
        if (object.confidence < 0)
            continue;
        auto label = object.class_id;
        float confidence = object.confidence;
        if (confidence > CONFINDENCE_THRE)
        {
            /** Drawing only objects when >confidence_threshold probability **/
            std::ostringstream conf;
            conf << ":" << std::fixed << std::setprecision(3) << confidence;
            cv::putText(image_vec.front(),
                        (!labels.empty() ? labels[label] : std::string("label #") + std::to_string(label)) + conf.str(),
                        cv::Point2f(static_cast<float>(object.xmin), static_cast<float>(object.ymin - 5)), cv::FONT_HERSHEY_COMPLEX_SMALL, 1,
                        cv::Scalar(0, 0, 255));
            cv::Rect color_rect(cv::Point2f(static_cast<float>(object.xmin), static_cast<float>(object.ymin)),
                                cv::Point2f(static_cast<float>(object.xmax), static_cast<float>(object.ymax))); // build the bounding rectangle
            // NOTE(review): `it` is dereferenced below without an end() check —
            // safe only because classes other than 0/1 are skipped first and
            // those two labels have fixed entries in color_labels; verify.
            auto it = color_labels.find(labels[label]);

            // Only classes 0 and 1 (the two classes of interest) are drawn.
            if (label != 0 && label != 1)
            {
                continue;
            }

            cv::rectangle(image_vec.front(), color_rect, it->second);
            // Shrink the rectangle toward its center for the filled semantic blob.
            color_rect.x += (color_rect.width * 0.3);
            color_rect.y += (color_rect.height * 0.1);
            color_rect.width *= 0.4;
            color_rect.height *= 0.3;
            // cv::rectangle(src_img, color_rect, it->second);
            cv::rectangle(image_colored, color_rect, it->second, cv::FILLED);
        }
    }

    // Convert the cv::Mat images to ROS sensor_msgs, stamped with the
    // header of the frame the results belong to.
    sensor_msgs::ImagePtr colored_msg = cv_bridge::CvImage(header_vector.front(), "bgr8", image_colored).toImageMsg();
    sensor_msgs::ImagePtr raw_msg = cv_bridge::CvImage(header_vector.front(), "bgr8", image_vec.front()).toImageMsg();

    // Publish the topics.
    pub_raw_image.publish(raw_msg);
    pub_colored_image.publish(colored_msg);

    // Rotate all three buffers left by one: the just-consumed front slot
    // migrates to the back where it will be reused for the next frame.
    for (int i = 0; i < inference_num; i++)
    {
        std_msgs::Header tmp_header = header_vector[i];
        header_vector[i] = header_vector[i + 1];
        header_vector[i + 1] = tmp_header;
        cv::Mat tmp_img = image_vec[i];
        image_vec[i] = image_vec[i + 1];
        image_vec[i + 1] = tmp_img;
        request_queue[i].swap(request_queue[i + 1]);
    }
}

// Computes the flat offset into a YOLO region blob for a given anchor/cell
// (`location` = anchor * side * side + cell) and channel (`entry`).
int RosDetect::EntryIndex(int side, int lcoords, int lclasses, int location, int entry)
{
    const int cells = side * side;          // number of grid cells per channel
    const int anchor = location / cells;    // which anchor box
    const int cell = location % cells;      // which cell within the grid
    const int channels = lcoords + lclasses + 1; // coords + classes + objectness
    return (anchor * channels + entry) * cells + cell;
}

// Intersection-over-union of two detection boxes, in [0, 1].
// Returns 0 when the boxes do not overlap, and also when the union area is
// zero (degenerate boxes) — the original divided by zero and returned NaN,
// which silently broke the >= IOU_THRE comparison in the NMS loop.
double RosDetect::IntersectionOverUnion(const DetectionObject &box_1, const DetectionObject &box_2)
{
    const double width_of_overlap_area = fmin(box_1.xmax, box_2.xmax) - fmax(box_1.xmin, box_2.xmin);
    const double height_of_overlap_area = fmin(box_1.ymax, box_2.ymax) - fmax(box_1.ymin, box_2.ymin);
    double area_of_overlap;
    if (width_of_overlap_area < 0 || height_of_overlap_area < 0)
        area_of_overlap = 0;
    else
        area_of_overlap = width_of_overlap_area * height_of_overlap_area;
    const double box_1_area = (box_1.ymax - box_1.ymin) * (box_1.xmax - box_1.xmin);
    const double box_2_area = (box_2.ymax - box_2.ymin) * (box_2.xmax - box_2.xmin);
    const double area_of_union = box_1_area + box_2_area - area_of_overlap;
    if (area_of_union <= 0.0) // guard against 0/0 for zero-area boxes
        return 0.0;
    return area_of_overlap / area_of_union;
}

// Decodes one YOLOv3 region output blob into DetectionObject candidates.
//
// params          — region parameters (anchors, num, coords, classes) for this layer
// output_name     — layer name, used only for error reporting
// blob            — FP32 NCHW output blob of shape [1, C, side, side]
// resized_im_h/w  — network input size the frame was resized to
// original_im_h/w — original frame size; boxes are scaled back to it
// threshold       — minimum objectness / class probability to keep
// objects         — output vector; surviving candidates are appended
// Throws std::runtime_error if the blob is not square (H != W).
void RosDetect::ParseYOLOV3Output(const YoloParams &params, const std::string &output_name,
                                  const Blob::Ptr &blob, const unsigned long resized_im_h,
                                  const unsigned long resized_im_w, const unsigned long original_im_h,
                                  const unsigned long original_im_w,
                                  const double threshold, std::vector<DetectionObject> &objects)
{

    const int out_blob_h = static_cast<int>(blob->getTensorDesc().getDims()[2]);
    const int out_blob_w = static_cast<int>(blob->getTensorDesc().getDims()[3]);
    if (out_blob_h != out_blob_w)
        // BUG FIX: the original message printed H for both dimensions.
        throw std::runtime_error("Invalid size of output " + output_name +
                                 " It should be in NCHW layout and H should be equal to W. Current H = " + std::to_string(out_blob_h) +
                                 ", current W = " + std::to_string(out_blob_w));

    auto side = out_blob_h;
    auto side_square = side * side;
    LockedMemory<const void> blobMapped = as<MemoryBlob>(blob)->rmap();
    const float *output_blob = blobMapped.as<float *>();
    // --------------------------- Parsing YOLO Region output -------------------------------------
    for (int i = 0; i < side_square; ++i)
    {
        int row = i / side;
        int col = i % side;
        for (int n = 0; n < params.num; ++n)
        {
            // Offsets of the objectness score and the box coords for anchor n, cell i.
            int obj_index = EntryIndex(side, params.coords, params.classes, n * side_square + i, params.coords);
            int box_index = EntryIndex(side, params.coords, params.classes, n * side_square + i, 0);
            float scale = output_blob[obj_index];
            if (scale < threshold)
                continue;
            // Decode center from the cell offsets, then size via the anchor priors.
            double x = (col + output_blob[box_index + 0 * side_square]) / side * resized_im_w;
            double y = (row + output_blob[box_index + 1 * side_square]) / side * resized_im_h;
            double height = std::exp(output_blob[box_index + 3 * side_square]) * params.anchors[2 * n + 1];
            double width = std::exp(output_blob[box_index + 2 * side_square]) * params.anchors[2 * n];
            for (int j = 0; j < params.classes; ++j)
            {
                int class_index = EntryIndex(side, params.coords, params.classes, n * side_square + i, params.coords + 1 + j);
                float prob = scale * output_blob[class_index];
                if (prob < threshold)
                    continue;
                // Scale the box from network-input space back to the original image.
                DetectionObject obj(x, y, height, width, j, prob,
                                    static_cast<float>(original_im_h) / static_cast<float>(resized_im_h),
                                    static_cast<float>(original_im_w) / static_cast<float>(resized_im_w));
                objects.push_back(obj);
            }
        }
    }
}

// Applies a hard-coded parameter set (model paths, target device, pipeline depth).
// NOTE(review): `para` is ignored — presumably intended to select a config; confirm.
// NOTE(review): class_name points at an .xml file here, but the label loader in the
// constructor reads it as a plain-text label list (the default path is a .txt) —
// verify this path is correct.
void RosDetect::getPara(std::string para)
{
    this->class_name = "/home/user/Documents/yolov3_tiny_bike.xml";
    this->net_work = "/home/user/Documents/yolov3_tiny_bicycle.xml";
    this->device_name = "HDDL";
    this->inference_num = 20;
}