#include <iostream>
#include <sstream>
#include <time.h>
#include <inference_engine.hpp>
#include <opencv2/opencv.hpp>
#include <details/os/os_filesystem.hpp>
#include <ngraph/ngraph.hpp>
#include <samples/ocv_common.hpp>
#include <samples/classification_results.h>

// Detection thresholds.
// NOTE(review): "THRESOLD" and "CONFINDENCE" are misspellings of
// "THRESHOLD" / "CONFIDENCE"; renaming would touch every use site in main().
#define THRESOLD 0.3          // minimum objectness / class probability kept by the YOLO parser
#define CONFINDENCE_THRE 0.01 // minimum confidence required to draw a box
#define IOU_THRE 0.5          // IoU above which the lower-confidence box is suppressed (NMS)

using namespace cv;
using namespace InferenceEngine;
using namespace std;

// Compute the flat offset into a RegionYolo output blob for a given field.
// `location` encodes anchor index * side^2 + cell index; `entry` selects which
// channel (0..coords-1 box coords, coords = objectness, coords+1+j = class j).
static int EntryIndex(int side, int lcoords, int lclasses, int location, int entry)
{
    const int grid_area = side * side;
    const int anchor_idx = location / grid_area;
    const int cell_idx = location % grid_area;
    // Each anchor owns (lcoords + lclasses + 1) channels of grid_area values.
    return (anchor_idx * (lcoords + lclasses + 1) + entry) * grid_area + cell_idx;
}

// One candidate detection: an axis-aligned box in original-image pixels,
// a class index, and a confidence score used for sorting and NMS.
struct DetectionObject
{
    int xmin, ymin, xmax, ymax, class_id;
    float confidence;

    // Build a pixel-space box from a center (x, y) and size (w, h) expressed at
    // network resolution; h_scale / w_scale map that back to the original frame.
    DetectionObject(double x, double y, double h, double w, int class_id, float confidence, float h_scale, float w_scale)
        : class_id(class_id), confidence(confidence)
    {
        xmin = static_cast<int>((x - w / 2) * w_scale);
        ymin = static_cast<int>((y - h / 2) * h_scale);
        xmax = static_cast<int>(xmin + w * w_scale);
        ymax = static_cast<int>(ymin + h * h_scale);
    }

    // Ordering is by confidence only (used with std::sort + std::greater).
    bool operator<(const DetectionObject &other) const { return confidence < other.confidence; }
    bool operator>(const DetectionObject &other) const { return confidence > other.confidence; }
};

// Intersection-over-union of two axis-aligned boxes, in [0, 1].
// Returns 0 for disjoint boxes and for degenerate (zero-area) pairs —
// previously a zero union produced 0/0 = NaN, which silently broke the
// `>= IOU_THRE` comparison in the NMS loop.
double IntersectionOverUnion(const DetectionObject &box_1, const DetectionObject &box_2)
{
    const double width_of_overlap_area = fmin(box_1.xmax, box_2.xmax) - fmax(box_1.xmin, box_2.xmin);
    const double height_of_overlap_area = fmin(box_1.ymax, box_2.ymax) - fmax(box_1.ymin, box_2.ymin);
    double area_of_overlap = 0.0;
    if (width_of_overlap_area > 0 && height_of_overlap_area > 0)
        area_of_overlap = width_of_overlap_area * height_of_overlap_area;
    const double box_1_area = static_cast<double>(box_1.ymax - box_1.ymin) * (box_1.xmax - box_1.xmin);
    const double box_2_area = static_cast<double>(box_2.ymax - box_2.ymin) * (box_2.xmax - box_2.xmin);
    const double area_of_union = box_1_area + box_2_area - area_of_overlap;
    if (area_of_union <= 0.0)
        return 0.0; // both boxes degenerate: avoid 0/0 -> NaN
    return area_of_overlap / area_of_union;
}

// Per-output-layer YOLO region parameters (anchor priors, class/coord counts),
// either defaulted or extracted from an ngraph RegionYolo node.
class YoloParams
{
    // Keep only the anchor (w, h) pairs selected by this layer's mask indices.
    template <typename T>
    void computeAnchors(const std::vector<T> &mask)
    {
        std::vector<float> selected(num * 2);
        for (int i = 0; i < num; ++i)
        {
            selected[2 * i] = anchors[mask[i] * 2];
            selected[2 * i + 1] = anchors[mask[i] * 2 + 1];
        }
        anchors = selected;
    }

public:
    int num = 0, classes = 0, coords = 0;
    // Default COCO YOLOv3 anchor priors, as (width, height) pairs.
    std::vector<float> anchors = {10.0, 13.0, 16.0, 30.0, 33.0, 23.0, 30.0, 61.0, 62.0, 45.0, 59.0, 119.0, 116.0, 90.0,
                                  156.0, 198.0, 373.0, 326.0};

    YoloParams() {}

    // Pull coords/classes/anchors/mask straight from the RegionYolo op and
    // reduce the anchor list to the masked subset for this output.
    YoloParams(const std::shared_ptr<ngraph::op::RegionYolo> regionYolo)
    {
        coords = regionYolo->get_num_coords();
        classes = regionYolo->get_num_classes();
        anchors = regionYolo->get_anchors();
        const auto mask = regionYolo->get_mask();
        num = mask.size();
        computeAnchors(mask);
    }
};

// Decode one RegionYolo output blob (NCHW, H == W == side) into detection
// boxes scaled to the original image size, appending them to `objects`.
// Boxes whose objectness or per-class probability falls below `threshold`
// are skipped.
void ParseYOLOV3Output(const YoloParams &params, const std::string &output_name,
                       const Blob::Ptr &blob, const unsigned long resized_im_h,
                       const unsigned long resized_im_w, const unsigned long original_im_h,
                       const unsigned long original_im_w,
                       const double threshold, std::vector<DetectionObject> &objects)
{

    const int out_blob_h = static_cast<int>(blob->getTensorDesc().getDims()[2]);
    const int out_blob_w = static_cast<int>(blob->getTensorDesc().getDims()[3]);
    if (out_blob_h != out_blob_w)
        throw std::runtime_error("Invalid size of output " + output_name +
                                 " It should be in NCHW layout and H should be equal to W. Current H = " + std::to_string(out_blob_h) +
                                 ", current W = " + std::to_string(out_blob_w)); // FIX: message previously printed H for both values

    auto side = out_blob_h;
    auto side_square = side * side;
    LockedMemory<const void> blobMapped = as<MemoryBlob>(blob)->rmap();
    const float *output_blob = blobMapped.as<float *>();
    // --------------------------- Parsing YOLO Region output -------------------------------------
    for (int i = 0; i < side_square; ++i)
    {
        int row = i / side;
        int col = i % side;
        for (int n = 0; n < params.num; ++n)
        {
            // Offsets of the objectness score and box coordinates for anchor n at cell i.
            int obj_index = EntryIndex(side, params.coords, params.classes, n * side_square + i, params.coords);
            int box_index = EntryIndex(side, params.coords, params.classes, n * side_square + i, 0);
            float scale = output_blob[obj_index]; // objectness score
            if (scale < threshold)
                continue;
            // Box center relative to its grid cell, rescaled to network input resolution.
            double x = (col + output_blob[box_index + 0 * side_square]) / side * resized_im_w;
            double y = (row + output_blob[box_index + 1 * side_square]) / side * resized_im_h;
            // Width/height decoded through the anchor priors for this layer.
            double height = std::exp(output_blob[box_index + 3 * side_square]) * params.anchors[2 * n + 1];
            double width = std::exp(output_blob[box_index + 2 * side_square]) * params.anchors[2 * n];
            for (int j = 0; j < params.classes; ++j)
            {
                int class_index = EntryIndex(side, params.coords, params.classes, n * side_square + i, params.coords + 1 + j);
                float prob = scale * output_blob[class_index];
                if (prob < threshold)
                    continue;
                // Scale factors map network resolution back to the original frame.
                DetectionObject obj(x, y, height, width, j, prob,
                                    static_cast<float>(original_im_h) / static_cast<float>(resized_im_h),
                                    static_cast<float>(original_im_w) / static_cast<float>(resized_im_w));
                objects.push_back(obj);
            }
        }
    }
}

int main()
{
    // Pipelined YOLOv3 detection demo: reads frames from a video file, keeps
    // `inference_num + 1` async inference requests in flight, and each loop
    // iteration draws the results of the oldest (finished) request on its frame.
    std::vector<std::string> labels;                // dataset class labels, one per line of the label file
    std::map<std::string, cv::Scalar> color_labels; // label -> random drawing color
    std::map<std::string, YoloParams> yoloParams;   // output layer name -> YOLO region parameters

    // NOTE(review): config path is hard-coded; consider taking it from argv.
    cv::FileStorage fs("/home/user/桌面/inference/para.yml", cv::FileStorage::READ);
    string label_path;
    string xml_path;
    string bin_path;
    int inference_num; // depth of the async request pipeline
    fs["label_path"] >> label_path;
    fs["xml_path"] >> xml_path;
    fs["bin_path"] >> bin_path;
    fs["inference_num"] >> inference_num;

    if (true) // load dataset labels (block used only for scoping)
    {
        cout << label_path << endl;
        std::ifstream inputFile(label_path);
        std::string label;
        int label_counter = 0;
        cv::RNG rng(time(0)); // seeded RNG to assign a random color per label
        while (std::getline(inputFile, label))
        {
            color_labels[label] = cv::Scalar(rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255));
            labels.push_back(label);
            label_counter++;
        }
        if (labels.empty())
            throw std::logic_error("File empty or not found: ");
    }

    Core ie;
    CNNNetwork network = ie.ReadNetwork(xml_path, bin_path);
    auto input_info = network.getInputsInfo();   // input info, used for preprocessing setup below
    auto output_info = network.getOutputsInfo(); // output info, used for output setup below
    string input_name = input_info.begin()->first;

    for (auto &item : input_info) // configure inputs: U8 NCHW with built-in resize + RGB conversion
    {
        auto input_data = item.second;
        input_data->setPrecision(Precision::U8);
        input_data->setLayout(Layout::NCHW);
        input_data->getPreProcess().setResizeAlgorithm(RESIZE_BILINEAR);
        input_data->getPreProcess().setColorFormat(ColorFormat::RGB);
    }

    for (auto &item : output_info) // configure outputs: raw FP32 feature maps
    {
        auto output_data = item.second;
        output_data->setPrecision(Precision::FP32);
        output_data->setLayout(Layout::NCHW);
    }

    // Walk the ngraph function and collect RegionYolo parameters for every output layer.
    if (auto ngraphFunction = network.getFunction())
    {
        for (const auto op : ngraphFunction->get_ops())
        {
            auto outputLayer = output_info.find(op->get_friendly_name());
            if (outputLayer != output_info.end())
            {
                auto regionYolo = std::dynamic_pointer_cast<ngraph::op::RegionYolo>(op);
                if (!regionYolo)
                {
                    // NOTE(review): regionYolo is null in this branch, so dereferencing
                    // it to build the message is undefined behavior — it should use
                    // op->get_type_info() instead.
                    throw std::runtime_error("Invalid output type: " +
                                             std::string(regionYolo->get_type_info().name) + ". RegionYolo expected");
                }
                yoloParams[outputLayer->first] = YoloParams(regionYolo);
            }
        }
    }

    // Load on the MULTI device plugin: HDDL preferred, CPU fallback.
    ExecutableNetwork executable_network = ie.LoadNetwork(network, "MULTI", {{"MULTI_DEVICE_PRIORITIES", "HDDL,CPU"}});
    // ExecutableNetwork executable_network = ie.LoadNetwork(network, "CPU");
    vector<InferRequest::Ptr> request_queue;

    // NOTE(review): video path is hard-coded and the capture is never checked
    // with isOpened(); an empty frame would crash wrapMat2Blob below.
    VideoCapture cap("/home/user/Documents/oo.avi");

    vector<Mat> image_vec;
    Blob::Ptr imgBlob;
    Blob::Ptr output;

    // Prime the pipeline: start inference_num + 1 requests on the first frames.
    for (int i = 0; i <= inference_num; i++)
    {
        image_vec.push_back(cv::Mat());
        cap >> image_vec[i];
        request_queue.push_back(executable_network.CreateInferRequestPtr());
        imgBlob = wrapMat2Blob(image_vec[i]);
        request_queue[i]->SetBlob(input_name, imgBlob);
        request_queue[i]->StartAsync();
    }

    // One extra slot so the newest frame/request can be appended each loop turn.
    image_vec.push_back(cv::Mat());
    request_queue.push_back(executable_network.CreateInferRequestPtr());
    float sum_time = 0;
    float avreage = 0; // NOTE(review): "avreage" is a misspelling of "average"
    float frame_count = 0;

    while (true)
    {
        // NOTE(review): getTickCount() returns a 64-bit tick count; storing it
        // in a float loses precision — double would be safer for the timing math.
        float time = getTickCount();
        frame_count++;

        // Feed the newest frame into the back request, then block on the oldest.
        cap >> image_vec.back();
        imgBlob = wrapMat2Blob(image_vec.back());
        request_queue.back()->SetBlob(input_name, imgBlob);

        request_queue.back()->StartAsync();
        request_queue.front()->Wait(IInferRequest::WaitMode::RESULT_READY);
        // output = infer_request.GetBlob(output_name);
        float time_sum = (getTickCount() - time) / getTickFrequency();
        std::cout << "time_sum" << time_sum << endl;
        sum_time += time_sum;
        avreage = sum_time / frame_count;
        cout << "avreage:" << avreage << endl;

        // ---------------------------Processing output blobs--------------------------------------------------
        // Processing results of the CURRENT request
        const TensorDesc &inputDesc = input_info.begin()->second.get()->getTensorDesc();
        unsigned long resized_im_h = getTensorHeight(inputDesc);
        unsigned long resized_im_w = getTensorWidth(inputDesc);
        std::vector<DetectionObject> objects;
        // Parse every output layer of the oldest (just-finished) request.
        for (auto &output : output_info)
        {
            auto output_name = output.first;
            Blob::Ptr blob = request_queue.front()->GetBlob(output_name);
            // cap.get(4) / cap.get(3) are CAP_PROP_FRAME_HEIGHT / CAP_PROP_FRAME_WIDTH.
            ParseYOLOV3Output(yoloParams[output_name], output_name, blob, resized_im_h, resized_im_w, cap.get(4), cap.get(3), THRESOLD, objects);
        }
        // Filtering overlapping boxes: greedy NMS that zeroes the confidence
        // of every box overlapping a stronger one by >= IOU_THRE.
        std::sort(objects.begin(), objects.end(), std::greater<DetectionObject>());
        for (size_t i = 0; i < objects.size(); ++i)
        {
            if (objects[i].confidence == 0) // exact 0 is the "suppressed" marker set below
                continue;
            for (size_t j = i + 1; j < objects.size(); ++j)
                if (IntersectionOverUnion(objects[i], objects[j]) >= IOU_THRE)
                    objects[j].confidence = 0;
        }
        // Format the instantaneous FPS (1 / frame time) as a string for the overlay.
        stringstream ss;
        ss << 1 / time_sum;
        string sss;
        ss >> sss;
        // Drawing boxes
        for (auto &object : objects)
        {
            // NOTE(review): suppressed boxes have confidence == 0, so this `< 0`
            // guard never fires; they are only filtered by the CONFINDENCE_THRE
            // check below (0 < 0.01, so the net effect happens to be correct).
            if (object.confidence < 0)
                continue;
            auto label = object.class_id;
            float confidence = object.confidence;
            if (confidence > CONFINDENCE_THRE)
            {
                /** Drawing only objects when >confidence_threshold probability **/
                std::ostringstream conf;
                conf << ":" << std::fixed << std::setprecision(3) << confidence;
                cv::putText(image_vec.front(),
                            (!labels.empty() ? labels[label] : std::string("label #") + std::to_string(label)) + conf.str(),
                            cv::Point2f(static_cast<float>(object.xmin), static_cast<float>(object.ymin - 5)), cv::FONT_HERSHEY_COMPLEX_SMALL, 1,
                            cv::Scalar(0, 0, 255));
                cv::Rect color_rect(cv::Point2f(static_cast<float>(object.xmin), static_cast<float>(object.ymin)),
                                    cv::Point2f(static_cast<float>(object.xmax), static_cast<float>(object.ymax))); // bounding box rectangle
                // NOTE(review): find() result is used without checking against end();
                // safe only while every label was inserted into color_labels above.
                auto it = color_labels.find(labels[label]);
                cv::rectangle(image_vec.front(), color_rect, it->second);
                cv::putText(image_vec.front(), "FPS: " + sss, Point(20, 20), 1, 2, Scalar(0, 0, 255));
            }
        }

        imshow("image", image_vec.front());
        if(waitKey(1)=='r') // press 'r' to save a snapshot of the annotated frame
        {
            imwrite("image.png",image_vec.front());
        }
        // Rotate the pipeline ring: shift every frame/request one slot toward the
        // front so the next-oldest request becomes front() on the next iteration.
        for (int i = 0; i <= inference_num; i++)
        {
            Mat tmp_img = image_vec[i];
            image_vec[i] = image_vec[i + 1];
            image_vec[i + 1] = tmp_img;
            request_queue[i].swap(request_queue[i + 1]);
        }
    }
}