#include <algorithm>
#include <functional>
#include <iomanip>
#include <iostream>
#include <map>
#include <memory>
#include <sstream>
#include <string>
#include <vector>

#include <inference_engine.hpp>

#include "opencv2/opencv.hpp"

#include "yolov3_func.hpp"

using namespace InferenceEngine;

/// Run YOLOv3 person detection on a video file with the OpenVINO
/// Inference Engine and display the results in an OpenCV window.
/// Returns 0 on success, 1 if the input video cannot be opened.
int main()
{
    // Open the input video and bail out early on failure instead of
    // silently looping zero times later. (The original assigned the
    // bool result of open() to a cv::Mat and never checked it.)
    cv::VideoCapture capture;
    if (!capture.open("/home/yuanpei-chen/桌面/worker-zone-detection.mp4"))
    {
        std::cerr << "Failed to open input video" << std::endl;
        return 1;
    }
    cv::Mat frame;

    // Use the real stream dimensions so boxes land correctly for any
    // input; fall back to the original hard-coded 1080p if the
    // container does not report them.
    size_t width  = static_cast<size_t>(capture.get(cv::CAP_PROP_FRAME_WIDTH));
    size_t height = static_cast<size_t>(capture.get(cv::CAP_PROP_FRAME_HEIGHT));
    if (width == 0)  width  = 1920;
    if (height == 0) height = 1080;

    const double confidence_threshold = 0.5;  // minimum score to keep a detection
    const double iou_threshold        = 0.4;  // overlap above which a box is suppressed

    //------------------------1-------------------------- inference core
    Core ie;
    //-------------------------2------------------------- read the IR model
    CNNNetwork network = ie.ReadNetwork("/opt/intel/openvino/deployment_tools/model_optimizer/frozen_darknet_yolov3_model.xml","/opt/intel/openvino/deployment_tools/model_optimizer/frozen_darknet_yolov3_model.bin");
    //--------------------------3----------------------- configure input/output
    InputsDataMap inputInfo(network.getInputsInfo());
    InputInfo::Ptr& input = inputInfo.begin()->second;
    auto inputName = inputInfo.begin()->first;

    input->setPrecision(Precision::U8);
    input->getInputData()->setLayout(Layout::NCHW);
    ICNNNetwork::InputShapes inputShapes = network.getInputShapes();
    SizeVector& inSizeVector = inputShapes.begin()->second;
    inSizeVector[0] = 1;  // force batch size 1
    network.reshape(inputShapes);

    OutputsDataMap outputInfo(network.getOutputsInfo());
    for (auto &output : outputInfo)
    {
        output.second->setPrecision(Precision::FP32);
        output.second->setLayout(Layout::NCHW);
    }

    // Collect the YOLO region parameters (anchors, class count, ...) for
    // every RegionYolo output layer of the ngraph function.
    std::map<std::string, YoloParams> yoloParams;
    if (auto ngraphFunction = network.getFunction())
    {
        for (const auto& op : ngraphFunction->get_ops())  // by ref: avoid copying each shared_ptr
        {
            auto outputLayer = outputInfo.find(op->get_friendly_name());
            if (outputLayer != outputInfo.end())
            {
                auto regionYolo = std::dynamic_pointer_cast<ngraph::op::RegionYolo>(op);
                if (!regionYolo)
                {
                    // Report the offending op's own type name; the original
                    // dereferenced the null regionYolo pointer here.
                    throw std::runtime_error("Invalid output type: " +
                        std::string(op->get_type_info().name) + ". RegionYolo expected");
                }
                yoloParams[outputLayer->first] = YoloParams(regionYolo);
            }
        }
    }

    //---------------------------4-------------------------- load onto the device
    ExecutableNetwork executable_network = ie.LoadNetwork(network, "CPU");
    //---------------------------5--------------------------- create the request
    InferRequest::Ptr infer_request = executable_network.CreateInferRequestPtr();

    // Network input geometry is loop-invariant — hoisted out of the frame loop.
    const TensorDesc& inputDesc = inputInfo.begin()->second.get()->getTensorDesc();
    const unsigned long resized_im_h = getTensorHeight(inputDesc);
    const unsigned long resized_im_w = getTensorWidth(inputDesc);

    //---------------------------6--------------------------- per-frame loop
    while (capture.read(frame))
    {
        // Copy the frame into the network's input blob and run inference.
        Blob::Ptr frameBlob = infer_request->GetBlob(inputName);
        matU8ToBlob<uint8_t>(frame, frameBlob);
        infer_request->Infer();

        // Parse every YOLO output layer into detection candidates.
        std::vector<DetectionObject> objects;
        for (auto &output : outputInfo)
        {
            auto output_name = output.first;
            Blob::Ptr blob = infer_request->GetBlob(output_name);
            ParseYOLOV3Output(yoloParams[output_name], output_name, blob, resized_im_h, resized_im_w, height, width, confidence_threshold, objects);
        }

        // Non-maximum suppression: keep the highest-confidence box of each
        // overlapping cluster and zero out the rest.
        std::sort(objects.begin(), objects.end(), std::greater<DetectionObject>());
        for (size_t i = 0; i < objects.size(); ++i)
        {
            if (objects[i].confidence == 0)
                continue;
            for (size_t j = i + 1; j < objects.size(); ++j)
                if (IntersectionOverUnion(objects[i], objects[j]) >= iou_threshold)
                    objects[j].confidence = 0;
        }

        // Draw the surviving boxes. The original tested the threshold twice
        // (< 0.5 skip, then > 0.5 draw) and built an unused label string;
        // a single check with no dead locals is equivalent.
        for (auto &object : objects)
        {
            if (object.confidence < confidence_threshold)
                continue;
            cv::rectangle(frame,
                          cv::Point2f(static_cast<float>(object.xmin), static_cast<float>(object.ymin)),
                          cv::Point2f(static_cast<float>(object.xmax), static_cast<float>(object.ymax)),
                          cv::Scalar(0, 255, 0), 2, 8);
        }
        cv::imshow("Detection results", frame);
        cv::waitKey(10);
    }
    capture.release();
    return 0;
}


