#include <time.h>

#include <atomic>
#include <iostream>
#include <memory>
#include <mutex>
#include <thread>

#include <opencv2/opencv.hpp>
#include <samples/ocv_common.hpp>

#include "inference_engine.hpp"

std::mutex mtx;
cv::Mat src_img = cv::imread("/home/llrt/Downloads/0.jpg");
cv::VideoCapture cap(0);

struct infer_strcut
{
    InferenceEngine::Core core;
    InferenceEngine::CNNNetwork network;
    InferenceEngine::InputsDataMap input_info;
    InferenceEngine::ExecutableNetwork executable_network;
    InferenceEngine::InferRequest infer_request;

    void init_infer(std::string model_path)
    {
        network = core.ReadNetwork(model_path);
        input_info = network.getInputsInfo();

        for (auto &item : input_info)
        {
            auto input_data = item.second;
            input_data->setPrecision(InferenceEngine::Precision::U8);
            input_data->setLayout(InferenceEngine::Layout::NCHW);
            input_data->getPreProcess().setResizeAlgorithm(InferenceEngine::RESIZE_BILINEAR);
            input_data->getPreProcess().setColorFormat(InferenceEngine::ColorFormat::RGB);
        }

        executable_network = core.LoadNetwork(network, "CPU");
        infer_request = executable_network.CreateInferRequest();
    }
};

using inferPtr = std::shared_ptr<infer_strcut>;

void single_infer(inferPtr infer_sin)
{
    while (true)
    {
        mtx.lock();
        cap >> src_img;
        mtx.unlock();
        for (auto &item : infer_sin->input_info) //将Mat转为blob输入推理请求
        {
            auto input_name = item.first;
            auto input = infer_sin->infer_request.GetBlob(input_name);
            matU8ToBlob<uint8_t>(src_img, input);
        }

        infer_sin->infer_request.Infer();
    }
}

int main(int argc, char **argv)
{
    std::string MODEL_PATH;
    std::string VIDEO_PATH;

    if (argv[1] == nullptr)
    {
        MODEL_PATH = "/home/llrt/Documents/yolov3-tiny.xml";
        VIDEO_PATH = "/home/llrt/Downloads/1 行人检测测试视频.flv";
    }
    else
    {
        MODEL_PATH = argv[1];
        VIDEO_PATH = argv[2];
    }
    std::cout << "模型位置： " << MODEL_PATH << std::endl;
    std::cout << "输入视频位置： " << VIDEO_PATH << std::endl;

    std::vector<inferPtr> infer_array;

    for (int i = 0; i < 8; i++)
    {
        inferPtr infer_tmp(new infer_strcut);
        infer_tmp->init_infer(MODEL_PATH);
        infer_array.push_back(infer_tmp);
    }

    std::vector<std::thread> thread_array;

    for (auto i : infer_array)
    {
        thread_array.push_back(std::thread(&single_infer, infer_array[0]));
    }

    for (auto &i : thread_array)
    {
        i.detach();
    }

    float time_rec;
    float FPS;
    float sum_FPS = 0;
    float frame_count = 1;

    while (true)
    {
        time_rec = cv::getTickCount();
        mtx.lock();
        cv::imshow("src_img", src_img);
        cv::waitKey(1);
        mtx.unlock();
        FPS = cv::getTickFrequency() / (cv::getTickCount() - time_rec);
        std::cout << "FPS:" << FPS << std::endl;
    }

    return 0;
}
