// C++ standard library
#include <algorithm>   // std::max_element (detection post-processing)
#include <chrono>
#include <cstring>     // memcpy (get_frame)
#include <fstream>
#include <iostream>
#include <mutex>       // m_mutex guarding the frame buffer
#include <string>
#include <thread>      // capture thread
#include <vector>

// CUDA runtime
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include <device_launch_parameters.h>
#include "device_functions.h"

// TensorRT
#include "NvInfer.h"
#include "NvOnnxParser.h"

// OpenCV
#include <opencv2/dnn/dnn.hpp>
#include "opencv2/opencv.hpp"

// Win32 (Sleep)
#include <windows.h>

// Project: CUDA preprocessing kernel wrapper (resize2GPU)
#include "image preprocessing.h"


using namespace cv;
using namespace cv::dnn;
using namespace nvinfer1;
using namespace nvonnxparser;
using namespace std;

// Shared camera handle: opened briefly in main() to probe the frame size,
// then re-opened and owned by the capture thread (run()).
VideoCapture cap;
// Frame buffer shared between the capture thread and the inference loop
vector<Mat> m_vec_frame;
// Mutex guarding m_vec_frame
mutex m_mutex;

void run();
bool get_frame(Mat& frame, uchar* dev, int w, int h);
void put_frame(Mat frame);

const char* classNames[] = { "reflective_clothes", "other_clothes", "hat", "person" }; // class label list (must match the model's class order)

// TensorRT logger: prints warnings and errors to stdout, drops
// info/verbose-level messages.
class Logger : public ILogger
{
    void log(Severity severity, nvinfer1::AsciiChar const* msg) noexcept override
    {
        // Ignore anything less severe than a warning.
        if (severity > Severity::kWARNING)
            return;
        std::cout << msg << std::endl;
    }
} gLogger;
// Publish the newest camera frame into the shared buffer.
// Uses a scoped lock (RAII) so the mutex is released even if push_back
// throws; the previous manual lock()/unlock() pair would leave the mutex
// held forever on an exception.
void put_frame(Mat frame)
{
	std::lock_guard<std::mutex> guard(m_mutex);
	// Crude backlog control: once more than 3 frames pile up, drop them all.
	// (Testing showed a cap of 3 gives the lowest display latency.)
	if (m_vec_frame.size() > 3)
		m_vec_frame.clear();
	// Store the frame; Mat is reference-counted, so this is a cheap shallow copy.
	m_vec_frame.push_back(frame);
}
// Fetch the most recent buffered frame and copy its pixels into `dev`,
// the pinned host staging buffer (must hold at least w*h*3 bytes).
// Returns false when no frame has been buffered yet.
bool get_frame(Mat& frame, uchar* dev, int w, int h)
{
	{
		// Scoped lock (RAII) instead of the original manual lock()/unlock().
		std::lock_guard<std::mutex> guard(m_mutex);
		if (m_vec_frame.empty())
			return false;
		// Take the newest frame; Mat's refcount keeps the pixel data alive
		// even if put_frame() clears the vector afterwards.
		frame = m_vec_frame.back();
	}
	// Copy outside the lock so the capture thread is never blocked by the memcpy.
	// BUG FIX: the original cast the destination to (void**), a misleading
	// pointer-to-pointer cast where the plain destination pointer is meant.
	// NOTE(review): assumes the frame is continuous BGR8 of exactly w x h —
	// confirm the capture resolution cannot change at runtime.
	memcpy(dev, frame.data, static_cast<size_t>(h) * w * 3 * sizeof(uchar));
	return true;
}
// Capture thread body: continuously grabs frames from camera 0 and
// publishes them to the shared buffer via put_frame().
// Exits when the camera cannot be opened or delivers an empty frame.
void run()
{
	cap.open(0);
	if (!cap.isOpened())
	{
		// BUG FIX: the original only printed the error and then kept reading
		// from the unopened camera; bail out instead.
		cout << "captureOpen failed!" << endl;
		return;
	}
	Mat current_frame;
	// Grab-and-discard one frame, then wait briefly: this initial warm-up
	// delay is required for the capture device to start delivering frames.
	cap >> current_frame;
	Sleep(50);
	while (1)
	{
		cap >> current_frame;
		if (current_frame.empty())
		{
			cout << "frame empty\n" << endl;
			return;
		}
		put_frame(current_frame);
	}
}


// Entry point: probes the camera resolution, starts the capture thread,
// deserializes a cached TensorRT YOLOv5 engine, then loops:
// grab frame -> upload -> GPU resize -> inference -> decode -> draw -> show.
int main()
{
    // --- Probe the camera once just to learn the native frame size ---
    cap.open(0);
    if (!cap.isOpened())
    {
        std::cout << "无法打开摄像头" << std::endl;
        return 0;
    }
    cv::Mat img;
    cap.read(img);
    int h = img.rows; // native frame height
    int w = img.cols; // native frame width
    cap.release();

	Mat frame;
	// Launch the capture thread; run() re-opens the camera and keeps
	// publishing frames through put_frame().
	thread th1(run);
	th1.detach();
	Sleep(2000); // let the capture thread warm up and buffer some frames

	// --- Deserialize the cached TensorRT engine ---
	IRuntime* runtime = createInferRuntime(gLogger);
	std::string cached_path = "./build/yolov5_engine_output.trt";
	std::ifstream trtModelFile(cached_path, std::ios_base::in | std::ios_base::binary);
	// BUG FIX: the original never checked whether the engine file opened.
	if (!trtModelFile.is_open())
	{
		std::cout << "failed to open engine file: " << cached_path << std::endl;
		return -1;
	}
	trtModelFile.seekg(0, ios::end);
	std::streamsize size = trtModelFile.tellg();
	trtModelFile.seekg(0, ios::beg);
	if (size <= 0)
	{
		std::cout << "engine file is empty: " << cached_path << std::endl;
		return -1;
	}

	char* buff = new char[size];
	trtModelFile.read(buff, size);
	trtModelFile.close();
	ICudaEngine* re_engine = runtime->deserializeCudaEngine((void*)buff, size, NULL);
	delete[] buff;
	// BUG FIX: guard against a corrupt or version-mismatched engine file.
	if (re_engine == nullptr)
	{
		std::cout << "deserializeCudaEngine failed" << std::endl;
		return -1;
	}

	// --- Discover the engine's input/output binding indices ---
	float* buffers[2]; // device tensors, indexed by binding index
	int inputIndex = 1;
	int outputIndex = 1;
	int numInputs = 0;
	int numOutputs = 0;
	int numBindings = re_engine->getNbBindings();

	for (int bi = 0; bi < numBindings; bi++)
	{
		if (re_engine->bindingIsInput(bi)) {
			inputIndex = bi;
			numInputs++;
		}
		else {
			outputIndex = bi;
			numOutputs++;
		}
	}

	// Total element count of the input tensor (product of all dims).
	int input_size = 1;
	int dimSize = 1;
	const nvinfer1::Dims inputDims = re_engine->getBindingDimensions(inputIndex);
	int numDims = inputDims.nbDims;
	for (int i = 0; i < numDims; i++) {
		dimSize = inputDims.d[i];
		input_size = input_size * dimSize;
		std::cout << "Dimension " << i << ": " << dimSize << std::endl;
	}
	// Network input spatial size: for an NCHW binding the last two dims
	// are (height, width).
	int input_shape1 = inputDims.d[numDims - 2]; // network input height
	int input_shape2 = inputDims.d[numDims - 1]; // network input width

	cout << endl;
	cout << "intput_size:" << input_size << endl;
	cout << endl;

	// Total element count of the output tensor.
	int output_size = 1;
	const nvinfer1::Dims outputDims = re_engine->getBindingDimensions(outputIndex);
	numDims = outputDims.nbDims;
	for (int i = 0; i < numDims; i++) {
		dimSize = outputDims.d[i];
		output_size = output_size * dimSize;
		std::cout << "Dimension " << i << ": " << dimSize << std::endl;
	}
	cout << endl;
	cout << "output_size:" << output_size << endl;

	// --- Allocate buffers (once, reused every iteration) ---
	uchar* devSrc;   // pinned HOST staging buffer for the raw BGR frame
	uchar* devSrc1;  // DEVICE copy of the raw BGR frame
	cudaMalloc(&devSrc1, w * h * 3 * sizeof(uchar));
	cudaMalloc(&buffers[inputIndex], input_size * sizeof(float));
	cudaMalloc(&buffers[outputIndex], output_size * sizeof(float));
	cudaMallocHost((void**)&devSrc, sizeof(uchar) * w * h * 3);

	// CUDA stream used for the inference enqueue.
	cudaStream_t stream;
	cudaStreamCreate(&stream);

	// Confidence threshold for keeping a detection.
	float threshold = 0.8500;
	float* result_array = new float[output_size]; // host copy of the raw output

	IExecutionContext* context = re_engine->createExecutionContext();

	while (1)
	{
		bool ret = get_frame(frame, devSrc, w, h);
		if (ret == false || frame.empty()) {
			cout << "false！" << endl;
		}
		else
		{
			// BUG FIX: the original copied devSrc onto itself
			// (cudaMemcpy(devSrc, devSrc, ...)); upload the pinned host
			// staging buffer into the device buffer devSrc1 instead.
			cudaMemcpy(devSrc1, devSrc, w * h * 3 * sizeof(uchar), cudaMemcpyHostToDevice);
			// NOTE(review): resize2GPU (from "image preprocessing.h") is
			// assumed to read a device BGR image and write the resized float
			// tensor into buffers[inputIndex] — confirm its pointer spaces.
			resize2GPU(devSrc1, w, h, buffers[inputIndex], input_shape1, input_shape2);

			// Run inference on the stream.
			context->enqueueV2((void* const*)buffers, stream, nullptr);

			// Copy the raw predictions back to the host. cudaMemcpy is
			// synchronous, so it also waits for the enqueue to finish.
			cudaMemcpy(result_array, buffers[outputIndex], output_size * sizeof(float), cudaMemcpyDeviceToHost);

			// Each prediction row is 9 floats: cx, cy, w, h, objectness,
			// then 4 per-class scores (matching classNames).
			for (int j = 0; j < output_size; j = j + 9)
			{
				float* data = result_array + j;
				float* class_scores = data + 5;
				auto max_it = std::max_element(class_scores, class_scores + 4);
				int classId = (int)std::distance(class_scores, max_it);
				// Final score = objectness * best class probability.
				float score = data[4] * data[5 + classId];
				if (score > threshold)
				{
					// Boxes are in network-input pixel coordinates; rescale to
					// the camera frame. (Generalized from the original's
					// hard-coded 640 to the actual network input size.)
					int left = (int)(((data[0] - (data[2] / 2)) / input_shape2) * w);
					int top = (int)(((data[1] - (data[3] / 2)) / input_shape1) * h);
					int width1 = (int)((data[2] / input_shape2) * w);
					int height1 = (int)((data[3] / input_shape1) * h);
					Rect box(left, top, width1, height1);
					String label = classNames[classId];
					putText(frame, label, Point(left, top - 5), FONT_HERSHEY_SIMPLEX, 0.5, Scalar(0, 255, 0), 1);
					rectangle(frame, box, Scalar(0, 255, 0), 2);
				}
			}

			cv::imshow("Detection Results", frame);
			cv::waitKey(1);
		}
	}

	// Unreachable while the loop above runs forever; kept correct so cleanup
	// works if the loop is ever given an exit condition.
	delete[] result_array;
	cudaStreamDestroy(stream);
	cudaFree(buffers[inputIndex]);
	cudaFree(buffers[outputIndex]);
	cudaFree(devSrc1);       // BUG FIX: device frame buffer was never freed
	cudaFreeHost(devSrc);    // BUG FIX: pinned host buffer was never freed
	context->destroy();
	re_engine->destroy();
	runtime->destroy();
	cap.release();
	return 0;
}

