﻿//// yolo_test.cpp : 此文件包含 "main" 函数。程序执行将在此处开始并结束。
////
//
//// 运行程序: Ctrl + F5 或调试 >“开始执行(不调试)”菜单
//// 调试程序: F5 或调试 >“开始调试”菜单
//
//// 入门使用技巧: 
////   1. 使用解决方案资源管理器窗口添加/管理文件
////   2. 使用团队资源管理器窗口连接到源代码管理
////   3. 使用输出窗口查看生成输出和其他消息
////   4. 使用错误列表窗口查看错误
////   5. 转到“项目”>“添加新项”以创建新的代码文件，或转到“项目”>“添加现有项”以将现有代码文件添加到项目
////   6. 将来，若要再次打开此项目，请转到“文件”>“打开”>“项目”并选择 .sln 文件
//#include "yolo_v2_class.hpp"
//#include "opencv2/opencv.hpp"
//#include "ffmpeg_headers.h"
//
//// Function to convert YUV420 values to image_t structure.
//// NOTE(review): this whole file is commented out (dead code); the notes
//// below flag issues to resolve before re-enabling it.
//// Converts an I420 (planar YUV420) buffer into a float RGB image_t.
//// - img->data is malloc'd here; the caller owns it and must free() it.
//// - NOTE(review): the chroma index `i / 4` does not follow I420's 2x2
////   subsampling layout (the pixel's row matters); the conventional index is
////   ((i / width) / 2) * (width / 2) + (i % width) / 2 — verify against the
////   actual buffer layout before using this function.
//// - NOTE(review): R/G/B results are neither clamped to [0,255] nor
////   normalized to [0,1]; darknet's image_t presumably expects [0,1]
////   floats — TODO confirm with the consumer of image_t.
//void convertYUV420toImage(unsigned char* yuv, int width, int height, struct image_t* img) {
//    int image_size = width * height;
//    int uv_image_size = image_size / 4;
//    int rgb_image_size = image_size * 3;
//
//    img->h = height;
//    img->w = width;
//    img->c = 3;
//    img->data = (float*)malloc(rgb_image_size * sizeof(float));
//
//    unsigned char* y = yuv;
//    unsigned char* u = yuv + image_size;
//    unsigned char* v = u + uv_image_size;
//
//    float* rgb_data = img->data;
//
//    for (int i = 0, j = 0; i < image_size; i++, j += 3) {
//        int y_val = (int)y[i];
//        int u_val = (int)u[i / 4];
//        int v_val = (int)v[i / 4];
//
//        // BT.601 full-range YUV -> RGB coefficients.
//        float r = y_val + 1.402f * (v_val - 128);
//        float g = y_val - 0.344136f * (u_val - 128) - 0.714136f * (v_val - 128);
//        float b = y_val + 1.772f * (u_val - 128);
//
//        rgb_data[j] = r;
//        rgb_data[j + 1] = g;
//        rgb_data[j + 2] = b;
//    }
//}
//
//// Convert YUV data to cv::Mat.
//// Wraps the I420 buffer (width x height*3/2, single channel) WITHOUT
//// copying, then converts it into a newly allocated BGR cv::Mat.
//// NOTE(review): assumes `height` is even (I420 2x2 chroma subsampling) and
//// that `yuvData` stays valid for the duration of the call; the returned
//// Mat owns its own storage.
//cv::Mat convertYUVtoMat(unsigned char* yuvData, int width, int height) {
//	cv::Mat yuvImg(height + height / 2, width, CV_8UC1, yuvData);
//	cv::Mat bgrImg;
//	cv::cvtColor(yuvImg, bgrImg, cv::COLOR_YUV2BGR_I420);
//
//	return bgrImg;
//}
//// Converts a BGR cv::Mat into an I420 buffer, reporting the frame size
//// through the `width`/`height` out-params.
//// NOTE(review): the caller must supply `yuvData` with room for
//// cols*rows*3/2 bytes of the *input* image — this function cannot check.
//void convertMatToYUV420(const cv::Mat& bgrImg, unsigned char* yuvData, int &width, int &height) {
//	cv::Mat yuvImg;
//	cv::cvtColor(bgrImg, yuvImg, cv::COLOR_BGR2YUV_I420);
//
//	// Get the width and height of the image
//	width = bgrImg.cols;
//	height = bgrImg.rows;
//
//	// Copy Y channel (Grayscale)
//	int ySize = width * height;
//	std::memcpy(yuvData, yuvImg.data, ySize);
//
//	// Copy U and V channels (Chroma)
//	// NOTE(review): a freshly allocated cvtColor result is presumably
//	// continuous, so the three copies could be one memcpy of
//	// ySize + 2*uSize bytes — verify continuity before collapsing them.
//	int uSize = width * height / 4;
//	std::memcpy(yuvData + ySize, yuvImg.data + ySize, uSize);
//	std::memcpy(yuvData + ySize + uSize, yuvImg.data + ySize + uSize, uSize);
//}
//
//// Convert YUV data to std::shared_ptr<image_t>.
//// I420 -> BGR -> RGB, then packaged into a darknet image_t whose float
//// buffer is released by the shared_ptr's custom deleter.
//// NOTE(review): latent BUG — rgbImg is CV_8UC3 (one byte per channel), but
//// both copy paths below treat its storage as float:
////   * memcpy(..., size * sizeof(float)) reads 4x past the end of rgbImg's
////     pixel buffer and fills image_ptr->data with reinterpreted bytes,
////     not numerically converted values;
////   * rgbImg.ptr<float>(i) is a type mismatch for an 8-bit Mat.
//// An element-wise static_cast<float> loop is required (see
//// convertYUV420ToBGR24 later in this file for the correct pattern).
//std::shared_ptr<image_t> convertYUVtoImage(unsigned char* yuvData, int width, int height) {
//	cv::Mat yuvImg(height + height / 2, width, CV_8UC1, yuvData);
//	cv::Mat bgrImg;
//	cv::cvtColor(yuvImg, bgrImg, cv::COLOR_YUV2BGR_I420);
//
//	cv::Mat rgbImg;
//	cv::cvtColor(bgrImg, rgbImg, cv::COLOR_BGR2RGB);
//
//	std::shared_ptr<image_t> image_ptr(new image_t, [](image_t* img) { delete[] img->data; delete img; });
//	image_ptr->h = rgbImg.rows;
//	image_ptr->w = rgbImg.cols;
//	image_ptr->c = rgbImg.channels();
//	int size = image_ptr->h * image_ptr->w * image_ptr->c;
//	image_ptr->data = new float[size];
//
//	if (rgbImg.isContinuous()) {
//		memcpy(image_ptr->data, rgbImg.data, size * sizeof(float));
//	}
//	else {
//		for (int i = 0; i < rgbImg.rows; ++i) {
//			memcpy(image_ptr->data + i * rgbImg.cols * rgbImg.channels(), rgbImg.ptr<float>(i), rgbImg.cols * rgbImg.channels() * sizeof(float));
//		}
//	}
//
//	return image_ptr;
//}
//
////#include <libyuv.h>
//
//// NOTE(review): doubly-disabled libyuv-based variant of the conversion
//// above. If re-enabled: libyuv::I420ToRGB24 emits packed 8-bit RGB bytes
//// into the reinterpret_cast'ed float buffer — the bytes are never widened
//// to float, and only the first quarter of the float allocation gets
//// written. A byte->float conversion pass would still be required.
////std::shared_ptr<image_t> convertYUV420ToImage(unsigned char* yuvData, int width, int height) {
////	const int imageSize = width * height;
////	const int uvSize = imageSize / 4;
////
////	std::shared_ptr<image_t> image_ptr(new image_t, [](image_t* img) { delete[] img->data; delete img; });
////	image_ptr->h = height;
////	image_ptr->w = width;
////	image_ptr->c = 3;  // RGB channels
////
////	const int rgbSize = width * height * 3;
////	image_ptr->data = new float[rgbSize];
////
////	unsigned char* yPlane = yuvData;
////	unsigned char* uPlane = yuvData + imageSize;
////	unsigned char* vPlane = yuvData + imageSize + uvSize;
////
////	int yStride = width;
////	int uvStride = width / 2;
////
////	// Convert YUV420 to RGB
////	libyuv::I420ToRGB24(yPlane, yStride, uPlane, uvStride, vPlane, uvStride,
////		reinterpret_cast<unsigned char*>(image_ptr->data), width * 3, width, height);
////
////	return image_ptr;
////}
//
//#include <iostream>
//
//#include <iostream>
//#include <fstream>
//#include <list>
//#include <string>
//
//// Reads every line of `filename` into a std::list<std::string>.
//// Returns an empty list when the file cannot be opened (after logging to
//// stderr) — NOTE(review): indistinguishable from a genuinely empty file.
//std::list<std::string> readLinesToList(const std::string& filename) {
//	std::list<std::string> lines;
//	std::ifstream file(filename);
//
//	if (!file.is_open()) {
//		std::cerr << "Error opening file: " << filename << std::endl;
//		return lines;
//	}
//
//	std::string line;
//	while (std::getline(file, line)) {
//		lines.push_back(line);
//	}
//
//	// NOTE(review): explicit close() is redundant — std::ifstream closes
//	// itself on destruction (RAII).
//	file.close();
//	return lines;
//}
//// Returns the 1-based `line_number`-th entry of `lines`, or "" when the
//// index is out of range (after logging to stderr).
//// NOTE(review): `line_number > lines.size()` compares int against size_t
//// (signed/unsigned warning); std::advance on a std::list is O(n) per
//// call — acceptable for a coco.names-sized class list.
//std::string get_line_value(const std::list<std::string>& lines, int line_number) {
//	if (line_number < 1 || line_number > lines.size()) {
//		std::cerr << "Invalid line number: " << line_number << std::endl;
//		return "";
//	}
//
//	auto it = lines.begin();
//	std::advance(it, line_number - 1);
//	return *it;
//}
//#include <iostream>
//#include <fstream>
//#include <opencv2/opencv.hpp>
//
//
//// Converts an I420 buffer into a float BGR image_t. Values are left in
//// the 0..255 range — not normalized to [0,1]; TODO confirm what the
//// detector expects. Unlike convertYUVtoImage above, each byte is widened
//// with static_cast, so the float data here is numerically correct.
//// NOTE(review): linear indexing of bgrImg.data assumes the Mat is
//// continuous — true for a freshly allocated cvtColor result, but verify
//// if this is ever changed to operate on a ROI/submat.
//std::shared_ptr<image_t> convertYUV420ToBGR24(unsigned char* yuvData, int width, int height) {
//	cv::Mat yuvImg(height + height / 2, width, CV_8UC1, yuvData);
//	cv::Mat bgrImg;
//	cv::cvtColor(yuvImg, bgrImg, cv::COLOR_YUV2BGR_I420);
//
//	std::shared_ptr<image_t> image_ptr(new image_t, [](image_t* img) { delete[] img->data; delete img; });
//	image_ptr->h = height;
//	image_ptr->w = width;
//	image_ptr->c = 3;  // BGR channels
//
//	const int dataSize = width * height * 3;
//	image_ptr->data = new float[dataSize];
//
//	for (int i = 0; i < dataSize; ++i) {
//		image_ptr->data[i] = static_cast<float>(bgrImg.data[i]);
//	}
//
//	return image_ptr;
//}
//
//
//#include <mutex>
//// Entry point (NOTE(review): the whole file is commented out — dead code).
//// Flow: run yolov4 on one still image and display it, then pull an RTSP
//// stream, hardware-decode it, run detection per frame, draw the boxes,
//// re-encode to H.264 and push the result to a .ts file.
//int main()
//{
//	// path to the yolo .cfg file
//   auto bres= built_with_cuda();
//   auto cudanum= get_device_count();
//   // NOTE(review): bres/cudanum are probed but never checked or used.
//     std::string cfg_file = R"(D:\study\yolo_test\x64\Release\cfg\yolov4.cfg)"; 
//    // path to the .weights file
//    //std::string weights_file = "D:\\workspace\\qt_project\\yolo_demo\\yolov3.weights";
//     std::string weights_file = R"(D:\study\yolo_test\x64\Release\backup/yolov4.weights)"; 
//
//	 int num = 0; // frame counter for the per-frame .jpg dumps below
//
//    // test image path
//    std::string image_file  = R"(E:\0.jpg)";
//
//	// class names, one per line; looked up 1-based via get_line_value
//	auto namelist = readLinesToList(R"(D:\study\yolo_test\x64\Release\data/coco.names)");
//	//list* options = readLinesToList((char *)R"(D:\study\yolo_test\x64\Release\data/coco.data)");
//	//char* name_list = option_find_str(options, (char*)"names", (char*)"data/names.list");
//	int names_size = 0;
//	//char** names = get_labels_custom(name_list, &names_size); //get_labels(name_list);
//
//    
//    // yolo detection threshold
//    float thresh = 0.1;
//    // create the detector (GPU id 0)
//    Detector my_detector(cfg_file,weights_file,0);
//    // run yolo detection on the still image
//    std::vector<bbox_t> result_vec = my_detector.detect(image_file,thresh);
//    // visualize the detection results
//   cv::Mat image = cv::imread(image_file);
//    // iterate over the bbox_t results
//    for(std::vector<bbox_t>::iterator iter=result_vec.begin();iter!=result_vec.end();iter++)
//    {
//        // draw the bounding box
//        cv::Rect rect(iter->x,iter->y,iter->w,iter->h);
//        cv::rectangle(image, rect, cv::Scalar(255,0,0), 2);
//		// label parameters (obj_id is 0-based; +1 gives the 1-based line no.)
//		std::string text = get_line_value(namelist ,iter->obj_id+1);
//		cv::Point position(iter->x, iter->y); // text anchor
//		cv::Scalar color(0, 0, 255); // text color — red in BGR order
//		int fontFace = cv::FONT_HERSHEY_SIMPLEX; // font face
//		double fontScale = 1.0; // font scale factor
//		int thickness = 2; // stroke thickness
//
//		// draw the label onto the image
//		cv::putText(image, text, position, fontFace, fontScale, color, thickness);
//
//    }
//    // show the annotated still image (blocks until a key is pressed)
//    cv::imshow("result",image);
//    cv::waitKey(0);
//
//
//
//	// ---- streaming pipeline: RTSP -> decode -> detect -> encode -> push ----
//	FFmpegDecoderAPI* decoder = FFmpegDecoderAPI::CreateDecoder();
//	FrameInfo _info;
//	memset(&_info, 0, sizeof(FrameInfo));
//	_info.FramesPerSecond = 30;
//	_info.VCodec = CAREYE_CODEC_H264;
//	_info.DecType = DecodeType::kNvencDecode;
//	_info.Width = 1280;
//	_info.Height = 720;
//	 bres = decoder->createDecoder(_info);
//	// NOTE(review): createDecoder's result is stored but never checked.
//	decoder->Start();
//
//	FFmpegVideoEncoderAPI * m_ffmpegEncoder = FFmpegVideoEncoderAPI::CreateEncoder();
//
//	
//	m_ffmpegEncoder->Init("libx264", CAREYE_FMT_YUV420P, 1280, 720, 30, 1280 * 720 * 3);
//	FFmpegPusher* pusher = FFmpegPusher::CreatePusher();
//
//	bool bres1=pusher->Start("E://Camera_00002.ts");
//	
//	//bool bre11s = encoder_init(1280, 720, 30, 4000);
//	//BaseStreamCapture* pBase = BaseStreamCapture::CreateStreamCapture(R"(rtsp://199.19.110.7:7103/live/park)");
//	BaseStreamCapture* pBase = BaseStreamCapture::CreateStreamCapture(R"(rtsp://192.168.2.9:554/Media/Camera_00001)");
//	// forward every raw (encoded) frame from the capture into the decoder
//	pBase->RegisterVideoCallBack([=, &decoder](uint8_t* raw_data, const char* codecid, int raw_len, bool bKey, int nWidth, int nHeight, int64_t nTimeStamp)
//		{
//			decoder->PostFrame(raw_data, raw_len, bKey, 0);
//
//		});
//
//
//	std::mutex m_mutex;
//
//	// per-decoded-frame callback: detect, annotate, re-encode, push.
//	// NOTE(review): captures num / my_detector / m_mutex by reference —
//	// valid only while main's stack frame is alive (it blocks forever
//	// below, so this holds here).
//	decoder->RegisterDecodeCallback([=, &num, &my_detector,&m_mutex](uint8_t* yuv, int nWidth, int nHeight, int strideY, int strideU, int strideV, bool bKey, int64_t nTimeStamp)
//		{
//			std::lock_guard<std::mutex> _lock(m_mutex); // serialize frames
//			// NOTE(review): LEAK — img is new'd every frame, never filled
//			// (its only consumer below is commented out), never deleted.
//			struct image_t* img = new struct image_t;
//			//auto image1=convertYUVtoImage(yuv, nWidth, nHeight);
//			cv::Mat image = convertYUVtoMat(yuv, nWidth, nHeight);
//
//
//			// NOTE(review): debug dump of every decoded frame to disk —
//			// heavy I/O; remove for production.
//			std::string  imagename = "E://" + std::to_string(num) + ".jpg";
//			num++;
//			cv::imwrite(imagename, image);
//
//			// NOTE(review): image111 is computed but never used — a wasted
//			// per-frame conversion and float allocation.
//			auto image111 = convertYUV420ToBGR24(yuv, nWidth, nHeight);
//			auto image_ptr = my_detector.mat_to_image(image);
//			//convertYUV420toImage(yuv, nWidth, nHeight, img);
//
//			// create the detector (stale — my_detector is created in main)
//
//			// yolo threshold (higher than the still-image pass above)
//			float thresh = 0.5;
//			// run yolo detection, timing it with clock()
//			clock_t start, finish;
//			double Times, Times1;
//			start = clock();
//			//std::vector<bbox_t> result_vec = my_detector.detect(imagename, thresh);
//			std::vector<bbox_t> result_vec = my_detector.detect(*(image_ptr.get()), thresh);
//			// // display the detection results
//			finish = clock();
//			Times = (double)(finish - start) / CLOCKS_PER_SEC;
//			Times1 = (double)(finish - start) / CLK_TCK; // CLK_TCK is a legacy alias
//			std::cout << "start(时钟打点): " << start << std::endl;
//			std::cout << "finish(时钟打点): " << finish << std::endl;
//			std::cout << "CLOCKS_PER_SEC: " << CLOCKS_PER_SEC << std::endl;
//			std::cout << "CLK_TCK: " << CLK_TCK << std::endl;
//			std::cout << "运行时间(秒)(CLOCKS_PER_SEC): " << Times << std::endl;
//			std::cout << "运行时间(秒)(CLK_TCK): " << Times1 << std::endl;
//
//			// iterate over the bbox_t results
//			for (std::vector<bbox_t>::iterator iter = result_vec.begin(); iter != result_vec.end(); iter++)
//			{
//				// draw the bounding box
//				cv::Rect rect(iter->x, iter->y, iter->w, iter->h);
//				cv::rectangle(image, rect, cv::Scalar(255, 0, 0), 2);
//				// label parameters
//				std::string text = get_line_value(namelist, iter->obj_id + 1);
//				cv::Point position(iter->x, iter->y); // text anchor
//				cv::Scalar color(0, 0, 255); // text color — red in BGR order
//				int fontFace = cv::FONT_HERSHEY_SIMPLEX; // font face
//				double fontScale = 1.0; // font scale factor
//				int thickness = 2; // stroke thickness
//
//				// draw the label onto the image
//				cv::putText(image, text, position, fontFace, fontScale, color, thickness);
//			}
//			// NOTE(review): buffer size is hard-coded for 1280x720 I420, but
//			// convertMatToYUV420 writes nw*nh*3/2 bytes of the *actual*
//			// frame — buffer overflow if the stream exceeds 720p.
//			uint8_t* newyuv = new uint8_t[1280 * 720 * 3 / 2];
//			int nw, nh;
//			convertMatToYUV420(image,newyuv, nw, nh);
//			
//			// NOTE(review): nw*nh bytes is assumed to hold one encoded
//			// frame — TODO confirm against the encoder API's contract.
//			unsigned char* pOutEncodeBuffer = new unsigned char[nw * nh];
//			int            nOneFrameLength = 0;
//			// burn the person count into the stream as a text overlay
//			std::string stdfilter = "person num = " + std::to_string(result_vec.size());
//			m_ffmpegEncoder->ChangeVideoFilter(stdfilter.c_str(), 30, (char *)"red", 0.85, 100, 100);
//			m_ffmpegEncoder->EncodecYUV(newyuv, nw * nh * 3 / 2, pOutEncodeBuffer, &nOneFrameLength);
//			
//			if (nOneFrameLength>0)
//			{
//				pusher->AppendVideo(pOutEncodeBuffer, nOneFrameLength);
//			}
//			delete[]newyuv;
//			newyuv = nullptr;
//		
//			delete []pOutEncodeBuffer;
//			pOutEncodeBuffer = nullptr;
//
//			// (disabled) dump the annotated frame to disk
//		   //imagename = "E://" + std::to_string(num) + ".jpg";
//		   // num++;
//		  //  cv::imwrite(imagename, image);
//
//
//		}
//
//	);
//
//
//			pBase->Start();
//			cv::waitKey(0);
//			// block forever. NOTE(review): return 0 is unreachable, and
//			// decoder / m_ffmpegEncoder / pusher / pBase are never
//			// stopped or released.
//			while (true)
//			{
//				getchar();
//
//			}
//			return 0;
//}