/*
 * @Description: yolo检测实现
 * @Author: BiChunkai 321521004@qq.com
 * @Date: 2024-10-25 14:27:55
 * @FilePath: /wsMyROS2_TEST/CPP_CODE/TrayDetect/yoloInterface.cpp
 * 
 * Copyright (c) 2024 by 无锡捷普迅科技有限公司, All Rights Reserved. 
 */

#include "yoloInterface.h"

#include <time.h>

#include <algorithm>
#include <fstream>
#include <iostream>
#include <string>
#include <vector>

using namespace cv;
using namespace std;
using namespace dnn;

// COCO-80 class labels; the vector index is the model's class id.
// The YOLOV8 constructor replaces this list when a custom class file is given.
std::vector<std::string> coconame = {
	"person", "bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck",
	"boat", "traffic light", "fire hydrant", "stop sign", "parking meter", "bench",
	"bird", "cat", "dog", "horse", "sheep", "cow", "elephant", "bear", "zebra",
	"giraffe", "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee",
	"skis", "snowboard", "sports ball", "kite", "baseball bat", "baseball glove",
	"skateboard", "surfboard", "tennis racket", "bottle", "wine glass", "cup",
	"fork", "knife", "spoon", "bowl", "banana", "apple", "sandwich", "orange",
	"broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "chair", "couch",
	"potted plant", "bed", "dining table", "toilet", "tv", "laptop", "mouse",
	"remote", "keyboard", "cell phone", "microwave", "oven", "toaster", "sink",
	"refrigerator", "book", "clock", "vase", "scissors", "teddy bear",
	"hair drier", "toothbrush"
};

// Construct the detector: copy thresholds and input geometry from the
// config, optionally replace the built-in COCO class list with a custom
// class file, then load and compile the OpenVINO model.
YOLOV8::YOLOV8(Config config, std::string classPath) {
	this->confThreshold = config.confThreshold;
	this->nmsThreshold = config.nmsThreshold;
	this->scoreThreshold = config.scoreThreshold;
	this->inpWidth = config.inpWidth;
	this->inpHeight = config.inpHeight;
	this->onnx_path = config.onnx_path;

	// A non-empty classPath overrides the default COCO class names.
	if (classPath != "")
	{
		coconame.clear();
	    this->loadClassesFromFile(classPath);
	}
	this->initialmodel();

	// Print the classes this detector will report.
	std::cout << "检测的类别有:" << std::endl;
	for (const auto& className : coconame)  // const& — avoid copying each std::string
	  std::cout << className << std::endl;

}
// Nothing to release explicitly — members clean up via their own destructors.
YOLOV8::~YOLOV8() = default;

// Run the full pipeline on one frame: preprocess -> inference -> postprocess.
// Draws results onto `frame` and refills outDetections (see getDetection()).
void YOLOV8::detect(Mat& frame) {
	preprocess_img(frame);  // fills the input tensor of infer_request
	infer_request.infer();  // synchronous OpenVINO inference

	// Fetch the raw output tensor, its shape, and a pointer to its data.
	const ov::Tensor& result = infer_request.get_output_tensor();
	ov::Shape result_shape = result.get_shape();
	float* raw = result.data<float>();

	// The _output variant both draws on the frame and records detections.
	this->postprocess_img_output(frame, raw, result_shape);
}

// Load class names (one per line) from `classesPath` into the global
// coconame list. Best-effort: on open failure a warning is printed and
// the list is left untouched.
void YOLOV8::loadClassesFromFile(std::string classesPath)
{
	std::ifstream inputFile(classesPath);
	if (!inputFile.is_open())
	{
		// Previously this failed silently, leaving an empty class list.
		std::cerr << "loadClassesFromFile: cannot open " << classesPath << std::endl;
		return;
	}
	std::string classLine;
	while (std::getline(inputFile, classLine))
	{
		// Strip a trailing '\r' so class files saved with CRLF line endings
		// don't produce names with an invisible carriage return appended.
		if (!classLine.empty() && classLine.back() == '\r')
			classLine.pop_back();
		coconame.push_back(classLine);
	}
	// No explicit close() needed: std::ifstream closes on destruction (RAII).
}

void YOLOV8::initialmodel() {
	// Read the ONNX model from disk with OpenVINO.
	ov::Core core;
	std::shared_ptr<ov::Model> model = core.read_model(this->onnx_path);
	// PrePostProcessor bakes input/output conversion steps directly into the
	// compiled graph, so preprocess_img() can hand over raw u8 image bytes.
	ov::preprocess::PrePostProcessor ppp = ov::preprocess::PrePostProcessor(model);

	// Input tensor as supplied at runtime: u8 pixels, NHWC layout, RGB order.
	ppp.input().tensor().set_element_type(ov::element::u8).set_layout("NHWC").set_color_format(ov::preprocess::ColorFormat::RGB);
	// In-graph preprocessing: convert to f32 and divide each channel by 255.
	// NOTE(review): convert_color(RGB) with an RGB source is a no-op; OpenCV
	// frames are normally BGR, so this may need BGR->RGB — confirm upstream.
	ppp.input().preprocess().convert_element_type(ov::element::f32).convert_color(ov::preprocess::ColorFormat::RGB).scale({ 255, 255, 255 });// .scale({ 112, 112, 112 });
	// The underlying network expects NCHW; outputs are read back as f32.
	ppp.input().model().set_layout("NCHW");
	ppp.output().tensor().set_element_type(ov::element::f32);
	// Embed the preprocessing, compile for CPU, and create the infer request.
	model = ppp.build();
	this->compiled_model = core.compile_model(model, "CPU");
	this->infer_request = compiled_model.create_infer_request();
}

// Letterbox `frame` to the model input size: scale it to fit while keeping
// the aspect ratio, pad the right/bottom with grey, and hand the buffer to
// the OpenVINO infer request (zero-copy). Also records rx/ry so boxes can
// be mapped back to original-image coordinates in postprocessing.
void YOLOV8::preprocess_img(Mat& frame) {
	try {

		const float width = static_cast<float>(frame.cols);
		const float height = static_cast<float>(frame.rows);
		cv::Size new_shape = cv::Size(inpWidth, inpHeight);     // model input size
		// Scale by the tighter axis so the resized image always fits inside
		// the model input. For square inputs this equals the previous
		// new_shape.width / max(width, height); for non-square inputs the old
		// formula could overflow one axis and produce negative padding.
		float r = std::min(new_shape.width / width, new_shape.height / height);
		int new_unpadW = int(round(width * r));                 // scaled width, no padding
		int new_unpadH = int(round(height * r));                // scaled height, no padding

		// Resize, then pad right/bottom up to the target size with grey.
		cv::resize(frame, resize.resized_image, cv::Size(new_unpadW, new_unpadH), 0, 0, cv::INTER_AREA);
		resize.dw = new_shape.width - new_unpadW;
		resize.dh = new_shape.height - new_unpadH;
		cv::Scalar color = cv::Scalar(100, 100, 100);
		cv::copyMakeBorder(resize.resized_image, resize.resized_image, 0, resize.dh, 0, resize.dw, cv::BORDER_CONSTANT, color);

		// Factors mapping network-space coordinates back to the original frame.
		this->rx = (float)frame.cols / (float)(resize.resized_image.cols - resize.dw);
		this->ry = (float)frame.rows / (float)(resize.resized_image.rows - resize.dh);

		// Wrap the padded image buffer in an input tensor without copying.
		// The element type comes from the compiled model (u8 — see
		// initialmodel()), so pass the bytes as void*; the previous (float*)
		// cast was misleading for a u8 buffer.
		void* input_data = resize.resized_image.data;
		input_tensor = ov::Tensor(compiled_model.input().get_element_type(), compiled_model.input().get_shape(), input_data);
		infer_request.set_input_tensor(input_tensor);
	}
	catch (const std::exception& e) {
		std::cerr << "exception: " << e.what() << std::endl;
	}
	catch (...) {
		std::cerr << "unknown exception" << std::endl;
	}

}

// Parse the raw YOLOv8 output (1 x (4 + num_classes) x num_boxes), keep
// candidates above the confidence threshold, run NMS, and draw the
// surviving boxes with "class score" labels onto `frame`. Draw-only
// variant: does not record results (see postprocess_img_output).
void YOLOV8::postprocess_img(Mat& frame, float* detections, ov::Shape& output_shape) {
	// 1. Parse the detection candidates.
	std::vector<cv::Rect> boxes;            // candidate boxes (network coordinates)
	vector<int> class_ids;                  // best class index per candidate
	vector<float> confidences;              // best class score per candidate
	int out_rows = output_shape[1];         // 4 + number of classes
	int out_cols = output_shape[2];         // number of candidate boxes
	// Wrap the raw output pointer in a Mat header (no copy).
	const cv::Mat det_output(out_rows, out_cols, CV_32F, (float*)detections);

	// Each column is one candidate: rows 0-3 are cx, cy, w, h; the
	// remaining rows hold one score per class.
	for (int i = 0; i < det_output.cols; ++i) {
		const cv::Mat classes_scores = det_output.col(i).rowRange(4, 4 + coconame.size());
		cv::Point class_id_point;
		double score;
		cv::minMaxLoc(classes_scores, nullptr, &score, nullptr, &class_id_point);

		// Use the configured threshold instead of a hard-coded 0.3 so the
		// Config::confThreshold value actually takes effect.
		if (score > this->confThreshold) {
			// Box centre and size in network input coordinates.
			const float cx = det_output.at<float>(0, i);
			const float cy = det_output.at<float>(1, i);
			const float ow = det_output.at<float>(2, i);
			const float oh = det_output.at<float>(3, i);

			// Convert centre/size to top-left/size.
			cv::Rect box;
			box.x = static_cast<int>((cx - 0.5 * ow));
			box.y = static_cast<int>((cy - 0.5 * oh));
			box.width = static_cast<int>(ow);
			box.height = static_cast<int>(oh);

			boxes.push_back(box);
			class_ids.push_back(class_id_point.y);  // row index == class id
			confidences.push_back(score);
		}
	}

	// 2. Non-maximum suppression drops overlapping duplicates.
	std::vector<int> nms_result;
	cv::dnn::NMSBoxes(boxes, confidences, this->scoreThreshold, this->nmsThreshold, nms_result);

	// 3. Collect the surviving detections.
	std::vector<Detection> output;
	for (size_t i = 0; i < nms_result.size(); i++)
	{
		Detection result;
		int idx = nms_result[i];
		result.class_id = class_ids[idx];
		result.confidence = confidences[idx];
		result.box = boxes[idx];
		output.push_back(result);
	}
	cout << "output_size:" << output.size() << endl;

	// One RNG for the whole frame: constructing std::random_device and
	// std::mt19937 per detection (as before) is needlessly expensive.
	std::random_device rd;
	std::mt19937 gen(rd());
	std::uniform_int_distribution<int> dis(100, 255);

	// 4. Draw each surviving detection.
	for (size_t i = 0; i < output.size(); i++)
	{
		auto detection = output[i];
		auto box = detection.box;
		auto classId = detection.class_id;
		// if (classId != 0) continue;
		auto confidence = detection.confidence;

		// Map the box back to original-image coordinates using rx/ry.
		box.x = this->rx * box.x;
		box.y = this->ry * box.y;
		box.width = this->rx * box.width;
		box.height = this->ry * box.height;

		float xmax = box.x + box.width;
		float ymax = box.y + box.height;

		// Random (per-box) colour for the rectangle.
		cv::Scalar color = cv::Scalar(dis(gen), dis(gen), dis(gen));
		cv::rectangle(frame, cv::Point(box.x, box.y), cv::Point(xmax, ymax), color, 3);

		// Label "<class> <score to 2 dp>" on a filled background above the box.
		std::string classString = coconame[classId] + ' ' + std::to_string(confidence).substr(0, 4);
		cv::Size textSize = cv::getTextSize(classString, cv::FONT_HERSHEY_DUPLEX, 1, 2, 0);
		cv::Rect textBox(box.x, box.y - 40, textSize.width + 10, textSize.height + 20);
		cv::rectangle(frame, textBox, color, cv::FILLED);
		cv::putText(frame, classString, cv::Point(box.x + 5, box.y - 10), cv::FONT_HERSHEY_DUPLEX, 1, cv::Scalar(0, 0, 0), 2, 0);
	}
}


// Like postprocess_img, but additionally fills outDetections (class name,
// confidence, box in original-image coordinates) for retrieval via
// getDetection(). Clears the previous frame's results first.
void YOLOV8::postprocess_img_output(Mat& frame, float* detections, ov::Shape& output_shape) {
	// 1. Parse the detection candidates.
	std::vector<cv::Rect> boxes;            // candidate boxes (network coordinates)
	vector<int> class_ids;                  // best class index per candidate
	vector<float> confidences;              // best class score per candidate
	int out_rows = output_shape[1];         // 4 + number of classes
	int out_cols = output_shape[2];         // number of candidate boxes
	outDetections.clear();                  // drop last frame's results

	// Wrap the raw output pointer in a Mat header (no copy).
	const cv::Mat det_output(out_rows, out_cols, CV_32F, (float*)detections);

	// Each column is one candidate: rows 0-3 are cx, cy, w, h; the
	// remaining rows hold one score per class.
	for (int i = 0; i < det_output.cols; ++i) {
		const cv::Mat classes_scores = det_output.col(i).rowRange(4, 4 + coconame.size());
		cv::Point class_id_point;
		double score;
		cv::minMaxLoc(classes_scores, nullptr, &score, nullptr, &class_id_point);

		// Use the configured threshold instead of a hard-coded 0.3 so the
		// Config::confThreshold value actually takes effect.
		if (score > this->confThreshold) {
			// Box centre and size in network input coordinates.
			const float cx = det_output.at<float>(0, i);
			const float cy = det_output.at<float>(1, i);
			const float ow = det_output.at<float>(2, i);
			const float oh = det_output.at<float>(3, i);

			// Convert centre/size to top-left/size.
			cv::Rect box;
			box.x = static_cast<int>((cx - 0.5 * ow));
			box.y = static_cast<int>((cy - 0.5 * oh));
			box.width = static_cast<int>(ow);
			box.height = static_cast<int>(oh);

			boxes.push_back(box);
			class_ids.push_back(class_id_point.y);  // row index == class id
			confidences.push_back(score);
		}
	}

	// 2. Non-maximum suppression drops overlapping duplicates.
	std::vector<int> nms_result;
	cv::dnn::NMSBoxes(boxes, confidences, this->scoreThreshold, this->nmsThreshold, nms_result);

	// 3. Collect the surviving detections.
	std::vector<Detection> output;
	for (size_t i = 0; i < nms_result.size(); i++)
	{
		Detection result;
		int idx = nms_result[i];
		result.class_id = class_ids[idx];
		result.confidence = confidences[idx];
		result.box = boxes[idx];
		output.push_back(result);
	}
	cout << "output_size:" << output.size() << endl;

	// One RNG for the whole frame: constructing std::random_device and
	// std::mt19937 per detection (as before) is needlessly expensive.
	std::random_device rd;
	std::mt19937 gen(rd());
	std::uniform_int_distribution<int> dis(100, 255);

	// 4. Draw each surviving detection and record it in outDetections.
	for (size_t i = 0; i < output.size(); i++)
	{
		auto detection = output[i];
		auto box = detection.box;
		auto classId = detection.class_id;
		// if (classId != 0) continue;
		auto confidence = detection.confidence;

		outputDetection outDetection;
		outDetection.confidence = confidence;   // exported result

		// Map the box back to original-image coordinates using rx/ry.
		box.x = this->rx * box.x;
		box.y = this->ry * box.y;
		box.width = this->rx * box.width;
		box.height = this->ry * box.height;

		float xmax = box.x + box.width;
		float ymax = box.y + box.height;

		// Exported box is in original-image coordinates.
		outDetection.box.x = box.x;
		outDetection.box.y = box.y;
		outDetection.box.width = box.width;
		outDetection.box.height = box.height;

		// Random (per-box) colour for the rectangle.
		cv::Scalar color = cv::Scalar(dis(gen), dis(gen), dis(gen));
		cv::rectangle(frame, cv::Point(box.x, box.y), cv::Point(xmax, ymax), color, 3);

		// Label "<class> <score to 2 dp>" on a filled background above the box.
		std::string classString = coconame[classId] + ' ' + std::to_string(confidence).substr(0, 4);
		cv::Size textSize = cv::getTextSize(classString, cv::FONT_HERSHEY_DUPLEX, 1, 2, 0);
		cv::Rect textBox(box.x, box.y - 40, textSize.width + 10, textSize.height + 20);
		cv::rectangle(frame, textBox, color, cv::FILLED);
		cv::putText(frame, classString, cv::Point(box.x + 5, box.y - 10), cv::FONT_HERSHEY_DUPLEX, 1, cv::Scalar(0, 0, 0), 2, 0);

		outDetection.className = coconame[classId];  // exported result
		outDetections.push_back(outDetection);
	}
}

// Accessor for the detections produced by the most recent
// postprocess_img_output() call (i.e. the last detect()). Returns a
// reference to the internal vector, which is cleared and refilled on every
// frame — copy it if the results must outlive the next detect() call.
std::vector<outputDetection>& YOLOV8::getDetection()
{
	return outDetections;
}
