#include "Capture.h"

#include <algorithm>
#include <cstring>
#include <string>
#include <vector>


// Construct the capture pipeline for camera `id`.
// Camera intrinsics are loaded from disk immediately.
Capture::Capture(int id, QWidget* parent)
	: camera_id(id)
{
	this->parent = parent;

	// load camera parameters (camera_params.yml)
	read_camera_param();
}

// Destructor — no explicit cleanup required here.
// NOTE(review): g_camera_onvif set in setup_camera() is not owned/released
// by this class — confirm its lifetime is managed by the caller.
Capture::~Capture()
{

}


// Set the crop rectangle (ROI) that identify() applies to each frame.
void Capture::set_rec(cv::Rect rect)
{
	_rec = rect;
}

// Switch between identify mode (flag == true) and calibrate mode.
// Identify mode downscales incoming frames by 4, calibrate mode by 2
// (the factor is consumed in new__picture()).
void Capture::set_identify(bool flag)
{
	is_identify = flag;
	is_calibrate = !flag;
	_factor = flag ? 4 : 2;
}


//推理

// Inference: run the YOLO detector and display the detected boxes.
// NOTE(review): the `mat` parameter is currently ignored — detection runs
// on the hard-coded test image "models/dogs.jpg". TODO: switch to `mat`
// once debugging is finished.
void Capture::inference_dm(cv::Mat mat)
{
	//debug: measure elapsed time
	_counter.restart();
	const std::string labelsPath = "models/coco.names";
	const std::string modelPath = "models/yolo10n_uint8.onnx";
	// NOTE(review): the detector reloads the ONNX model on every call;
	// cache it if this ever runs per-frame.
	YOLO11Detector detector(modelPath, labelsPath, false); //cpu
	cv::Mat img = cv::imread("models/dogs.jpg");

	std::vector<Detection> detections = detector.detect(img);

	cv::Mat image = img.clone();
	for (const auto& detection : detections) {
		// Skip detections below the confidence threshold
		if (detection.conf <= 0.4f)
			continue;

		// Ensure the class ID is within valid range.
		// Fixed: was `<= 0`, which also discarded class id 0
		// (the first entry of coco.names).
		if (detection.classId < 0)
			continue;

		cv::rectangle(image, cv::Point(detection.box.x, detection.box.y),
			cv::Point(detection.box.x + detection.box.width, detection.box.y + detection.box.height),
			cv::Scalar(255, 0, 0), 2, cv::LINE_AA);
	}
	qDebug() << "推理消耗：" << _counter.elapsed() << Qt::endl;

	cv::imshow("detect", image);
}

// Locate candidate DataMatrix regions in a grayscale frame.
// Pipeline: histogram equalization (debug display only) -> Gaussian blur
// -> Otsu inverse threshold -> contour search. Every contour larger than
// 1000 px² yields a bounding box, expanded by a 10 px margin and clamped
// to the image, in src coordinates.
// @param src single-channel (CV_8UC1) grayscale image
// @return bounding rectangles of candidate code regions
std::vector<cv::Rect> Capture::locate(cv::Mat src)
{
	cv::Mat img_src;
	cv::Mat imge_bgr;
	cv::Mat img_equalize;
	cv::Mat img_blur;
	cv::Mat img_thresh;

	src.copyTo(img_src);
	cv::cvtColor(img_src, imge_bgr, cv::COLOR_GRAY2BGR);

	// Histogram equalization — only shown in the debug mosaic below;
	// thresholding works on the blurred original.
	cv::equalizeHist(img_src, img_equalize);

	// Blur. Note: a 3x3 box blur used to run first, but its result was
	// immediately overwritten by this Gaussian blur, so it was removed.
	cv::GaussianBlur(img_src, img_blur, cv::Size(35, 35), 0, 0);

	// Binarize: Otsu chooses the threshold (the 100 is ignored), inverted
	// so that dark code modules become white foreground.
	cv::threshold(img_blur, img_thresh, 100, 255, cv::THRESH_OTSU | cv::THRESH_BINARY_INV);

	// Removed dead code: a 3x3 MORPH_OPEN whose result was never used,
	// a commented-out dilation, and an empty `ellipseImage` copy.

	// find contours on the thresholded image
	std::vector<std::vector<cv::Point>> contours;
	std::vector<cv::Vec4i> hierarchy;
	cv::findContours(img_thresh, contours, hierarchy, cv::RetrievalModes::RETR_TREE, cv::ContourApproximationModes::CHAIN_APPROX_SIMPLE);

	// candidate DataMatrix rectangles
	std::vector<cv::Rect> rects;

	for (const std::vector<cv::Point>& c : contours)
	{
		// drop small contours (noise)
		double a = cv::contourArea(c);
		if (a < 1000.0) continue;

		cv::Rect rect = cv::boundingRect(c);
		// expand by a 10 px margin on each side, clamped to the image
		int x = std::max(0, rect.x - 10);
		int y = std::max(0, rect.y - 10);
		cv::Rect rect_b(x, y, std::min(rect.width + 20, src.cols - x), std::min(rect.height + 20, src.rows - y));

		rects.push_back(rect_b);

		cv::rectangle(imge_bgr, rect, cv::Scalar(255, 0, 0));
	}

	// debug: show the four pipeline stages as a 2x2 mosaic
	bool debug = false;
	if (debug) {
		std::vector<cv::Mat> images;
		images.push_back(img_src);
		images.push_back(img_equalize);
		images.push_back(img_blur);
		images.push_back(img_thresh);

		int w = img_src.cols * 2;
		int h = img_src.rows * 2;
		cv::Mat result = cv::Mat::zeros(cv::Size(w + 10, h + 10), img_src.type());
		cv::Rect box(0, 0, img_src.cols, img_src.rows);

		for (int i = 0; i < 4; i++) {
			int row = i / 2;
			int col = i % 2;
			box.x = img_src.cols * col + 10 * col;
			box.y = img_src.rows * row + 10 * row;
			images[i].copyTo(result(box));
		}
		cv::imshow("all", result);
	}
	//cv::imshow("result", imge_bgr);

	return rects;
}

// Locate codes via QR-code detection — stub, not implemented yet.
// NOTE(review): presumably intended to use cv::QRCodeDetector (per the
// original "qrcodedetector" note) — confirm before implementing.
void Capture::qr_locate(cv::Mat src)
{
}

// Automatic brightness and contrast normalization.
// Stretches the grayscale histogram so that, after clipping
// `clipHistPercent` percent of the pixels (split between the dark and
// bright tails), the remaining range maps onto [0, 255].
// @param src CV_8UC1, CV_8UC3 or CV_8UC4 image
// @param dst output image, same type as src
// @param clipHistPercent percent of pixels to clip; 0 keeps the full range
void Capture::BrightnessAndContrastAuto(const cv::Mat& src, cv::Mat& dst, float clipHistPercent)
{
	CV_Assert(clipHistPercent >= 0);
	CV_Assert((src.type() == CV_8UC1) || (src.type() == CV_8UC3) || (src.type() == CV_8UC4));

	int histSize = 256;
	float alpha, beta;
	double minGray = 0, maxGray = 0;

	//to calculate grayscale histogram
	cv::Mat gray;
	if (src.type() == CV_8UC1) gray = src;
	else if (src.type() == CV_8UC3) cvtColor(src, gray, cv::COLOR_BGR2GRAY);
	else if (src.type() == CV_8UC4) cvtColor(src, gray, cv::COLOR_BGRA2GRAY);
	if (clipHistPercent == 0)
	{
		// keep full available range
		cv::minMaxLoc(gray, &minGray, &maxGray);
	}
	else
	{
		cv::Mat hist; //the grayscale histogram

		float range[] = { 0, 256 };
		const float* histRange = { range };
		bool uniform = true;
		bool accumulate = false;
		calcHist(&gray, 1, 0, cv::Mat(), hist, 1, &histSize, &histRange, uniform, accumulate);

		// calculate cumulative distribution from the histogram
		std::vector<float> accumulator(histSize);
		accumulator[0] = hist.at<float>(0);
		for (int i = 1; i < histSize; i++)
		{
			accumulator[i] = accumulator[i - 1] + hist.at<float>(i);
		}

		// locate points that cut at the required value
		float max = accumulator.back();
		clipHistPercent *= (max / 100.0); //make percent as absolute
		clipHistPercent /= 2.0; // left and right wings

		// locate left cut (int index — was indexing with a double,
		// and unbounded, which could run past the array)
		int lo = 0;
		while (lo < histSize - 1 && accumulator[lo] < clipHistPercent)
			lo++;
		minGray = lo;

		// locate right cut
		int hi = histSize - 1;
		while (hi > 0 && accumulator[hi] >= (max - clipHistPercent))
			hi--;
		maxGray = hi;
	}

	// current range; guard against a flat image (maxGray == minGray),
	// which previously caused a division by zero below
	float inputRange = static_cast<float>(maxGray - minGray);
	if (inputRange < 1.0f) inputRange = 1.0f;

	alpha = (histSize - 1) / inputRange;   // alpha expands current range to histsize range
	beta = -minGray * alpha;             // beta shifts current range so that minGray will go to 0

	// Apply brightness and contrast normalization
	// convertTo operates with saturate_cast
	src.convertTo(dst, -1, alpha, beta);

	// restore alpha channel from source 
	if (dst.type() == CV_8UC4)
	{
		int from_to[] = { 3, 3 };
		cv::mixChannels(&src, 4, &dst, 1, from_to, 1);
	}
	return;
}

// Read camera intrinsics (matrix + distortion coefficients) from
// camera_params.yml into _mtx / _dist. If the file is missing or
// unreadable, the current values are silently kept.
void Capture::read_camera_param()
{
	cv::FileStorage fs("camera_params.yml", cv::FileStorage::READ);
	if (!fs.isOpened()) return;

	fs["camera_matrix"] >> _mtx;
	fs["distortion_coefficients"] >> _dist;
	// ("image_width" was previously read into an unused local — removed.)
}

// Convert a cv::Mat to a QPixmap for display.
// QPixmap::fromImage performs a deep copy, so the returned pixmap does not
// keep a reference to mt.data.
// NOTE(review): assumes mt is 8-bit single-channel — confirm at call sites.
QPixmap Capture::pix_from_mt(cv::Mat mt)
{
	const int bytes_per_line = static_cast<int>(mt.step);
	QImage view(mt.data, mt.cols, mt.rows, bytes_per_line, QImage::Format_Grayscale8);
	return QPixmap::fromImage(view);
}

// Slot: a new frame arrived from the camera.
// Downscales the frame by _factor, then dispatches it to identify() or
// calibrate() depending on the current mode flags.
void Capture::new__picture(cv::Mat src) {

	// debug timing
	_counter.restart();

	// 2. downscale
	const int w = src.cols / _factor;
	const int h = src.rows / _factor;
	cv::Mat img_scaled;
	cv::resize(src, img_scaled, cv::Size(w, h), 0.0);

	// Lens undistortion is currently disabled; the scaled frame is used
	// as-is in both modes (the two branches were identical).
	//cv::Mat mtx_new = cv::getOptimalNewCameraMatrix(_mtx, _dist, cv::Size(w, h), 1, cv::Size(w, h));
	//cv::undistort(img_scaled, img_undistorted, _mtx, _dist, mtx_new);
	cv::Mat img_undistorted = img_scaled;

	// dispatch: identify or calibrate
	if (is_identify)
		identify(img_undistorted);

	if (is_calibrate)
		calibrate(img_undistorted);
}

// Identify: crop the configured ROI, locate candidate code regions,
// decode each one, then emit the frame and the decoded numbers.
void Capture::identify(cv::Mat src)
{
	// 3. crop to the region of interest; fall back to a default ROI the
	// first time through (x == 0 is treated as "not set yet")
	cv::Mat im_crop;
	if (_rec.x == 0) {
		_rec = cv::Rect(276, 92, 115, 115);
	}
	try {
		im_crop = src(_rec);
	}
	catch (const cv::Exception&) {   // fixed: was caught by value
		// ROI falls outside the image — skip this frame
		return;
	}

	// 4. locate candidate regions
	//inference_dm(im_crop);
	std::vector<cv::Rect> rects = locate(im_crop);

	// 5. publish the frame for display
	emit new_picture(MatImage{ camera_id, src });

	// 6. decode every candidate region
	std::vector<int> result;
	result.reserve(rects.size());   // was a magic reserve(10)

	for (const cv::Rect& r : rects) {
		try {
			cv::Mat m = im_crop(r);

			int num = detec_dm(m);
			if (num != -1) {
				result.push_back(num);
			}
		}
		catch (const std::exception& e) {   // fixed: was caught by value
			qDebug() << "crop error," + QString::fromStdString(e.what()) << Qt::endl;
		}
	}
	// publish the decoded codes
	result.shrink_to_fit();
	emit this->matrix_data(Dm{ camera_id, result });

}

// Decode one DataMatrix code from a grayscale patch using libdmtx.
// @param mat 8-bit single-channel candidate region
// @return the decoded integer, or -1 if nothing was decoded
int Capture::detec_dm(cv::Mat mat) {
	int result = -1;
	// debug timing
	_counter.restart();

	const int w = mat.cols;
	const int h = mat.rows;

	// Copy the pixels into a contiguous buffer for libdmtx.
	// Fixed: the previous malloc() was never freed (leak on every call),
	// and a failed allocation was passed straight to dmtxImageCreate.
	// libdmtx does not take ownership of the pixel buffer, so the vector
	// must outlive the DmtxImage — it lives until this function returns.
	std::vector<uchar> data(static_cast<size_t>(w) * h);
	for (int i = 0; i < h; i++)
	{
		const uchar* p = mat.ptr<uchar>(i);
		std::memcpy(&data[static_cast<size_t>(i) * w], p, w);
	}

	// decode
	DmtxImage* img = dmtxImageCreate(data.data(), w, h, DmtxPack8bppK);
	DmtxDecode* dec = dmtxDecodeCreate(img, 1);
	DmtxRegion* reg = dmtxRegionFindNext(dec, NULL);

	if (reg != NULL) {
		DmtxMessage* msg = dmtxDecodeMatrixRegion(dec, reg, DmtxFormatMatrix);
		if (msg != NULL) {
			const char* s = (const char*)msg->output;
			result = QLatin1String(s).toInt();
			dmtxMessageDestroy(&msg);
		}
		dmtxRegionDestroy(&reg);
	}

	dmtxDecodeDestroy(&dec);
	dmtxImageDestroy(&img);

	qDebug() << "识别结束：" << _counter.elapsed() << Qt::endl;
	qDebug() << "结果：" << result << Qt::endl;

	return result;
}

// Calibrate: filter and enhance the frame, then emit it for display.
void Capture::calibrate(cv::Mat src)
{
	// edge-preserving noise reduction
	cv::Mat img_filter;
	cv::bilateralFilter(src, img_filter, 5, 100, 100);

#if USE > 0
	// Debug: log the located candidate rectangles.
	// Fixed: this block referenced `img_scaled`, which was never
	// initialized (its resize was commented out) — it now uses the
	// filtered image, and exceptions are caught by const reference.
	std::vector<cv::Rect> rects = locate(img_filter);
	for (const cv::Rect& r : rects) {
		try {
			// skip rectangles spanning (almost) the full frame width
			if (r.width >= (img_filter.cols - 10))
				continue;

			cv::Mat m = img_filter(r);
			qDebug() << QString("%1,%2,%3,%4").arg(r.x).arg(r.y).arg(r.width).arg(r.height) << Qt::endl;

		}
		catch (const std::exception& e) {
			qDebug() << "crop error," + QString::fromStdString(e.what()) << Qt::endl;
		}
	}
#endif 

	// image enhancement: auto brightness/contrast
	cv::Mat img_enhance;
	BrightnessAndContrastAuto(img_filter, img_enhance);

	emit new_picture(MatImage{ camera_id, img_enhance });
}

// Wire up the ONVIF camera: remember the pointer and, when it is valid,
// route its snapshot signal into new__picture().
void Capture::setup_camera(Camera_onvif* camera)
{
	g_camera_onvif = camera;
	if (g_camera_onvif == nullptr)
		return;
	connect(g_camera_onvif, &Camera_onvif::sign_snap_rev, this, &Capture::new__picture);
}
