#include <iostream>
#include <opencv2/opencv.hpp>
#include<thread>
#include<time.h>
#include"track.h"
#include"detectTrack.h"
#include"threadPooling.h"

// cv::Mat kernel0 = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(15, 15));
// cv::Mat kernel1 = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(7, 7));
// cv::Mat kernel2 = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(5, 5));
// cv::Mat kernel3 = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(3, 3));
// Mean of pixel values strictly above 20 (ignores near-black background).
// The result is truncated to an integer, matching the original contract.
// Assumes a single-channel 8-bit image — TODO(review): confirm at call sites.
double calculateMean_small(cv::Mat image) {
	double sum = 0.0;
	int pnum = 0;
	for (int i = 0; i < image.rows; i++) {
		for (int j = 0; j < image.cols; j++) {
			int pval = image.at<uchar>(i, j);
			if (pval > 20) {
				sum += pval;
				pnum += 1;
			}
		}
	}
	// BUG FIX: the old code divided by pnum unconditionally; with no pixel
	// above 20 that was a division by zero.
	if (pnum == 0) {
		return 0;
	}
	return int(sum / pnum);
}
// Morphological top-hat: image minus its 3x3 erosion, then Otsu-binarized.
// Highlights small bright structures; result written to outImage.
void TopHat(cv::Mat& image,cv::Mat& outImage) {
	const cv::Mat se = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(3, 3));

	cv::Mat eroded;
	cv::erode(image, eroded, se);

	cv::Mat residue;
	cv::subtract(image, eroded, residue);

	cv::threshold(residue, outImage, 0, 255, cv::THRESH_BINARY | cv::THRESH_OTSU);
}

// Morphological black-hat: 3x3 dilation minus the image, then Otsu-binarized.
// Highlights small dark structures; result written to outImage.
void blackHat(cv::Mat& image, cv::Mat& outImage) {
	const cv::Mat se = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(3, 3));

	cv::Mat dilated;
	cv::dilate(image, dilated, se);

	cv::Mat residue;
	cv::subtract(dilated, image, residue);

	cv::threshold(residue, outImage, 0, 255, cv::THRESH_BINARY | cv::THRESH_OTSU);
}

// Combined top-hat + black-hat response:
//   (image - erode(image)) + (dilate(image) - image)
// which emphasizes both small bright and small dark structures.
// BUG FIX: the old code summed subtractImageD with itself, discarding the
// erosion-based (top-hat) half it had just computed. Unused locals (maxLoc,
// addImage) removed.
void blackTopHat(cv::Mat& image, cv::Mat& outImage) {
	cv::Mat kernel = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(3, 3));

	cv::Mat erodeImage, subtractImageE;
	cv::erode(image, erodeImage, kernel);
	cv::subtract(image, erodeImage, subtractImageE);

	cv::Mat dilateImage, subtractImageD;
	cv::dilate(image, dilateImage, kernel);
	cv::subtract(dilateImage, image, subtractImageD);

	outImage = subtractImageE + subtractImageD;

	//cv::threshold(addImage, outImage, 0, 255, cv::THRESH_BINARY | cv::THRESH_OTSU);
}
// Horizontal (x) first-derivative via the Scharr operator, converted back
// to 8-bit absolute values in `output`.
void computeScharrX(const cv::Mat &input, cv::Mat &output)
{
	cv::Mat dx64;
	cv::Scharr(input, dx64, CV_64F, 1, 0);
	cv::convertScaleAbs(dx64, output);
}
// Vertical (y) first-derivative via the Scharr operator, converted back
// to 8-bit absolute values in `output`.
void computeScharrY(const cv::Mat &input, cv::Mat &output)
{
	cv::Mat dy64;
	cv::Scharr(input, dy64, CV_64F, 0, 1);
	cv::convertScaleAbs(dy64, output);
}

// Horizontal (x) first-derivative via the Sobel operator, converted back
// to 8-bit absolute values in `output`.
void computeSobelX(const cv::Mat &input, cv::Mat &output)
{
	cv::Mat dx64;
	cv::Sobel(input, dx64, CV_64F, 1, 0);
	cv::convertScaleAbs(dx64, output);
}
// Vertical (y) first-derivative via the Sobel operator, converted back
// to 8-bit absolute values in `output`.
void computeSobelY(const cv::Mat &input, cv::Mat &output)
{
	cv::Mat dy64;
	cv::Sobel(input, dy64, CV_64F, 0, 1);
	cv::convertScaleAbs(dy64, output);
}


// Median of `values`. NOTE: the vector is sorted in place as a side effect.
// An empty input yields 0.0; even-sized inputs average the two middle
// elements; odd-sized inputs return the middle element.
template <typename T>
double median(std::vector<T>& values) {
	const size_t n = values.size();
	if (n == 0) {
		return 0.0;
	}

	std::sort(values.begin(), values.end());

	const size_t mid = n / 2;
	if (n % 2 != 0) {
		// Odd count: exact middle element.
		return static_cast<double>(values[mid]);
	}
	// Even count: mean of the two central elements.
	return (values[mid - 1] + values[mid]) / 2.0;
}
// Mean of pixel values strictly below 245 (ignores saturated/near-white
// pixels). Truncated to an integer; yields 0 when no pixel qualifies (the
// epsilon keeps the division finite). Removed unused locals width/height/
// total, whose names also swapped rows and cols.
// Assumes a single-channel 8-bit image — TODO(review): confirm at call sites.
double calculateMean(cv::Mat image) {
	double sum = 0.0;
	int pnum = 0;
	const double e = 0.000001; // avoids division by zero when pnum == 0

	for (int i = 0; i < image.rows; i++) {
		for (int j = 0; j < image.cols; j++) {
			int pval = image.at<uchar>(i, j);
			if (pval < 245) {
				sum += pval;
				pnum += 1;
			}
		}
	}
	return int(sum / (pnum + e));
}
// Locate the tracked object inside `cutimg` by matching Canny contours
// against the previous bounding box `prerect`.
//   keypoints - FAST keypoints of cutimg; the strongest response sets the
//               Canny hysteresis thresholds (0.8x / 1.2x).
//   opnenroi  - position of cutimg inside the full image (only .x/.y used).
//   prerect   - previous bounding box in full-image coordinates.
//   pre_img   - currently unused; kept for interface compatibility (the
//               legacy correlation code that used it was dead).
// Returns the best contour center in full-image coordinates, or (0,0) when
// no contour passes the distance/area gates.
cv::Point2i find_contour(std::vector<cv::KeyPoint> &keypoints, cv::Mat &cutimg, cv::Rect opnenroi, cv::Rect prerect, cv::Mat &pre_img) {
	(void)pre_img;

	// ROBUSTNESS: the old code dereferenced std::max_element on a possibly
	// empty range (UB). Bail out early instead.
	if (keypoints.empty()) {
		return cv::Point2i(0, 0);
	}

	int x = prerect.x;
	int y = prerect.y;
	int w = prerect.width;
	int h = prerect.height;
	int xc = x + w / 2;
	int yc = y + h / 2;
	int x1 = opnenroi.x;
	int y1 = opnenroi.y;

	// Canny thresholds from the strongest keypoint response.
	int threshold = static_cast<int>(std::max_element(keypoints.begin(), keypoints.end(),
		[](const cv::KeyPoint &a, const cv::KeyPoint &b) {
		return a.response < b.response; })->response);

	int lower_threshold = static_cast<int>(threshold * 0.8);
	int upper_threshold = static_cast<int>(threshold * 1.2);

	cv::Mat scimg;
	cv::Canny(cutimg, scimg, lower_threshold, upper_threshold);

	std::vector<std::vector<cv::Point>> contours;
	cv::findContours(scimg, contours, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_SIMPLE);

	int mindist = 10000;
	int lxc = 0;
	int lyc = 0;

	for (const auto &contour : contours) {
		cv::Rect bounding_rect = cv::boundingRect(contour);

		// Contour center, shifted into full-image coordinates.
		int xx_centr = bounding_rect.x + bounding_rect.width / 2 + x1;
		int yy_centr = bounding_rect.y + bounding_rect.height / 2 + y1;

		// Gate 1: reject centers drifting more than 15px (L1) from the
		// previous center.
		int dist1 = std::abs(xx_centr - xc) + std::abs(yy_centr - yc);
		if (dist1 > 15) {
			continue;
		}

		// Gate 2: reject contours whose area changed by more than 150%
		// relative to the previous box area.
		int area1 = w * h;
		int dist2 = std::abs(bounding_rect.width * bounding_rect.height - area1);
		double val = static_cast<double>(dist2) / area1;
		if (val > 1.5) {
			continue;
		}

		// Combined score: 30% position drift + 70% area drift; keep the best.
		int dist = static_cast<int>(0.3 * dist1 + 0.7 * dist2);
		if (dist < mindist) {
			mindist = dist;
			lxc = xx_centr;
			lyc = yy_centr;
		}
	}

	return cv::Point2i(lxc, lyc);
}


// Canny edge map with hysteresis thresholds [th_low, th_high], followed by
// a 9x9 morphological close to bridge small gaps between edge segments.
void computeCanny(const cv::Mat &input, cv::Mat &output, const int &th_low, const int &th_high)
{
	cv::Mat raw_edges;
	cv::Canny(input, raw_edges, th_low, th_high);

	const cv::Mat close_kernel = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(9, 9));
	cv::morphologyEx(raw_edges, output, cv::MORPH_CLOSE, close_kernel);
}




// Legacy constructor (kept commented out for reference; superseded by the
// DETECTTRACK() definition below):
//DETECTTRACK::DETECTTRACK(CentroidTracking& centTrack) :centTrack(), flag(0),preimg(cv::Mat()),
//pre_track(0, 0, 0, 0), precentr(0, 0), missframe(0), maxmissframe(10) {
//
//	cv::Ptr<cv::ORB> orb = cv::ORB::create(10, 1.2f, 8, 31, 0, 2, cv::ORB::HARRIS_SCORE, 31, 20);
//	cv::Ptr<cv::FastFeatureDetector> fast_small = cv::FastFeatureDetector::create(10, true, cv::FastFeatureDetector::TYPE_9_16);
//	cv::Ptr<cv::FastFeatureDetector> fast_big = cv::FastFeatureDetector::create(20, true, cv::FastFeatureDetector::TYPE_9_16);
//
//}

// Two-argument constructor.
// BUG FIX: the old body contained "DETECTTRACK();", which constructs and
// immediately destroys an anonymous temporary — it does NOT initialize
// *this*, so flag/pre_track/detectors etc. were left uninitialized. Use a
// real delegating constructor instead.
// NOTE(review): centTrack and pool are still ignored here, matching the
// previous (broken) intent of just running the default setup — confirm
// whether they should be stored in the corresponding members.
DETECTTRACK::DETECTTRACK(CentroidTracking& centTrack, ThreadPool& pool) : DETECTTRACK() {
}

// Default constructor: zero-initializes tracking state (no previous frame,
// empty previous box/centroid, no missed frames, 10-frame miss tolerance)
// and creates the feature detectors.
DETECTTRACK::DETECTTRACK() : centTrack(), flag(0), preimg(cv::Mat()),
pre_track(0, 0, 0, 0), precentr(0, 0), missframe(0), maxmissframe(10) {
	// orb:        ORB detector/descriptor (max 10 features) used to re-match
	//             a lost target between consecutive frames.
	// fast_small: low-threshold FAST (10) for fine keypoint detection.
	// fast_big:   higher-threshold FAST (20) used to estimate edge thresholds.
	orb = cv::ORB::create(10, 1.2f, 8, 31, 0, 2, cv::ORB::HARRIS_SCORE, 31, 20);
	fast_small = cv::FastFeatureDetector::create(10, true, cv::FastFeatureDetector::TYPE_9_16);
	fast_big = cv::FastFeatureDetector::create(20, true, cv::FastFeatureDetector::TYPE_9_16);
}


void DETECTTRACK::fast_point(cv::Mat gray) {
	// Unimplemented stub — declared in the header but currently a no-op.
	// TODO(review): implement or remove.

}

// Estimate an edge-strength threshold for `gray`: run the coarse FAST
// detector (fast_big) on a half-resolution copy and return the median
// keypoint response. Returns 0.0 when the detector is missing or finds
// no keypoints.
double DETECTTRACK::fast_get_thresh(cv::Mat gray) {
	// Timing hook retained from the original (the printout is disabled).
	double t0 = static_cast<double>(cv::getTickCount());
	(void)t0;

	// Detect on a half-size image to keep this cheap.
	cv::Mat half;
	cv::resize(gray, half, cv::Size(gray.cols / 2, gray.rows / 2));

	if (!fast_big) {
		// Detector was never created — report and fall back to 0.
		std::cerr << "Error: fast_big is not initialized." << std::endl;
		return 0.0;
	}

	std::vector<cv::KeyPoint> kps;
	fast_big->detect(half, kps);

	// Collect the per-keypoint response strengths.
	std::vector<float> responses;
	responses.reserve(kps.size());
	for (const auto& kp : kps) {
		responses.push_back(kp.response);
	}

	// Median via partial selection (no full sort needed).
	double medval = 0.0;
	if (!responses.empty()) {
		auto mid = responses.begin() + responses.size() / 2;
		std::nth_element(responses.begin(), mid, responses.end());
		medval = *mid;
	}

	////std::cout << "fast_get_thresh time: " << (static_cast<double>(cv::getTickCount()) - t0) / cv::getTickFrequency() << std::endl;
	return medval;
}

// Run the fine-grained FAST detector (fast_small) on `gray` and return the
// detected keypoints.
std::vector<cv::KeyPoint> DETECTTRACK::fast_keypoints(cv::Mat gray) {
	std::vector<cv::KeyPoint> detected;
	fast_small->detect(gray, detected);
	return detected;
}

// Filter `contours`, keeping those that are both large relative to the image
// (area > thr1 * image area) and touch the top border (at least one vertex
// with y == 0) — i.e. plausible sky regions.
//   thr2 - currently unused; kept for interface compatibility.
//   image - only its dimensions are used.
// Removed: unused local `x` and the dead commented-out mask code.
std::vector<std::vector<cv::Point>> DETECTTRACK::calculate_contours(std::vector<std::vector<cv::Point>> contours, double thr1, double thr2, cv::Mat image) {
	(void)thr2;
	const double image_area = static_cast<double>(image.rows) * image.cols;
	std::vector<std::vector<cv::Point>> out_contours;

	for (const auto& contour : contours) {
		// Gate 1: the contour must cover a meaningful fraction of the image.
		double area = cv::contourArea(contour);
		if (area <= thr1 * image_area) {
			continue;
		}

		// Gate 2: the contour must reach the top image border.
		const auto on_top_border = [](const cv::Point& p) { return p.y == 0; };
		if (std::count_if(contour.begin(), contour.end(), on_top_border) > 0) {
			out_contours.push_back(contour);
		}
	}

	return out_contours;
}

// Thread-pool variant of get_sky_roi: segments the sky region of `image`
// from a weighted blend of Sobel, Scharr and black-top-hat responses.
// Outputs:
//   out_contours - large contours touching the top border (sky candidates);
//   edge_Scharr2 - Scharr edge map plus the black-top-hat response.
void DETECTTRACK::get_sky_roi2(cv::Mat image, std::vector<std::vector<cv::Point>>& out_contours, cv::Mat& edge_Scharr2) {
	// Implementation of get_sky_roi function
	   cv::Mat scharrx, scharry, sobelx,sobely, edge_Scharr, MORPH_CLOSE_NOT, MORPH_CLOSE;
	   cv::Mat sobelxy,tbHat, outtbHat;


	auto t0 = std::chrono::high_resolution_clock::now();
	// Run the derivative/morphology filters in parallel on the thread pool.
	
	auto future1 = pool.enqueue(computeScharrX,std::ref(image), std::ref(scharrx));
	auto future2 = pool.enqueue(computeScharrY, std::ref(image), std::ref(scharry));
	auto future3 = pool.enqueue(computeSobelX, std::ref(image), std::ref(sobelx));
	auto future4 = pool.enqueue(computeSobelY, std::ref(image), std::ref(sobely));
	auto future5 = pool.enqueue(blackTopHat, std::ref(image), std::ref(tbHat));
	//auto future6 = pool.enqueue(computeCanny, std::ref(image), std::ref(edges_Canny), std::ref(th_low), std::ref(th_high));

	// Wait for all filter tasks to finish before combining their outputs.
	future1.wait();
	future2.wait();
	future3.wait();
	future4.wait();
	future5.wait();
	//future6.wait();

	auto t1 = std::chrono::high_resolution_clock::now();
	auto usetime = std::chrono::duration_cast<std::chrono::milliseconds>(t1 - t0);
	//std::cout << "derivation use time::" << usetime.count() << "ms" << std::endl;

	// Combine the responses; edge_Scharr2 is returned to the caller.
	sobelxy = sobelx + sobely;
	edge_Scharr = scharrx + scharry;
	edge_Scharr2 = edge_Scharr.clone()+ tbHat;


	cv::Mat outallIm;
	cv::Mat allIm;
	// Weighted blend of the three edge responses, binarized at the mean of
	// its non-background (>20) pixels.
	allIm = 0.3*sobelxy + 0.3*edge_Scharr + 0.4*tbHat;
	//allIm = 0.5*sobelxy + 0.5*tbHat;
	int thrLow1 = calculateMean_small(allIm);
	cv::threshold(allIm, outallIm, thrLow1, 255, cv::THRESH_BINARY);

	
	//cv::threshold(MORPH_CLOSE_NOT, MORPH_CLOSE_NOT, thrLow, 255, cv::THRESH_BINARY);
	// Dilate to merge nearby edge fragments into connected regions.
	cv::Mat kernel1 = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(5, 5));
	cv::dilate(outallIm, MORPH_CLOSE, kernel1);
	// cv::bitwise_not(MORPH_CLOSE, MORPH_CLOSE_NOT);
	// int thrLow = calculateMean(MORPH_CLOSE_NOT);
	// cv::threshold(MORPH_CLOSE_NOT, MORPH_CLOSE_NOT, thrLow, 255, cv::THRESH_BINARY);
	//cv::imwrite("zxl/outallIm.jpg", outallIm);
	//cv::imwrite("zxl/allIm.jpg", allIm);
	//cv::imwrite("zxl/sobelxy.jpg", sobelxy);
	//cv::imwrite("zxl/tbHat.jpg", tbHat);
	//cv::imwrite("zxl/edge_Scharr.jpg", edge_Scharr);
	//cv::dilate(MORPH_CLOSE, MORPH_CLOSE, kernel1);
	//cv::bitwise_not(MORPH_CLOSE, MORPH_CLOSE_NOT);


    std::vector<std::vector<cv::Point>> contours;
    std::vector<cv::Vec4i> hierarchy;
    cv::findContours(MORPH_CLOSE, contours, hierarchy, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_SIMPLE);

    // Keep large contours touching the top border (see calculate_contours).
    out_contours = calculate_contours(contours, 0.1, 0.25, edge_Scharr);
	//std::cout << "............"<< std::endl;

}





// Segment the sky region of `image` from Scharr + Canny edge responses.
// Outputs:
//   out_contours - candidate sky contours; recomputed only when no tracks
//                  are active (tmp_trackes empty), otherwise the cached
//                  tmp_contours template is reused;
//   edge_Scharr2 - combined |Scharr x| + |Scharr y| edge map (always fresh).
// REFACTOR: the Scharr X/Y thread launches were duplicated verbatim in both
// branches of the tmp_trackes check; they are now launched once and only the
// Canny thread is conditional. Behavior is unchanged.
void DETECTTRACK::get_sky_roi(cv::Mat image, std::vector<std::vector<cv::Point>>& out_contours, cv::Mat& edge_Scharr2) {
	cv::Mat scharrx, scharry, edges_Canny, edge_Scharr, MORPH_CLOSE_NOT, scharr_canny46, MORPH_CLOSE;

	// Canny hysteresis thresholds derived from the median FAST response.
	double th = fast_get_thresh(image);
	int th_low = static_cast<int>(th * 0.8);
	int th_high = static_cast<int>(th * 1.2);//1.5

	// The sky template only needs rebuilding when no tracks are active.
	const bool rebuild_template = tmp_trackes.empty();

	std::thread threadX(computeScharrX, std::ref(image), std::ref(scharrx));
	std::thread threadY(computeScharrY, std::ref(image), std::ref(scharry));
	if (rebuild_template) {
		// Canny is only needed to rebuild the template.
		std::thread threadC(computeCanny, std::ref(image), std::ref(edges_Canny), std::ref(th_low), std::ref(th_high));
		threadC.join();
	}
	threadX.join();
	threadY.join();

	edge_Scharr = scharrx + scharry;
	edge_Scharr2 = edge_Scharr.clone();
	cv::Mat kernel1 = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(9, 9));

	if (rebuild_template)
	{
		// Blend Scharr and Canny edges, dilate, invert: low-gradient regions
		// (sky) become bright, then keep pixels above the mean.
		cv::addWeighted(edge_Scharr, 0.4, edges_Canny, 0.6, 0, scharr_canny46);
		cv::dilate(scharr_canny46, MORPH_CLOSE, kernel1);
		cv::bitwise_not(MORPH_CLOSE, MORPH_CLOSE_NOT);

		int thrLow = calculateMean(MORPH_CLOSE_NOT);
		cv::threshold(MORPH_CLOSE_NOT, MORPH_CLOSE_NOT, thrLow, 255, cv::THRESH_BINARY);

		std::vector<std::vector<cv::Point>> contours;
		std::vector<cv::Vec4i> hierarchy;
		cv::findContours(MORPH_CLOSE_NOT, contours, hierarchy, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_SIMPLE);

		// Keep large contours touching the top border; cache for reuse.
		out_contours = calculate_contours(contours, 0.25, 0.25, edge_Scharr);
		tmp_contours = out_contours;
	}
	else
	{
		out_contours = tmp_contours;
	}
}

// Find candidate objects inside the sky region: mask the edge map with the
// first sky contour, keep strong edges (>200), close small gaps, and return
// bounding boxes of blobs with area > 50.
std::vector<cv::Rect> DETECTTRACK::get_sky_ob(std::vector<std::vector<cv::Point>>& out_contours, cv::Mat& edge_Scharr2) {
	std::vector<cv::Rect> trackes;

	// ROBUSTNESS: the old code indexed out_contours[0] unconditionally.
	if (out_contours.empty() || edge_Scharr2.empty()) {
		return trackes;
	}

	// Rasterize the first sky contour into a binary mask.
	cv::Mat mask = cv::Mat::zeros(edge_Scharr2.size(), CV_8U);
	std::vector<std::vector<cv::Point>> contours_vec{ out_contours[0] };
	cv::drawContours(mask, contours_vec, -1, 255, cv::FILLED);

	// .......................... get sky roi .............................................
	// Restrict the edge map to the sky region.
	// BUG FIX: the old call passed "mask = mask" (a pointless self-assignment
	// expression) as the mask argument; pass the mask directly.
	cv::Mat result;
	cv::bitwise_and(edge_Scharr2, edge_Scharr2, result, mask);

	// ......................... get ob..................................................
	// Strong edges inside the sky are candidate objects.
	cv::Mat Scharr_img, Scharr_img_close;
	cv::threshold(result, Scharr_img, 200, 255, cv::THRESH_BINARY);

	cv::Mat kernel2 = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(3, 3));
	cv::morphologyEx(Scharr_img, Scharr_img_close, cv::MORPH_CLOSE, kernel2);

	std::vector<std::vector<cv::Point>> contours3;
	cv::findContours(Scharr_img_close, contours3, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_SIMPLE);

	// Keep only blobs with a meaningful area.
	for (const auto& cont : contours3) {
		if (cv::contourArea(cont) > 50) {
			trackes.push_back(cv::boundingRect(cont));
		}
	}

	return trackes;
}

// Re-acquire a target that was lost by the sky detector.
// Searches a window of (2*val+1) times the previous box `rect` in both
// `pre_img` (previous frame) and `image` (current frame):
//   1. contour matching near the previous box (find_contour);
//   2. if that fails, nearest FAST keypoint to the previous centroid,
//      refined by ORB descriptor matching between the two crops;
//   3. if no good match, the bad-match keypoint with an outlier response.
// Returns the estimated center in full-image coordinates, or (0,0) when
// nothing is found.
cv::Point2i DETECTTRACK::track_outsky_ob(cv::Rect& rect, cv::Mat& pre_img, cv::Mat& image, int val) {
	// Implementation of track_outsky_ob function
	int lxc = 0;
	int lyc = 0;
	int x = rect.x;
	int y = rect.y;
	int w = rect.width;
	int h = rect.height;
	int xc = x + w / 2;
	int yc = y + h / 2;

	// Search window: the previous box expanded by `val` box-sizes on each
	// side, clamped to the image bounds.
	int W = image.cols;
	int H = image.rows;
	int y1 = y - val * h;
	if (y1 <= 0) {
		y1 = 0;
	}
	int y2 = y + (val + 1) * h;
	if (y2 > H) {
		y2 = H;
	}
	int x1 = x - val * w;
	if (x1 < 0) {
		x1 = 0;
	}
	int x2 = x + (val + 1) * w;
	if (x2 > W) {
		x2 = W;
	}

	////std::cout << "cutimgsywh::" << y1 << ";" << y2 << ";" << x1 << ";" << x2 << std::endl;
	cv::Mat cutimg = image(cv::Range(y1, y2), cv::Range(x1, x2));
	cv::Mat precutimg = pre_img(cv::Range(y1, y2), cv::Range(x1, x2));

	// FAST keypoints of the current and previous crops.
	std::vector<cv::KeyPoint> fks = fast_keypoints(cutimg);
	std::vector<cv::KeyPoint> fkspre = fast_keypoints(precutimg);

	if (fks.empty()) {
		// No features at all — report "not found".
		return cv::Point2i(lxc, lyc);
	}

	// Strategy 1: contour matching around the previous box.
	cv::Point2i contour_result = find_contour(fks, cutimg, { x1, y1, x2, y2 }, rect, pre_img);
	////std::cout << "find_contour:lxc,lyc:::" << contour_result.x << ";" << contour_result.y << std::endl;

	if (contour_result.x > 0 && contour_result.y > 0) {
		////std::cout << ".....use contour to find ob...." << std::endl;
		return contour_result;
	}
	else {
		// Strategy 2: keypoint nearest to the previous centroid, refined by
		// ORB matching between the previous and current crops.
		////std::cout << ".....use point to find ob...." << std::endl;
		int target_idx = -1;
		////std::cout << "self.precentr::" << precentr << std::endl;
		////std::cout << "self.pre_track::" << pre_track << std::endl;
		int px, py, pw, ph;
		px = pre_track.x;
		py = pre_track.y;
		pw = pre_track.width;
		ph = pre_track.height;

		int pcx, pcy;
		pcx = precentr.x;
		pcy = precentr.y;

		// Nearest previous-frame keypoint (L1 distance) to the previous
		// centroid, within a 100px budget.
		double dis_thre = 100;
		for (size_t i = 0; i < fkspre.size(); ++i) {
			double fkx = fkspre[i].pt.x + x1;
			double fky = fkspre[i].pt.y + y1;
			double dis = std::abs(fkx - pcx) + std::abs(fky - pcy);
			//std::cout << "fk_coord::" << fkx << ";" << fky << std::endl;

			if (dis < dis_thre) {
				dis_thre = dis;
				target_idx = i;
				//std::cout << "target_idx::" << target_idx << std::endl;
			}
		}

		if (target_idx > -1) {
			// Fallback location: the current-frame keypoint at target_idx,
			// pre-shifted into full-image coordinates.
			// NOTE(review): if this initial value survives (no ORB override),
			// x1/y1 are added again at the return below — looks like a
			// double-offset; confirm intended behavior before changing.
			cv::Point2f mat_ob = fks[target_idx].pt + cv::Point2f(x1, y1);

			// ORB descriptors on both crops, matched with 2-NN + ratio test.
			cv::Mat des1, des2;
			std::vector<cv::KeyPoint> kp1, kp2;
			orb->detectAndCompute(cutimg, cv::noArray(), kp1, des1);
			orb->detectAndCompute(precutimg, cv::noArray(), kp2, des2);




			cv::BFMatcher bf;
			std::vector<std::vector<cv::DMatch>> matches;
			bf.knnMatch(des1, des2, matches, 2);

			std::vector<cv::DMatch> good_matches;
			std::vector<int> bad_matches_indx;
			std::vector<double> bad_matches_response;
			std::vector<cv::Point2f> bad_matches_pt;
			//cv::Point2f mat_ob(0, 0);

			// Lowe-style ratio test (0.9); good matches may pin mat_ob to the
			// current keypoint matched against the target; failed matches are
			// kept as outlier candidates.
			for (auto& match : matches) {
				if (match[0].distance < 0.9 * match[1].distance) {
					good_matches.push_back(match[0]);

					if (match[0].trainIdx == target_idx) {
						mat_ob = fks[match[0].queryIdx].pt;
					}
				}
				else {
					bad_matches_indx.push_back(match[0].queryIdx);
					bad_matches_response.push_back(fks[match[0].queryIdx].response);
					bad_matches_pt.push_back(fks[match[0].queryIdx].pt);
				}
			}

	
			//std::cout << "ob in keypoints" << std::endl;
			if (mat_ob.x != 0 && mat_ob.y != 0) {
				// Shift the matched point into full-image coordinates.
				// (These locals intentionally shadow the outer lxc/lyc.)
				int lxc = static_cast<int>(mat_ob.x) + x1;
				int lyc = static_cast<int>(mat_ob.y) + y1;
				return cv::Point2d(lxc, lyc);
			}
			else
			{
				// Strategy 3: among the ratio-test failures, pick the keypoint
				// whose response is an outlier (>10 above the median response).
				//std::cout << "not find good match ob" << std::endl;
				double max_bad_response = 0;
				int indx = -1;

				for (size_t i = 0; i < bad_matches_response.size(); ++i) {
					double bmr = bad_matches_response[i];
					if (bmr > max_bad_response) {
						max_bad_response = bmr;
						indx = i;
					}
				}

				double medresponse = median(bad_matches_response);
				if (max_bad_response - medresponse > 10) {
					//std::cout << "............>10............." << std::endl;
					cv::Point2i lxc_lyc = bad_matches_pt[indx] + cv::Point2f(x1, y1);
					//std::cout << "lxc, lyc:::" << lxc_lyc << std::endl;
					return lxc_lyc;
				}
				else {
					// No confident outlier — report "not found" (0,0).
					//std::cout << "lxc, lyc:::" << lxc << ";" << lyc << std::endl;
					return cv::Point2i(lxc, lyc);
				}
			}
		}
		else{
			// No previous keypoint near the centroid — "not found".
			return cv::Point2i(lxc, lyc);
		}

	}
}
// Detect and track a sky object in frame `im`.
// Returns the tracked bounding box in full-image coordinates, or an empty
// rect (0,0,0,0) when no trustworthy target exists this frame.
cv::Rect DETECTTRACK::update(cv::Mat& im) {

	// Work on a centered crop: cutEdge pixels trimmed from each border to
	// avoid edge artifacts; results are shifted back before returning.
	int cutEdge = 200;
	int rw = im.cols - cutEdge;
	int rh = im.rows - cutEdge;
	cv::Mat image = im(cv::Range(cutEdge, rh), cv::Range(cutEdge, rw));
	//cv::GaussianBlur(image,image,cv::Size(3,3),0);

	// Convert 3-channel input to grayscale.
	int numChannels = image.channels();
	if (numChannels == 3) {
		cv::cvtColor(image, image, cv::COLOR_BGR2GRAY);
	}

	// Sky-region segmentation; bail out when no sky contour is found.
	std::vector<std::vector<cv::Point>> out_contours;
	cv::Mat edge_Scharr2;
	get_sky_roi(image, out_contours, edge_Scharr2);
	//get_sky_roi2(image, out_contours, edge_Scharr2);
	if (out_contours.empty()) {
		return cv::Rect(0, 0, 0, 0);
	}

	// Candidate objects inside the sky region.
	std::vector<cv::Rect> trackes = get_sky_ob(out_contours, edge_Scharr2);
	if (trackes.empty()) {
		return cv::Rect(0, 0, 0, 0);
	}

	// Multi-frame association via the centroid tracker.
	TrackingResult tresult = centTrack.update(trackes);
	if (tresult.objects.empty()) {
		return cv::Rect(0, 0, 0, 0);
	}
	std::unordered_map<int, int> frameCount = tresult.frameCount;
	std::unordered_map<int, cv::Rect> outrects = tresult.outrects;
	std::unordered_map<int, cv::Point> objects = tresult.objects;

	// Pick the track observed for the most frames.
	// BUG FIX: maxframekey was uninitialized; with an empty frameCount it
	// could be read before ever being assigned. Initialize to -1 and guard.
	int maxframe = 0;
	int maxframekey = -1;
	for (auto it = frameCount.begin(); it != frameCount.end(); it++) {
		int keyFrame = it->first;
		int valFrame = it->second;
		if (valFrame >= maxframe) {
			maxframe = valFrame;
			maxframekey = keyFrame;
		}
	}

	// Accept only a track persisting longer than frameThresh frames.
	if (maxframekey != -1 && objects.size() > 0 && maxframe > frameThresh) {

		flag += 1;
		if (flag == 1) {
			// First confirmed detection: remember it as the reference state.
			cv::Point betterob = objects[maxframekey];
			cv::Rect betterrect = outrects[maxframekey];

			// update
			preimg = image;
			pre_track = betterrect;
			precentr = betterob;

			// Shift back into full-image coordinates.
			betterrect.x += cutEdge;
			betterrect.y += cutEdge;

			return betterrect;
		}
		else {
			cv::Point betterob = objects[maxframekey];
			cv::Rect betterrect = outrects[maxframekey];
			//std::cout << "betterrect::" << betterrect << std::endl;

			// Reject implausible jumps: L1 distance from the previous center,
			// normalized by the previous box perimeter proxy (pw+ph), must
			// stay within a factor of 10.
			int px, py, pw, ph;
			std::tie(px, py, pw, ph) = std::make_tuple(pre_track.x, pre_track.y, pre_track.width, pre_track.height);
			int pcx = px + pw / 2;
			int pcy = py + ph / 2;
			int bcx = betterob.x;
			int bcy = betterob.y;
			int dis = abs(bcx - pcx) + abs(bcy - pcy);
			double dis_val = static_cast<double>(dis) / (pw + ph);
			if (dis_val > 10) {
				return cv::Rect(0, 0, 0, 0);
			}
			else {
				// Accept and roll the reference state forward.
				pre_track = betterrect;
				preimg = image;
				precentr = betterob;

				betterrect.x += cutEdge;
				betterrect.y += cutEdge;

				return betterrect;
			}
		}
	}
	else {
		return cv::Rect(0, 0, 0, 0);
	}
}

// Out-parameter variant of update() built on the thread-pool pipeline
// (get_sky_roi2). Writes the tracked box into outRect in full-image
// coordinates; outRect stays (0,0,0,0) when nothing is confirmed. When the
// sky detector loses the target, falls back to track_outsky_ob using the
// previous frame/box, with a missframe counter that resets all state after
// maxmissframe consecutive misses.
void DETECTTRACK::update(cv::Mat& im, cv::Rect& outRect) {

	//cv::imwrite("zxl/im.jpg",im);

	// Centered crop: cutEdge pixels trimmed per side; outputs shifted back.
	int cutEdge = 200;
	int rw = im.cols -cutEdge;
	int rh = im.rows - cutEdge;
	cv::Mat image = im(cv::Range(cutEdge,rh),cv::Range(cutEdge,rw));

	// Default result: "no detection".
	outRect = cv::Rect (0, 0, 0, 0);
	// cv::Mat image;
	// cv::cvtColor(image0, image, cv::COLOR_BGR2GRAY);

	std::vector<std::vector<cv::Point>> out_contours;
	cv::Mat edge_Scharr2;
	get_sky_roi2(image, out_contours, edge_Scharr2);
	if (!out_contours.empty()) {

		// Candidate objects inside the sky region; the list is cached in
		// tmp_trackes so get_sky_roi can skip template rebuilds.
		std::vector<cv::Rect> trackes = get_sky_ob(out_contours, edge_Scharr2);
		if (trackes.empty()) {
			tmp_trackes.clear();
			return;
		}
		tmp_trackes = trackes;

		TrackingResult tresult = centTrack.update(trackes);
		std::unordered_map<int, int> frameCount = tresult.frameCount;
		std::unordered_map<int, cv::Rect> outrects = tresult.outrects;
		std::unordered_map<int, cv::Point> objects = tresult.objects;

		// Pick the track observed for the most frames.
		// NOTE(review): maxframekey stays uninitialized if frameCount is
		// empty; the maxframe > frameTre guard below normally prevents its
		// use, but an explicit init would be safer — confirm and fix.
		int maxframe = 0;
		int maxframekey;
		for (auto it = frameCount.begin(); it != frameCount.end(); it++)
		{
					int keyFrame = it->first;
					int valFrame = it->second;
					if (valFrame >= maxframe)
					{
						maxframe = valFrame;
						maxframekey = keyFrame;
					}
		}
		//std::cout << "maxframekey:" << maxframekey << std::endl;
		//std::cout << "maxframe:" << maxframe << std::endl;
		// A non-empty objects map means the detector sees a candidate; require
		// it to persist for more than frameTre frames before trusting it.
		int frameTre = 1;
		if (objects.size() > 0 && maxframe >frameTre)
		{
			flag += 1;
			if (flag == 1)
			{
				// First confirmed detection: store it as the reference state.
				// int maxframekey = std::max_element(frameCount.begin(), frameCount.end()) - frameCount.begin();

				// int maxframe = 0;
				// int maxframekey;
				// for (auto it = frameCount.begin(); it != frameCount.end(); it++)
				// {
				// 	int keyFrame = it->first;
				// 	int valFrame = it->second;
				// 	if (valFrame >= maxframe)
				// 	{
				// 		maxframe = valFrame;
				// 		maxframekey = keyFrame;
				// 	}
				// }

				cv::Point betterob = objects[maxframekey];
				cv::Rect betterrect = outrects[maxframekey];

				// update
				preimg = image;
				pre_track = betterrect;
				precentr = betterob;
				//outRect = betterrect;
				// Shift back into full-image coordinates.
				outRect.x = betterrect.x+cutEdge;
				outRect.y = betterrect.y+cutEdge;
				outRect.width = betterrect.width;
				outRect.height = betterrect.height;
			}
			else
			{
				// int maxframekey = std::max_element(frameCount.begin(), frameCount.end()) - frameCount.begin();
				// int maxframe = 0;
				// int maxframekey;
				// for (auto it = frameCount.begin(); it != frameCount.end(); it++)
				// {
				// 	int keyFrame = it->first;
				// 	int valFrame = it->second;
				// 	if (valFrame >= maxframe)
				// 	{
				// 		maxframe = valFrame;
				// 		maxframekey = keyFrame;
				// 	}
				// }

				
				cv::Point betterob = objects[maxframekey];
				cv::Rect betterrect = outrects[maxframekey];
				//std::cout << "betterrect::" << betterrect << std::endl;
				// Jump rejection: L1 distance from the previous center,
				// normalized by (pw + ph), must stay within a factor of 10.
				int px, py, pw, ph;
				std::tie(px, py, pw, ph) = std::make_tuple(pre_track.x, pre_track.y, pre_track.width, pre_track.height);
				int pcx = px + pw / 2;
				int pcy = py + ph / 2;
				int bcx = betterob.x;
				int bcy = betterob.y;
				int dis = abs(bcx - pcx) + abs(bcy - pcy);
				// //std::cout << "dis:::" << dis << std::endl;
				// //std::cout << "pw+ph::" << pw << ";" << ph << ";" << (pw + ph) << std::endl;
				//std::cout << "dis/(pw+ph)::" << static_cast<double>(dis) / (pw + ph) << std::endl;
				double dis_val = static_cast<double>(dis) / (pw + ph);
				// update
				if (dis_val > 10)
				{
					//std::cout << "..............distance too len............" << std::endl;
					// No-op self-assignments: the previous state is
					// intentionally kept unchanged.
					pre_track = pre_track;
					preimg = preimg;
					precentr = precentr;

					// return out_rect;
				}
				else
				{
					// Accept and roll the reference state forward.
					pre_track = betterrect;
					preimg = image;
					precentr = betterob;
					//outRect = betterrect;
					outRect.x = betterrect.x+cutEdge;
					outRect.y = betterrect.y+cutEdge;
					outRect.width = betterrect.width;
					outRect.height = betterrect.height;
				}

				//std::cout << "self.pre_track::0::" << pre_track << std::endl;

				// NOTE(review): pre_cut is computed but unused — likely a
				// remnant of removed correlation code; confirm and remove.
				int x = pre_track.x;
				int y = pre_track.y;
				int w = pre_track.width;
				int h = pre_track.height;
				
				cv::Mat pre_cut = preimg(cv::Rect(x, y, w, h));


			}
		}
		else
		{
			// don't detect ob
			// 0 no ob on the way
			// 1 ob run out of the sky
			// 2 ob disappears
			// use preimg and box, use fast \ distance and correlate
			flag = 0;

			if (pre_track.x == 0 && pre_track.y == 0 && pre_track.width == 0 && pre_track.height == 0)
			{
				// 0 no ob on the way
				//std::cout << "objects and pre_track all is None " << std::endl;
			}
			else
			{
				//std::cout << "................find out sky ob..................." << std::endl;
				// 1 ob run out of the sky
				int x = pre_track.x;
				int y = pre_track.y;
				int w = pre_track.width;
				int h = pre_track.height;
				//std::cout << "self.pre_track::3::" << pre_track << std::endl;
				int px2 = x + w;
				int py2 = y + h;
				// NOTE(review): pre_cut is unused here as well.
				cv::Mat pre_cut = preimg(cv::Rect(x, y, w, h));

				// Re-acquire the target around its last known position.
				cv::Point2d lxly = track_outsky_ob(pre_track, preimg, image, 5);
				int lxc = lxly.x;
				int lyc = lxly.y;

				// Reject re-acquisitions too far from the previous box corner,
				// normalized by the box size.
				int dist = abs(lxc - px2) + abs(lyc - py2);
				int wh = w + h;
				double distval = static_cast<double>(dist) / wh;
				if (distval > 2)
				{
					//std::cout << ".............too distance..............." << std::endl;
					missframe += 1;
					// No-op self-assignments: state intentionally unchanged.
					pre_track = pre_track;
					preimg = preimg;
					precentr = precentr;
				}
				else
				{
					if (lxc > 0 && lyc > 0)
					{
						// Re-center the previous box on the recovered point.
						int x_ = lxc - w / 2;
						int y_ = lyc - h / 2;

						// update
						pre_track = cv::Rect(x_, y_, w, h);
						preimg = image;
						// cv::circle(image0, cv::Point(lxc, lyc), 1, cv::Scalar(0, 255, 0), -1);
						outRect = cv::Rect(x_+cutEdge, y_+cutEdge, w, h);
					}
					else
					{
						//std::cout << ".............no find ob............" << std::endl;
						missframe += 1;
						// No-op self-assignments: state intentionally unchanged.
						pre_track = pre_track;
						preimg = preimg;
						precentr = precentr;
					}

					// Too many consecutive misses: forget the target entirely.
					if (missframe > maxmissframe)
					{
						pre_track = cv::Rect(0, 0, 0, 0);
						preimg = cv::Mat();
						precentr = cv::Point(0, 0);
					}
				}

			}
		}
	}
}
