
#ifndef _KCFTRACKER_HEADERS
#include "kcftracker.h"
#include "ffttools.h"
#include "recttools.hpp"
#include "fhog.h"
#include "labdate.h"
#endif

 // Constructor
KCFTracker::KCFTracker(bool hog, bool fixed_window, bool multiscale, bool lab)
{

	//std::cout<<"fixed_window::"<<fixed_window<<std::endl;
	//std::cout<<"multiscale::"<<multiscale<<std::endl;
	// Parameters equal in all cases
	lambda = 0.0001;
	// 目标搜索范围
	padding = 5;
	//output_sigma_factor = 0.1;
	output_sigma_factor = 0.125;


	if (hog) {    // HOG
		// VOT
		interp_factor = 0.012;
		sigma = 0.6;
		// TPAMI
		//interp_factor = 0.02;
		//sigma = 0.5; 
		cell_size = 4;
		_hogfeatures = true;

		if (lab) {
			interp_factor = 0.005;
			sigma = 0.4;
			//output_sigma_factor = 0.025;
			output_sigma_factor = 0.1;

			_labfeatures = true;
			_labCentroids = cv::Mat(nClusters, 3, CV_32FC1, &data);
			cell_sizeQ = cell_size * cell_size;
		}
		else {
			_labfeatures = false;
		}
	}
	else {   // RAW
		interp_factor = 0.012;
		sigma = 0.6;
		cell_size = 1;
		_hogfeatures = false;

		if (lab) {
			printf("Lab features are only used with HOG features.\n");
			_labfeatures = false;
		}
	}


	if (multiscale) { // multiscale
		template_size = 96;
		//template_size = 100;
		scale_step = 1.05;
		scale_weight = 0.95;
		if (!fixed_window) {
			//printf("Multiscale does not support non-fixed window.\n");
			fixed_window = true;
		}
	}
	else if (fixed_window) {  // fit correction without multiscale
		template_size = 96;
		//template_size = 100;
		scale_step = 1;
	}
	else {
		template_size = 1;
		scale_step = 1;
	}
}

// Initialize tracker with the first frame.
// roi:   initial target rectangle.
// image: first video frame.
void KCFTracker::init(const cv::Rect &roi, cv::Mat image)
{
	_roi = roi;
	assert(roi.width >= 0 && roi.height >= 0);
	// First-frame extraction (inithann = 1) also computes _tmpl_sz, _scale
	// and the Hanning window as side effects.
	_tmpl = getFeatures(image, 1);

	// Desired training response: a Gaussian peak over the feature patch.
	_prob = createGaussianPeak(size_patch[0], size_patch[1]);
	// Complex (2-channel) dual coefficients, initialised to zero.
	_alphaf = cv::Mat(size_patch[0], size_patch[1], CV_32FC2, float(0));
	//_num = cv::Mat(size_patch[0], size_patch[1], CV_32FC2, float(0));
	//_den = cv::Mat(size_patch[0], size_patch[1], CV_32FC2, float(0));
	train(_tmpl, 1.0); // train with initial frame
}
// Update position based on the new frame; returns the new target rectangle.
cv::Rect KCFTracker::update(cv::Mat image)
{
	// Nudge the ROI back so at least one pixel overlaps the image.
	if (_roi.x + _roi.width <= 0) _roi.x = -_roi.width + 1;
	if (_roi.y + _roi.height <= 0) _roi.y = -_roi.height + 1;
	if (_roi.x >= image.cols - 1) _roi.x = image.cols - 2;
	if (_roi.y >= image.rows - 1) _roi.y = image.rows - 2;
	// Current target centre.
	float cx = _roi.x + _roi.width / 2.0f;
	float cy = _roi.y + _roi.height / 2.0f;


	float peak_value;
	// Detect at the current scale; res is the sub-pixel peak offset of the
	// response map relative to its centre.
	cv::Point2f res = detect(_tmpl, getFeatures(image, 0, 1.0f), peak_value);

	// Multiscale search: also test one scale step down and up, keeping
	// whichever weighted response beats the current best.
	if (scale_step != 1) {
		// Test at a smaller _scale
		float new_peak_value;
		cv::Point2f new_res = detect(_tmpl, getFeatures(image, 0, 1.0f / scale_step), new_peak_value);
		// scale_weight < 1 penalizes scale changes to avoid oscillation.
		if (scale_weight * new_peak_value > peak_value) {
			res = new_res;
			peak_value = new_peak_value;
			_scale /= scale_step;
			_roi.width /= scale_step;
			_roi.height /= scale_step;
		}

		// Test at a bigger _scale
		new_res = detect(_tmpl, getFeatures(image, 0, scale_step), new_peak_value);

		if (scale_weight * new_peak_value > peak_value) {
			res = new_res;
			peak_value = new_peak_value;
			_scale *= scale_step;
			_roi.width *= scale_step;
			_roi.height *= scale_step;
		}
	}

	// Adjust by cell size and _scale: the peak offset is in feature cells,
	// so map it back to image pixels before recentring the ROI.
	_roi.x = cx - _roi.width / 2.0f + ((float)res.x * cell_size * _scale);
	_roi.y = cy - _roi.height / 2.0f + ((float)res.y * cell_size * _scale);
	// Clamp so the ROI keeps some overlap with the image.
	if (_roi.x >= image.cols - 1) _roi.x = image.cols - 1;
	if (_roi.y >= image.rows - 1) _roi.y = image.rows - 1;
	if (_roi.x + _roi.width <= 0) _roi.x = -_roi.width + 2;
	if (_roi.y + _roi.height <= 0) _roi.y = -_roi.height + 2;

	assert(_roi.width >= 0 && _roi.height >= 0);

	// Re-extract features at the new position and update the model online.
	cv::Mat x = getFeatures(image, 0);
	train(x, interp_factor);

	return _roi;
}


// Detect object in the current frame.
// z: learned template features; x: features of the current patch.
// Writes the maximum response into peak_value and returns the sub-pixel
// peak position as an offset from the response-map centre (feature cells).
cv::Point2f KCFTracker::detect(cv::Mat z, cv::Mat x, float &peak_value)
{
	using namespace FFTTools;

	cv::Mat k = gaussianCorrelation(x, z); // kernel correlation of patch vs. template
	// Response map: inverse FFT of alphaf .* F(k).
	cv::Mat res = (real(fftd(complexMultiplication(_alphaf, fftd(k)), true)));

	//minMaxLoc only accepts doubles for the peak, and integer points for the coordinates
	cv::Point2i pi; // integer location of the maximum response
	double pv;      // value of the maximum response
	cv::minMaxLoc(res, NULL, &pv, NULL, &pi);
	peak_value = (float)pv;

	//subpixel peak estimation, coordinates will be non-integer
	cv::Point2f p((float)pi.x, (float)pi.y);
	//
	//target location is at the maximum response. we must take into
	//account the fact that, if the target doesn't move, the peak
	//will appear at the top - left corner, not at the center(this is
	//discussed in the paper).the responses wrap around cyclically.
	if (pi.x > 0 && pi.x < res.cols - 1) {
		p.x += subPixelPeak(res.at<float>(pi.y, pi.x - 1), peak_value, res.at<float>(pi.y, pi.x + 1));
	}

	if (pi.y > 0 && pi.y < res.rows - 1) {
		p.y += subPixelPeak(res.at<float>(pi.y - 1, pi.x), peak_value, res.at<float>(pi.y + 1, pi.x));
	}

	// Convert to an offset from the map centre; the integer division matches
	// the centre used when the Gaussian training peak was created.
	p.x -= (res.cols) / 2;
	p.y -= (res.rows) / 2;

	return p;
}

// Train the tracker on a single feature patch: solve the kernel ridge
// regression in the Fourier domain, then blend template and coefficients
// into the running model at rate train_interp_factor.
void KCFTracker::train(cv::Mat x, float train_interp_factor)
{
	using namespace FFTTools;

	// Kernel auto-correlation of the patch with itself.
	cv::Mat kernel = gaussianCorrelation(x, x);
	// Closed-form dual solution: alphaf = F(prob) / (F(k) + lambda).
	cv::Mat solution = complexDivision(_prob, (fftd(kernel) + lambda));

	// Exponential moving average of the template and the coefficients.
	_tmpl = _tmpl * (1 - train_interp_factor) + x * train_interp_factor;
	_alphaf = _alphaf * (1 - train_interp_factor) + solution * train_interp_factor;
}

// Evaluates a Gaussian kernel with bandwidth SIGMA for all relative shifts between input images X and Y,
// which must both be MxN. They must also be periodic (ie., pre-processed with a cosine window).
cv::Mat KCFTracker::gaussianCorrelation(cv::Mat x1, cv::Mat x2)
{
	using namespace FFTTools;
	// Accumulator for the cross-correlation summed over feature channels.
	cv::Mat c = cv::Mat(cv::Size(size_patch[1], size_patch[0]), CV_32F, cv::Scalar(0));
	// HOG features: one row per channel, reshaped back to the 2-D patch.
	if (_hogfeatures) {
		cv::Mat caux;
		cv::Mat x1aux;
		cv::Mat x2aux;
		for (int i = 0; i < size_patch[2]; i++) {
			x1aux = x1.row(i);   // Procedure do deal with cv::Mat multichannel bug
			x1aux = x1aux.reshape(1, size_patch[0]);
			x2aux = x2.row(i).reshape(1, size_patch[0]);
			// Correlation via the frequency domain: F(x1) .* conj(F(x2)).
			cv::mulSpectrums(fftd(x1aux), fftd(x2aux), caux, 0, true);
			caux = fftd(caux, true);
			rearrange(caux); // shift zero-lag to the centre
			caux.convertTo(caux, CV_32F);
			c = c + real(caux);
		}
	}
	// Gray features: single channel, correlate directly.
	else {
		cv::mulSpectrums(fftd(x1), fftd(x2), c, 0, true);
		c = fftd(c, true);
		rearrange(c);
		c = real(c);
	}
	// Squared distance d = |x1|^2 + |x2|^2 - 2*corr, normalized by the
	// number of elements and clamped at 0 against numeric error.
	cv::Mat d;
	cv::max(((cv::sum(x1.mul(x1))[0] + cv::sum(x2.mul(x2))[0]) - 2. * c) / (size_patch[0] * size_patch[1] * size_patch[2]), 0, d);

	// Gaussian kernel response: exp(-d / sigma^2).
	cv::Mat k;
	cv::exp((-d / (sigma * sigma)), k);
	return k;
}

// Create the Gaussian training target (in the Fourier domain), centred on
// the patch. Called only for the first frame.
cv::Mat KCFTracker::createGaussianPeak(int sizey, int sizex)
{
	cv::Mat_<float> response(sizey, sizex);

	const int half_y = sizey / 2;
	const int half_x = sizex / 2;

	// Spatial bandwidth scales with the (un-padded) target size.
	const float output_sigma = std::sqrt((float)sizex * sizey) / padding * output_sigma_factor;
	const float coeff = -0.5 / (output_sigma * output_sigma);

	for (int row = 0; row < sizey; row++) {
		const int dy = row - half_y;
		for (int col = 0; col < sizex; col++) {
			const int dx = col - half_x;
			response(row, col) = std::exp(coeff * (float)(dy * dy + dx * dx));
		}
	}
	// The training target is used in the frequency domain.
	return FFTTools::fftd(response);
}

// Obtain sub-window from image, with replication-padding, and extract features.
// inithann:     true on the first frame — also computes _tmpl_sz, _scale and
//               (re)creates the Hanning window.
// scale_adjust: extraction-size multiplier used by the multiscale test.
// Side effects: fills size_patch[0..2] with the feature-patch dimensions.
cv::Mat KCFTracker::getFeatures(const cv::Mat & image, bool inithann, float scale_adjust)
{
	cv::Rect extracted_roi;

	// Target centre (integer ROI fields; fractional centre is truncated).
	float cx = _roi.x + _roi.width / 2;
	float cy = _roi.y + _roi.height / 2;

	if (inithann) {
		// Search window is the target enlarged by `padding`.
		int padded_w = _roi.width * padding;
		int padded_h = _roi.height * padding;

		if (template_size > 1) {  // Fit largest dimension to the given template size
			if (padded_w >= padded_h)  // fit to width
				_scale = padded_w / (float)template_size;
			else
				_scale = padded_h / (float)template_size;

			// Integer assignment truncates the scaled size.
			_tmpl_sz.width = padded_w / _scale;
			_tmpl_sz.height = padded_h / _scale;
		}
		else {  // No template size given, use ROI size
			_tmpl_sz.width = padded_w;
			_tmpl_sz.height = padded_h;
			_scale = 1;
		}

		if (_hogfeatures) {
			// Round to a multiple of 2*cell_size, plus one cell of border
			// on each side.
			_tmpl_sz.width = (((int)(_tmpl_sz.width / (2 * cell_size))) * 2 * cell_size) + cell_size * 2;
			_tmpl_sz.height = (((int)(_tmpl_sz.height / (2 * cell_size))) * 2 * cell_size) + cell_size * 2;
		}
		else {  // Make number of pixels even (helps with some logic involving half-dimensions)
			_tmpl_sz.width = (_tmpl_sz.width / 2) * 2;
			_tmpl_sz.height = (_tmpl_sz.height / 2) * 2;
		}
	}

	extracted_roi.width = scale_adjust * _scale * _tmpl_sz.width;
	extracted_roi.height = scale_adjust * _scale * _tmpl_sz.height;

	// Center the extraction window on the target.
	extracted_roi.x = cx - extracted_roi.width / 2;
	extracted_roi.y = cy - extracted_roi.height / 2;

	// Clamp the window origin into the image bounds.
	if (extracted_roi.x < 0) extracted_roi.x = 0;
	if (extracted_roi.y < 0) extracted_roi.y = 0;
	if (extracted_roi.x > image.cols) extracted_roi.x = image.cols;
	// BUG FIX: this clamp previously assigned image.rows to extracted_roi.x,
	// corrupting the x origin whenever the window fell below the image.
	if (extracted_roi.y > image.rows) extracted_roi.y = image.rows;

	cv::Mat FeaturesMap;
	// Sub-window at the position from the last frame; BORDER_REPLICATE pads
	// any part that falls outside the image.
	cv::Mat z = RectTools::subwindow(image, extracted_roi, cv::BORDER_REPLICATE);
	if (z.cols != _tmpl_sz.width || z.rows != _tmpl_sz.height) {
		cv::resize(z, z, _tmpl_sz);
	}

	// NOTE(review): the HOG/Lab feature-extraction path has been disabled
	// (dead commented-out code removed); only raw grayscale features are
	// produced here even when _hogfeatures is set, while createHanningMats
	// still branches on _hogfeatures — confirm HOG is intentionally unused.
	FeaturesMap = RectTools::getGrayImage(z);
	FeaturesMap -= (float) 0.5; // zero-mean pixels, as in the paper
	size_patch[0] = z.rows;
	size_patch[1] = z.cols;
	size_patch[2] = 1;

	if (inithann) {
		// Build the cosine window sized to the feature patch.
		createHanningMats();
	}
	// Apply the cosine window to reduce boundary (periodicity) artifacts.
	FeaturesMap = hann.mul(FeaturesMap);
	return FeaturesMap;
}

// Initialize Hanning window. Function called only in the first frame.
void KCFTracker::createHanningMats()
{
	cv::Mat hann1t = cv::Mat(cv::Size(size_patch[1], 1), CV_32F, cv::Scalar(0));
	cv::Mat hann2t = cv::Mat(cv::Size(1, size_patch[0]), CV_32F, cv::Scalar(0));

	for (int i = 0; i < hann1t.cols; i++)
		hann1t.at<float >(0, i) = 0.5 * (1 - std::cos(2 * 3.14159265358979323846 * i / (hann1t.cols - 1)));
	for (int i = 0; i < hann2t.rows; i++)
		hann2t.at<float >(i, 0) = 0.5 * (1 - std::cos(2 * 3.14159265358979323846 * i / (hann2t.rows - 1)));

	cv::Mat hann2d = hann2t * hann1t;
	// HOG features
	if (_hogfeatures) {
		cv::Mat hann1d = hann2d.reshape(1, 1); // Procedure do deal with cv::Mat multichannel bug

		hann = cv::Mat(cv::Size(size_patch[0] * size_patch[1], size_patch[2]), CV_32F, cv::Scalar(0));
		for (int i = 0; i < size_patch[2]; i++) {
			for (int j = 0; j < size_patch[0] * size_patch[1]; j++) {
				hann.at<float>(i, j) = hann1d.at<float>(0, j);
			}
		}
	}
	// Gray features
	else {
		hann = hann2d;
	}
}

// One-dimensional sub-pixel peak refinement: fit a parabola through the
// response values (left, center, right) and return the fractional offset
// of its vertex from the centre sample.
float KCFTracker::subPixelPeak(float left, float center, float right)
{
	const float denom = 2 * center - right - left;

	// A flat neighbourhood makes the parabola degenerate — no refinement.
	if (denom == 0)
		return 0;

	return 0.5 * (right - left) / denom;
}

// Plugin interface: reset hook. Currently a no-op; returns 0 (success).
int KCFTracker::Algorithm_Reset() {
	return 0;
}
// Plugin interface: human-readable algorithm name.
const char *KCFTracker::Algorithm_Name()
{

	return "MyAlgorithm";
}

// Plugin interface: version string of this algorithm implementation.
const char *KCFTracker::Algorithm_Version()
{

	return "Ver.1.0";
}
// Plugin interface: one-time initialization. CfgPath is currently ignored;
// returns 0 (success).
int KCFTracker::Algorithm_Init(const char *CfgPath)
{

	return 0;
}
// Plugin entry: configure tracking with the initial detection rectangle
// and the first video frame; returns 0 (success).
int KCFTracker::Algorithm_RunCfg(DetectRect *rect, ss_video_frame *frame)
{

	cv::Rect roi;
	roi.x = rect->x();
	roi.y = rect->y();
	roi.width = rect->w();
	roi.height = rect->h();

	

	// Wrap plane 0 of the frame as a single-channel 8-bit image without
	// copying — assumes plane 0 is grayscale/luma; TODO confirm pixel
	// format and that the buffer outlives this call.
	cv::Mat image(frame->height,frame->width,CV_8UC1,frame->virt_addr[0]);
	init(roi, image);
	return 0;
}
// Plugin entry: run one tracking step on a new frame and append the
// resulting rectangle to resultOUT. resultIN is currently unused.
int KCFTracker::Algorithm_DoProcess(ss_video_frame* frame, std::vector<DetectRect>* resultIN, std::vector<DetectRect>* resultOUT)
{

	// Wrap plane 0 as a single-channel 8-bit image without copying —
	// assumes grayscale/luma data; TODO confirm pixel format.
	cv::Mat image(frame->height,frame->width,CV_8UC1,frame->virt_addr[0]);
	cv::Rect resroi;

	resroi = update(image);

	DetectRect rect(resroi.x,resroi.y,resroi.width,resroi.height);
	resultOUT->push_back(rect);
	return 0;
}