/**
 * @inproceedings{cvpr2013Fast_Match,
 *	  title={Fast-Match: Fast Affine Template Matching},
 *	  author={Korman, Simon and Reichman, Daniel and Tsur, Gilad and Avidan, Shai},
 *	  booktitle={Computer Vision and Pattern Recognition (CVPR), 2013 IEEE Conference on},
 *	  pages={1940--1947},
 *	  year={2013},
 *	  organization={IEEE}
 * }
 */

#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <unistd.h>
#include <float.h>

#include <cuda_runtime.h>
#include <helper_cuda.h>
#include <helper_functions.h>

#include <cv.h>
#include <highgui.h>

#include <log4cxx/logger.h>
#include <base/logging.h>
#include <base/types.cuh>
#include <base/methods.cuh>
#include <gpu/cu_methods.cuh>

#include <gpu/fast_match_helper.cuh>
#include <gpu/fast_match.cuh>

namespace recognition {
namespace gpu {

using namespace cv;

// Process-wide loggers: "logger" carries diagnostics/progress messages,
// "output" carries result reporting (separate log4cxx categories).
const log4cxx::LoggerPtr FAsT_Match::logger = log4cxx::LoggerPtr(log4cxx::Logger::getLogger("recognition.gpu.FAsT_Match"));
const log4cxx::LoggerPtr FAsT_Match::output = log4cxx::LoggerPtr(log4cxx::Logger::getLogger("output.recognition.gpu.FAsT_Match"));

/**
 * GPU driver for the FAsT-Match search (Korman et al., CVPR 2013).
 *
 * Repeatedly (one "level" per loop iteration):
 *   1. expands the current net of configurations into affine matrices and
 *      filters the out-of-bounds ones,
 *   2. evaluates every surviving affine at randomly sampled template
 *      points (distances land in h_distances_),
 *   3. tracks the best (smallest) distance and its configuration,
 *   4. either restarts with a denser net (unpromising first level) or
 *      keeps the good configurations Q, expands them into S, and
 *      continues with delta divided by delta_fact.
 *
 * Device buffers are handed between stages through the set_d_* setters;
 * NOTE(review): passing NULL to a setter appears to release the previous
 * buffer -- confirm against the setter implementations.
 *
 * @return FastMatchResult with the best configuration, its affine matrix
 *         and the final (smallest) normalized distance in `error`.
 */
FastMatchResult FAsT_Match::RunGpu() {
	double start_time = (double)getTickCount();
	LOG_INFO(logger, "Starting FAsT-Match...");
	FastMatchResult res;

	// Random template points at which distances are evaluated.
	std::vector<int> xs, ys;
	uint npoints = GetNPoints();
	GetRandomPoints(npoints, w1_, h1_, xs, ys);
//	fill_xsys(w1_, h1_, xs, ys);

	uint nconfigs, naffines, nQ;
	_cu_CreateConfigs(nconfigs);

	if (nconfigs > MAX_TRANSFORMATIONS) {
		LOG_ERROR(logger, "Too many transformations: %d", nconfigs);
		exit(1);
	}

	// Net-refinement factor between levels (empirical constant).
	double delta_fact = 1.511;
	double new_delta = delta_;

	std::vector<double> best_distsances;	// best distance per level (name keeps a historical typo)
	double best_distance = -1;
	uint best_idx = 0;

	uint max_levels = 20;
	uint level = 0;
	while (true) {
		++level;
		double time = (double)getTickCount();
		ReportNumberOfCreatedConfigs(logger, level, nconfigs);

		// Convert configurations to affine matrices, then drop the ones
		// whose mapped template corners fall too far outside the image.
		_cu_ConfigsToAffines(nconfigs);
		_cu_FilterConfigsAndAffines(nconfigs, naffines);
		set_d_filter(NULL);
		ReportNumberOfCreatedAffines(logger, naffines, CV_TIME(time));

		// Evaluate every affine at the sampled points; the callee stores
		// the per-affine distances in h_distances_.
		_cu_EvaluateAffines(naffines, xs, ys);
		set_d_affines(NULL);
		ReportAffinesEvaluationTime(logger, CV_TIME(time));

		// Find the smallest distance: GPU reduction for large batches,
		// plain host scan otherwise.
		best_distance = h_distances_[0];
		best_idx = 0;
		if (naffines >= REDUCTION_BLOCK_SIZE) {
			pointReduceWithIdxDoubleFunction1_t h_ReduceMinDoulbe1;
			pointReduceWithIdxDoubleFunction2_t h_ReduceMinDoulbe2;
			cudaMemcpyFromSymbol(&h_ReduceMinDoulbe1, d_ReduceMinDoulbe1, sizeof(pointReduceWithIdxDoubleFunction1_t));
			cudaMemcpyFromSymbol(&h_ReduceMinDoulbe2, d_ReduceMinDoulbe2, sizeof(pointReduceWithIdxDoubleFunction2_t));

			double* out_data;
			uint* out_idxs;
			uint out_size;
			_cu_reduce<FULL_BLOCK_SIZE, double>(h_distances_, naffines, FLT_MAX, h_ReduceMinDoulbe1, h_ReduceMinDoulbe2, out_data, out_idxs, out_size);

			// Final min over the per-block partial results.
			for (uint i = 0; i < out_size; ++i) {
				double tmp = out_data[i];
				if (best_distance > tmp) {
					best_distance = tmp;
					best_idx = out_idxs[i];
				}
			}

			checkCudaErrors( cudaFreeHost(out_data) );
			checkCudaErrors( cudaFreeHost(out_idxs) );
		} else {
			for (uint i = 1; i < naffines; ++i) {
				double tmp = h_distances_[i];
				if (tmp < best_distance) {
					best_distance = tmp;
					best_idx = i;
				}
			}
		}
		LOG_DEBUG(logger, "Best distance: %.4f with index: %d", best_distance, best_idx);

		// Copy the winning configuration from device memory into the result.
		double* h_best_config = dev_to_host<double>(&(d_configs_[best_idx * 6]), 6);
		memcpy(res.best_config, h_best_config, 6 * sizeof(double));
		checkCudaErrors( cudaFreeHost(h_best_config) );

		CreateAffineTransformation(res.best_config, res.best_affine);

		// Stop conditions: distance good enough, level cap reached,
		// stagnation over the last 3 levels, or a small Q late in the run.
		// NOTE(review): nQ is first assigned by _cu_Create_Q below, so on
		// early levels this condition reads it uninitialized -- only the
		// (level > 8) short-circuit keeps that read unreached; confirm.
		best_distsances.push_back(best_distance);
		if ((best_distance < 0.005) || ((level > 2) && (best_distance < 0.015)) || (level >= max_levels) ||
				((level > 3) && (best_distance > mean<double>(best_distsances, level - 3, level) * 0.97)) ||
				((level > 8) && (nQ > max_levels)))
			break;

		// Creating Q: configurations whose distance is close to the best.
		bool percentage_higher_limit;
		_cu_Create_Q(best_distance, new_delta, naffines, nQ, percentage_higher_limit);
		set_d_configs(NULL);

		// Creating S -- or restarting with a denser net when the first
		// level looks unpromising.
		if ((percentage_higher_limit && (best_distance > 0.05) && (level == 1) && (nconfigs < 7500000)) ||
								((best_distance > 0.1) && (level == 1) && (nconfigs < 5000000) ) ) {
			double fact = 0.9;
			LOG_INFO(logger, "##### RESTARTING!!! changing from delta: %.3f, to delta: %.3f", new_delta, new_delta * fact);
			new_delta = new_delta * fact;
			level = 0;
			bounds_.tx.step = fact * bounds_.tx.step;
			bounds_.ty.step = fact * bounds_.ty.step;
			bounds_.r.step = fact * bounds_.r.step;
			bounds_.s.step = fact * bounds_.s.step;
			_cu_CreateConfigs(nconfigs);
		} else {
			double prev_delta = new_delta;
			new_delta = new_delta / delta_fact;
			LOG_INFO(logger, "##### CONTINUING!!! prev_delta = %.3f,  new_delta = %.3f", prev_delta, new_delta);

			// Expand each kept configuration into extension_scale
			// neighbours (random subset, or the full 3^6 neighbourhood).
			uint extension_scale = 80;
			bool full_extension = false;
			ExtensionType extension_type = RANDOM_EXTENSION;
			switch (extension_type) {
			case RANDOM_EXTENSION:
				extension_scale = 80;
				full_extension = false;
				break;
			case FULL_EXTENSION:
				extension_scale = 729;	// 3^6
				full_extension = true;
				break;
			}
			_cu_Create_S(nQ, level, extension_scale, delta_fact, full_extension);
			uint nS = nQ * extension_scale;
			nconfigs = nQ + nS;

			// Next level's configuration set = Q followed by S.
			set_d_configs(NULL);
			checkCudaErrors( cudaMalloc((void **) &d_configs_, nconfigs * 6 * sizeof(double)) );
			checkCudaErrors( cudaMemcpy(d_configs_, d_Q_, nQ * 6 * sizeof(double), cudaMemcpyDeviceToDevice) );
			checkCudaErrors( cudaMemcpy(&(d_configs_[nQ * 6]), d_S_, nS * 6 * sizeof(double), cudaMemcpyDeviceToDevice) );
			set_d_Q(NULL);
			set_d_S(NULL);

			LOG_INFO(logger, "|Q| = [%-7d], |S| = [%-7d]", nQ, nS);
		}
		ReportLevelTime(logger, level, CV_TIME(time));

		// Refresh the random sample points for the next level.
		xs.clear(); ys.clear();
		GetRandomPoints(npoints, w1_, h1_, xs, ys);
//		fill_xsys(w1_, h1_, xs, ys);
	}
	res.error = best_distance;

	ReportFastMatchTime(logger, CV_TIME(start_time));
	ReportFastMatchResult(logger, res);
	LOG_INFO(logger, "Exiting FAsT-Match...");
	return res;
}

/**
 * CPU reference driver for the FAsT-Match search.
 *
 * Mirrors RunGpu(): per level it expands configurations into affines,
 * filters out-of-bounds ones, evaluates distances at random template
 * points, keeps the near-best configurations (Q) and expands them (S)
 * for the next, finer level.
 *
 * @return FastMatchResult with the best configuration, its affine matrix
 *         and the final (smallest) normalized distance in `error`.
 */
FastMatchResult FAsT_Match::RunCpu() {
	double start_time = (double)getTickCount();
	LOG_INFO(logger, "Starting FAsT-Match...");
	FastMatchResult res;

	// Random template points at which distances are evaluated.
	std::vector<int> xs, ys;
	uint npoints = GetNPoints();
	GetRandomPoints(npoints, w1_, h1_, xs, ys);
//	fill_xsys(w1_, h1_, xs, ys);

	Mat configs, affines, Q, S;
	CreateConfigs(configs);

	if (configs.rows > MAX_TRANSFORMATIONS) {
		LOG_ERROR(logger, "Too many transformations: %d", configs.rows);
		exit(1);
	}

	// Net-refinement factor between levels (empirical constant).
	double delta_fact = 1.511;
	double new_delta = delta_;

	std::vector<double> best_distsances;	// best distance per level (name keeps a historical typo)
	double best_distance = -1;

	uint max_levels = 20;
	uint level = 0;
	while (true) {
		++level;
		double time = (double)getTickCount();
		uint nconfigs = configs.rows;
		ReportNumberOfCreatedConfigs(logger, level, nconfigs);

		// Convert configurations to affine matrices; filter flags the rows
		// whose mapped template corners stay close enough to the image.
		affines = Mat_<double>(nconfigs, 6);
		bool* filter = (bool*) malloc(configs.rows * sizeof(bool));
		uint naffines;
		ConfigsToAffines(configs, affines, filter, naffines);

		// Compact both matrices down to the surviving rows.
		Mat affines_filterd = Mat_<double>(naffines, 6);
		FilterMatrix(affines, affines_filterd, filter);
		affines = affines_filterd;

		Mat configs_filterd = Mat_<double>(naffines, 6);
		FilterMatrix(configs, configs_filterd, filter);
		configs = configs_filterd;
		free(filter);
		ReportNumberOfCreatedAffines(logger, naffines, CV_TIME(time));

		// Evaluate every affine at the sampled points.
		Mat distances = Mat_<double>(1, naffines);
		EvaluateAffines(affines, xs, ys, distances);
		ReportAffinesEvaluationTime(logger, CV_TIME(time));

		// Linear scan for the smallest distance.
		double* distances_ptr = distances.ptr<double>(0);
		best_distance = distances_ptr[0];
		uint best_idx = 0;
		for (uint i = 1; i < naffines; ++i) {
			if (distances_ptr[i] < best_distance) {
				best_distance = distances_ptr[i];
				best_idx = i;
			}
		}
		LOG_DEBUG(logger, "Best distance: %.4f with index: %d", best_distance, best_idx);

		double* best_config = configs.ptr<double>(best_idx);
		memcpy(res.best_config, best_config, 6 * sizeof(double));
		CreateAffineTransformation(res.best_config, res.best_affine);

		// Stop conditions: distance good enough, level cap reached,
		// stagnation over the last 3 levels, or a small Q late in the run
		// (Q is empty until the first Create_Q call, so Q.rows is 0 there).
		best_distsances.push_back(best_distance);
		if ((best_distance < 0.005) || ((level > 2) && (best_distance < 0.015)) || (level >= max_levels) ||
				((level > 3) && (best_distance > mean<double>(best_distsances, level - 3, level) * 0.97)) ||
				((level > 8) && (Q.rows > max_levels)))
			break;

		// Creating Q: configurations whose distance is close to the best.
		bool percentage_higher_limit;
		Create_Q(configs, best_distance, new_delta, distances, Q, percentage_higher_limit);
		uint nQ = Q.rows;

		// Creating S -- or restarting with a denser net when the first
		// level looks unpromising.
		if ((percentage_higher_limit && (best_distance > 0.05) && (level == 1) && (nconfigs < 7500000)) ||
								((best_distance > 0.1) && (level == 1) && (nconfigs < 5000000) ) ) {
			double fact = 0.9;
			LOG_INFO(logger, "##### RESTARTING!!! changing from delta: %.3f, to delta: %.3f", new_delta, new_delta * fact);
			new_delta = new_delta * fact;
			level = 0;
			bounds_.tx.step = fact * bounds_.tx.step;
			bounds_.ty.step = fact * bounds_.ty.step;
			bounds_.r.step = fact * bounds_.r.step;
			bounds_.s.step = fact * bounds_.s.step;
			CreateConfigs(configs);
		} else {
			double prev_delta = new_delta;
			new_delta = new_delta / delta_fact;
			LOG_INFO(logger, "##### CONTINUING!!! prev_delta = %.3f,  new_delta = %.3f", prev_delta, new_delta);

			// Expand each kept configuration into its neighbours
			// (random subset, or the full 3^6 neighbourhood).
			uint extension_scale = 80;
			ExtensionType expansion_type = RANDOM_EXTENSION;
			switch (expansion_type) {
			case RANDOM_EXTENSION:
				Create_S_random(Q, level, extension_scale, delta_fact, S);
				break;
			case FULL_EXTENSION:
				Create_S_full(Q, level, delta_fact, S);
				break;
			}

			// Next level's configuration set = Q followed by S.
			Q.push_back(S);
			configs = Q;
		}
		// NOTE(review): on the restart path S still holds the previous
		// level's rows (or is empty at level 1), so this log line can be
		// misleading there.
		LOG_INFO(logger, "|Q| = [%-7d], |S| = [%-7d]", nQ, S.rows);
		ReportLevelTime(logger, level, CV_TIME(time));

		// Refresh the random sample points for the next level.
		xs.clear(); ys.clear();
		GetRandomPoints(npoints, w1_, h1_, xs, ys);
//		fill_xsys(w1_, h1_, xs, ys);
	}
	res.error = best_distance;

	ReportFastMatchTime(logger, CV_TIME(start_time));
	ReportFastMatchResult(logger, res);
	LOG_INFO(logger, "Exiting FAsT-Match...");
	return res;
}

/**
 * Computes the number of grid steps per search dimension (translation x/y,
 * rotation, scale) from the configured bounds and step sizes.
 *
 * A dimension receives one extra step when the leftover range beyond its
 * regular steps exceeds half a step.
 * NOTE(review): the rotation test compares against a full step instead of
 * half a step like the other dimensions -- confirm this asymmetry is
 * intentional.
 */
void FAsT_Match::GetConfigDims(uint& ntx_steps, uint& nty_steps, uint& nr_steps, uint& ns_steps) {
	ntx_steps = (bounds_.tx.max - bounds_.tx.min) / bounds_.tx.step + 1;
	nty_steps = (bounds_.ty.max - bounds_.ty.min) / bounds_.ty.step + 1;
	nr_steps = (bounds_.r.max - bounds_.r.min) / bounds_.r.step + 1;
	// A zero scale step means the scale dimension is fixed to one value.
	ns_steps = bounds_.s.step == 0 ? 1 : ((bounds_.s.max - bounds_.s.min) / bounds_.s.step + 1);

	// Leftover range not covered by the regular steps of each dimension.
	double tx_rest = bounds_.tx.max - bounds_.tx.min - bounds_.tx.step * (ntx_steps - 1);
	double ty_rest = bounds_.ty.max - bounds_.ty.min - bounds_.ty.step * (nty_steps - 1);
	double r_rest = bounds_.r.max - bounds_.r.min - bounds_.r.step * (nr_steps - 1);
	double s_rest = bounds_.s.max - bounds_.s.min - bounds_.s.step * (ns_steps - 1);

	if (tx_rest > 0.5 * bounds_.tx.step)
		++ntx_steps;
	if (ty_rest > 0.5 * bounds_.ty.step)
		++nty_steps;
	if (r_rest > bounds_.r.step)
		++nr_steps;
	if (s_rest > 0.5 * bounds_.s.step)
		++ns_steps;
}

/**
 * Enumerates the full Cartesian grid of affine configurations on the CPU.
 *
 * Each output row holds the 6 parameters in the order
 * [tx, ty, r2, sx, sy, r1].
 *
 * @param[out] configs nconfigs x 6 double matrix of configurations.
 */
void FAsT_Match::CreateConfigs(Mat& configs) {
	double time = (double)getTickCount();

	uint ntx_steps, nty_steps, nr_steps, ns_steps;
	GetConfigDims(ntx_steps, nty_steps, nr_steps, ns_steps);

	// Materialize the sampled values along every dimension.
	std::vector<double> tx_vals, ty_vals, r_vals, s_vals;
	FillRangeWithStep<double>(bounds_.tx.min, bounds_.tx.step, ntx_steps, tx_vals);
	FillRangeWithStep<double>(bounds_.ty.min, bounds_.ty.step, nty_steps, ty_vals);
	FillRangeWithStep<double>(bounds_.r.min, bounds_.r.step, nr_steps, r_vals);
	FillRangeWithStep<double>(bounds_.s.min, bounds_.s.step, ns_steps, s_vals);

	// For a full 2*pi rotation range, the second rotation only takes the
	// values below -pi/2 + half a step (avoids enumerating equivalent
	// configurations twice).
	uint nr2_steps = nr_steps;
	if (bounds_.r.max - bounds_.r.min == 2 * M_PI) {
		double r2_limit = -M_PI / 2 + bounds_.r.step / 2;
		nr2_steps = 0;
		for (uint i = 0; i < r_vals.size(); ++i) {
			if (r_vals[i] < r2_limit)
				++nr2_steps;
		}
	}

	uint nconfigs = ntx_steps * nty_steps * (ns_steps * ns_steps) * (nr_steps * nr2_steps);

	// Fill rows in the fixed nesting order tx > ty > r1 > r2 > sx > sy.
	configs = Mat_<double>(nconfigs, 6);
	uint row = 0;
	for (uint itx = 0; itx < ntx_steps; ++itx)
	for (uint ity = 0; ity < nty_steps; ++ity)
	for (uint ir1 = 0; ir1 < nr_steps; ++ir1)
	for (uint ir2 = 0; ir2 < nr2_steps; ++ir2)
	for (uint isx = 0; isx < ns_steps; ++isx)
	for (uint isy = 0; isy < ns_steps; ++isy) {
		double* cfg = configs.ptr<double>(row++);
		cfg[0] = tx_vals[itx];
		cfg[1] = ty_vals[ity];
		cfg[2] = r_vals[ir2];
		cfg[3] = s_vals[isx];
		cfg[4] = s_vals[isy];
		cfg[5] = r_vals[ir1];
	}

	ReportMethodExecutionTime(__func__, CV_TIME(time));
}

/**
 * Enumerates the full grid of affine configurations directly on the GPU.
 *
 * Computes the per-dimension step counts, launches
 * _cu_create_configs__global__ to materialize all nconfigs rows
 * (6 doubles each) in device memory, and hands the buffer to
 * set_d_configs().
 *
 * Fix: the previous version copied the finished configs back to a pinned
 * host buffer (dev_to_host) that was never read and never freed -- a
 * pinned-memory leak on every call (including restarts). The copy is
 * removed; consumers read the device buffer via d_configs_.
 *
 * @param[out] nconfigs number of generated configurations.
 */
void FAsT_Match::_cu_CreateConfigs(uint& nconfigs) {
	double dev_mem_used_start = GetDevMemoryUsedMb();
	double time = (double)getTickCount();

	uint ntx_steps, nty_steps, nr_steps, ns_steps;
	GetConfigDims(ntx_steps, nty_steps, nr_steps, ns_steps);

	std::vector<double> r_steps;
	FillRangeWithStep<double>(bounds_.r.min, bounds_.r.step, nr_steps, r_steps);

	// For a full 2*pi rotation range, the second rotation only takes the
	// values below -pi/2 + half a step (avoids enumerating equivalent
	// configurations twice).
	uint nr2_steps = nr_steps;
	if (bounds_.r.max - bounds_.r.min == 2 * M_PI) {
		nr2_steps = 0;
		double max_bound = -M_PI / 2 + bounds_.r.step / 2;
		for (uint i = 0; i < r_steps.size(); ++i)
			if (r_steps[i] < max_bound)
				nr2_steps++;
	}

	nconfigs = ntx_steps * nty_steps * (ns_steps * ns_steps) * (nr_steps * nr2_steps);

	// Kernel parameters are passed through a device-side struct copy.
	_cu_create_configs__global__KernelArgs *h_args, *d_args;
	_cu_create_configs__global__KernelArgs args(bounds_, ntx_steps, nty_steps, nr_steps, nr2_steps, ns_steps);
	h_args = &args;
	checkCudaErrors( cudaMalloc((void **) &d_args, sizeof(_cu_create_configs__global__KernelArgs)) );
	checkCudaErrors( cudaMemcpy(d_args, h_args, sizeof(_cu_create_configs__global__KernelArgs), cudaMemcpyHostToDevice) );

	double *d_configs;
	checkCudaErrors( cudaMalloc((void **) &d_configs, nconfigs * 6 * sizeof(double)) );

	dim3 dimBlock(BLOCK_SIZE * BLOCK_SIZE);
	dim3 dimGrid(EXPAND_TO_FULL_BLOCK_SIZE(nconfigs) / dimBlock.x);
	cudaEvent_t start, stop;
	checkCudaErrors( cudaEventCreate(&start) );
	checkCudaErrors( cudaEventCreate(&stop) );
	checkCudaErrors( cudaEventRecord(start, NULL) );
//	int n_iter = 10;
	int n_iter = 1;
	for (int i = 0; i < n_iter; ++i) {
	_cu_create_configs__global__<<<dimGrid, dimBlock>>>(d_args, nconfigs, d_configs);
	}
	// Catch launch-configuration errors; asynchronous execution errors
	// surface at the event synchronization below.
	checkCudaErrors( cudaGetLastError() );
	checkCudaErrors( cudaEventRecord(stop, NULL) );
	checkCudaErrors( cudaEventSynchronize(stop) );
	float cu_time_msec = 0.0F;
	checkCudaErrors( cudaEventElapsedTime(&cu_time_msec, start, stop) );
	checkCudaErrors( cudaEventDestroy(start) );
	checkCudaErrors( cudaEventDestroy(stop) );
	float cu_time_sec = cu_time_msec / 1000.0F;
	ReportKernelExecutionTime(__func__, cu_time_sec / n_iter);

	// Hand ownership of the configuration buffer to the member pointer.
	set_d_configs(d_configs);

	checkCudaErrors( cudaFree(d_args) );

	ReportMethodExecutionTime(__func__, CV_TIME(time));
	ReportDevMemoryUsage(__func__, dev_mem_used_start);
}

/**
 * Converts all device-resident configurations into 2x3 affine matrices
 * on the GPU, producing a per-row boolean filter for out-of-bounds rows.
 *
 * Outputs are handed to set_d_affines()/set_d_filter(); the input
 * configurations (d_configs_) are left untouched.
 *
 * Fix: added a cudaGetLastError() check after the kernel launch so
 * launch-configuration failures are reported immediately instead of
 * surfacing as a confusing error at a later API call.
 *
 * @param nconfigs number of configuration rows in d_configs_.
 */
void FAsT_Match::_cu_ConfigsToAffines(const uint nconfigs) {
	double dev_mem_used_start = GetDevMemoryUsedMb();
	double time = (double)getTickCount();

	double *d_affines;
	checkCudaErrors( cudaMalloc((void **) &d_affines, nconfigs * 6 * sizeof(double)) );

	bool *d_filter;
	checkCudaErrors( cudaMalloc((void **) &d_filter, nconfigs * sizeof(bool)) );

	// Kernel parameters (image sizes and centers) via device-side struct.
	_cu_configs_to_affine__global__KernelArgs _args(w1_, h1_, w2_, h2_, r1x_, r1y_, r2x_, r2y_);
	_cu_configs_to_affine__global__KernelArgs *h_args = &_args;
	_cu_configs_to_affine__global__KernelArgs *d_args;
	checkCudaErrors(cudaMalloc((void **) &d_args, sizeof(_cu_configs_to_affine__global__KernelArgs)));
	checkCudaErrors(cudaMemcpy(d_args, h_args, sizeof(_cu_configs_to_affine__global__KernelArgs), cudaMemcpyHostToDevice));

	dim3 dimBlock(BLOCK_SIZE * BLOCK_SIZE);
	dim3 dimGrid(EXPAND_TO_FULL_BLOCK_SIZE(nconfigs) / dimBlock.x);
	cudaEvent_t start, stop;
	checkCudaErrors( cudaEventCreate(&start) );
	checkCudaErrors( cudaEventCreate(&stop) );
	checkCudaErrors( cudaEventRecord(start, NULL) );
//	int n_iter = 10;
	int n_iter = 1;
	for (int i = 0; i < n_iter; ++i) {
	_cu_configs_to_affine__global__<<<dimGrid, dimBlock>>>(d_configs_, d_affines, nconfigs, d_args, d_filter);
	}
	// Catch launch-configuration errors; asynchronous execution errors
	// surface at the event synchronization below.
	checkCudaErrors( cudaGetLastError() );
	checkCudaErrors( cudaEventRecord(stop, NULL) );
	checkCudaErrors( cudaEventSynchronize(stop) );
	float cu_time_msec = 0.0F;
	checkCudaErrors( cudaEventElapsedTime(&cu_time_msec, start, stop) );
	checkCudaErrors( cudaEventDestroy(start) );
	checkCudaErrors( cudaEventDestroy(stop) );
	float cu_time_sec = cu_time_msec / 1000.0F;
	ReportKernelExecutionTime(__func__, cu_time_sec / n_iter);

	// Hand ownership of the results to the member pointers.
	set_d_affines(d_affines);
	set_d_filter(d_filter);

	checkCudaErrors( cudaFree(d_args) );

	ReportMethodExecutionTime(__func__, CV_TIME(time));
	ReportDevMemoryUsage(__func__, dev_mem_used_start);
}

/**
 * Converts each configuration row [tx, ty, r2, sx, sy, r1] into a 2x3
 * affine matrix [a11 a12 a13; a21 a22 a23] and marks, in filter, the
 * configurations whose mapped template corners stay within the target
 * image up to a tolerance of q pixels.
 *
 * Fixes: (1) the c3y upper bound used h2_ - q -- flagged by the old
 * FIXME -- while every other corner allows h2_ + q; the bottom boundary
 * now matches. (2) The trigonometric terms are hoisted instead of being
 * re-evaluated up to four times each per row.
 *
 * @param configs   input nconfigs x 6 configurations.
 * @param affines   output nconfigs x 6 affine rows (every row written).
 * @param filter    output per-row keep flag (size nconfigs).
 * @param naffines  output number of rows kept.
 */
void FAsT_Match::ConfigsToAffines(cv::Mat& configs, Mat& affines, bool* filter, uint& naffines) {
	double time = (double)getTickCount();

	naffines = 0;
	for (uint row = 0; row < configs.rows ; ++row) {
		double* config = configs.ptr<double>(row);
		double tx, ty, r1, r2, sx, sy;
		tx = config[0];
		ty = config[1];
		r2 = config[2];
		sx = config[3];
		sy = config[4];
		r1 = config[5];

		// Hoisted trigonometric terms.
		double cos_r1 = cos(r1), sin_r1 = sin(r1);
		double cos_r2 = cos(r2), sin_r2 = sin(r2);

		double* affine = affines.ptr<double>(row);
		double a11, a12, a13, a21, a22, a23;
		affine[0] = a11 = sx * cos_r1 * cos_r2 - sy * sin_r1 * sin_r2;
		affine[1] = a12 = - sx * cos_r1 * sin_r2 - sy * cos_r2 * sin_r1;
		affine[2] = a13 = tx;
		affine[3] = a21 = sx * cos_r2 * sin_r1 + sy * cos_r1 * sin_r2;
		affine[4] = a22 = sy * cos_r1 * cos_r2 - sx * sin_r1 * sin_r2;
		affine[5] = a23 = ty;

		// Map the four template corners into target-image coordinates
		// (template center at (r1x_+1, r1y_+1), target at (r2x_+1, r2y_+1)).
		double c1x, c1y, c2x, c2y, c3x, c3y, c4x, c4y;
		c1x = a11 * (1 - (r1x_ + 1)) + a12 * (1 - (r1y_ + 1)) + (r2x_ + 1) + a13;
		c1y = a21 * (1 - (r1x_ + 1)) + a22 * (1 - (r1y_ + 1)) + (r2y_ + 1) + a23;
		c2x = a11 * (w1_ - (r1x_ + 1)) + a12 * (1 - (r1y_ + 1)) + (r2x_ + 1) + a13;
		c2y = a21 * (w1_ - (r1x_ + 1)) + a22 * (1 - (r1y_ + 1)) + (r2y_ + 1) + a23;
		c3x = a11 * (w1_ - (r1x_ + 1)) + a12 * (h1_ - (r1y_ + 1)) + (r2x_ + 1) + a13;
		c3y = a21 * (w1_ - (r1x_ + 1)) + a22 * (h1_ - (r1y_ + 1)) + (r2y_ + 1) + a23;
		c4x = a11 * (1 - (r1x_ + 1)) + a12 * (h1_ - (r1y_ + 1)) + (r2x_ + 1) + a13;
		c4y = a21 * (1 - (r1x_ + 1)) + a22 * (h1_ - (r1y_ + 1)) + (r2y_ + 1) + a23;

		// Allow every corner to exceed the image boundary by at most q.
		int q = 10;
		int k = (c1x > -q) && (c1x < w2_ + q) && (c1y > -q) && (c1y < h2_ + q) &&
				(c2x > -q) && (c2x < w2_ + q) && (c2y > -q) && (c2y < h2_ + q) &&
				(c3x > -q) && (c3x < w2_ + q) && (c3y > -q) && (c3y < h2_ + q) &&
				(c4x > -q) && (c4x < w2_ + q) && (c4y > -q) && (c4y < h2_ + q);

		naffines += k;
		filter[row] = k > 0;
	}

	ReportMethodExecutionTime(__func__, CV_TIME(time));
}

/**
 * Compacts d_configs_ / d_affines_ down to the rows whose filter flag is
 * set, using a host-computed prefix-sum of output slots and a GPU
 * scatter kernel. The compacted buffers replace the member pointers via
 * set_d_configs()/set_d_affines().
 *
 * @param nconfigs input row count.
 * @param[out] naffines number of rows that survive the filter.
 */
void FAsT_Match::_cu_FilterConfigsAndAffines(const uint nconfigs, uint& naffines) {
	double dev_mem_used_start = GetDevMemoryUsedMb();
	double time = (double)getTickCount();

	// Bring the filter flags to the host and compute each kept row's
	// destination index (exclusive prefix sum over the flags).
	bool* filter = dev_to_host<bool>(d_filter_, nconfigs);
	uint *h_idxs, *d_idxs;
	checkCudaErrors( cudaMallocHost((void **) &h_idxs, nconfigs * sizeof(uint)) );

	uint nfiltered = 0;
	for (uint i = 0; i < nconfigs; ++i) {
		if (filter[i]) {
			h_idxs[i] = nfiltered;
			++nfiltered;
		} else {
			// Dropped rows get index 0 -- presumably the kernel masks
			// them out via d_filter_; confirm against the kernel source.
			h_idxs[i] = 0;
		}
	}
	naffines = nfiltered;
	d_idxs = host_to_dev<uint>(h_idxs, nconfigs);
	checkCudaErrors( cudaFreeHost(h_idxs) );
	checkCudaErrors( cudaFreeHost(filter) );

	// Destination buffers sized to the surviving rows only.
	double *out_configs, *out_affines;
	checkCudaErrors( cudaMalloc((void **) &out_configs, nfiltered * 6 * sizeof(double)) );
	checkCudaErrors( cudaMalloc((void **) &out_affines, nfiltered * 6 * sizeof(double)) );

	dim3 dimBlock(BLOCK_SIZE * BLOCK_SIZE);
	dim3 dimGrid(EXPAND_TO_FULL_BLOCK_SIZE(nconfigs) / dimBlock.x);
	cudaEvent_t start, stop;
	checkCudaErrors( cudaEventCreate(&start) );
	checkCudaErrors( cudaEventCreate(&stop) );
	checkCudaErrors( cudaEventRecord(start, NULL) );
//	int n_iter = 10;
	int n_iter = 1;
	for (int i = 0; i < n_iter; ++i) {
	_cu_filter_configs_and_affines__global__<<<dimGrid, dimBlock>>>(nconfigs, d_configs_, d_affines_, d_filter_, d_idxs, out_configs, out_affines);
	}
	checkCudaErrors( cudaEventRecord(stop, NULL) );
	checkCudaErrors( cudaEventSynchronize(stop) );
	float cu_time_msec = 0.0F;
	checkCudaErrors( cudaEventElapsedTime(&cu_time_msec, start, stop) );
	checkCudaErrors( cudaEventDestroy(start) );
	checkCudaErrors( cudaEventDestroy(stop) );
	float cu_time_sec = cu_time_msec / 1000.0F;
	ReportKernelExecutionTime(__func__, cu_time_sec / n_iter);

	// Replace the member buffers with the compacted versions.
	set_d_configs(out_configs);
	set_d_affines(out_affines);

	checkCudaErrors( cudaFree(d_idxs) );

	ReportMethodExecutionTime(__func__, CV_TIME(time));
	ReportDevMemoryUsage(__func__, dev_mem_used_start);
}

/**
 * Compacts the rows of src into dst, keeping only rows whose filter flag
 * is set. Both matrices are expected to have 6 double columns
 * (configuration / affine rows); dst must have room for every kept row.
 */
void FAsT_Match::FilterMatrix(cv::Mat& src, cv::Mat& dst, bool* filter) {
	uint kept = 0;
	for (uint row = 0; row < src.rows; ++row) {
		if (!filter[row])
			continue;

		const double* src_row = src.ptr<double>(row);
		double* dst_row = dst.ptr<double>(kept);
		memcpy(dst_row, src_row, 6 * sizeof(double));
		++kept;
	}
}

/**
 * Evaluates every affine on the GPU: for each affine, the sampled
 * template points (xs, ys) are mapped into the target image and the mean
 * absolute intensity difference is computed. The per-affine distances
 * are copied back to the host and stored via set_h_distances().
 *
 * @param naffines number of affine rows in d_affines_.
 * @param xs, ys   sampled template point coordinates (1-based).
 */
void FAsT_Match::_cu_EvaluateAffines(uint naffines, std::vector<int>& xs, std::vector<int>& ys) {
	double dev_mem_used_start = GetDevMemoryUsedMb();
	double time = (double)getTickCount();

	uint npoints = xs.size();

	double *I1_ptr, *I2_ptr;
	I1_ptr = I1_.ptr<double>(0);
	I2_ptr = I2_.ptr<double>(0);

	// Points re-expressed relative to the template center (r1x_+1, r1y_+1).
	int *xs_centered, *ys_centered;
	checkCudaErrors( cudaMallocHost((void **) &xs_centered, npoints * sizeof(int)) );
	checkCudaErrors( cudaMallocHost((void **) &ys_centered, npoints * sizeof(int)) );
	for (uint i = 0 ; i < npoints ; ++i) {
		xs_centered[i] = xs[i] - (r1x_ + 1);
		ys_centered[i] = ys[i] - (r1y_ + 1);
	}

	// Pre-calculating values of I1 at chosen points
	double* vals_I1;
	checkCudaErrors( cudaMallocHost((void **) &vals_I1, npoints * sizeof(double)) );
	for (uint j = 0; j < npoints ; j++) {
		vals_I1[j] = I1_ptr[(ys[j] - 1) * w1_ + (xs[j] - 1)]; // -1 converts 1-based to C 0-based indexing
	}

	// Load to device memory
	int *d_xs_centered, *d_ys_centered;
	checkCudaErrors(cudaMalloc((void **) &(d_xs_centered), sizeof(int) * npoints));
	checkCudaErrors(cudaMemcpy(d_xs_centered, xs_centered, sizeof(int) * npoints, cudaMemcpyHostToDevice));

	checkCudaErrors(cudaMalloc((void **) &(d_ys_centered), sizeof(int) * npoints));
	checkCudaErrors(cudaMemcpy(d_ys_centered, ys_centered, sizeof(int) * npoints, cudaMemcpyHostToDevice));

	double *d_vals_I1;
	checkCudaErrors(cudaMalloc((void **) &(d_vals_I1), sizeof(double) * npoints));
	checkCudaErrors(cudaMemcpy(d_vals_I1, vals_I1, sizeof(double) * npoints, cudaMemcpyHostToDevice));

	double *d_I;
	// Padded target image: 5 image-heights are allocated with I2 copied at
	// offset 2*h2_*w2_, so slightly out-of-range row accesses stay inside
	// the allocation instead of faulting.
	// NOTE(review): the padding is cudaMemset with byte value 2 -- that
	// fills the padding doubles with garbage bit patterns (not 2.0);
	// presumably only reachable rows matter. Confirm against the kernel.
	checkCudaErrors(cudaMalloc((void **) &(d_I), sizeof(double) * 5 * h2_ * w2_));
	checkCudaErrors(cudaMemset(d_I, 2, sizeof(double) * 5 * h2_ * w2_));
	checkCudaErrors(cudaMemcpy(d_I + 2 * h2_ * w2_, I2_ptr, sizeof(double) * h2_ * w2_, cudaMemcpyHostToDevice));

	double *d_distances;
	checkCudaErrors(cudaMalloc((void **) &(d_distances), sizeof(double) * naffines));

	// Kernel parameters via device-side struct copy.
	_cu_eval_distances__global__KernelArgs _args(r2x_, r2y_, w2_, h2_, use_photometric_);
	_cu_eval_distances__global__KernelArgs *h_args = &_args;
	_cu_eval_distances__global__KernelArgs *d_args;
	checkCudaErrors(cudaMalloc((void **) &d_args, sizeof(_cu_eval_distances__global__KernelArgs)));
	checkCudaErrors(cudaMemcpy(d_args, h_args, sizeof(_cu_eval_distances__global__KernelArgs), cudaMemcpyHostToDevice));

	// One thread per affine; timing via CUDA events.
	dim3 dimBlock(BLOCK_SIZE * BLOCK_SIZE);
	dim3 dimGrid(EXPAND_TO_FULL_BLOCK_SIZE(naffines) / dimBlock.x);
	cudaEvent_t start, stop;
	checkCudaErrors( cudaEventCreate(&start) );
	checkCudaErrors( cudaEventCreate(&stop) );
	checkCudaErrors( cudaEventRecord(start, NULL) );
//	int n_iter = 10;
	int n_iter = 1;
	for (int i = 0; i < n_iter; ++i) {
	_cu_eval_distances__global__<<<dimGrid, dimBlock>>>(d_affines_, naffines, d_xs_centered, d_ys_centered, d_vals_I1, npoints,
			d_I, d_args, d_distances);
	}
	checkCudaErrors( cudaEventRecord(stop, NULL) );
	checkCudaErrors( cudaEventSynchronize(stop) );
	float cu_time_msec = 0.0F;
	checkCudaErrors( cudaEventElapsedTime(&cu_time_msec, start, stop) );
	checkCudaErrors( cudaEventDestroy(start) );
	checkCudaErrors( cudaEventDestroy(stop) );
	float cu_time_sec = cu_time_msec / 1000.0F;
	ReportKernelExecutionTime(__func__, cu_time_sec / n_iter);

	// Copy the distances back and hand them to the member pointer.
	double* h_distances = dev_to_host<double>(d_distances, naffines);
	set_h_distances(h_distances);

	checkCudaErrors( cudaFree(d_args) );
	checkCudaErrors( cudaFree(d_distances) );
	checkCudaErrors( cudaFree(d_I) );
	checkCudaErrors( cudaFree(d_vals_I1) );
	checkCudaErrors( cudaFree(d_ys_centered) );
	checkCudaErrors( cudaFree(d_xs_centered) );
	checkCudaErrors( cudaFreeHost(xs_centered) );
	checkCudaErrors( cudaFreeHost(ys_centered) );
	checkCudaErrors( cudaFreeHost(vals_I1) );

	ReportMethodExecutionTime(__func__, CV_TIME(time));
	ReportDevMemoryUsage(__func__, dev_mem_used_start);
}

//	void BestTransformation::EvaluateConfigs2(
//			int n_transformations,
//			int n_points,
//			double* pattern,		// n_points
//			double* image,			// 5 * h2 * w2
//			double* cover,			// 6 * n_transformations (column wise)
//			int w2,
//			double c,
//			int* x,					// n_points
//			int* y,					// n_points
//			double* correlations	// 6 * n_transformations
//			) {
//
//	    for (int i = 0 ; i < n_transformations ; ++i) {
//	    	double a11, a12, a13, a21, a22, a23;
//	    	double correlation = 0;
//
//			a11 = cover[6 * i];
//			a12 = cover[6 * i + 1];
//			a13 = cover[6 * i + 2];
//			a21 = cover[6 * i + 3];
//			a22 = cover[6 * i + 4];
//			a23 = cover[6 * i + 5];
//
//			for (int j = 0; j < n_points ; j++) {
//				int x_tmp, y_tmp, idx;
//
//				x_tmp = a11 * (*x) + a12 * (*y);
//				y_tmp = a21 * (*x) + a22 * (*y);
//				idx = y_tmp * w2 + x_tmp + a13 + a23 * w2 + c;
//				correlation += fabs((*pattern) - image[idx]);
//
//				++pattern;
//				++x;
//				++y;
//			}
//
//			correlations[i] = correlation / n_points;
//		}
//	}

/**
 * CPU evaluation of every affine: maps the sampled template points into
 * the target image and computes, per affine, the mean absolute intensity
 * difference (optionally with photometric normalization).
 *
 * @param affines_   naffines x 6 affine rows [a11 a12 a13 a21 a22 a23].
 * @param xs_, ys_   sampled template point coordinates (1-based).
 * @param distances_ output 1 x naffines row of mean distances.
 */
void FAsT_Match::EvaluateAffines(Mat& affines_, std::vector<int>& xs_, std::vector<int>& ys_, Mat& distances_) {
	double time = (double)getTickCount();

	uint npoints, naffines;
	naffines = affines_.rows;
	npoints = xs_.size();

	double *I1_ptr, *I2_ptr, *I;
	I1_ptr = I1_.ptr<double>(0);
	I2_ptr = I2_.ptr<double>(0);
	// Padded target image: 5 image-heights allocated, I2 copied at offset
	// 2*h2_*w2_, so slightly out-of-range rows stay inside the buffer.
	// NOTE(review): memset with byte value 2 fills the padding doubles
	// with garbage bit patterns (not 2.0) -- mirrors the GPU path;
	// presumably only in-range samples affect valid results.
	I = (double*) malloc(5 * h2_ * w2_ * sizeof(double));
	memset(I, 2, 5 * h2_ * w2_ * sizeof(double));
	memcpy(I + 2 * h2_ * w2_, I2_ptr, h2_ * w2_ * sizeof(double));

	/* Points re-expressed relative to the template center (r1x_+1, r1y_+1). */
	int *xs_centered, *ys_centered;
	xs_centered = (int*) malloc(npoints * sizeof(int));
	ys_centered = (int*) malloc(npoints * sizeof(int));
	for (uint i = 0 ; i < npoints ; ++i) {
		xs_centered[i] = xs_[i] - (r1x_ + 1);
		ys_centered[i] = ys_[i] - (r1y_ + 1);
	}

	/*Precalculating source point indices into I1 (and the values themselves)*/
	double* vals_I1 = (double*) malloc(npoints * sizeof(double));
	for (uint j = 0; j < npoints ; j++) {
		vals_I1[j] = I1_ptr[(ys_[j] - 1) * w1_ + (xs_[j] - 1)]; // -1 converts 1-based to C 0-based indexing
	}

	double* distances = distances_.ptr<double>(0);
	for (uint i = 0 ; i < naffines ; ++i) {
		double* config = affines_.ptr<double>(i);
		double a11, a12, a13, a21, a22, a23;
		a11 = config[0];
		a12 = config[1];
		a13 = config[2];
		a21 = config[3];
		a22 = config[4];
		a23 = config[5];

		double score = 0;

		// tmp1/tmp2 fold the target center, the translation, a +0.5
		// rounding term and (for y) the 2*h2_ padding offset into one
		// constant per affine.
		int target_x, target_y, target_idx;
		double tmp1 = (r2x_ + 1) + a13 + 0.5;
		double tmp2 = (r2y_ + 1) + a23 + 0.5 + 2 * h2_;
		if (!use_photometric_) {
			// Plain sum of absolute differences.
			for (uint j = 0; j < npoints ; j++) {
				target_x = a11 * xs_centered[j] + a12 * ys_centered[j] + tmp1; // includes rounding
				target_y = a21 * xs_centered[j] + a22 * ys_centered[j] + tmp2; // includes rounding
				target_idx = (target_y - 1) * w2_ + target_x - 1; // -1 converts 1-based to C 0-based indexing
				score += fabs(vals_I1[j] - I[target_idx]);
			}
		} else {
			// Photometric-invariant distance: normalize the target values
			// by the ratio of standard deviations and the mean shift
			// before summing absolute differences.
			double* xs_target = (double*) malloc(npoints * sizeof(double));
			double* ys_target = (double*) malloc(npoints * sizeof(double));
			double sumXiYi = 0; double sumXi = 0; double sumYi = 0;
			double sumXiSqrd = 0; double sumYiSqrd = 0; double Xi, Yi;

			for (uint j = 0; j < npoints ; j++) {
				target_x = a11 * xs_centered[j] + a12 * ys_centered[j] + tmp1; // includes rounding
				target_y = a21 * xs_centered[j] + a22 * ys_centered[j] + tmp2; // includes rounding
				target_idx = (target_y - 1) * w2_ + (target_x - 1); // -1 converts 1-based to C 0-based indexing
				Xi = vals_I1[j] ;
				Yi = I[target_idx];
				xs_target[j] = Xi;
				ys_target[j] = Yi;

				sumXi += Xi;
				sumYi += Yi;
				sumXiSqrd += (Xi * Xi);
				sumYiSqrd += (Yi * Yi);
			}
			// epsilon guards against division by a zero sigma.
			double epsilon = 0.0000001;
			double meanX, meanY;
			meanX = sumXi / npoints;
			meanY = sumYi / npoints;

			double sigX, sigY;
			sigX = sqrt((sumXiSqrd - (sumXi * sumXi) / npoints) / npoints) + epsilon;
			sigY = sqrt((sumYiSqrd - (sumYi * sumYi) / npoints) / npoints) + epsilon;
			double sigXoversigY = sigX / sigY;

			// Variable that stores a sum used repeatedly in the computation: -meanX + sigXoversigY * meanY
			double faster = -meanX + sigXoversigY * meanY;

			for (uint j = 0; j < npoints ; j++) {
				score += fabs(xs_target[j] - sigXoversigY * ys_target[j] + faster);
			}

			free(xs_target);
			free(ys_target);
		}

		// Normalize by the number of sampled points.
		distances[i] = score / npoints;
	}

	/* Free the allocated arrays */
	free(xs_centered);
	free(ys_centered);
	free(vals_I1);
	free(I);

	ReportMethodExecutionTime(__func__, CV_TIME(time));
}

/**
 * Builds Q on the device: the configurations whose distance is within a
 * delta-dependent threshold of the best distance. The threshold is
 * repeatedly tightened (x0.99) while |Q| would exceed 27000.
 *
 * The selected rows are gathered from d_configs_ into a fresh device
 * buffer handed to set_d_Q().
 *
 * NOTE(review): in the fallback branch (nQ was 0 at the original
 * threshold) with 0 < nQ <= 10000, no Q buffer is allocated and
 * set_d_Q() is never called -- the caller then copies from a stale/NULL
 * d_Q_. Confirm whether that path is reachable in practice.
 *
 * @param best_distance smallest distance of this level.
 * @param new_delta     current net resolution (controls the threshold).
 * @param naffines      number of entries in h_distances_.
 * @param[out] nQ       number of configurations kept.
 * @param[out] percentage_higher_limit true when |Q|/naffines > 2.2%.
 */
void FAsT_Match::_cu_Create_Q(double best_distance, double new_delta, uint naffines, uint& nQ, bool& percentage_higher_limit) {
	double time = (double)getTickCount();

	double thresh = best_distance + GetThreshPerDelta(new_delta);
	nQ = 0;
	double no_value = FLT_MAX;

	// Count-below-threshold via the device reduction helper.
	pointReduceCountLesserOrEqualDoubleFunction_t h_ReduceCountLesserOrEqualDoulbe;
	cudaMemcpyFromSymbol(&h_ReduceCountLesserOrEqualDoulbe, d_ReduceCountLesserOrEqualDoulbe, sizeof(pointReduceCountLesserOrEqualDoubleFunction_t));

	_cu_reduce_count<FULL_BLOCK_SIZE, double>(h_distances_, naffines, no_value, h_ReduceCountLesserOrEqualDoulbe, thresh, nQ);

	// TODO: come up with faster way to reduce (like binary search from best_distance to best_distance + GetThreshPerDelta(new_delta))
	while (nQ > 27000) {
		thresh *= 0.99;

		nQ = 0;
		_cu_reduce_count<FULL_BLOCK_SIZE, double>(h_distances_, naffines, no_value, h_ReduceCountLesserOrEqualDoulbe, thresh, nQ);
	}

	if (nQ != 0) {
		// Gather the indexes of the surviving configurations on the host,
		// then copy each selected row device-to-device into d_Q.
		std::vector<uint> indexes;
		for (uint i = 0; i < naffines; ++i)
			if (h_distances_[i] <= thresh)
				indexes.push_back(i);

		double* d_Q;
		checkCudaErrors( cudaMalloc((void **) &d_Q, nQ * 6 * sizeof(double)) );
		set_d_Q(d_Q);
		for (uint i = 0; i < nQ; ++i) {
			checkCudaErrors( cudaMemcpy(&(d_Q[i * 6]), &(d_configs_[indexes[i] * 6]), 6 * sizeof(double), cudaMemcpyDeviceToDevice) );
		}
	} else {
		// Nothing under the tightened threshold: fall back to the
		// configurations tied with the best distance itself.
		thresh = best_distance;

		_cu_reduce_count<FULL_BLOCK_SIZE, double>(h_distances_, naffines, no_value, h_ReduceCountLesserOrEqualDoulbe, thresh, nQ);

		if (nQ > 10000) {
			// All with the same error exactly - probably equivalent; cap at 100.
			std::vector<uint> indexes;
			nQ = 0;
			for (uint i = 0; i < naffines; ++i) {
				if (h_distances_[i] <= thresh) {
					indexes.push_back(i);
					++nQ;
				}

				if (nQ >= 100)
					break;
			}

			double* d_Q;
			checkCudaErrors( cudaMalloc((void **) &d_Q, nQ * 6 * sizeof(double)) );
			set_d_Q(d_Q);
			for (uint i = 0; i < nQ; ++i) {
				checkCudaErrors( cudaMemcpy(&(d_Q[i * 6]), &(d_configs_[indexes[i] * 6]), 6 * sizeof(double), cudaMemcpyDeviceToDevice) );
			}
		}
	}

	// Share of configurations kept -- the caller uses this to decide
	// whether to restart with a denser net.
	double percentage = (double) nQ / naffines;
	percentage_higher_limit = percentage > 0.022;
	LOG_DEBUG(logger, "Percentage = %f", percentage);

	ReportMethodExecutionTime(__func__, CV_TIME(time));
}

// --- file-local helpers for Create_Q ---

// Counts how many of the first n distances are <= thresh.
static uint CountDistancesWithinThresh(const double* distances, uint n, double thresh) {
	uint count = 0;
	for (uint i = 0; i < n; ++i)
		if (distances[i] <= thresh)
			++count;
	return count;
}

// Collects indexes i with distances[i] <= thresh, in order, stopping once
// `limit` indexes have been gathered (pass n for "no cap").
static std::vector<uint> CollectIndexesWithinThresh(const double* distances, uint n, double thresh, uint limit) {
	std::vector<uint> indexes;
	for (uint i = 0; i < n && indexes.size() < limit; ++i)
		if (distances[i] <= thresh)
			indexes.push_back(i);
	return indexes;
}

// Rebuilds Q as an |indexes| x 6 matrix whose rows are copied from the
// corresponding rows of `configs`.
static void SelectConfigRows(Mat& configs, const std::vector<uint>& indexes, Mat& Q) {
	Q = Mat_<double>((int) indexes.size(), 6);
	for (uint i = 0; i < indexes.size(); ++i) {
		double* Q_ptr = Q.ptr<double>(i);
		const double* configs_ptr = configs.ptr<double>(indexes[i]);
		for (uint j = 0; j < 6; ++j)
			Q_ptr[j] = configs_ptr[j];
	}
}

/**
 * CPU counterpart of the device-side Q construction: selects the subset Q of
 * `configs` whose distances fall within best_distance + GetThreshPerDelta(new_delta),
 * tightening the threshold (x0.99 steps) while more than 27000 rows qualify.
 *
 * @param configs                 candidate configurations, one 6-column row each
 * @param best_distance           best distance found at the current level
 * @param new_delta               current delta; feeds the empirical threshold
 * @param distances               1 x nconfigs row vector of per-config distances
 * @param Q                       output: selected configuration rows (see note below)
 * @param percentage_higher_limit output: true when |Q| / |configs| > 0.022
 */
void FAsT_Match::Create_Q(Mat& configs, double best_distance, double new_delta, Mat& distances, Mat& Q, bool& percentage_higher_limit) {
	double time = (double)getTickCount();

	const double* distances_ptr = distances.ptr<double>(0);
	const uint ndistances = (uint) distances.cols;

	double thresh = best_distance + GetThreshPerDelta(new_delta);
	uint nQ = CountDistancesWithinThresh(distances_ptr, ndistances, thresh);

	// TODO: come up with faster way to reduce (like binary search from bestDist to bestDist + GetThreshPerDelta(new_delta))
	while (nQ > 27000) {
		thresh = thresh * 0.99;
		nQ = CountDistancesWithinThresh(distances_ptr, ndistances, thresh);
	}

	if (nQ != 0) {
		std::vector<uint> indexes = CollectIndexesWithinThresh(distances_ptr, ndistances, thresh, ndistances);
		SelectConfigRows(configs, indexes, Q);
	} else {
		// Nothing within the relaxed threshold; fall back to the best distance itself.
		thresh = best_distance;
		nQ = CountDistancesWithinThresh(distances_ptr, ndistances, thresh);

		if (nQ > 10000) {
			// All with the same error exactly - probably equivalent; keep at most 100.
			std::vector<uint> indexes = CollectIndexesWithinThresh(distances_ptr, ndistances, thresh, 100);
			nQ = (uint) indexes.size();
			SelectConfigRows(configs, indexes, Q);
		}
		// NOTE(review): when 0 < nQ <= 10000 on this path, Q is left untouched while
		// nQ (and hence `percentage`) is non-zero — this mirrors the original logic;
		// confirm callers handle a stale/empty Q in that case.
	}

	double percentage = (double) nQ / configs.rows;
	percentage_higher_limit = percentage > 0.022;
	LOG_DEBUG(logger, "Percentage = %f", percentage);

	ReportMethodExecutionTime(__func__, CV_TIME(time));
}

/**
 * Empirically-fitted linear mapping from the current delta to the distance
 * threshold used when selecting the candidate set Q.
 */
double FAsT_Match::GetThreshPerDelta(double delta) {
	// Coefficients fitted empirically; `safety` tightens the bound slightly.
	const double slope = 0.1341;
	const double intercept = 0.0278;
	const double safety = 0.02;
	return slope * delta + intercept - safety;	//+0.01;
}

/**
 * Expands the nQ device-side configurations (d_Q_) into nS = nQ * extension_scale
 * perturbed configurations on the device, publishing the result via set_d_S().
 *
 * Each new configuration is perturbed by per-dimension half-steps equal to the
 * bounds steps shrunk by delta_fact^level. The perturbation selector per output
 * row comes from d_rand_vec.
 *
 * @param full_extension when true, the selector vector is the deterministic
 *        pattern i % extension_scale (every index visited) instead of
 *        device-generated Park-Miller random numbers.
 */
void FAsT_Match::_cu_Create_S(uint nQ, uint level, uint extension_scale, double delta_fact, bool full_extension) {
	double time = (double)getTickCount();

	// Perturbation half-steps shrink geometrically with the refinement level.
	double fact = mypow(delta_fact, level);
	double halfstep_tx, halfstep_ty, halfstep_r, halfstep_s;
	halfstep_tx = bounds_.tx.step / fact;
	halfstep_ty = bounds_.ty.step / fact;
	halfstep_r = bounds_.r.step / fact;
	halfstep_s = bounds_.s.step / fact;

	uint nS = nQ * extension_scale;

	uint* d_rand_vec;
	checkCudaErrors( cudaMalloc((void **) &d_rand_vec, nS * sizeof(uint)) );

	if (full_extension) {
		// Deterministic selector: entry i gets i % extension_scale, staged through
		// pinned host memory for the upload.
		uint* h_rand_vec;
		checkCudaErrors( cudaMallocHost((void **) &h_rand_vec, nS * sizeof(uint)) );
		for (uint i = 0; i < nS; ++i) {
			h_rand_vec[i] = i % extension_scale;
		}
		checkCudaErrors( cudaMemcpy(d_rand_vec, h_rand_vec, nS * sizeof(uint), cudaMemcpyHostToDevice) );
		checkCudaErrors( cudaFreeHost(h_rand_vec) );
	} else {
		dim3 rand_vec_dim_block(FULL_BLOCK_SIZE);
		// NOTE(review): the grid is sized with REDUCTION_BLOCK_SIZE while the block
		// uses FULL_BLOCK_SIZE; if those constants differ, the launch covers a thread
		// count != nS — confirm _cu_ParkMiller_Kernel bounds-checks/strides over nS.
		dim3 rand_vec_dim_grid(EXPAND_TO_REDUCTION_BLOCK_SIZE(nS) / REDUCTION_BLOCK_SIZE);
		uint seed = (getTickCount() % 10000) + 1;	// +1 keeps the seed non-zero
		uint cycles = 1000;

		_cu_ParkMiller_Kernel<<<rand_vec_dim_grid, rand_vec_dim_block>>>(d_rand_vec, seed, cycles, nS);
		checkCudaErrors( cudaGetLastError() );		// surface launch-configuration errors
		checkCudaErrors( cudaDeviceSynchronize() );
	}

	// Kernel parameters are passed via a device-resident argument struct.
	_cu_create_S_random__global__KernelArgs *h_args, *d_args;
	_cu_create_S_random__global__KernelArgs args(halfstep_tx, halfstep_ty, halfstep_r, halfstep_s);
	h_args = &args;
	checkCudaErrors( cudaMalloc((void **) &d_args, sizeof(_cu_create_S_random__global__KernelArgs)) );
	checkCudaErrors( cudaMemcpy(d_args, h_args, sizeof(_cu_create_S_random__global__KernelArgs), cudaMemcpyHostToDevice) );

	double* d_S;
	checkCudaErrors( cudaMalloc((void **) &d_S, nS * 6 * sizeof(double)) );

	dim3 dimBlock(FULL_BLOCK_SIZE);
	dim3 dimGrid(EXPAND_TO_FULL_BLOCK_SIZE(nS) / FULL_BLOCK_SIZE);
	_cu_create_S_random__global__<<<dimGrid, dimBlock>>>(d_Q_, nQ, extension_scale, d_rand_vec, d_args, d_S);
	checkCudaErrors( cudaGetLastError() );			// surface launch-configuration errors
	checkCudaErrors( cudaDeviceSynchronize() );

	// d_S is not freed here — presumably set_d_S takes ownership (TODO confirm).
	set_d_S(d_S);

	checkCudaErrors( cudaFree(d_args) );
	checkCudaErrors( cudaFree(d_rand_vec) );

	ReportMethodExecutionTime(__func__, CV_TIME(time));
}

/**
 * CPU expansion of `configs` into S: each configuration is replicated
 * extension_scale times and perturbed by a random multiplier times the
 * per-dimension half-step (half-steps shrink by delta_fact^level).
 */
void FAsT_Match::Create_S_random(Mat& configs, uint level, uint extension_scale, double delta_fact, Mat& S) {
	double time = (double)getTickCount();

	// Perturbation half-steps shrink geometrically with the refinement level.
	const double shrink = mypow(delta_fact, level);
	const double step_tx = bounds_.tx.step / shrink;
	const double step_ty = bounds_.ty.step / shrink;
	const double step_r = bounds_.r.step / shrink;
	const double step_s = bounds_.s.step / shrink;

	const uint nconfigs = configs.rows;
	const uint nrows = extension_scale * nconfigs;

	// Random integer multipliers per entry (randu over [-1, 2) on CV_32S —
	// presumably yielding {-1, 0, 1}; verify against OpenCV randu semantics),
	// converted to doubles for the elementwise arithmetic below.
	Mat multipliers_int = Mat(nrows, 6, CV_32SC1);
	randu(multipliers_int, Scalar(-1), Scalar(2));
	Mat multipliers(multipliers_int.size(), CV_64FC1);
	multipliers_int.convertTo(multipliers, CV_64FC1);

	// One half-step per config column, in [tx ty r2 sx sy r1] order.
	Mat steps = (Mat_<double>(1, 6) << step_tx, step_ty, step_r, step_s, step_s, step_r);
	steps = repeat(steps, nrows, 1);

	// S = replicated configs + multiplier * step, elementwise.
	Mat replicated = repeat(configs, extension_scale, 1);
	multiply(multipliers, steps, steps);
	add(replicated, steps, S);

	ReportMethodExecutionTime(__func__, CV_TIME(time));
}

/**
 * CPU full expansion of `configs` into S: every configuration is combined with
 * all 3^6 = 729 offset vectors whose components are {-1, 0, +1} times the
 * per-dimension half-step, where half-steps shrink by delta_fact^level.
 *
 * Uses integer multiplier loops instead of floating-point loop counters:
 * `for (double tx = -h; tx <= h; tx += h)` never terminates when h == 0 and
 * its trip count is fragile in general, while `m * h` for m in {-1, 0, 1}
 * produces exactly the same three values {-h, 0, +h}.
 */
void FAsT_Match::Create_S_full(Mat& configs, uint level, double delta_fact, Mat& S) {
	double time = (double)getTickCount();

	// Perturbation half-steps shrink geometrically with the refinement level.
	double fact = mypow(delta_fact, level);
	double halfstep_tx, halfstep_ty, halfstep_r, halfstep_s;
	halfstep_tx = bounds_.tx.step / fact;
	halfstep_ty = bounds_.ty.step / fact;
	halfstep_r = bounds_.r.step / fact;
	halfstep_s = bounds_.s.step / fact;

	const uint npoints = 729;	// 3^6 offset combinations (was pow(3, 6) in double math)
	Mat additions = Mat::zeros(npoints, 6, CV_64FC1);
	uint i = 0;
	// Column order matches the config layout: [tx ty r2 sx sy r1].
	for (int tx = -1; tx <= 1; ++tx)
		for (int ty = -1; ty <= 1; ++ty)
			for (int r2 = -1; r2 <= 1; ++r2)
				for (int sx = -1; sx <= 1; ++sx)
					for (int sy = -1; sy <= 1; ++sy)
						for (int r1 = -1; r1 <= 1; ++r1) {
							double* additions_ptr = additions.ptr<double>(i);
							additions_ptr[0] = tx * halfstep_tx;
							additions_ptr[1] = ty * halfstep_ty;
							additions_ptr[2] = r2 * halfstep_r;
							additions_ptr[3] = sx * halfstep_s;
							additions_ptr[4] = sy * halfstep_s;
							additions_ptr[5] = r1 * halfstep_r;
							++i;
						}

	uint nconfigs = configs.rows;
	Mat expanded = repeat(configs, npoints, 1);
	additions = repeat(additions, nconfigs, 1);
	add(expanded, additions, S);

	ReportMethodExecutionTime(__func__, CV_TIME(time));
}

/**
 * Converts a 6-parameter configuration [tx ty r2 sx sy r1] into a 3x3
 * homogeneous affine matrix with linear part R(r1) * diag(sx, sy) * R(r2)
 * and translation (tx, ty).
 *
 * @param config pointer to the 6 configuration values (tx ty r2 sx sy r1)
 * @param affine output 3x3 row-major matrix, last row set to [0 0 1]
 */
void FAsT_Match::CreateAffineTransformation(double* config, double affine[3][3]) {
	const double tx = config[0];
	const double ty = config[1];
	const double r2 = config[2];
	const double sx = config[3];
	const double sy = config[4];
	const double r1 = config[5];

	const double c1 = cos(r1), s1 = sin(r1);
	const double c2 = cos(r2), s2 = sin(r2);

	//	A(0:1,0:1) = R1 * S * R2, expanded in closed form:
	//	A = [ sx*cos(r1)*cos(r2) - sy*sin(r1)*sin(r2), - sx*cos(r1)*sin(r2) - sy*cos(r2)*sin(r1), tx]
	// 		[ sx*cos(r2)*sin(r1) + sy*cos(r1)*sin(r2),   sy*cos(r1)*cos(r2) - sx*sin(r1)*sin(r2), ty]
	//		[                                       0,                                         0,  1]
	// Computing it directly avoids allocating four cv::Mat temporaries for a
	// fixed-size 2x2 product.
	affine[0][0] = sx * c1 * c2 - sy * s1 * s2;
	affine[0][1] = -sx * c1 * s2 - sy * s1 * c2;
	affine[0][2] = tx;
	affine[1][0] = sx * s1 * c2 + sy * c1 * s2;
	affine[1][1] = sy * c1 * c2 - sx * s1 * s2;
	affine[1][2] = ty;
	affine[2][0] = 0.0;
	affine[2][1] = 0.0;
	affine[2][2] = 1.0;
}

/**
 * Replaces I1_/I2_ with contiguous clones when their data is not stored
 * continuously, logging and timing the operation; a no-op when both are
 * already continuous.
 */
void FAsT_Match::make_images_continuous() {
	const bool i1_fragmented = !I1_.isContinuous();
	const bool i2_fragmented = !I2_.isContinuous();
	if (!i1_fragmented && !i2_fragmented)
		return;

	double time = (double)getTickCount();
	if (i1_fragmented) {
		I1_ = I1_.clone();
		LOG_INFO(logger, "I1 is not continuous. Cloning...");
	}
	if (i2_fragmented) {
		I2_ = I2_.clone();
		LOG_INFO(logger, "I2 is not continuous. Cloning...");
	}
	ReportMethodExecutionTime(__func__, CV_TIME(time));
}

}	// namespace gpu
}	// namespace recognition
