#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <math.h>
#include <unistd.h>
#include <iostream>

#include <cuda_runtime.h>
#include <helper_cuda.h>
#include <helper_functions.h>

#include <cv.h>
#include <highgui.h>

#include <log4cxx/logger.h>
#include <base/logging.h>
#include <base/types.cuh>
#include <base/methods.cuh>
#include <gpu/cu_methods.cuh>

#include <gpu/ciratefi_helper.cuh>
#include <gpu/ciratefi.cuh>

#ifndef max
#define max( a, b ) ( ((a) > (b)) ? (a) : (b) )
#endif

#ifndef min
#define min( a, b ) ( ((a) < (b)) ? (a) : (b) )
#endif

namespace recognition {
namespace gpu {

// Class-wide loggers: "logger" carries diagnostics for this class, "output"
// carries result reporting (separate log4cxx category so it can be routed
// independently).
const log4cxx::LoggerPtr Ciratefi::logger = log4cxx::LoggerPtr(log4cxx::Logger::getLogger("recognition.gpu.Ciratefi"));
const log4cxx::LoggerPtr Ciratefi::output = log4cxx::LoggerPtr(log4cxx::Logger::getLogger("output.recognition.gpu.Ciratefi"));

using namespace cv;

// Builds the CQ feature table for the template image: row i holds the l_
// circular-projection samples (Cis) of the template rescaled by the i-th
// probe scale, one column per probed radius.
Mat Ciratefi::C_Q() {
	double time = (double)getTickCount();

	// Make sure the circle point sets exist before sampling.
	CreateCircles();

	Mat CQ = Mat_<double>(n_, l_);
	double scale = bounds_.s.min;
	for (int scale_idx = 0; scale_idx < n_; ++scale_idx) {
		Mat scaled;
		resize(I1_, scaled, Size(), scale, scale);
		Point2D center(scaled.cols / 2, scaled.rows / 2);

		for (int radius_idx = 0; radius_idx < l_; ++radius_idx) {
			int radius = bounds_.r.min + radius_idx * bounds_.r.step;
			CQ.at<double>(scale_idx, radius_idx) = Cis(scaled, center, radius);
		}

		scale += bounds_.s.step;
	}

	ReportMethodExecutionTime(__func__, CV_TIME(time));
	return CQ;
}

// Computes the C_A table (circular projections of the analysed image) on the
// GPU using the shared-memory kernel variant.  All circle point sets are
// packed back-to-back into one contiguous buffer, and consecutive circles are
// greedily grouped into "blocks" sized to fit the dynamic shared memory of a
// thread block.
//
// On success the device-side result (Matrix2d descriptor and element buffer)
// is published via set_d_C_A()/set_d_C_A_elements(); ownership of that device
// memory stays with this object.  All staging buffers are freed here.
void Ciratefi::_cu_C_A__shared__() {
	// Caller (_cu_C_A) already guarantees this, but keep the invariant explicit:
	// the largest single circle must fit into shared memory.
	assert(max_num_of_circle_points_ * sizeof(Point2D) <= maximum_shared_memory_);
	double dev_mem_used_start = GetDevMemoryUsedMb();

	// Empiric value providing maximum bandwidth (trying to get 2 circles_blocks)
	int shared_mem_size = min((max_num_of_circle_points_ + total_num_of_circle_points_) / 2 * sizeof(Point2D), maximum_shared_memory_);

	// Pinned host staging buffers for the packed circle points, the per-circle
	// (size, start index) records, and the per-shared-memory-block records.
	Point2D *h_circles, *d_circles;
	checkCudaErrors( cudaMallocHost((void**) &h_circles, total_num_of_circle_points_ * sizeof(Point2D)) );

	SizeAndIdxs *h_circles_offsets, *d_circles_offsets;
	checkCudaErrors( cudaMallocHost((void**) &h_circles_offsets, l_ * sizeof(SizeAndIdxs)) );

	SizeAndIdxs *h_circles_blocks, *d_circles_blocks;
	int max_num_of_circles_blocks = l_;	// worst case: one circle per block
	checkCudaErrors( cudaMallocHost((void**) &h_circles_blocks, max_num_of_circles_blocks * sizeof(SizeAndIdxs)) );

	// Pack all circles contiguously and greedily close a block whenever the
	// next circle would overflow shared_mem_size.
	int circle_start_idx = 0;
	int block_idx = 0;
	int block_start_idx = 0;
	int block_size = 0;
	for (int r = bounds_.r.min; r <= bounds_.r.max; r += bounds_.r.step) {
		int r_idx = RadiusToIdx(r);
		int size = circles_[r_idx].size();
		memcpy(&(h_circles[circle_start_idx]), &(circles_[r_idx][0]), size * sizeof(Point2D));

		h_circles_offsets[r_idx] = SizeAndIdxs(size, circle_start_idx);
		circle_start_idx += size;

		if ((block_size + size) * sizeof(Point2D) <= shared_mem_size) {
			block_size += size;
		} else {
			// Current block is full: close it at the previous circle index.
			h_circles_blocks[block_idx] = SizeAndIdxs(block_size, block_start_idx, r_idx - 1);
			++block_idx;
			block_size = size;
			block_start_idx = r_idx;
		}
	}
	// Close the last (possibly only) block.
	h_circles_blocks[block_idx] = SizeAndIdxs(block_size, block_start_idx, l_ - 1);

	checkCudaErrors( cudaMalloc((void **) &d_circles, total_num_of_circle_points_ * sizeof(Point2D)) );
	checkCudaErrors( cudaMemcpy(d_circles, h_circles, total_num_of_circle_points_ * sizeof(Point2D), cudaMemcpyHostToDevice) );

	checkCudaErrors( cudaMalloc((void **) &d_circles_offsets, l_ * sizeof(SizeAndIdxs)) );
	checkCudaErrors( cudaMemcpy(d_circles_offsets, h_circles_offsets, l_ * sizeof(SizeAndIdxs), cudaMemcpyHostToDevice) );

	checkCudaErrors( cudaMalloc((void **) &d_circles_blocks, max_num_of_circles_blocks * sizeof(SizeAndIdxs)) );
	checkCudaErrors( cudaMemcpy(d_circles_blocks, h_circles_blocks, max_num_of_circles_blocks * sizeof(SizeAndIdxs), cudaMemcpyHostToDevice) );

	// Kernel arguments travel in a single device-side struct.
	_cu_create_C_A__shared__KernelArgs *h_args, *d_args;
	_cu_create_C_A__shared__KernelArgs args(bounds_, l_, offset_, block_idx + 1);
	h_args = &args;
	checkCudaErrors(cudaMalloc((void **) &d_args, sizeof(_cu_create_C_A__shared__KernelArgs)));
	checkCudaErrors(cudaMemcpy(d_args, h_args, sizeof(_cu_create_C_A__shared__KernelArgs), cudaMemcpyHostToDevice));

	// Output matrix, padded up to a multiple of BLOCK_SIZE so the grid divides
	// it evenly; l_ values are stored per cell.
	Matrix2d *h_C_A, *d_C_A;
	Matrix2d C_A(EXPAND_TO_BLOCK_SIZE(I2_.rows), EXPAND_TO_BLOCK_SIZE(I2_.cols));
	h_C_A = &C_A;
	checkCudaErrors(cudaMalloc((void **) &(h_C_A->elements), h_C_A->height * h_C_A->width * l_ * sizeof(double)));
	checkCudaErrors(cudaMalloc((void **) &d_C_A, sizeof(Matrix2d)));
	checkCudaErrors(cudaMemcpy(d_C_A, h_C_A, sizeof(Matrix2d), cudaMemcpyHostToDevice));

	dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
	dim3 dimGrid(h_C_A->width / dimBlock.x, h_C_A->height / dimBlock.y);
	cudaEvent_t start, stop;
	checkCudaErrors(cudaEventCreate(&start));
	checkCudaErrors(cudaEventCreate(&stop));
	checkCudaErrors(cudaEventRecord(start, NULL));
//	int n_iter = 10;	// raise for benchmarking
	int n_iter = 1;
	for (int i = 0; i < n_iter; ++i) {
	_cu_create_C_A__shared__<<<dimGrid, dimBlock, shared_mem_size>>>(d_A_, d_circles, d_circles_offsets, d_circles_blocks, d_args, d_C_A);
	}
	// Kernel launches return no status themselves; surface launch-configuration
	// errors here instead of letting them show up at an unrelated later call.
	checkCudaErrors(cudaGetLastError());
	checkCudaErrors(cudaEventRecord(stop, NULL));
	checkCudaErrors(cudaEventSynchronize(stop));
	float cu_time_msec = 0.0F;
	checkCudaErrors(cudaEventElapsedTime(&cu_time_msec, start, stop));
	checkCudaErrors( cudaEventDestroy(start) );
	checkCudaErrors( cudaEventDestroy(stop) );
	float cu_time_sec = cu_time_msec / 1000.0F;
	ReportKernelExecutionTime(__func__, cu_time_sec / n_iter);

	// Publish the device-side result for later pipeline stages.
	set_d_C_A(d_C_A);
	set_d_C_A_elements(h_C_A->elements);

	// Free all staging buffers; the published C_A buffers are kept alive.
	checkCudaErrors(cudaFree(d_args));
	checkCudaErrors(cudaFree(d_circles_blocks));
	checkCudaErrors(cudaFree(d_circles_offsets));
	checkCudaErrors(cudaFree(d_circles));
	checkCudaErrors(cudaFreeHost(h_circles_blocks));
	checkCudaErrors(cudaFreeHost(h_circles_offsets));
	checkCudaErrors(cudaFreeHost(h_circles));

	ReportDevMemoryUsage(__func__, dev_mem_used_start);
}

// Computes the C_A table on the GPU using the global-memory kernel variant:
// each circle's point set lives in its own device buffer, referenced through
// an array of Point2D_lst descriptors.  Used as the fallback when the largest
// circle does not fit into shared memory.
//
// On success the device-side result (Matrix2d descriptor and element buffer)
// is published via set_d_C_A()/set_d_C_A_elements(); ownership of that device
// memory stays with this object.  All staging buffers are freed here.
void Ciratefi::_cu_C_A__global__() {
	double dev_mem_used_start = GetDevMemoryUsedMb();
	// Load circles to device memory: one device buffer per radius, plus a
	// descriptor array telling the kernel where each circle lives.
	Point2D_lst *h_circles, *d_circles;
	checkCudaErrors(cudaMallocHost((void **) &h_circles, l_ * sizeof(Point2D_lst)));
	for (int r = bounds_.r.min; r <= bounds_.r.max; r += bounds_.r.step) {
		int r_idx = RadiusToIdx(r);
		int size = circles_[r_idx].size();

		Point2D* d_circle_points;
		checkCudaErrors(cudaMalloc((void **) &d_circle_points, size * sizeof(Point2D)));
		checkCudaErrors(cudaMemcpy(d_circle_points, &(circles_[r_idx][0]), size * sizeof(Point2D), cudaMemcpyHostToDevice));
		h_circles[r_idx].points = d_circle_points;
		h_circles[r_idx].size = size;
	}
	checkCudaErrors(cudaMalloc((void **) &d_circles, l_ * sizeof(Point2D_lst)));
	checkCudaErrors(cudaMemcpy(d_circles, h_circles, l_ * sizeof(Point2D_lst), cudaMemcpyHostToDevice));

	// Kernel arguments travel in a single device-side struct.
	_cu_create_C_A__global__KernelArgs *h_args, *d_args;
	_cu_create_C_A__global__KernelArgs args(bounds_, l_, offset_);
	h_args = &args;
	checkCudaErrors(cudaMalloc((void **) &d_args, sizeof(_cu_create_C_A__global__KernelArgs)));
	checkCudaErrors(cudaMemcpy(d_args, h_args, sizeof(_cu_create_C_A__global__KernelArgs), cudaMemcpyHostToDevice));

	// Output matrix, padded up to a multiple of BLOCK_SIZE so the grid divides
	// it evenly; l_ values are stored per cell.
	Matrix2d *h_C_A, *d_C_A;
	Matrix2d C_A(EXPAND_TO_BLOCK_SIZE(I2_.rows), EXPAND_TO_BLOCK_SIZE(I2_.cols));
	h_C_A = &C_A;
	checkCudaErrors(cudaMalloc((void **) &(h_C_A->elements), h_C_A->height * h_C_A->width * l_ * sizeof(double)));
	checkCudaErrors(cudaMalloc((void **) &d_C_A, sizeof(Matrix2d)));
	checkCudaErrors(cudaMemcpy(d_C_A, h_C_A, sizeof(Matrix2d), cudaMemcpyHostToDevice));

	dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
	dim3 dimGrid(h_C_A->width / dimBlock.x, h_C_A->height / dimBlock.y);
	cudaEvent_t start, stop;
	checkCudaErrors(cudaEventCreate(&start));
	checkCudaErrors(cudaEventCreate(&stop));
	checkCudaErrors(cudaEventRecord(start, NULL));
//	int n_iter = 10;	// raise for benchmarking
	int n_iter = 1;
	for (int i = 0; i < n_iter; ++i) {
		_cu_create_C_A__global__<<<dimGrid, dimBlock>>>(d_A_, d_circles, d_args, d_C_A);
	}
	// Kernel launches return no status themselves; surface launch-configuration
	// errors here instead of letting them show up at an unrelated later call.
	checkCudaErrors(cudaGetLastError());
	checkCudaErrors(cudaEventRecord(stop, NULL));
	checkCudaErrors(cudaEventSynchronize(stop));
	float cu_time_msec = 0.0F;
	checkCudaErrors(cudaEventElapsedTime(&cu_time_msec, start, stop));
	checkCudaErrors( cudaEventDestroy(start) );
	checkCudaErrors( cudaEventDestroy(stop) );
	float cu_time_sec = cu_time_msec / 1000.0F;
	ReportKernelExecutionTime(__func__, cu_time_sec / n_iter);

	// Publish the device-side result for later pipeline stages.
	set_d_C_A(d_C_A);
	set_d_C_A_elements(h_C_A->elements);

	// Free all staging buffers; the published C_A buffers are kept alive.
	checkCudaErrors(cudaFree(d_args));
	checkCudaErrors(cudaFree(d_circles));
	for (int i = 0; i < l_; ++i) {
		checkCudaErrors(cudaFree(h_circles[i].points));
	}
	checkCudaErrors(cudaFreeHost(h_circles));

	ReportDevMemoryUsage(__func__, dev_mem_used_start);
}

// Uploads the (expanded, block-aligned) analysed image A to the device and
// dispatches the C_A computation to the shared- or global-memory kernel
// variant, depending on whether the largest circle fits into shared memory.
// The uploaded device buffers are published via set_d_A()/set_d_A_elements().
void Ciratefi::_cu_C_A() {
	double dev_mem_used_start = GetDevMemoryUsedMb();
	double time = (double)getTickCount();
	Matrix2d A = A_expanded_and_alligned_by_block_size();

	Matrix2d* h_A = &A;
	Matrix2d* d_A;
	double* d_A_elements;
	checkCudaErrors(cudaMalloc((void **) &d_A, sizeof(Matrix2d)));
	checkCudaErrors(cudaMalloc((void **) &d_A_elements, h_A->width * h_A->height * sizeof(double)));
	checkCudaErrors(cudaMemcpy(d_A_elements, h_A->elements, h_A->width * h_A->height * sizeof(double), cudaMemcpyHostToDevice));
	checkCudaErrors(cudaMemcpy(d_A, h_A, sizeof(Matrix2d), cudaMemcpyHostToDevice));
	// Patch the device-side struct's `elements` field so it points at the
	// device element buffer instead of the host pointer copied in above.
	// NOTE: this memcpy must come after the struct copy on the previous line.
	checkCudaErrors(cudaMemcpy(&(d_A->elements), &(d_A_elements), sizeof(double*), cudaMemcpyHostToDevice));

	// Fall back to the global-memory kernel when even the largest single
	// circle's point set would not fit into dynamic shared memory.
	bool use_shared_memory = true;
	if (max_num_of_circle_points_ * sizeof(Point2D) > maximum_shared_memory_) {
		use_shared_memory = false;
		LOG_DEBUG(logger, "Circle [radius: %d], [step: %d] exceeds shared memory limit!\n"
				"\t[number of points: %d], [size per point: %zu bytes], but "
				"[shared mem limit: %d bytes] < [required space: %zu bytes]\n"
				"\tUsing global memory...", circle_radius_with_max_points_, bounds_.r.step,
				max_num_of_circle_points_, sizeof(Point2D),
				maximum_shared_memory_, max_num_of_circle_points_ * sizeof(Point2D));
	}

	// Publish the uploaded image buffers for the kernels and later stages.
	set_d_A(d_A);
	set_d_A_elements(d_A_elements);

	if (use_shared_memory)
		_cu_C_A__shared__();
	else
		_cu_C_A__global__();

	ReportMethodExecutionTime(__func__, CV_TIME(time));
	ReportDevMemoryUsage(__func__, dev_mem_used_start);
}

// Downloads the GPU-computed C_A table into a host-side 3D cv::Mat indexed as
// (x, y, radius_idx).  Only the I2_-sized region is copied out of the
// block-size-padded device buffer.
Mat Ciratefi::dev_to_host_C_A() {
	double time = (double)getTickCount();
	int sizes[3] = {I2_.cols, I2_.rows, l_};
	Mat CA = Mat_<double>(3, sizes);

	// The device buffer is padded up to multiples of the CUDA block size.
	int padded_height = EXPAND_TO_BLOCK_SIZE(I2_.rows);
	int padded_width = EXPAND_TO_BLOCK_SIZE(I2_.cols);

	double* host_elements;
	checkCudaErrors(cudaMallocHost((void **) &host_elements, padded_height * padded_width * l_ * sizeof(double)));
	checkCudaErrors(cudaMemcpy(host_elements, d_C_A_elements_, padded_height * padded_width * l_ * sizeof(double), cudaMemcpyDeviceToHost));

	// Device layout: row-major over (y, x), with l_ radius samples per pixel.
	for (int y = 0; y < I2_.rows; ++y) {
		for (int x = 0; x < I2_.cols; ++x) {
			const double* pixel = &host_elements[(y * padded_width + x) * l_];
			for (int k = 0; k < l_; ++k) {
				CA.at<double>(x, y, k) = pixel[k];
			}
		}
	}
	checkCudaErrors(cudaFreeHost(host_elements));
	ReportMethodExecutionTime(__func__, CV_TIME(time));
	return CA;
}

// CPU reference implementation of the C_A table: for every pixel of I2_,
// samples the circular projection (Cis) at each probed radius, centered at
// the pixel shifted by offset_ into the expanded image.
Mat Ciratefi::C_A() {
	double time = (double)getTickCount();
	Mat I = A_expanded();

	int sizes[3] = {I2_.cols, I2_.rows, l_};
	Mat CA = Mat_<double>(3, sizes);

	for (int x = 0; x < I2_.cols; ++x) {
		for (int y = 0; y < I2_.rows; ++y) {
			Point2D center(x + offset_, y + offset_);

			for (int k = 0; k < l_; ++k) {
				int radius = bounds_.r.min + k * bounds_.r.step;
				CA.at<double>(x, y, k) = Cis(I, center, radius);
			}
		}
	}

	ReportMethodExecutionTime(__func__, CV_TIME(time));
	return CA;
}

// GPU Cifi step: correlates every pixel's C_A column against all n_ rows of
// CQ and returns the first grade points — pixels for which the kernel stored
// a non-negative best-scale index.  Chooses the shared-memory kernel when the
// whole CQ matrix fits into dynamic shared memory, otherwise falls back to
// the global-memory kernel.
std::vector<FirstGradePoint> Ciratefi::_cu_CisCorr_AQ(Mat& CQ) {
	double dev_mem_used_start = GetDevMemoryUsedMb();
	double time = (double)getTickCount();
	std::vector<FirstGradePoint> res;

	// Stage CQ into a pinned host buffer row by row, then upload it as a
	// Matrix2d with a device-side element pointer.
	Matrix2d *h_C_Q, *d_C_Q;
	double *d_C_Q_elements;
	Matrix2d C_Q_(n_, l_);
	h_C_Q = &C_Q_;

	checkCudaErrors( cudaMallocHost((void**) &(h_C_Q->elements), h_C_Q->width * h_C_Q->height * sizeof(double)) );
	for (int row = 0; row < n_; ++row) {
		double* CQ_ptr = CQ.ptr<double>(row);
		memcpy(&(h_C_Q->elements[row * l_]), CQ_ptr, l_ * sizeof(double));
	}

	checkCudaErrors(cudaMalloc((void **) &d_C_Q, sizeof(Matrix2d)));
	checkCudaErrors(cudaMalloc((void **) &d_C_Q_elements, h_C_Q->width * h_C_Q->height * sizeof(double)));
	checkCudaErrors(cudaMemcpy(d_C_Q_elements, h_C_Q->elements, h_C_Q->width * h_C_Q->height * sizeof(double), cudaMemcpyHostToDevice));
	checkCudaErrors(cudaMemcpy(d_C_Q, h_C_Q, sizeof(Matrix2d), cudaMemcpyHostToDevice));
	// Patch the device struct's `elements` field to point at device memory.
	checkCudaErrors(cudaMemcpy(&(d_C_Q->elements), &(d_C_Q_elements), sizeof(double*), cudaMemcpyHostToDevice));

	// Kernel arguments travel in a single device-side struct.
	_cu_CisCorr_AQ__global__KernelArgs *h_args, *d_args;
	_cu_CisCorr_AQ__global__KernelArgs args(thresholds_, bc_thresholds_, l_, n_);
	h_args = &args;
	checkCudaErrors(cudaMalloc((void **) &d_args, sizeof(_cu_CisCorr_AQ__global__KernelArgs)));
	checkCudaErrors(cudaMemcpy(d_args, h_args, sizeof(_cu_CisCorr_AQ__global__KernelArgs), cudaMemcpyHostToDevice));

	// Result matrix: one scale index per (padded) pixel; entries > -1 mark
	// first grade points (see readback loop below).
	FirstGradeMatrix *h_CisCorr_mat, *d_CisCorr_mat;
	FirstGradeMatrix CisCorr_mat(EXPAND_TO_BLOCK_SIZE(I2_.rows), EXPAND_TO_BLOCK_SIZE(I2_.cols));
	h_CisCorr_mat = &CisCorr_mat;

	checkCudaErrors(cudaMalloc((void **) &(h_CisCorr_mat->scale_idxs), h_CisCorr_mat->height * h_CisCorr_mat->width * sizeof(int)));
	checkCudaErrors(cudaMalloc((void **) &d_CisCorr_mat, sizeof(FirstGradeMatrix)));
	checkCudaErrors(cudaMemcpy(d_CisCorr_mat, h_CisCorr_mat, sizeof(FirstGradeMatrix), cudaMemcpyHostToDevice));

	// Shared-memory variant needs the whole CQ matrix in shared memory.
	bool use_shared_memory = true;
	if (n_ * l_ * sizeof(double) > maximum_shared_memory_) {
		use_shared_memory = false;
		LOG_DEBUG(logger, "CQ [size: %d x %d] exceeds shared memory limit!\n"
				"\t[number of elements: %d], [size per element: %zu bytes], but "
				"[shared mem limit: %d bytes] < [required space: %zu bytes]\n"
				"\tUsing global memory...", n_, l_,
				n_ * l_, sizeof(double),
				maximum_shared_memory_, n_ * l_ * sizeof(double));
	}

	dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
	dim3 dimGrid(h_CisCorr_mat->width / dimBlock.x, h_CisCorr_mat->height / dimBlock.y);
	cudaEvent_t start, stop;
	checkCudaErrors(cudaEventCreate(&start));
	checkCudaErrors(cudaEventCreate(&stop));
	checkCudaErrors(cudaEventRecord(start, NULL));
//	int n_iter = 10;	// raise for benchmarking
	int n_iter = 1;
	for (int i = 0; i < n_iter; ++i) {
		if (use_shared_memory)
			_cu_CisCorr_AQ__shared__<<<dimGrid, dimBlock, n_ * l_ * sizeof(double)>>>(d_C_Q, d_C_A_, d_args, d_CisCorr_mat);
		else
			_cu_CisCorr_AQ__global__<<<dimGrid, dimBlock>>>(d_C_Q, d_C_A_, d_args, d_CisCorr_mat);
	}
	// Kernel launches return no status themselves; surface launch-configuration
	// errors here instead of letting them show up at an unrelated later call.
	checkCudaErrors(cudaGetLastError());
	checkCudaErrors(cudaEventRecord(stop, NULL));
	checkCudaErrors(cudaEventSynchronize(stop));
	float cu_time_msec = 0.0F;
	checkCudaErrors(cudaEventElapsedTime(&cu_time_msec, start, stop));
	checkCudaErrors( cudaEventDestroy(start) );
	checkCudaErrors( cudaEventDestroy(stop) );
	float cu_time_sec = cu_time_msec / 1000.0F;
	ReportKernelExecutionTime(__func__, cu_time_sec / n_iter);

	// Read back the scale-index matrix and collect the surviving points from
	// the I2_-sized region of the padded buffer.
	int* scale_idxs;
	checkCudaErrors( cudaMallocHost((void**) &scale_idxs, h_CisCorr_mat->width * h_CisCorr_mat->height * sizeof(int)) );
	checkCudaErrors( cudaMemcpy(scale_idxs, h_CisCorr_mat->scale_idxs, h_CisCorr_mat->width * h_CisCorr_mat->height * sizeof(int), cudaMemcpyDeviceToHost) );
	for (int y = 0; y < I2_.rows; ++y) {
		for (int x = 0; x < I2_.cols; ++x) {
			int idx = y * h_CisCorr_mat->width + x;
			int scale_idx = scale_idxs[idx];
			if (scale_idx > -1) {
				FirstGradePoint point(Point2D(x, y), scale_idx);
				res.push_back(point);
			}
		}
	}
	checkCudaErrors(cudaFreeHost(scale_idxs));

	checkCudaErrors(cudaFree(d_CisCorr_mat));
	checkCudaErrors(cudaFree(h_CisCorr_mat->scale_idxs));
	checkCudaErrors(cudaFree(d_args));
	checkCudaErrors(cudaFree(d_C_Q_elements));
	checkCudaErrors(cudaFree(d_C_Q));
	checkCudaErrors(cudaFreeHost(h_C_Q->elements));

	ReportMethodExecutionTime(__func__, CV_TIME(time));
	ReportDevMemoryUsage(__func__, dev_mem_used_start);
	return res;
}

// CPU reference for the Cifi step: correlates every pixel's C_A column with
// each row of CQ and keeps the best-matching scale index whenever the
// absolute correlation exceeds the cifi_t1 threshold.  Returns the surviving
// first grade points.
std::vector<FirstGradePoint> Ciratefi::CisCorr_AQ(Mat& CQ, Mat& CA) {
	double time = (double)getTickCount();
	std::vector<FirstGradePoint> res;

	for (int y = 0; y < I2_.rows; ++y) {
		for (int x = 0; x < I2_.cols; ++x) {
			double* CAxy = CA.ptr<double>(x, y);

			int best_scale_idx = -1;
			double best_corr = 0;
			for (int scale_idx = 0; scale_idx < n_; ++scale_idx) {
				double c = abs(corr(CQ.ptr<double>(scale_idx), CAxy, l_));
				if (c > thresholds_.cifi_t1 && c > best_corr) {
					best_corr = c;
					best_scale_idx = scale_idx;
				}
			}

			// A qualifying scale makes (x, y) a first grade point.
			if (best_scale_idx > -1) {
				res.push_back(FirstGradePoint(Point2D(x, y), best_scale_idx));
			}
		}
	}

	ReportMethodExecutionTime(__func__, CV_TIME(time));
	return res;
}

// Builds the RQ feature vector for the template image: one radial-projection
// sample (Ras) per probed rotation angle, taken from the template center with
// line length bounds_.r.max.
Mat Ciratefi::R_Q() {
	double time = (double)getTickCount();
	Mat RQ = Mat_<double>(1, m_);

	// Make sure the line point sets exist before sampling.
	CreateLines();

	Point2D center(I1_.cols / 2, I1_.rows / 2);

	double angle = bounds_.phi.min;
	for (int angle_idx = 0; angle_idx < m_; ++angle_idx) {
		// Counterclockwise lines
		RQ.at<double>(0, angle_idx) = Ras(I1_, center, bounds_.r.max, angle);

		angle += bounds_.phi.step;
	}

	ReportMethodExecutionTime(__func__, CV_TIME(time));
	return RQ;
}

// Computes the R_A table on the GPU (global-memory kernel): for every first
// grade point, the kernel produces m_ radial-projection samples.  The line
// point sets are uploaded per length lambda (0..max_lambda_), with all m_
// angle variants of one lambda packed into a single device buffer plus a
// (size, start index) descriptor array.
//
// On success the device-side result buffer (fgps_size * m_ doubles) is
// published via set_d_R_A(); ownership stays with this object.
void Ciratefi::_cu_R_A__global__(std::vector<FirstGradePoint>& fgps) {
	double dev_mem_used_start = GetDevMemoryUsedMb();
	double time = (double)getTickCount();
	// Load lines to device memory
	Line_lst *h_lines, *d_lines;
	checkCudaErrors( cudaMallocHost((void**) &h_lines, (max_lambda_ + 1) * sizeof(Line_lst)) );

	// Total number of points over all m_ angles, per line length lambda.
	std::vector<int> line_chunk_sizes;
	for (int r = 0; r <= max_lambda_; ++r) {
		int size = 0;
		for (int phi_idx = 0; phi_idx < m_; ++phi_idx) {
			size += lines_[r][phi_idx].size();
		}
		line_chunk_sizes.push_back(size);
	}

	// Reused staging buffer for the per-angle descriptors of one lambda.
	SizeAndIdxs* line_size_and_idxs;
	checkCudaErrors( cudaMallocHost((void**) &line_size_and_idxs, m_ * sizeof(SizeAndIdxs)) );
	for (int r = 0; r <= max_lambda_; ++r) {
		Point2D* d_line_points;
		checkCudaErrors(cudaMalloc((void **) &d_line_points, line_chunk_sizes[r] * sizeof(Point2D)));
		SizeAndIdxs* d_line_size_and_idxs;
		checkCudaErrors(cudaMalloc((void **) &d_line_size_and_idxs, m_ * sizeof(SizeAndIdxs)));
		int next_line_idx = 0;
		for (int phi_idx = 0; phi_idx < m_; ++phi_idx) {
			int size = lines_[r][phi_idx].size();
			Point2D* points = &(lines_[r][phi_idx][0]);

			line_size_and_idxs[phi_idx] = SizeAndIdxs(size, next_line_idx);
			checkCudaErrors(cudaMemcpy(&(d_line_points[next_line_idx]), points, size * sizeof(Point2D), cudaMemcpyHostToDevice));
			next_line_idx += size;
		}
		checkCudaErrors(cudaMemcpy(d_line_size_and_idxs, line_size_and_idxs, m_ * sizeof(SizeAndIdxs), cudaMemcpyHostToDevice));

		h_lines[r].points = d_line_points;
		h_lines[r].size_and_idxs = d_line_size_and_idxs;
	}
	checkCudaErrors(cudaFreeHost(line_size_and_idxs));

	checkCudaErrors(cudaMalloc((void **) &d_lines, (max_lambda_ + 1) * sizeof(Line_lst)));
	checkCudaErrors(cudaMemcpy(d_lines, h_lines, (max_lambda_ + 1) * sizeof(Line_lst), cudaMemcpyHostToDevice));

	// Kernel arguments travel in a single device-side struct.
	_cu_create_R_A__global__KernelArgs *h_args, *d_args;
	_cu_create_R_A__global__KernelArgs args(bounds_, m_, offset_);
	h_args = &args;
	checkCudaErrors(cudaMalloc((void **) &d_args, sizeof(_cu_create_R_A__global__KernelArgs)));
	checkCudaErrors(cudaMemcpy(d_args, h_args, sizeof(_cu_create_R_A__global__KernelArgs), cudaMemcpyHostToDevice));

	// Upload the first grade points; one thread per point in the kernel grid.
	FirstGradePoint *d_fgps;
	int fgps_size = fgps.size();
	checkCudaErrors(cudaMalloc((void **) &d_fgps, fgps_size * sizeof(FirstGradePoint)));
	checkCudaErrors(cudaMemcpy(d_fgps, &(fgps[0]), fgps_size * sizeof(FirstGradePoint), cudaMemcpyHostToDevice));

	// Result: m_ consecutive angle samples per first grade point.
	double* d_R_A;
	checkCudaErrors(cudaMalloc((void **) &d_R_A, fgps_size * m_ * sizeof(double)));

	dim3 dimBlock(BLOCK_SIZE * BLOCK_SIZE);
	dim3 dimGrid(EXPAND_TO_FULL_BLOCK_SIZE(fgps_size) / dimBlock.x);
	cudaEvent_t start, stop;
	checkCudaErrors(cudaEventCreate(&start));
	checkCudaErrors(cudaEventCreate(&stop));
	checkCudaErrors(cudaEventRecord(start, NULL));
//	int n_iter = 10;	// raise for benchmarking
	int n_iter = 1;
	for (int i = 0; i < n_iter; ++i) {
		_cu_create_R_A__global__<<<dimGrid, dimBlock>>>(d_A_, d_fgps, fgps_size, d_lines, d_args, d_R_A);
	}
	// Kernel launches return no status themselves; surface launch-configuration
	// errors here instead of letting them show up at an unrelated later call.
	checkCudaErrors(cudaGetLastError());
	checkCudaErrors(cudaEventRecord(stop, NULL));
	checkCudaErrors(cudaEventSynchronize(stop));
	float cu_time_msec = 0.0F;
	checkCudaErrors(cudaEventElapsedTime(&cu_time_msec, start, stop));
	checkCudaErrors( cudaEventDestroy(start) );
	checkCudaErrors( cudaEventDestroy(stop) );
	float cu_time_sec = cu_time_msec / 1000.0F;
	ReportKernelExecutionTime(__func__, cu_time_sec / n_iter);

	// Publish the device-side result for later pipeline stages.
	set_d_R_A(d_R_A);

	// Free all staging buffers; the published R_A buffer is kept alive.
	checkCudaErrors(cudaFree(d_fgps));
	checkCudaErrors(cudaFree(d_args));
	checkCudaErrors(cudaFree(d_lines));
	for (int r = 0; r <= max_lambda_; ++r) {
		checkCudaErrors(cudaFree(h_lines[r].points));
		checkCudaErrors(cudaFree(h_lines[r].size_and_idxs));
	}
	checkCudaErrors(cudaFreeHost(h_lines));
	ReportMethodExecutionTime(__func__, CV_TIME(time));
	ReportDevMemoryUsage(__func__, dev_mem_used_start);
}

// Dispatches the R_A computation to the GPU.  Unlike C_A, only the
// global-memory kernel variant exists for R_A, so no shared/global choice is
// made here.
void Ciratefi::_cu_R_A(std::vector<FirstGradePoint>& fgps) {
	_cu_R_A__global__(fgps);
}

// Downloads the GPU-computed R_A values into a host-side 3D cv::Mat.  Only
// the cells belonging to first grade points are written; all other cells keep
// whatever the freshly created Mat contains.
Mat Ciratefi::dev_to_host_R_A(std::vector<FirstGradePoint>& fgps) {
	double time = (double)getTickCount();
	int sizes[3] = {I2_.cols, I2_.rows, m_};
	Mat RA = Mat_<double>(3, sizes);

	int num_points = fgps.size();
	double* host_elements;
	checkCudaErrors(cudaMallocHost((void **) &host_elements, num_points * m_ * sizeof(double)));
	checkCudaErrors(cudaMemcpy(host_elements, d_R_A_, num_points * m_ * sizeof(double), cudaMemcpyDeviceToHost));

	// Device layout: m_ consecutive angle samples per first grade point.
	for (int i = 0; i < num_points; ++i) {
		const double* row = &host_elements[i * m_];
		FirstGradePoint point = fgps[i];
		for (int j = 0; j < m_; ++j) {
			RA.at<double>(point.p.x, point.p.y, j) = row[j];
		}
	}
	checkCudaErrors(cudaFreeHost(host_elements));

	ReportMethodExecutionTime(__func__, CV_TIME(time));
	return RA;
}

// CPU reference for the R_A table: for every first grade point, samples the
// radial projection (Ras) at each probed angle.  The line length lambda is
// the maximum radius scaled by the point's best-matching template scale.
Mat Ciratefi::R_A(std::vector<FirstGradePoint>& fgps) {
	double time = (double)getTickCount();
	Mat I = A_expanded();

	int sizes[3] = {I2_.cols, I2_.rows, m_};
	Mat RA = Mat_<double>(3, sizes);

	int num_points = fgps.size();
	for (int i = 0; i < num_points; ++i) {
		FirstGradePoint point = fgps[i];
		double s = bounds_.s.min + point.scale_idx * bounds_.s.step;
		int lambda = s * bounds_.r.max;	// line length for this point's scale

		Point2D center(point.p.x + offset_, point.p.y + offset_);
		double angle = bounds_.phi.min;
		for (int j = 0; j < m_; ++j) {
			RA.at<double>(point.p.x, point.p.y, j) = Ras(I, center, lambda, angle);

			angle += bounds_.phi.step;
		}
	}

	ReportMethodExecutionTime(__func__, CV_TIME(time));
	return RA;
}

// GPU Rafi step: correlates each first grade point's R_A row against every
// circular shift of RQ; the kernel writes one best-angle index per point
// (negative means the point did not pass the rafi_t2 threshold).  Surviving
// points are promoted to second grade points.
std::vector<SecondGradePoint> Ciratefi::_cu_RasCorr_AQ(Mat& RQ, std::vector<FirstGradePoint>& fgps) {
	double dev_mem_used_start = GetDevMemoryUsedMb();
	double time = (double)getTickCount();
	std::vector<SecondGradePoint> res;

	// Precompute all m_ counterclockwise circular shifts of RQ; row j of RQjs
	// holds cshift_j(RQ).
	double* RQj_ = RQ.ptr<double>(0);
	Mat RQjs = Mat_<double>(m_, m_);	// cshift_j(RQ)
	for (int row = 0; row < m_; ++row) {
		for (int col = 0; col < m_; ++col) {
			// Counterclockwise shift
			RQjs.at<double>(row, col) = RQj_[(col + row) % m_];
		}
	}

	// Upload each shifted row as its own device buffer, referenced through an
	// array of DArrayWrap descriptors.
	DArrayWrap *h_RQjs, *d_RQjs;
	h_RQjs = (DArrayWrap*) malloc(m_ * sizeof(DArrayWrap));
	for (int i = 0; i < m_; ++i) {
		double* d_RQ_elements;
		checkCudaErrors(cudaMalloc((void **) &d_RQ_elements, m_ * sizeof(double)));
		checkCudaErrors(cudaMemcpy(d_RQ_elements, RQjs.ptr<double>(i), m_ * sizeof(double), cudaMemcpyHostToDevice));
		h_RQjs[i].elements = d_RQ_elements;
	}
	checkCudaErrors(cudaMalloc((void **) &d_RQjs, m_ * sizeof(DArrayWrap)));
	checkCudaErrors(cudaMemcpy(d_RQjs, h_RQjs, m_ * sizeof(DArrayWrap), cudaMemcpyHostToDevice));

	// Kernel arguments travel in a single device-side struct.
	_cu_RasCorr_AQ__global__KernelArgs *h_args, *d_args;
	_cu_RasCorr_AQ__global__KernelArgs args(thresholds_, bc_thresholds_, m_);
	h_args = &args;
	checkCudaErrors(cudaMalloc((void **) &d_args, sizeof(_cu_RasCorr_AQ__global__KernelArgs)));
	checkCudaErrors(cudaMemcpy(d_args, h_args, sizeof(_cu_RasCorr_AQ__global__KernelArgs), cudaMemcpyHostToDevice));

	int fgps_size = fgps.size();
	// NOTE(review): fgps_size * m_ ints are allocated here but only fgps_size
	// are copied back below — presumably per-angle scratch for the kernel;
	// confirm against _cu_RasCorr_AQ__global__ before shrinking.
	int* d_angle_idxs;
	checkCudaErrors(cudaMalloc((void **) &d_angle_idxs, fgps_size * m_ * sizeof(int)));

	dim3 dimBlock(BLOCK_SIZE * BLOCK_SIZE);
	dim3 dimGrid(EXPAND_TO_FULL_BLOCK_SIZE(fgps_size) / dimBlock.x);
	cudaEvent_t start, stop;
	checkCudaErrors(cudaEventCreate(&start));
	checkCudaErrors(cudaEventCreate(&stop));
	checkCudaErrors(cudaEventRecord(start, NULL));
//	int n_iter = 10;	// raise for benchmarking
	int n_iter = 1;
	for (int i = 0; i < n_iter; ++i) {
		_cu_RasCorr_AQ__global__<<<dimGrid, dimBlock>>>(d_RQjs, d_R_A_, fgps_size, d_args, d_angle_idxs);
	}
	// Kernel launches return no status themselves; surface launch-configuration
	// errors here instead of letting them show up at an unrelated later call.
	checkCudaErrors(cudaGetLastError());
	checkCudaErrors(cudaEventRecord(stop, NULL));
	checkCudaErrors(cudaEventSynchronize(stop));
	float cu_time_msec = 0.0F;
	checkCudaErrors(cudaEventElapsedTime(&cu_time_msec, start, stop));
	checkCudaErrors( cudaEventDestroy(start) );
	checkCudaErrors( cudaEventDestroy(stop) );
	float cu_time_sec = cu_time_msec / 1000.0F;
	ReportKernelExecutionTime(__func__, cu_time_sec / n_iter);

	// Read back one best-angle index per point; non-negative entries become
	// second grade points.
	int* angle_idxs;
	checkCudaErrors( cudaMallocHost((void **) &angle_idxs, fgps_size * sizeof(int)) );
	checkCudaErrors( cudaMemcpy(angle_idxs, d_angle_idxs, fgps_size * sizeof(int), cudaMemcpyDeviceToHost) );
	for (int i = 0; i < fgps_size; ++i) {
		int angle_idx = angle_idxs[i];
		if (angle_idx >= 0) {
			FirstGradePoint fgp = fgps[i];
			SecondGradePoint sgp(Point2D(fgp.p.x, fgp.p.y), fgp.scale_idx, angle_idx);
			res.push_back(sgp);
		}
	}
	checkCudaErrors(cudaFreeHost(angle_idxs));

	checkCudaErrors(cudaFree(d_angle_idxs));
	checkCudaErrors(cudaFree(d_args));
	checkCudaErrors(cudaFree(d_RQjs));
	for (int i = 0; i < m_; ++i) {
		checkCudaErrors(cudaFree(h_RQjs[i].elements));
	}
	free(h_RQjs);

	ReportMethodExecutionTime(__func__, CV_TIME(time));
	ReportDevMemoryUsage(__func__, dev_mem_used_start);
	return res;
}

// CPU reference for the Rafi step: correlates each first grade point's R_A
// row against every circular shift of RQ and keeps the best-matching shift
// (rotation index) when the absolute correlation exceeds the rafi_t2
// threshold.  Surviving points are promoted to second grade points.
std::vector<SecondGradePoint> Ciratefi::RasCorr_AQ(Mat& RQ, Mat& RA, std::vector<FirstGradePoint>& fgps) {
	double time = (double)getTickCount();
	std::vector<SecondGradePoint> res;

	// Precompute all m_ counterclockwise circular shifts of RQ; row j holds
	// cshift_j(RQ).
	double* RQ_row = RQ.ptr<double>(0);
	Mat RQjs = Mat_<double>(m_, m_);
	for (int row = 0; row < m_; ++row) {
		for (int col = 0; col < m_; ++col) {
			RQjs.at<double>(row, col) = RQ_row[(col + row) % m_];
		}
	}

	int num_points = fgps.size();
	for (int i = 0; i < num_points; ++i) {
		FirstGradePoint fgp = fgps[i];
		double* RAxy = RA.ptr<double>(fgp.p.x, fgp.p.y);

		int best_angle_idx = -1;
		double best_corr = 0;
		for (int j = 0; j < m_; ++j) {
			double c = abs(corr(RQjs.ptr<double>(j), RAxy, m_));
			if (c > thresholds_.rafi_t2 && c > best_corr) {
				best_corr = c;
				best_angle_idx = j;
			}
		}

		// A qualifying rotation makes this a second grade point.
		if (best_angle_idx >= 0) {
			SecondGradePoint point;
			point.p.x = fgp.p.x;
			point.p.y = fgp.p.y;
			point.angle_idx = best_angle_idx;
			point.scale_idx = fgp.scale_idx;
			res.push_back(point);
		}
	}

	ReportMethodExecutionTime(__func__, CV_TIME(time));
	return res;
}

// GPU Tefi step: for every second grade point, the kernel computes a template
// match correlation between the mean-centered template Q and the analysed
// image around that point.  The host then scans the correlations, tracking
// the global best match and collecting all points whose correlation exceeds
// the tefi_t3 threshold.
CiratefiResult Ciratefi::_cu_Tefi(std::vector<SecondGradePoint>& sgps) {
	double dev_mem_used_start = GetDevMemoryUsedMb();
	double time = (double)getTickCount();
	CiratefiResult res;

	// Center the template around its mean and accumulate the squared norm of
	// the centered values (used by the kernel for normalized correlation).
	// NOTE(review): the buffer returned by Q() is never freed here — confirm
	// that Q() retains ownership of it.
	double *h_Q, *h_Q_centered, *d_Q_centered;
	h_Q = Q();
	int w1, h1;
	w1 = I1_.cols;
	h1 = I1_.rows;
	double Q_mean = 0;
	double sum_Q_centered_squared = 0;
	for (int i = 0; i < w1 * h1; ++i) {
		Q_mean += h_Q[i];
	}
	Q_mean /= (w1 * h1);
	checkCudaErrors( cudaMallocHost((void **) &h_Q_centered, h1 * w1 * sizeof(double)) );
	memcpy(h_Q_centered, h_Q, h1 * w1 * sizeof(double));

	for (int i = 0; i < w1 * h1; ++i) {
		double Q_centered = h_Q_centered[i] - Q_mean;
		h_Q_centered[i] = Q_centered;
		sum_Q_centered_squared += Q_centered * Q_centered;
	}

	checkCudaErrors( cudaMalloc((void **) &d_Q_centered, h1 * w1 * sizeof(double)) );
	checkCudaErrors( cudaMemcpy(d_Q_centered, h_Q_centered, h1 * w1 * sizeof(double), cudaMemcpyHostToDevice) );
	checkCudaErrors( cudaFreeHost(h_Q_centered) );

	// Upload the second grade points; one thread per point in the kernel grid.
	int sgps_size = sgps.size();
	SecondGradePoint* d_sgps;
	checkCudaErrors( cudaMalloc((void **) &d_sgps, sgps_size * sizeof(SecondGradePoint)) );
	checkCudaErrors( cudaMemcpy(d_sgps, &(sgps[0]), sgps_size * sizeof(SecondGradePoint), cudaMemcpyHostToDevice) );

	// Kernel arguments travel in a single device-side struct.
	_cu_eval_Tefi__global__KernelArgs *h_args, *d_args;
	_cu_eval_Tefi__global__KernelArgs args(thresholds_, bc_thresholds_, bounds_, offset_, w1, h1, Q_mean, sum_Q_centered_squared);
	h_args = &args;
	checkCudaErrors( cudaMalloc((void **) &d_args, sizeof(_cu_eval_Tefi__global__KernelArgs)) );
	checkCudaErrors( cudaMemcpy(d_args, h_args, sizeof(_cu_eval_Tefi__global__KernelArgs), cudaMemcpyHostToDevice) );

	// One correlation value per second grade point.
	double* d_correlations;
	checkCudaErrors( cudaMalloc((void **) &d_correlations, sgps_size * sizeof(double)) );

	dim3 dimBlock(BLOCK_SIZE * BLOCK_SIZE);
	dim3 dimGrid(EXPAND_TO_FULL_BLOCK_SIZE(sgps_size) / dimBlock.x);
	cudaEvent_t start, stop;
	checkCudaErrors(cudaEventCreate(&start));
	checkCudaErrors(cudaEventCreate(&stop));
	checkCudaErrors(cudaEventRecord(start, NULL));
//	int n_iter = 10;	// raise for benchmarking
	int n_iter = 1;
	for (int i = 0; i < n_iter; ++i) {
		_cu_eval_Tefi__global__<<<dimGrid, dimBlock>>>(d_Q_centered, d_A_, d_sgps, sgps_size, d_args, d_correlations);
	}
	// Kernel launches return no status themselves; surface launch-configuration
	// errors here instead of letting them show up at an unrelated later call.
	checkCudaErrors(cudaGetLastError());
	checkCudaErrors(cudaEventRecord(stop, NULL));
	checkCudaErrors(cudaEventSynchronize(stop));
	float cu_time_msec = 0.0F;
	checkCudaErrors(cudaEventElapsedTime(&cu_time_msec, start, stop));
	checkCudaErrors( cudaEventDestroy(start) );
	checkCudaErrors( cudaEventDestroy(stop) );
	float cu_time_sec = cu_time_msec / 1000.0F;
	ReportKernelExecutionTime(__func__, cu_time_sec / n_iter);

	// Read back correlations and points (the kernel may have refined the
	// points' scale/angle, so copy them back rather than reusing `sgps`).
	double* h_correlations;
	checkCudaErrors( cudaMallocHost((void **) &h_correlations, sgps_size * sizeof(double)) );
	checkCudaErrors( cudaMemcpy(h_correlations, d_correlations, sgps_size * sizeof(double), cudaMemcpyDeviceToHost) );
	SecondGradePoint* h_sgps;
	checkCudaErrors( cudaMallocHost((void **) &h_sgps, sgps_size * sizeof(SecondGradePoint)) );
	checkCudaErrors( cudaMemcpy(h_sgps, d_sgps, sgps_size * sizeof(SecondGradePoint), cudaMemcpyDeviceToHost) );

	// Track the best match unconditionally; only threshold-passing points are
	// recorded in res.points / res.max_corr.
	double max_corr = 0;
	for (int i = 0; i < sgps_size; ++i) {
		double corr = h_correlations[i];
		if (corr > max_corr) {
			max_corr = corr;
			SecondGradePoint sgp = h_sgps[i];
			res.best_match = sgp;
			if (corr > thresholds_.tefi_t3) {
				res.points.push_back(sgp);
				res.max_corr = corr;
			}
		}
	}

	checkCudaErrors( cudaFreeHost(h_sgps) );
	checkCudaErrors( cudaFreeHost(h_correlations) );
	checkCudaErrors( cudaFree(d_correlations) );
	checkCudaErrors( cudaFree(d_args) );
	checkCudaErrors( cudaFree(d_sgps) );
	checkCudaErrors( cudaFree(d_Q_centered) );

	ReportCiratefiResult(res, max_corr);

	ReportMethodExecutionTime(__func__, CV_TIME(time));
	ReportDevMemoryUsage(__func__, dev_mem_used_start);
	return res;
}

// Tefi (template matching filter), CPU version: for each second grade
// candidate, refines scale and angle in a 3x3 neighborhood around the values
// found by Rafi and keeps the pose with the highest absolute correlation.
// Returns the best match plus every pose whose correlation exceeds tefi_t3.
CiratefiResult Ciratefi::Tefi(std::vector<SecondGradePoint>& sgps) {
	double time = (double)getTickCount();
	CiratefiResult res;

	// Search image with an out_of_boundaries_value_ border of offset_ pixels,
	// so the affine sampling below needs no explicit clipping.
	Mat I = A_expanded();

	// Flattened template pixels (cached inside Q()).
	double* Q_elements = Q();
	int Q_size = I1_.rows * I1_.cols;
	// Scratch buffer for the affine-warped patch of A at each candidate pose.
	double* A_elements = (double*) malloc(sizeof(double) * Q_size);

	int w1, h1, half_w1, half_h1;
	w1 = I1_.cols;
	h1 = I1_.rows;
	half_w1 = w1 / 2;
	half_h1 = h1 / 2;

	double max_corr = 0;
	int sgps_size = sgps.size();
	for (int i = 0; i < sgps_size; ++i) {
		Point2D center(sgps[i].p.x, sgps[i].p.y);

		// Scale/angle found by Rafi; refine one step in each direction.
		double s_best, phi_best;
		s_best = bounds_.s.min + sgps[i].scale_idx * bounds_.s.step;
		phi_best = bounds_.phi.min + sgps[i].angle_idx * bounds_.phi.step;

		for (double s = s_best - bounds_.s.step; s <= s_best + bounds_.s.step; s += bounds_.s.step) {
			for (double phi = phi_best - bounds_.phi.step; phi <= phi_best + bounds_.phi.step; phi += bounds_.phi.step) {
				// Affine map from template-centered coordinates into the
				// expanded search image: scale by s, rotate by -phi
				// (counterclockwise), translate to the candidate center.
				double a11, a12, a21, a22, a13, a23;
				a11 = s * cos(-phi); a12 = -s * sin(-phi);
				a21 = s * sin(-phi); a22 = s * cos(-phi);
				a13 = center.x + offset_; a23 = center.y + offset_;

				// Sample the warped patch (nearest neighbor via int truncation).
				double* A_ptr = A_elements;
				for (int y = -half_h1; y < h1 - half_h1; ++y) {
					for (int x = -half_w1; x < w1 - half_w1; ++x) {
						int target_x, target_y;
						target_x = a11 * x + a12 * y + a13;
						target_y = a21 * x + a22 * y + a23;

						*A_ptr = I.at<double>(target_y, target_x);
						++A_ptr;
					}
				}
				// fabs, not abs: with only <stdlib.h>/<math.h> included, an
				// unqualified abs may resolve to the int overload and truncate
				// the correlation value to 0 or 1.
				double corr_ = fabs(corr(Q_elements, A_elements, Q_size));

				if (corr_ > max_corr) {
					max_corr = corr_;
					SecondGradePoint sgp = sgps[i];
					sgp.scale_idx = ScaleToIdx(s);
					sgp.angle_idx = AngleToIdx(phi);
					res.best_match = sgp;

					if (corr_ > thresholds_.tefi_t3) {
						res.points.push_back(sgp);
						res.max_corr = corr_;
					}
				}
				// NOTE: the old trailing "A_ptr = A_elements;" was a dead store
				// (A_ptr is re-initialized at the top of every pose iteration)
				// and has been removed.
			}
		}
	}
	free(A_elements);

	ReportCiratefiResult(res, max_corr);

	ReportMethodExecutionTime(__func__, CV_TIME(time));
	return res;
}

// Runs the full CPU pipeline: Cifi -> Rafi -> Tefi, bailing out early with an
// empty result whenever a stage produces no candidates.
CiratefiResult Ciratefi::RunCpu() {
	CiratefiResult result;
	LOG_INFO(logger, "Starting CIRATEFI...");

	// --- Cifi: circular sampling filter, yields first grade candidates ---
	LOG_INFO(logger, "Executing Cifi...");
	double total_start = (double)getTickCount();
	double stage_start = (double)getTickCount();
	Mat CQ = C_Q();
	Mat CA = C_A();
	std::vector<FirstGradePoint> fgps = CisCorr_AQ(CQ, CA);
	LOG_INFO(logger, "Cifi completed with time: %.4fsec. First grade candidates: %zu", CV_TIME(stage_start), fgps.size());
	if (fgps.empty()) {
		LOG_ERROR(logger, "Exiting...");
		return result;
	}

	// --- Rafi: radial sampling filter, yields second grade candidates ---
	LOG_INFO(logger, "Executing Rafi...");
	stage_start = (double)getTickCount();
	Mat RQ = R_Q();
	Mat RA = R_A(fgps);
	std::vector<SecondGradePoint> sgps = RasCorr_AQ(RQ, RA, fgps);
	LOG_INFO(logger, "Rafi completed with time: %.4fsec. Second grade candidates: %zu", CV_TIME(stage_start), sgps.size());
	if (sgps.empty()) {
		LOG_ERROR(logger, "Exiting...");
		return result;
	}

	// --- Tefi: final template matching over the surviving candidates ---
	LOG_INFO(logger, "Executing Tefi...");
	stage_start = (double)getTickCount();
	result = Tefi(sgps);
	LOG_INFO(logger, "Tefi completed with time: %.4fsec", CV_TIME(stage_start));
	LOG_INFO(logger, "CIRATEFI completed with time: %.4fsec", CV_TIME(total_start));

	if (result.points.empty()) {
		LOG_ERROR(logger, "No best match with given thresholds!");
	}

	return result;
}

// Runs the full GPU pipeline: Cifi -> Rafi -> Tefi using the CUDA kernels,
// bailing out early with an empty result whenever a stage finds no candidates.
CiratefiResult Ciratefi::RunGpu() {
	CiratefiResult result;
	LOG_INFO(logger, "Starting CIRATEFI...");
	ReportDevMemoryUsage(__func__, GetDevMemoryUsedMb());

	// --- Cifi: circular sampling filter on the GPU ---
	LOG_INFO(logger, "Executing Cifi...");
	double total_start = (double)getTickCount();
	double stage_start = (double)getTickCount();
	Mat CQ = C_Q();
	_cu_C_A();
	Mat CA = dev_to_host_C_A();
	std::vector<FirstGradePoint> fgps = _cu_CisCorr_AQ(CQ);
	LOG_INFO(logger, "Cifi completed with time: %.4fsec. First grade candidates: %zu", CV_TIME(stage_start), fgps.size());
	if (fgps.empty()) {
		LOG_ERROR(logger, "Exiting...");
		return result;
	}

	// --- Rafi: radial sampling filter on the GPU ---
	LOG_INFO(logger, "Executing Rafi...");
	stage_start = (double)getTickCount();
	Mat RQ = R_Q();
	_cu_R_A(fgps);
	Mat RA = dev_to_host_R_A(fgps);
	std::vector<SecondGradePoint> sgps = _cu_RasCorr_AQ(RQ, fgps);
	LOG_INFO(logger, "Rafi completed with time: %.4fsec. Second grade candidates: %zu", CV_TIME(stage_start), sgps.size());
	if (sgps.empty()) {
		LOG_ERROR(logger, "Exiting...");
		return result;
	}

	// --- Tefi: final template matching on the GPU ---
	LOG_INFO(logger, "Executing Tefi...");
	stage_start = (double)getTickCount();
	result = _cu_Tefi(sgps);
	LOG_INFO(logger, "Tefi completed with time: %.4fsec", CV_TIME(stage_start));
	LOG_INFO(logger, "CIRATEFI completed with time: %.4fsec", CV_TIME(total_start));

	if (result.points.empty()) {
		LOG_ERROR(logger, "No best match with given thresholds!");
	}

	return result;
}

// Pre-computes the midpoint-circle point sets for every radius in bounds_.r
// and records the largest point count (and its radius) plus the total count.
void Ciratefi::CreateCircles() {
	double time = (double)getTickCount();
	// Reset the cached circles AND the derived counters. The old code only
	// cleared circles_, so a second call kept accumulating
	// total_num_of_circle_points_ and retained a stale
	// max_num_of_circle_points_.
	circles_.clear();
	max_num_of_circle_points_ = 0;
	total_num_of_circle_points_ = 0;

	for (int r = bounds_.r.min; r <= bounds_.r.max; r += bounds_.r.step) {
		std::vector<Point2D> circle = recognition::MidpointCircle(r, bounds_.angle_incr);
		circles_.push_back(circle);
		int size = circle.size();
		if (max_num_of_circle_points_ < size) {
			max_num_of_circle_points_ = size;
			// Remember which radius needs the most points (used for shared
			// memory sizing on the GPU side).
			circle_radius_with_max_points_ = r;
		}

		total_num_of_circle_points_ += size;
	}
	ReportMethodExecutionTime(__func__, CV_TIME(time));
}

// Pre-computes, for every radius 0..max_lambda_ and every angle in bounds_.phi,
// the midpoint-line point set from the origin, recording the largest count.
void Ciratefi::CreateLines() {
	double time = (double)getTickCount();
	// Rebuild from scratch and reset the cached maximum; the old code left
	// max_num_of_line_points_ stale across repeated calls.
	lines_.clear();
	max_num_of_line_points_ = 0;

	for (int r = 0; r <= max_lambda_; ++r) {
		lines_.push_back(std::vector<std::vector<Point2D> >());
		// Per-radius bucket (replaces the old manually-tracked r_i index).
		std::vector<std::vector<Point2D> >& radial_lines = lines_.back();
		for (double phi = bounds_.phi.min; phi <= bounds_.phi.max; phi += bounds_.phi.step) {
			std::vector<Point2D> line = recognition::MidpointLine(0, 0, cos(phi) * r, sin(phi) * r, bounds_.line_incr);
			int size = line.size();
			if (max_num_of_line_points_ < size) {
				max_num_of_line_points_ = size;
			}
			radial_lines.push_back(line);
		}
	}
	ReportMethodExecutionTime(__func__, CV_TIME(time));
}

// Lazily builds (and caches) the search image I2_ surrounded by an
// offset_-pixel border filled with out_of_boundaries_value_, so affine
// sampling in Tefi can read slightly outside the original image without
// bounds checks.
Mat& Ciratefi::A_expanded() {
	if (A_expanded_.empty()) {
		double time = (double)getTickCount();

		Mat I = Mat_<double>(I2_.rows + 2 * offset_, I2_.cols + 2 * offset_);
		I.setTo(Scalar(out_of_boundaries_value_));

		// Copy I2_ into the interior ROI with OpenCV instead of a manual
		// per-pixel loop (same result; also handles a non-continuous source).
		Mat interior = I(Rect(offset_, offset_, I2_.cols, I2_.rows));
		I2_.copyTo(interior);

		A_expanded_ = I;
		ReportMethodExecutionTime(__func__, CV_TIME(time));
	}

	return A_expanded_;
}

// Lazily builds (and caches) a pinned-host (cudaMallocHost) copy of the search
// image, padded with the out-of-boundary border and with both dimensions
// expanded to a multiple of the CUDA block size for aligned device accesses.
Matrix2d Ciratefi::A_expanded_and_alligned_by_block_size() {
	if (A_expanded_and_alligned_by_block_size_.width == 0) {
		double time = (double)getTickCount();

		Matrix2d M(EXPAND_TO_BLOCK_SIZE(I2_.rows) + 2 * offset_, EXPAND_TO_BLOCK_SIZE(I2_.cols) + 2 * offset_);
		checkCudaErrors( cudaMallocHost((void**) &M.elements, M.height * M.width * sizeof(double)) );

		// Fill the whole buffer with the boundary value first.
		// TODO: init only offset and block alignment elements
		int total = M.height * M.width;
		for (int idx = 0; idx < total; ++idx) {
			M.elements[idx] = out_of_boundaries_value_;
		}

		// Then overwrite the interior with the actual image rows.
		for (int row = 0; row < I2_.rows; ++row) {
			const double* src = I2_.ptr<double>(row);
			double* dst = M.elements + (row + offset_) * M.width + offset_;
			for (int col = 0; col < I2_.cols; ++col) {
				dst[col] = src[col];
			}
		}

		A_expanded_and_alligned_by_block_size_ = M;
		ReportMethodExecutionTime(__func__, CV_TIME(time));
	}

	return A_expanded_and_alligned_by_block_size_;
}

// Mean intensity of I sampled along the pre-computed circle of the given
// radius, centered at `center`. Points falling outside the image are skipped;
// a fully out-of-bounds circle yields 0.
double Ciratefi::Cis(Mat& I, Point2D center, int radius) {
	double res = 0;

	// Const reference instead of a by-value copy: the point lists can be large
	// and this is called once per (scale, radius) pair.
	const std::vector<Point2D>& circle = circles_[RadiusToIdx(radius)];
	int num_inner_points = 0;
	int npoints = (int) circle.size();
	for (int i = 0; i < npoints; ++i) {
		int y = circle[i].y + center.y;
		int x = circle[i].x + center.x;

		if (x < 0 || y < 0 || x >= I.cols || y >= I.rows)
			continue;

		// Reuse the coordinates computed above instead of re-adding them.
		res += I.at<double>(y, x);
		++num_inner_points;
	}
	// Guard against dividing by zero when every point was out of bounds.
	res = res / (num_inner_points == 0 ? 1 : num_inner_points);

	return res;
}

// Mean intensity of I sampled along the pre-computed radial line of the given
// radius and angle, starting at `start`. An empty line yields 0.
double Ciratefi::Ras(Mat& I, Point2D start, int radius, double angle) {
	int r_i = radius;
	int phi_i = AngleToIdx(angle);
	// Const reference instead of a by-value copy of the point list.
	const std::vector<Point2D>& line = lines_[r_i][phi_i];

	double res = 0;
	int npoints = (int) line.size();
	for (int i = 0; i < npoints; ++i) {
		res += I.at<double>(start.y + line[i].y, start.x + line[i].x);
	}

	// Degenerate (empty) line: return 0 instead of 0.0/0 == NaN, mirroring
	// the zero-point guard in Cis().
	return npoints == 0 ? 0 : res / npoints;
}

// Correlation of two equally-sized sample arrays, delegating to the shared
// recognition::corr routine with this instance's brightness/contrast thresholds.
double Ciratefi::corr(double* x, double* y, int npoints) {
	double result = recognition::corr(x, y, npoints, bc_thresholds_);
	return result;
}

// Lazily flattens the template image I1_ into a cached row-major double array.
// The buffer is owned by this instance (Q_) and reused across calls.
double* Ciratefi::Q() {
	if (Q_ == NULL) {
		Q_ = (double*) malloc(I1_.rows * I1_.cols * sizeof(double));
		double* dst = Q_;
		for (int row = 0; row < I1_.rows; ++row) {
			// Row pointer handles non-continuous Mats correctly.
			const double* src = I1_.ptr<double>(row);
			for (int col = 0; col < I1_.cols; ++col, ++dst) {
				*dst = src[col];
			}
		}
	}

	return Q_;
}

// Ensures both images occupy contiguous memory, cloning any Mat that is not
// continuous (the flat-array copies elsewhere in this class assume it).
void Ciratefi::make_images_continuous() {
	bool i1_fragmented = !I1_.isContinuous();
	bool i2_fragmented = !I2_.isContinuous();
	if (!i1_fragmented && !i2_fragmented)
		return;

	double time = (double)getTickCount();
	if (i1_fragmented) {
		I1_ = I1_.clone();
		LOG_INFO(logger, "I1 is not continuous. Cloning...");
	}
	if (i2_fragmented) {
		I2_ = I2_.clone();
		LOG_INFO(logger, "I2 is not continuous. Cloning...");
	}
	ReportMethodExecutionTime(__func__, CV_TIME(time));
}

}	// namespace gpu
}	// namespace recognition
