#include <cuda_runtime.h>
#include <helper_cuda.h>
#include <helper_functions.h>

#include <log4cxx/logger.h>
#include <base/logging.h>
#include <base/types.cuh>
#include <base/methods.cuh>
#include <gpu/cu_methods.cuh>
#include <gpu/fast_match_helper.cuh>

namespace recognition {

// Enumerates the full 6D search grid: thread `tid` writes configuration
// number tid as 6 doubles into configs[tid*6 .. tid*6+5].
// Config layout is [tx, ty, r2, sx, sy, r1] (matched by
// _cu_configs_to_affine__global__).
__global__ void _cu_create_configs__global__(_cu_create_configs__global__KernelArgs *args, uint nconfigs, double* configs) {
	uint tid = blockIdx.x * blockDim.x + threadIdx.x;

	if (tid >= nconfigs)
		return;

	double* config = &(configs[tid * 6]);

	// Read the shared arg struct once into registers.
	_cu_create_configs__global__KernelArgs args_ = *args;
	FastMatchBoundaries bounds = args_.bounds;

	// Decode tid as a mixed-radix number over the six search dimensions,
	// least-significant digit first: sy, sx, r2, r1, ty, tx.
	int rem = tid;
	int sy_idx = rem % args_.ns_steps;
	rem = rem / args_.ns_steps;
	int sx_idx = rem % args_.ns_steps;
	rem = rem / args_.ns_steps;
	int r2_idx = rem % args_.nr2_steps;
	rem = rem / args_.nr2_steps;
	int r1_idx = rem % args_.nr_steps;
	rem = rem / args_.nr_steps;
	int ty_idx = rem % args_.nty_steps;
	rem = rem / args_.nty_steps;
	int tx_idx = rem % args_.ntx_steps;

	// Convert grid indices into parameter values: min + idx * step per axis.
	// Both rotations share the r bounds; both scales share the s bounds.
	config[0] = bounds.tx.min + tx_idx * bounds.tx.step;
	config[1] = bounds.ty.min + ty_idx * bounds.ty.step;
	config[2] = bounds.r.min + r2_idx * bounds.r.step;
	config[3] = bounds.s.min + sx_idx * bounds.s.step;
	config[4] = bounds.s.min + sy_idx * bounds.s.step;
	config[5] = bounds.r.min + r1_idx * bounds.r.step;
}

// Converts configuration tid into a 2x3 affine matrix and decides whether it
// maps the template roughly inside the target image.
// Writes the 6 affine coefficients [a11 a12 a13 a21 a22 a23] into
// affines[tid*6..] and sets filter[tid] to true when all 4 mapped template
// corners fall within the target image extended by a margin of q pixels.
__global__ void _cu_configs_to_affine__global__(double* configs, double* affines, uint nconfigs,
		_cu_configs_to_affine__global__KernelArgs *args, bool* filter) {
	uint tid = blockIdx.x * blockDim.x + threadIdx.x;
	if (tid >= nconfigs)
		return;

	double* config = &(configs[tid * 6]);
	double* affine = &(affines[tid * 6]);

	// Read the shared arg struct once into registers.
	_cu_configs_to_affine__global__KernelArgs args_ = *args;
	int w1, h1, w2, h2;		// template (1) and target image (2) dimensions
	int r1x, r1y, r2x, r2y;	// template and target center offsets
	w1 = args_.w1; h1 = args_.h1; w2 = args_.w2; h2 = args_.h2;
	r1x = args_.r1x; r1y = args_.r1y; r2x = args_.r2x; r2y = args_.r2y;


	// Config layout (see _cu_create_configs__global__): [tx, ty, r2, sx, sy, r1].
	double tx, ty, r1, sx, sy, r2;
	tx = config[0];
	ty = config[1];
	r2 = config[2];
	sx = config[3];
	sy = config[4];
	r1 = config[5];

	// Linear part is R(r2) * diag(sx, sy) * R(r1); a13/a23 carry translation.
	double a11, a12, a13, a21, a22, a23;
	affine[0] = a11 = sx * cos(r1) * cos(r2) - sy * sin(r1) * sin(r2);
	affine[1] = a12 = - sx * cos(r1) * sin(r2) - sy * cos(r2) * sin(r1);
	affine[2] = a13 = tx;
	affine[3] = a21 = sx * cos(r2) * sin(r1) + sy * cos(r1) * sin(r2);
	affine[4] = a22 = sy * cos(r1) * cos(r2) - sx * sin(r1) * sin(r2);
	affine[5] = a23 = ty;

	// Map the 4 template corners (1-based coords, relative to the template
	// center (r1x+1, r1y+1)) into the target image frame centered at (r2x+1, r2y+1).
	double c1x, c1y, c2x, c2y, c3x, c3y, c4x, c4y;
	c1x = a11 * (1 - (r1x + 1)) + a12 * (1 - (r1y + 1)) + (r2x + 1) + a13;
	c1y = a21 * (1 - (r1x + 1)) + a22 * (1 - (r1y + 1)) + (r2y + 1) + a23;
	c2x = a11 * (w1 - (r1x + 1)) + a12 * (1 - (r1y + 1)) + (r2x + 1) + a13;
	c2y = a21 * (w1 - (r1x + 1)) + a22 * (1 - (r1y + 1)) + (r2y + 1) + a23;
	c3x = a11 * (w1 - (r1x + 1)) + a12 * (h1 - (r1y + 1)) + (r2x + 1) + a13;
	c3y = a21 * (w1 - (r1x + 1)) + a22 * (h1 - (r1y + 1)) + (r2y + 1) + a23;
	c4x = a11 * (1 - (r1x + 1)) + a12 * (h1 - (r1y + 1)) + (r2x + 1) + a13;
	c4y = a21 * (1 - (r1x + 1)) + a22 * (h1 - (r1y + 1)) + (r2y + 1) + a23;

	// allow to exceed boundary by at most q
	int q = 10;
	// BUGFIX: corner 3's vertical check used (c3y < h2 - q), breaking the
	// symmetry of the other 7 checks; the bottom boundary is h2 + q.
	int k = (c1x > -q) && (c1x < w2 + q) && (c1y > -q) && (c1y < h2 + q) &&
			(c2x > -q) && (c2x < w2 + q) && (c2y > -q) && (c2y < h2 + q) &&
			(c3x > -q) && (c3x < w2 + q) && (c3y > -q) && (c3y < h2 + q) &&
			(c4x > -q) && (c4x < w2 + q) && (c4y > -q) && (c4y < h2 + q);

	filter[tid] = k > 0;
}


// Stream-compacts configs/affines: thread tid copies its 6-double row of both
// arrays to output slot idxs[tid] when filter[tid] is set (idxs is expected to
// hold the exclusive prefix sum of filter, computed by the caller).
__global__ void _cu_filter_configs_and_affines__global__(uint nconfigs, double* configs, double* affines, bool* filter, uint* idxs,
		double* out_configs, double* out_affines) {
	uint tid = blockIdx.x * blockDim.x + threadIdx.x;

	if (tid >= nconfigs)
		return;

	if (!filter[tid])
		return;

	uint src = tid * 6;
	uint dst = idxs[tid] * 6;
	for (uint col = 0; col < 6; ++col) {
		out_configs[dst + col] = configs[src + col];
		out_affines[dst + col] = affines[src + col];
	}
}

// Evaluates one distance per affine: thread tid applies affine tid to the
// `npoints` sampled template points (xs_centered/ys_centered, relative to the
// template center) and accumulates the absolute difference between the
// template values vals_I1[j] and the sampled target image I.
// Plain mode: mean absolute difference (SAD / npoints).
// Photometric mode: the target samples are first normalized to the template's
// mean and standard deviation (two passes over the points, so nothing has to
// be stored per point).
// Result is written to distances[tid].
__global__ void _cu_eval_distances__global__(double* affines, uint naffines, int* xs_centered, int* ys_centered, double* vals_I1, uint npoints,
		double *I, _cu_eval_distances__global__KernelArgs *args, double *distances) {
	uint tid = blockIdx.x * blockDim.x + threadIdx.x;

	if (tid >= naffines)
		return;

	// Accumulated sum of absolute differences for this affine.
	double corr = 0;

	uint w2 = args->w2;
	uint h2 = args->h2;
	uint r2x = args->r2x;
	uint r2y = args->r2y;
	bool use_photometric = args->use_photometric;

	// Load this thread's 2x3 affine [a11 a12 a13; a21 a22 a23] into registers.
	double a11, a12, a13, a21, a22, a23;
	a11 = affines[6 * tid];
	a12 = affines[6 * tid + 1];
	a13 = affines[6 * tid + 2];
	a21 = affines[6 * tid + 3];
	a22 = affines[6 * tid + 4];
	a23 = affines[6 * tid + 5];

	int target_x, target_y, target_idx;

	// Point-independent parts of the mapping, hoisted out of the loops.
	// The +0.5 makes the truncating int assignment round to nearest.
	// NOTE(review): tmp2 adds 2 * h2 — presumably I is allocated with a 2*h2
	// row offset/padding so slightly out-of-range rows still index valid
	// memory; confirm against the host-side layout of I.
	double tmp1 = (r2x + 1) + a13 + 0.5;
	double tmp2 = (r2y + 1) + a23 + 0.5 + 2 * h2;
	if (!use_photometric) {
		for (uint j = 0; j < npoints ; j++) {
			target_x = a11 * xs_centered[j] + a12 * ys_centered[j] + tmp1; // includes rounding
			target_y = a21 * xs_centered[j] + a22 * ys_centered[j] + tmp2; // includes rounding
			target_idx = (target_y - 1) * w2 + target_x - 1; // -1 is for c (1-based coords -> 0-based row-major index)
			corr += fabs(vals_I1[j] - I[target_idx]);
		}
	} else {
		/* Store target pixel locations - x and y */
		// First pass: accumulate the sums needed for mean/std of both signals.
		double sumXiYi = 0;
		double sumXi = 0, sumYi = 0;
		double sumXiSqrd = 0, sumYiSqrd = 0;
		double Xi, Yi;

		for (uint j = 0; j < npoints ; j++) {
			target_x = a11 * xs_centered[j] + a12 * ys_centered[j] + tmp1; // includes rounding
			target_y = a21 * xs_centered[j] + a22 * ys_centered[j] + tmp2; // includes rounding
			target_idx = (target_y - 1) * w2 + (target_x - 1); // -1 for c (1-based coords -> 0-based index)
			Xi = vals_I1[j] ;
			Yi = I[target_idx];

			sumXi += Xi;
			sumYi += Yi;
			sumXiSqrd += (Xi * Xi);
			sumYiSqrd += (Yi * Yi);
		}
		// new score, based on normalizing mean and std of each of the signals
		// epsilon keeps sigY (and the ratio below) away from division by zero.
		double epsilon = 0.0000001;
		double meanX, meanY;
		meanX = sumXi / npoints;
		meanY = sumYi / npoints;

		// Population std via E[x^2] - E[x]^2 (single-pass form).
		double sigX, sigY;
		sigX = sqrt((sumXiSqrd - (sumXi * sumXi) / npoints) / npoints) + epsilon;
		sigY = sqrt((sumYiSqrd - (sumYi * sumYi) / npoints) / npoints) + epsilon;

		double sigXoversigY = sigX / sigY;

		// Variable that stores a sum used repeatadly in the computation: -meanX+sigXoversigY*meanY
		double faster = -meanX + sigXoversigY * meanY;

		// Calc again, since it's faster than dynamically allocate memory inside kernel to store temp result
		// Second pass: |Xi - (sigX/sigY)*(Yi - meanY) - meanX| via the
		// precomputed `faster` term.
		for (uint j = 0; j < npoints ; j++) {
			target_x = a11 * xs_centered[j] + a12 * ys_centered[j] + tmp1; // includes rounding
			target_y = a21 * xs_centered[j] + a22 * ys_centered[j] + tmp2; // includes rounding
			target_idx = (target_y - 1) * w2 + (target_x - 1); // -1 for c
			Xi = vals_I1[j] ;
			Yi = I[target_idx];

			corr += fabs(Xi - sigXoversigY * Yi + faster);
		}
	}

	distances[tid] = corr / npoints;
}

// Expands the surviving config set Q into S: each of the nQ source configs
// spawns `extension_scale` randomly perturbed copies. Thread tid perturbs
// config tid/extension_scale by +/- one half-step (or zero) per dimension,
// with the direction decoded from rand_vec[tid].
__global__ void _cu_create_S_random__global__(double* Q, uint nQ, uint extension_scale, uint* rand_vec,
		_cu_create_S_random__global__KernelArgs *args, double* S) {
	uint tid = blockIdx.x * blockDim.x + threadIdx.x;

	if (tid >= nQ * extension_scale)
		return;

	// Read the shared arg struct once into registers.
	_cu_create_S_random__global__KernelArgs args_ = *args;

	double* src = &(Q[(tid / extension_scale) * 6]);
	double* dst = &(S[tid * 6]);

	// Decode the random value into six base-3 digits in {0,1,2}
	// (729 = 3^6); (digit - 1) gives a direction in {-1, 0, +1}.
	int m[6];
	gpu::idx_to_vector_dim6(rand_vec[tid] % 729, m[0], m[1], m[2], m[3], m[4], m[5]);

	// Half-step per dimension, in config order [tx, ty, r2, sx, sy, r1]:
	// both rotations share halfstep_r, both scales share halfstep_s.
	double halfstep[6] = { args_.halfstep_tx, args_.halfstep_ty, args_.halfstep_r,
			args_.halfstep_s, args_.halfstep_s, args_.halfstep_r };

	for (uint i = 0; i < 6; ++i)
		dst[i] = src[i] + halfstep[i] * (m[i] - 1);
}

// Appends the `size` values from, from+step, from+2*step, ... to v.
// Aborts the process (fprintf + exit) when step is not strictly positive,
// matching the file's fail-fast error handling.
template < typename T >
void FillRangeWithStep(T from, T step, uint size, std::vector<T>& v) {
	if (step <= 0) {
		fprintf(stderr, "%s: step=%.4f <= 0\n", __func__, (double) step);
		exit(1);
	}

	// Reserve up front: the final size is known, so avoid reallocations.
	v.reserve(v.size() + size);
	T val = from;
	for (uint i = 0; i < size; ++i) {
		v.push_back(val);
		val += step;
	}
}

template void FillRangeWithStep(double from, double step, uint size, std::vector<double>& v);

// Arithmetic mean of v[from..to) — `to` is exclusive.
// Precondition (checked via assert): from < to <= v.size().
template < typename T >
// to exclusive
T mean(std::vector<T>& v, uint from, uint to) {
	// `from >= 0` was vacuous for an unsigned parameter; assert the real
	// precondition instead, which also rules out the empty range that made
	// the final division divide by zero.
	assert(from < to && to <= v.size());
	T res = 0;
	for (uint i = from; i < to; ++i) {
		res += v[i];
	}

	return res / (to - from);
}

template double mean(std::vector<double>& v, uint from, uint to);

// Appends `size` random ints uniformly drawn from [from, max_rand] to v.
// Negative `from` is clamped to 0. Uses rand(); call srand() beforehand for
// reproducibility.
void FillWithRandoms(uint size, int max_rand, int from, std::vector<int>& v) {
	from = from > 0 ? from : 0;
	// Number of distinct values in the range; guard against a degenerate
	// range, where `rand() % span` with span <= 0 would be undefined.
	int span = max_rand + 1 - from;
	if (span < 1)
		span = 1;	// emit `from` only
	v.reserve(v.size() + size);
	// uint counter: avoids the signed/unsigned comparison with `size`.
	for (uint i = 0; i < size; ++i) {
		v.push_back(rand() % span + from);
	}
}

// Appends a coarse regular grid of points to xs/ys: every 5th row and every
// 5th column, reported as 1-based coordinates (row-major push order).
void fill_xsys(uint w, uint h, std::vector<int>& xs, std::vector<int>& ys) {
	const uint stride = 5;
	for (uint row = 0; row < h; row += stride) {
		for (uint col = 0; col < w; col += stride) {
			ys.push_back(row + 1);
			xs.push_back(col + 1);
		}
	}
}

// Picks sample points in a w x h image, appending 1-based coords to xs/ys.
// Dense requests (npoints >= w*h/4) use a regular grid keyed on
// (w*h) % npoints — note this yields approximately, not exactly, npoints
// points. Sparse requests draw exactly npoints distinct uniform pixels.
void GetRandomPoints(uint npoints, uint w, uint h, std::vector<int>& xs, std::vector<int>& ys) {
	if (npoints >= (w * h) / 4) {
		uint mod = (w * h) % npoints;
		// BUGFIX: when npoints divides w*h, mod was 0 and `y % mod` below was
		// undefined (integer division by zero). Fall back to the full grid.
		if (mod == 0)
			mod = 1;
		for (uint y = 0; y < h; ++y)
			if (y % mod == 0)
			for (uint x = 0; x < w; ++x)
				if (x % mod == 0) {
					ys.push_back(y + 1);
					xs.push_back(x + 1);
				}
	} else {
		// Rejection-sample distinct pixels via a w*h occupancy map; this
		// terminates because npoints < w*h/4 in this branch.
		unsigned char* pts2d = (unsigned char*) malloc(w * h * sizeof(unsigned char));
		memset(pts2d, 0, w * h * sizeof(unsigned char));
		while (npoints > 0) {
			uint y = rand() % h;
			uint x = rand() % w;
			uint idx = y * w + x;
			if (!pts2d[idx]) {
				pts2d[idx] = 1;
				ys.push_back(y + 1);
				xs.push_back(x + 1);
				--npoints;
			}
		}
		free(pts2d);
	}
}

// Logs "LEVEL nn: number of configs ... [nconfigs]", padded with the tail of
// the PADDER fill string so the bracketed value aligns across log lines.
void ReportNumberOfCreatedConfigs(log4cxx::LoggerPtr logger, uint level, uint nconfigs) {
	static const char level_preffix[] = "LEVEL ";
	static const char number_of_configs_preffix[] = ": number of configs";
	size_t level_preffix_len = sizeof(level_preffix) - 1;
	size_t number_of_configs_preffix_len = sizeof(number_of_configs_preffix) - 1;
	size_t padder_len = sizeof(PADDER) - 1;
	size_t level_len = 2;	// width of the %2d level field
	// pad_amount may wrap below zero (size_t), hence the signed cast in the test.
	size_t pad_amount = padder_len - level_preffix_len - number_of_configs_preffix_len - level_len;
	if ((int) pad_amount > 0) {
		// PADDER + offset skips the characters already consumed by the
		// prefixes, keeping the total line width constant.
		LOG_INFO(logger, "%s%2d%s%s[%d]", level_preffix, level, number_of_configs_preffix,
				PADDER + level_preffix_len + number_of_configs_preffix_len + level_len, nconfigs);
	} else {
		// BUGFIX: dropped the stray ')' the unpadded format string appended,
		// making both branches produce the same message shape.
		LOG_INFO(logger, "%s%2d%s[%d]", level_preffix, level, number_of_configs_preffix, nconfigs);
	}
}

// Logs "[naffines] affines created in ... [time sec]", padded with the tail
// of the PADDER fill string so the time value aligns across log lines.
void ReportNumberOfCreatedAffines(log4cxx::LoggerPtr logger, uint naffines, double time) {
	static const char affines_created_preffix[] = " affines created in";
	// 8 chars for the %8d field plus 2 for the surrounding brackets.
	size_t naffines_preffix_len = 8 + 2;
	size_t affines_created_preffix_len = sizeof(affines_created_preffix) - 1;
	size_t padder_len = sizeof(PADDER) - 1;
	// May wrap below zero (size_t), hence the signed cast in the test.
	size_t pad_amount = padder_len - naffines_preffix_len - affines_created_preffix_len;
	if ((int) pad_amount > 0) {
		// PADDER + offset skips the characters already consumed by the
		// prefixes, keeping the total line width constant.
		LOG_INFO(logger, "[%8d]%s%s[%.4f sec]", naffines, affines_created_preffix,
				PADDER + naffines_preffix_len + affines_created_preffix_len, time);
	} else {
		LOG_INFO(logger, "[%8d]%s[%.4f sec]", naffines, affines_created_preffix, time);
	}
}

// Logs the total time spent creating and evaluating affines, right-aligning
// the value by consuming the leading part of the PADDER fill string.
void ReportAffinesEvaluationTime(log4cxx::LoggerPtr logger, double time) {
	static const char prefix[] = "Creating and evaluating affines time:";
	const size_t prefix_len = sizeof(prefix) - 1;
	const size_t padder_len = sizeof(PADDER) - 1;
	// The subtraction may wrap (size_t), hence the signed comparison.
	if ((int) (padder_len - prefix_len) > 0) {
		LOG_INFO(logger, "%s%s[%.4f sec]", prefix, PADDER + prefix_len, time);
	} else {
		LOG_INFO(logger, "%s[%.4f sec]", prefix, time);
	}
}

// Logs "LEVEL nn: time ... [time]", padded with the tail of the PADDER fill
// string so the bracketed value aligns across log lines.
void ReportLevelTime(log4cxx::LoggerPtr logger, uint level, double time) {
	static const char level_preffix[] = "LEVEL ";
	static const char time_preffix[] = ": time";
	size_t level_preffix_len = sizeof(level_preffix) - 1;
	size_t time_preffix_len = sizeof(time_preffix) - 1;
	size_t padder_len = sizeof(PADDER) - 1;
	size_t level_len = 2;	// width of the %2d level field
	// pad_amount may wrap below zero (size_t), hence the signed cast in the test.
	size_t pad_amount = padder_len - level_preffix_len - time_preffix_len - level_len;
	if ((int) pad_amount > 0) {
		// PADDER + offset skips the characters already consumed by the
		// prefixes, keeping the total line width constant.
		LOG_INFO(logger, "%s%2d%s%s[%.4f]", level_preffix, level, time_preffix,
				PADDER + level_preffix_len + time_preffix_len + level_len, time);
	} else {
		// BUGFIX: dropped the stray ')' the unpadded format string appended,
		// making both branches produce the same message shape.
		LOG_INFO(logger, "%s%2d%s[%.4f]", level_preffix, level, time_preffix, time);
	}
}

// Logs the total FAsT-Match wall time, right-aligning the value by consuming
// the leading part of the PADDER fill string.
void ReportFastMatchTime(log4cxx::LoggerPtr logger, double time) {
	static const char prefix[] = "FAsT-Match completed in";
	const size_t prefix_len = sizeof(prefix) - 1;
	const size_t padder_len = sizeof(PADDER) - 1;
	// The subtraction may wrap (size_t), hence the signed comparison.
	if ((int) (padder_len - prefix_len) > 0) {
		LOG_INFO(logger, "%s%s[%.4f sec]", prefix, PADDER + prefix_len, time);
	} else {
		LOG_INFO(logger, "%s[%.4f sec]", prefix, time);
	}
}

// Logs the winning affine as a 3x3 (extended) matrix, one row per line,
// followed by its SAD error.
void ReportFastMatchResult(log4cxx::LoggerPtr logger, FastMatchResult res) {
	LOG_INFO(logger, "Best affine transformation MATRIX (extended):");
	for (int row = 0; row < 3; ++row) {
		LOG_INFO(logger, "[%10.4f, %10.4f, %10.4f]",
				res.best_affine[row][0], res.best_affine[row][1], res.best_affine[row][2]);
	}
	LOG_INFO(logger, "SAD-error: [%.4f]", res.error);
}

}	// namespace recognition
