#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>

#include <THC/THC.h>
#include <THC/THCDeviceUtils.cuh>

#include <vector>
#include <iostream>

// Checks a CUDA runtime call and reports failures (non-fatal: execution
// continues, matching the original behavior). Errors go to stderr with the
// call site so they are not lost in regular program output.
#define CUDA_CHECK(condition) \
  /* Code block avoids redefinition of cudaError_t error */ \
  do { \
    cudaError_t error = condition; \
    if (error != cudaSuccess) { \
      std::cerr << "CUDA error at " << __FILE__ << ":" << __LINE__ \
                << ": " << cudaGetErrorString(error) << std::endl; \
    } \
  } while (0)

// Integer ceiling division: number of chunks of size n needed to cover m items.
#define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0))
// One thread per box within a block. Also the bit width of the per-chunk
// suppression word (unsigned long long), i.e. 64.
int const threadsPerBlock = sizeof(unsigned long long) * 8;

// Signed area of triangle (a, b, c) via the 2-D cross product of (a - c) and
// (b - c). Each point is a float[2] laid out as {x, y}; the sign encodes the
// winding direction, which callers use for side-of-line tests.
__device__ inline float trangle_area(float * a, float * b, float * c) {
	// * 0.5f is bit-identical to the former / 2.0 (exact power-of-two divide)
	// but avoids promoting the expression to double on every call.
	return ((a[0] - c[0]) * (b[1] - c[1]) - (a[1] - c[1]) * (b[0] - c[0])) * 0.5f;
}

// Area of a polygon stored as num_of_inter interleaved (x, y) pairs in
// int_pts, computed as a triangle fan rooted at vertex 0. Assumes the
// vertices are already ordered around the hull (see reorder_pts); returns
// 0 for fewer than 3 vertices.
__device__ inline float area(float * int_pts, int num_of_inter) {

	float area = 0.0f;
	for (int i = 0; i < num_of_inter - 2; i++) {
		// fabsf keeps the accumulation in single precision (fabs would
		// promote each term to double).
		area += fabsf(trangle_area(int_pts, int_pts + 2 * i + 2, int_pts + 2 * i + 4));
	}
	return area;
}

// Sorts the num_of_inter intersection points in int_pts (interleaved x, y)
// in place by angle around their centroid, so the polygon can be
// triangulated by area(). No-op when num_of_inter == 0. Supports at most
// 8 points (16 floats), which is the maximum for two rectangles.
__device__ inline void reorder_pts(float * int_pts, int num_of_inter) {

	if (num_of_inter > 0) {

		// Centroid of the point set; every point is keyed by its direction
		// from here.
		float center[2];

		center[0] = 0.0f;
		center[1] = 0.0f;

		for (int i = 0; i < num_of_inter; i++) {
			center[0] += int_pts[2 * i];
			center[1] += int_pts[2 * i + 1];
		}
		center[0] /= num_of_inter;
		center[1] /= num_of_inter;

		// vs[i] is a monotonic pseudo-angle key for point i: the normalized
		// x-component in [-1, 1] for the upper half-plane, remapped to
		// [-3, -1] via (-2 - x) for the lower half-plane, so sorting by the
		// key walks the points in a consistent angular order without any
		// trig calls.
		float vs[16];
		float v[2];
		float d;
		for (int i = 0; i < num_of_inter; i++) {
			v[0] = int_pts[2 * i] - center[0];
			v[1] = int_pts[2 * i + 1] - center[1];
			// NOTE(review): d == 0 (point coincides with the centroid) would
			// yield NaN keys; presumably impossible for a genuine polygon
			// intersection — confirm upstream.
			// sqrtf avoids the double-precision promotion of bare sqrt.
			d = sqrtf(v[0] * v[0] + v[1] * v[1]);
			v[0] = v[0] / d;
			v[1] = v[1] / d;
			if (v[1] < 0) {
				v[0] = -2 - v[0];
			}
			vs[i] = v[0];
		}

		// Insertion sort of the (key, point) pairs by key; fine for <= 8
		// points and branch-cheap on a GPU.
		float temp, tx, ty;
		int j;
		for (int i = 1; i < num_of_inter; ++i) {
			if (vs[i - 1] > vs[i]) {
				temp = vs[i];
				tx = int_pts[2 * i];
				ty = int_pts[2 * i + 1];
				j = i;
				while (j > 0 && vs[j - 1] > temp) {
					vs[j] = vs[j - 1];
					int_pts[j * 2] = int_pts[j * 2 - 2];
					int_pts[j * 2 + 1] = int_pts[j * 2 - 1];
					j--;
				}
				vs[j] = temp;
				int_pts[j * 2] = tx;
				int_pts[j * 2 + 1] = ty;
			}
		}
	}

}
// Proper-crossing test between edge i of quadrilateral pts1 and edge j of
// quadrilateral pts2 (each quad stored as 4 interleaved (x, y) corners,
// edges wrapping modulo 4). On a strict crossing the intersection point is
// written to temp_pts[0..1] and true is returned; touching or collinear
// configurations return false.
__device__ inline bool inter2line(float * pts1, float *pts2, int i, int j, float * temp_pts) {

	float p[2], q[2], r[2], s[2];

	// Edge i of pts1 runs p -> q; edge j of pts2 runs r -> s.
	p[0] = pts1[2 * i];
	p[1] = pts1[2 * i + 1];
	q[0] = pts1[2 * ((i + 1) % 4)];
	q[1] = pts1[2 * ((i + 1) % 4) + 1];
	r[0] = pts2[2 * j];
	r[1] = pts2[2 * j + 1];
	s[0] = pts2[2 * ((j + 1) % 4)];
	s[1] = pts2[2 * ((j + 1) % 4) + 1];

	// r and s must lie strictly on opposite sides of line p-q (signed areas
	// with opposite signs).
	float area_pqr = trangle_area(p, q, r);
	float area_pqs = trangle_area(p, q, s);
	if (area_pqr * area_pqs >= 0) {
		return false;
	}

	// ... and p, q strictly on opposite sides of line r-s. area_rsq is
	// derived algebraically instead of calling trangle_area again.
	float area_rsp = trangle_area(r, s, p);
	float area_rsq = area_rsp + area_pqr - area_pqs;
	if (area_rsp * area_rsq >= 0) {
		return false;
	}

	// Parametric position of the crossing along p -> q.
	float t = area_rsp / (area_pqs - area_pqr);
	float dx = t * (q[0] - p[0]);
	float dy = t * (q[1] - p[1]);
	temp_pts[0] = p[0] + dx;
	temp_pts[1] = p[1] + dy;

	return true;
}

// Tests whether point (pt_x, pt_y) lies inside (or on the boundary of) the
// rectangle whose 4 corners are stored in pts as interleaved (x, y). Corner 0
// is the origin; AB is the edge towards corner 1 and AD the edge towards
// corner 3. The point is inside iff its projections onto both edge directions
// fall within the respective edge lengths.
__device__ inline bool in_rect(float pt_x, float pt_y, float * pts) {

	float ab[2], ad[2], ap[2];

	ab[0] = pts[2] - pts[0];
	ab[1] = pts[3] - pts[1];

	ad[0] = pts[6] - pts[0];
	ad[1] = pts[7] - pts[1];

	ap[0] = pt_x - pts[0];
	ap[1] = pt_y - pts[1];

	// Dot products: inside iff 0 <= AP.AB <= AB.AB and 0 <= AP.AD <= AD.AD.
	float abab = ab[0] * ab[0] + ab[1] * ab[1];
	float abap = ab[0] * ap[0] + ab[1] * ap[1];
	float adad = ad[0] * ad[0] + ad[1] * ad[1];
	float adap = ad[0] * ap[0] + ad[1] * ap[1];

	return (abap >= 0) && (abab >= abap) && (adap >= 0) && (adad >= adap);
}

// Collects the vertices of the intersection polygon of two quadrilaterals
// (each 4 interleaved (x, y) corners) into int_pts: every corner of one quad
// lying inside the other, plus every edge-edge crossing. Returns the number
// of points written (each occupies two floats in int_pts; at most 8 for two
// rectangles, so int_pts must hold at least 16 floats).
__device__ inline int inter_pts(float * pts1, float * pts2, float * int_pts) {

	int count = 0;

	// Contained corners of either quad are intersection-polygon vertices.
	for (int k = 0; k < 4; k++) {
		if (in_rect(pts1[2 * k], pts1[2 * k + 1], pts2)) {
			int_pts[count * 2] = pts1[2 * k];
			int_pts[count * 2 + 1] = pts1[2 * k + 1];
			count++;
		}
		if (in_rect(pts2[2 * k], pts2[2 * k + 1], pts1)) {
			int_pts[count * 2] = pts2[2 * k];
			int_pts[count * 2 + 1] = pts2[2 * k + 1];
			count++;
		}
	}

	// Crossings between every pair of edges supply the remaining vertices.
	float crossing[2];
	for (int e1 = 0; e1 < 4; e1++) {
		for (int e2 = 0; e2 < 4; e2++) {
			if (inter2line(pts1, pts2, e1, e2, crossing)) {
				int_pts[count * 2] = crossing[0];
				int_pts[count * 2 + 1] = crossing[1];
				count++;
			}
		}
	}

	return count;
}

// Expands a rotated-region descriptor into its 4 corner points.
// region = {ctr_x, ctr_y, w, h, angle}; the active code treats angle as
// radians (the commented-out lines below are the degree variant).
// Corners are written into pts as 4 interleaved (x, y) pairs; note the
// 7 - 2*i indexing stores them in reverse of the local corner order
// (local corner 0 lands in pts[6..7], corner 3 in pts[0..1]).
__device__ inline void convert_region(float * pts, float const * const region) {

	float angle = region[4];
	//float a_cos = cos(angle / 180.0*3.1415926535);
	//float a_sin = sin(angle / 180.0*3.1415926535);
	float a_cos = cos(angle);
	float a_sin = sin(angle);

	float ctr_x = region[0];
	float ctr_y = region[1];

	float w = region[2];
	float h = region[3];

	// Axis-aligned corner offsets around the center, before rotation.
	float pts_x[4];
	float pts_y[4];

	pts_x[0] = -w / 2;
	pts_x[1] = w / 2;
	pts_x[2] = w / 2;
	pts_x[3] = -w / 2;

	pts_y[0] = -h / 2;
	pts_y[1] = -h / 2;
	pts_y[2] = h / 2;
	pts_y[3] = h / 2;

	// Rotate each offset by angle and translate to the center; 7 - 2*i - 1 is
	// the x slot and 7 - 2*i the y slot of the (3 - i)-th output corner.
	for (int i = 0; i < 4; i++) {
		pts[7 - 2 * i - 1] = a_cos * pts_x[i] - a_sin * pts_y[i] + ctr_x;
		pts[7 - 2 * i] = a_sin * pts_x[i] + a_cos * pts_y[i] + ctr_y;

	}

}


// Intersection area of two rotated regions, each described as
// {ctr_x, ctr_y, w, h, angle}.
__device__ inline float inter(float const * const region1, float const * const region2) {

	float corners1[8];
	float corners2[8];
	float overlap[16];

	// Corner points of both regions, then the vertices of their overlap
	// polygon.
	convert_region(corners1, region1);
	convert_region(corners2, region2);
	int n = inter_pts(corners1, corners2, overlap);

	// Order the overlap polygon's vertices by angle before triangulating.
	reorder_pts(overlap, n);

	return area(overlap, n);
}

// Intersection-over-union of two rotated regions {ctr_x, ctr_y, w, h, angle}.
// Returns 0 when the union area is not positive (both boxes degenerate),
// instead of producing NaN from a 0/0 division.
__device__ inline float devRotateIoU(float const * const region1, float const * const region2) {

	float area1 = region1[2] * region1[3];
	float area2 = region2[2] * region2[3];
	float area_inter = inter(region1, region2);
	float area_union = area1 + area2 - area_inter;

	// Guard the degenerate case: a NaN here would poison the NMS threshold
	// comparison in the kernel.
	if (area_union <= 0.0f) {
		return 0.0f;
	}

	return area_inter / area_union;
}

// Builds the pairwise suppression bitmask for rotated NMS.
// Launch layout: grid (DIVUP(n_boxes, 64), DIVUP(n_boxes, 64)), 64 threads
// per block (threadsPerBlock). Block (col_start, row_start) compares every
// box of row chunk row_start against every box of column chunk col_start.
// dev_boxes: n_boxes rows of 6 floats {ctr_x, ctr_y, w, h, angle, score},
// assumed sorted by descending score by the host wrapper.
// dev_mask: n_boxes x DIVUP(n_boxes, 64) words; bit i of word
// [box][col_start] is set when IoU(box, col_start*64 + i) exceeds
// nms_overlap_thresh.
__global__ void rotate_nms_kernel(const int n_boxes, const float nms_overlap_thresh,
	const float *dev_boxes, unsigned long long *dev_mask) {
	const int row_start = blockIdx.y;
	const int col_start = blockIdx.x;

	// if (row_start > col_start) return;

	// Sizes of the (possibly partial) last chunks.
	const int row_size =
		min(n_boxes - row_start * threadsPerBlock, threadsPerBlock);
	const int col_size =
		min(n_boxes - col_start * threadsPerBlock, threadsPerBlock);

	// Stage the column chunk's boxes in shared memory: every thread of the
	// block re-reads all of them in the IoU loop below.
	__shared__ float block_boxes[threadsPerBlock * 6];
	if (threadIdx.x < col_size) {
		block_boxes[threadIdx.x * 6 + 0] =
			dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 6 + 0];
		block_boxes[threadIdx.x * 6 + 1] =
			dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 6 + 1];
		block_boxes[threadIdx.x * 6 + 2] =
			dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 6 + 2];
		block_boxes[threadIdx.x * 6 + 3] =
			dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 6 + 3];
		block_boxes[threadIdx.x * 6 + 4] =
			dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 6 + 4];
		block_boxes[threadIdx.x * 6 + 5] =
			dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 6 + 5];
	}
	// All threads reach this barrier (the guard above does not skip it), so
	// the staged boxes are visible block-wide before any thread reads them.
	__syncthreads();

	if (threadIdx.x < row_size) {
		const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x;
		const float *cur_box = dev_boxes + cur_box_idx * 6;
		int i = 0;
		unsigned long long t = 0;
		int start = 0;
		// On the diagonal, only compare against later boxes in the chunk:
		// skips self-comparison and keeps the mask upper-triangular there.
		if (row_start == col_start) {
			start = threadIdx.x + 1;
		}
		for (i = start; i < col_size; i++) {
			if (devRotateIoU(cur_box, block_boxes + i * 6) > nms_overlap_thresh) {
				t |= 1ULL << i;
			}
		}
		const int col_blocks = DIVUP(n_boxes, threadsPerBlock);
		dev_mask[cur_box_idx * col_blocks + col_start] = t;
	}
}

// Makes device_id the active CUDA device for the calling host thread.
// Queries first and only calls cudaSetDevice on an actual change, since
// setting the device may trigger context initialization.
void _set_device(int device_id) {
	int current_device;
	CUDA_CHECK(cudaGetDevice(&current_device));
	if (current_device != device_id) {
		// The call to cudaSetDevice must come before any calls to Get, which
		// may perform initialization using the GPU.
		CUDA_CHECK(cudaSetDevice(device_id));
	}
}


// boxes is a N x 5 tensor
// Rotated NMS on the GPU.
// boxes: N x 6 CUDA float tensor, rows {ctr_x, ctr_y, w, h, angle, score}.
// Returns a 1-D int64 tensor on the boxes device with the indices (into the
// caller's original order) of the boxes kept after suppressing overlaps with
// IoU > nms_overlap_thresh, sorted ascending.
at::Tensor nms_cuda(const at::Tensor boxes, float nms_overlap_thresh) {
	using scalar_t = float;
	AT_ASSERTM(boxes.type().is_cuda(), "boxes must be a CUDA tensor");
	auto scores = boxes.select(1, 5);									// dim=1, select the conf score
	auto order_t = std::get<1>(scores.sort(0, /* descending=*/true));	// conf from high to low
	auto boxes_sorted = boxes.index_select(0, order_t);					// re-rank the boxes via conf

	int boxes_num = boxes.size(0);

	// Each row of the mask holds col_blocks 64-bit words, one per chunk of
	// threadsPerBlock boxes.
	const int col_blocks = THCCeilDiv(boxes_num, threadsPerBlock);

	scalar_t* boxes_dev = boxes_sorted.data<scalar_t>();

	THCState *state = at::globalContext().lazyInitCUDA(); // TODO replace with getTHCState

	unsigned long long* mask_dev = NULL;
	mask_dev = (unsigned long long*) THCudaMalloc(state, boxes_num * col_blocks * sizeof(unsigned long long));

	dim3 blocks(THCCeilDiv(boxes_num, threadsPerBlock),
		THCCeilDiv(boxes_num, threadsPerBlock));
	dim3 threads(threadsPerBlock);
	// Launch on the current ATen stream: a default-stream launch could race
	// with the sort/index_select work queued above on PyTorch's own stream.
	cudaStream_t stream = at::cuda::getCurrentCUDAStream();
	rotate_nms_kernel<<<blocks, threads, 0, stream>>>(boxes_num,
		nms_overlap_thresh,
		boxes_dev,
		mask_dev);
	// Surface launch-configuration errors immediately.
	THCudaCheck(cudaGetLastError());

	// Copy the mask back on the same stream so it cannot overtake the kernel,
	// then block until it lands.
	std::vector<unsigned long long> mask_host(boxes_num * col_blocks);
	THCudaCheck(cudaMemcpyAsync(&mask_host[0],
		mask_dev,
		sizeof(unsigned long long) * boxes_num * col_blocks,
		cudaMemcpyDeviceToHost,
		stream));
	THCudaCheck(cudaStreamSynchronize(stream));

	// remv accumulates, per chunk, the bits of boxes already suppressed.
	std::vector<unsigned long long> remv(col_blocks);
	memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks);

	at::Tensor keep = at::empty({ boxes_num }, boxes.options().dtype(at::kLong).device(at::kCPU));
	int64_t* keep_out = keep.data<int64_t>();

	// Greedy scan in descending-score order: keep a box unless a previously
	// kept box suppressed it, and fold its suppression row into remv.
	int num_to_keep = 0;
	for (int i = 0; i < boxes_num; i++) {
		int nblock = i / threadsPerBlock;
		int inblock = i % threadsPerBlock;

		if (!(remv[nblock] & (1ULL << inblock))) {
			keep_out[num_to_keep++] = i;
			unsigned long long *p = &mask_host[0] + i * col_blocks;
			for (int j = nblock; j < col_blocks; j++) {
				remv[j] |= p[j];
			}
		}
	}

	THCudaFree(state, mask_dev);
	// Map kept positions (indices into the sorted order) back to original box
	// indices, returned in ascending order.
	// TODO improve this part
	return std::get<0>(order_t.index({
		keep.narrow(/*dim=*/0, /*start=*/0, /*length=*/num_to_keep).to(
		order_t.device(), keep.scalar_type())
	}).sort(0, false));
}