#include "stdafx.h"
#include "Raptor.h"
#include "BinQuery.h"
#include <chrono>
#include <iostream>
#include <random>
#include <thread>
#include <amp.h>
#include <amp_math.h>
#include "_gpu_computation.h"
#include "amp_tinymt_rng.h"

using namespace concurrency;

// Launches the planner: one background probing thread plus n_threads worker
// threads, each running a fixed iteration budget of the planning loop.
// NOTE(review): the raw `new std::thread` objects are stored in members and
// never deleted in this file — verify ownership/shutdown elsewhere.
void Raptor::motion_planning_start()
{
	// Background probe thread (loops until the process exits).
	thread_probe = new std::thread(&Raptor::motion_planning_probing, this);

	// Worker threads; each receives its id (used as RNG seed) and a budget.
	const unsigned int iterations_per_thread = 1024 * 16;
	for (unsigned int worker = 0; worker < n_threads; ++worker)
	{
		std::thread* t = new std::thread(&Raptor::motion_planning_threading, this, worker, iterations_per_thread);
		thread_pool.push_back(t);
	}
}

// Background probe loop: repeatedly queries the active vertex set for all
// neighbors of the root state (huge radius => effectively everything) and
// writes the result into one of two double-buffered object sets
// (probe_object_A / probe_object_B), flipping A_is_ready each pass.
// Runs forever; throttled to one probe every 100 ms.
// NOTE(review): A_is_ready is written here and presumably read by other
// threads with no synchronization — consider std::atomic<bool>; verify.
void Raptor::motion_planning_probing()
{
	while (true)
	{
		std::vector<double*>* coords = &coord_current;
		// Double-buffer: fill B while A is the published buffer, and vice versa.
		std::vector<void*>* objects = A_is_ready ? &probe_object_B : &probe_object_A;

		// Radius large enough to cover the whole [-10,10]^3 state space.
		v_active->nearest_neighbor(root->get_q(), 1000000.0, coords, objects);

		// Publish the freshly filled buffer.
		A_is_ready = !A_is_ready;

		// Portable sleep; replaces the Windows-only Sleep(100).
		std::this_thread::sleep_for(std::chrono::milliseconds(100));
	}
}

// Worker-thread planning loop (one per CPU worker). Runs n_iterations rounds
// of a kinodynamic-RRT-style expansion:
//   1. sample a random state q_sample in the state bounds,
//   2. select the lowest-cost existing state within r_best_near of it
//      (or the single nearest state if none are in range),
//   3. sample a random control and forward-propagate it on the CPU,
//   4. "Enforced Converge": launch _gpu_jobs randomized propagation attempts
//      on the GPU (C++ AMP) from nearby states, trying to reach a
//      neighborhood of the new state at lower cost,
//   5. insert the resulting state into the tree, draining dominated
//      neighbors within r_drain first.
// `id` seeds this thread's RNG so workers diverge; per-iteration timing is
// pushed into msg_output.
// NOTE(review): v_active, tickets, results and msg_output are shared members
// touched here without visible locking — verify thread safety if n_threads > 1.
void Raptor::motion_planning_threading(unsigned int id, unsigned int n_iterations)
{
	// Random generator for CPU thread
	std::uniform_real_distribution<double> double_distribution(0, 1);
	std::mt19937_64 random;
	random.seed(id);

	// Shared variables
	double* q_sample = new double[dim_state];   // random state sample buffer
	double* q_new = new double[dim_state];      // propagated-state buffer
	double* c_sample = new double[dim_control]; // sampled control buffer
	double c_duartion = 0; // NOTE(review): unused (typo of "duration"?)
	// Passed to nearest_neighbor below while still NULL — presumably the
	// callee allocates/fills them; they are deleted at the end of this
	// function. TODO confirm against BinQuery::nearest_neighbor.
	std::vector<double*>* results_coord = NULL;
	std::vector<void*>* results_object = NULL;
	double r_best_near = 0.5; // neighbor radius for parent selection / GPU acceptance
	double r_drain = 0.5;     // radius for draining dominated states
	State* x_select = NULL;   // selected parent state for the iteration
	Coord_Control_Bounds bounds(this->bounds); // local copy captured by value in the AMP lambda

	clock_t t_0 = clock();
	clock_t t_prev = t_0;

	// Motion planning execution
	unsigned int n_gpu_effective = 0; // iterations where the GPU beat the CPU path
	unsigned int i_iteration = 0;
	while (i_iteration < n_iterations)
	{
		auto cpu0 = std::chrono::high_resolution_clock::now();
		auto gpu0 = cpu0; // gpu0/gpu1 stay equal (zero GPU time) if propagation fails
		auto gpu1 = cpu0;

		// Sample state uniformly within [q_min, q_max] per dimension.
		for (unsigned int i = 0; i < dim_state; ++i)
			q_sample[i] = double_distribution(random) * (q_max[i] - q_min[i]) + q_min[i];

		// Nearest neighbors within r_best_near of the sample.
		v_active->nearest_neighbor(q_sample, r_best_near, results_coord, results_object);
		if (results_coord->size() == 0)
		{
			// Nothing in range: fall back to the single nearest state.
			v_active->nearest_neighbor(q_sample, (unsigned int)1, results_coord, results_object);
			x_select = (State*)results_object->at(0);
		}
		else
		{
			// Pick the lowest-cost state among the in-radius neighbors.
			double cost_min = std::numeric_limits<double>::max();
			for (unsigned int i = 0; i < results_object->size(); ++i)
			{
				State* x_i = (State*)results_object->at(i);
				double cost_i = x_i->get_cost();
				if (cost_i < cost_min)
				{
					x_select = x_i;
					cost_min = cost_i;
				}
			}
		}

		// Sample control uniformly within [c_min, c_max] per dimension.
		for (unsigned int i = 0; i < dim_control; ++i)
			c_sample[i] = double_distribution(random) * (c_max[i] - c_min[i]) + c_min[i];

		// Propagate: integrate the sampled control from the selected state;
		// the last control component doubles as the duration.
		double cost_step = 0;
		bool propagate_success = propagate_cpu(x_select->get_q(), c_sample, c_sample[dim_control - 1], q_new, cost_step);
		if (propagate_success)
		{
			// Enforced Converge: ask the GPU to find a cheaper way to reach
			// (a neighborhood of) q_new from existing nearby states.
			{
				tickets.resize(_gpu_jobs);
				results.resize(_gpu_jobs);

				double cost_start = x_select->get_cost();
				double cost_new = cost_start + cost_step; // cost of the CPU-found path

				// Gather up to 100 neighbors of the sample as GPU start states.
				unsigned int n_neighbor_for_enforced_convergence = 100;
				unsigned int n_available = n_neighbor_for_enforced_convergence;
				if (n_available > v_active->size())
					n_available = v_active->size();
				v_active->nearest_neighbor(q_sample, n_available, results_coord, results_object);


				// One ticket per GPU job, cycling through the candidates when
				// there are more jobs than neighbors.
				unsigned int i_candidate = 0;
				for (unsigned int i = 0; i < _gpu_jobs; ++i, ++i_candidate)
				{
					while (i_candidate >= results_object->size())
						i_candidate -= results_object->size();

					_gpu_ticket ticket;
					double* q_i = results_coord->at(i_candidate);
					State* x_i = (State*)results_object->at(i_candidate);
					ticket.q_start.q0 = q_i[0];
					ticket.q_start.q1 = q_i[1];
					ticket.q_start.q2 = q_i[2];

					ticket.r_target = r_best_near; // acceptance radius around q_new

					ticket.c_start = x_i->get_cost();
					ticket.c_target = cost_new; // beat this cost to count as improvement
					// TODO : this can be optimized when c_start >= c_target

					ticket.q_target.q0 = q_new[0];
					ticket.q_target.q1 = q_new[1];
					ticket.q_target.q2 = q_new[2];

					tickets[i] = ticket;
				}

				extent<1> e_size(_gpu_jobs);
				array_view<_gpu_ticket, 1> _gpu_in(e_size, tickets);
				array_view<_gpu_result, 1> _gpu_out(e_size, results);

				unsigned int _gpu_rand_seed = clock();
				tinymt_collection<1> rand_collection(e_size, _gpu_rand_seed);

				gpu0 = std::chrono::high_resolution_clock::now();

				// Lambda for GPU: one randomized propagation attempt per job.
				parallel_for_each(
					e_size,
					[=](index<1> idx) restrict(amp)
				{
					auto rand = rand_collection[idx];
					// NOTE(review): re-initializing every job's RNG with the
					// same seed may make all jobs draw identical sequences —
					// confirm against tinymt_collection's semantics.
					rand.initialize(_gpu_rand_seed);

					_gpu_ticket ticket = _gpu_in[idx];
					_gpu_result result;

					// Random duration; the cost model is elapsed time.
					result.ctrl.t = rand.next_single() * (bounds.t_max - bounds.t_min) + bounds.t_min;
					result.c_new = ticket.c_start + result.ctrl.t;

					//if (result.c_new >= ticket.c_target)
					//	return;	

					// Random control direction.
					result.ctrl.c0 = rand.next_single() * (bounds.c0_max - bounds.c0_min) + bounds.c0_min;
					result.ctrl.c1 = rand.next_single() * (bounds.c1_max - bounds.c1_min) + bounds.c1_min;
					result.ctrl.c2 = rand.next_single() * (bounds.c2_max - bounds.c2_min) + bounds.c2_min;

					// propagation: fixed-step Euler integration (mirrors propagate_cpu)
					int n_integration = concurrency::fast_math::ceilf(result.ctrl.t / bounds.t_delta);

					// Debug/telemetry values.
					result.v1 = result.ctrl.t;
					result.v2 = bounds.t_delta;
					result.n_val = n_integration;

					Coord i_x;
					i_x.q0 = ticket.q_start.q0;
					i_x.q1 = ticket.q_start.q1;
					i_x.q2 = ticket.q_start.q2;

					// Normalize the direction, then integrate.
					float length = result.ctrl.c0*result.ctrl.c0 + result.ctrl.c1*result.ctrl.c1 + result.ctrl.c2*result.ctrl.c2;
					length = fast_math::sqrtf(length);
					// NOTE(review): signed/unsigned comparison (n_integration is int).
					for (unsigned int i = 0; i < n_integration; ++i)
					{
						i_x.q0 += result.ctrl.c0 / length * bounds.t_delta;
						i_x.q1 += result.ctrl.c1 / length * bounds.t_delta;
						i_x.q2 += result.ctrl.c2 / length * bounds.t_delta;
					}

					// Close to target? Misses get the "infinite" cost offset.
					float d_q0 = i_x.q0 - ticket.q_target.q0;
					float d_q1 = i_x.q1 - ticket.q_target.q1;
					float d_q2 = i_x.q2 - ticket.q_target.q2;
					float r_new_to_target = concurrency::fast_math::sqrt(d_q0 * d_q0 + d_q1 * d_q1 + d_q2 * d_q2);

					if (r_new_to_target > ticket.r_target)
					{
						result.c_new += bounds.c_infinity;
					}

					// Return values
					_gpu_out[idx].n_val = result.n_val;
					_gpu_out[idx].v1 = result.v1;
					_gpu_out[idx].v2 = result.v2;

					_gpu_out[idx].c_step = result.ctrl.t;
					_gpu_out[idx].c_new = result.c_new;
					_gpu_out[idx].ctrl = result.ctrl;
					_gpu_out[idx].q_new = i_x;
				});

				_gpu_out.synchronize();
				// NOTE(review): synchronize() already writes back into `results`
				// (the array_view wraps it); this copy looks redundant — confirm.
				copy(_gpu_out, results.begin());

				gpu1 = std::chrono::high_resolution_clock::now();

				// Find the cheapest GPU result that actually reached the target.
				int improved_job_idx = -1;
				double convergence_cost_min = bounds.c_infinity;
				for (int i = 0; i < results.size(); ++i)
				{
					float cost_i = results[i].c_new;
					if (cost_i < bounds.c_infinity)
					{
						if (cost_i < convergence_cost_min)
						{
							convergence_cost_min = cost_i;
							improved_job_idx = i;
						}
					}
				}

				// When GPU provides improvement, adopt its state/control in
				// place of the CPU propagation result.
				// NOTE(review): the winning GPU job may have started from a
				// neighbor other than x_select, yet x_select remains the
				// parent/cost base in "Grow G" below — confirm this is intended.
				if (improved_job_idx >= 0)
				{
					n_gpu_effective++;

					cost_step = results[improved_job_idx].c_step;
					q_new[0] = results[improved_job_idx].q_new.q0;
					q_new[1] = results[improved_job_idx].q_new.q1;
					q_new[2] = results[improved_job_idx].q_new.q2;

					c_sample[0] = results[improved_job_idx].ctrl.c0;
					c_sample[1] = results[improved_job_idx].ctrl.c1;
					c_sample[2] = results[improved_job_idx].ctrl.c2;
					c_sample[3] = results[improved_job_idx].ctrl.t;
				}

				// Debug-only: GPU success rate in tenths of a percent
				// (inspect `percentage` in the debugger; test_stop is a
				// breakpoint anchor).
				float percentage = ceilf((float)n_gpu_effective / (i_iteration + 1) * 1000) / 10;
				int test_stop = 0;
			}

			// Grow G: insert the new state, first removing ("draining")
			// nearby states that it dominates in cost.
			{
				double cost_new = x_select->get_cost() + cost_step;

				// drain
				{
					v_active->nearest_neighbor(q_new, r_drain, results_coord, results_object);
					bool q_new_better = true;
					unsigned n_results = results_object->size();
					for (unsigned int i = 0;i<n_results;++i)
					{
						State* x_i = (State*)results_object->at(i);
						double cost_i = x_i->get_cost();
						if (cost_i < cost_new)
							q_new_better = false;
					}

					// Only drain if q_new beats every neighbor in range.
					if (q_new_better == true)
					{
						for (unsigned int i = 0; i < n_results; ++i)
						{
							State* x_i = (State*)results_object->at(i);
							// NOTE(review): removed from the index but not
							// deleted — verify lifetime/ownership of x_i.
							v_active->del_point(x_i->get_q(), x_i);
						}
					}


				}


				State* x_new = new State(dim_state, q_new);
				x_new->set_cost(cost_new);
				x_new->add_parent(x_select, cost_step, dim_control, c_sample);
				v_active->add_point(x_new->get_q(), x_new);
			}
		}

		// Prepare for the next iteration: timing + status messages.

		using namespace std::chrono;

		auto cpu1 = std::chrono::high_resolution_clock::now();
		auto cpuT = cpu1 - cpu0;
		_int64 vcpu = cpuT.count(); // raw tick count (debug only)

		auto gpuT = gpu1 - gpu0;
		_int64 vgpu = gpuT.count(); // raw tick count (debug only)


		duration<double> time_span_cpu = duration_cast<duration<double>>(cpuT);

		duration<double> time_span_gpu = duration_cast<duration<double>>(gpuT);


		// NOTE(review): msg_output is cleared and rebuilt here from each
		// worker with no visible lock — data race if n_threads > 1; verify.
		msg_output.clear();

		clock_t t_now = clock();

		// Iterations per second since this thread started.
		float t_all = (t_now - t_0) / (float)CLOCKS_PER_SEC;
		float ips = i_iteration / t_all;
		msg_output.push_back(std::string("ips = ") + std::to_string(ips));

		msg_output.push_back(std::string("cpu call (s)= ") + std::to_string(time_span_cpu.count()));
		msg_output.push_back(std::string("gpu call (s)= ") + std::to_string(time_span_gpu.count()));


		t_prev = t_now;
		i_iteration++;

	}
	delete[] c_sample;
	delete[] q_sample;
	delete[] q_new;
	delete results_coord;
	delete results_object;

}

// Forward-integrates the control `ctrl` (first three components: direction,
// normalized to unit length) from `q_start` for `duration` seconds in fixed
// Euler steps of `duration_inc`, writing the result into the caller-provided
// q_new buffer. cost_step accumulates the integrated time.
// Returns whether the final state lies inside the valid space
// (per v_active->inSpace); returns false for degenerate inputs.
bool Raptor::propagate_cpu(double* q_start, double* ctrl, double duration, double*& q_new, double& cost_step)
{
	cost_step = 0;

	// Start from the source state.
	q_new[0] = q_start[0];
	q_new[1] = q_start[1];
	q_new[2] = q_start[2];

	// Guard: a zero-length control cannot be normalized (the previous code
	// divided by zero, yielding NaN coordinates), and a negative duration
	// would wrap the unsigned step count. Treat both as failed propagation.
	double ctrl_length = sqrt(ctrl[0] * ctrl[0] + ctrl[1] * ctrl[1] + ctrl[2] * ctrl[2]);
	if (ctrl_length == 0.0 || duration < 0.0)
		return false;

	// Hoist the loop-invariant per-axis step.
	double step0 = ctrl[0] / ctrl_length * duration_inc;
	double step1 = ctrl[1] / ctrl_length * duration_inc;
	double step2 = ctrl[2] / ctrl_length * duration_inc;

	unsigned int n_integration = (unsigned int)ceil(duration / duration_inc);
	for (unsigned int i = 0; i < n_integration; ++i)
	{
		q_new[0] += step0;
		q_new[1] += step1;
		q_new[2] += step2;
		cost_step += duration_inc;
	}

	return v_active->inSpace(q_new);
}

// Blocks until every worker thread spawned in motion_planning_start() has
// finished its iteration budget. The probe thread is not joined here
// (it loops forever).
void Raptor::motion_planning_ending()
{
	for (unsigned int worker = 0; worker < n_threads; ++worker)
	{
		std::thread* t = thread_pool.at(worker);
		t->join();
	}
}

// Builds the spatial index — a BinQuery over the state bounds, subdivided
// 7 ways at each of 6 depth levels — and seeds it with a zero-cost root
// state at the origin.
void Raptor::motion_planning_init()
{
	const unsigned int division_depth = 6;
	unsigned int* divisions = new unsigned int[division_depth];
	for (unsigned int level = 0; level < division_depth; ++level)
		divisions[level] = 7;
	// NOTE(review): `divisions` is never freed in this file — presumably
	// BinQuery takes ownership; verify, otherwise this leaks.
	v_active = new BinQuery(dim_state, q_max, q_min, division_depth, divisions);

	// Root state: the origin with zero cost.
	double* q_root = new double[dim_state];
	for (unsigned int i = 0; i < dim_state; ++i)
		q_root[i] = 0;

	root = new State();
	root->set_cost(0);
	root->set_q(dim_state, q_root);
	v_active->add_point(root->get_q(), root);

	// The scratch buffer is freed right away, implying set_q copies it.
	delete[] q_root;
}

// Configures the planner: a 3-D state space ([-10,10]^3), a 4-D control
// space (unit-direction bounds plus a duration component in [0.5, 2.5]),
// the GPU-side bounds struct mirroring those limits, and the initial
// search structure.
Raptor::Raptor()
{
	msg_output.push_back("test msg from raptor");

	// CPU-side problem definition.
	{
		dim_state = 3;
		q_max = new double[dim_state];
		q_min = new double[dim_state];
		for (unsigned int i = 0; i < dim_state; ++i)
		{
			q_max[i] = 10;
			q_min[i] = -10;
		}

		dim_control = 4;
		c_max = new float[dim_control];
		c_min = new float[dim_control];
		// First three components: direction; last component: duration.
		for (unsigned int i = 0; i + 1 < dim_control; ++i)
		{
			c_max[i] = 1;
			c_min[i] = -1;
		}
		c_max[dim_control - 1] = 2.5f;
		c_min[dim_control - 1] = 0.5f;

		duration_inc = 0.1;
	}
	n_threads = 1;

	// GPU-side mirror of the same bounds (captured by the AMP kernels).
	{
		bounds.q0_min = q_min[0];
		bounds.q1_min = q_min[1];
		bounds.q2_min = q_min[2];
		bounds.q0_max = q_max[0];
		bounds.q1_max = q_max[1];
		bounds.q2_max = q_max[2];
		bounds.c0_min = c_min[0];
		bounds.c0_max = c_max[0];
		bounds.c1_min = c_min[1];
		bounds.c1_max = c_max[1];
		bounds.c2_min = c_min[2];
		bounds.c2_max = c_max[2];
		bounds.t_min = c_min[3];
		bounds.t_max = c_max[3];
		bounds.c_infinity = 1000000;
		bounds.t_delta = duration_inc;

		_gpu_jobs = 3000;
	}

	motion_planning_init();
}

// Releases the bound arrays and planning structures owned by the planner.
// NOTE(review): thread_probe and the std::thread objects in thread_pool are
// never deleted (and thread_probe is never joined) — verify the shutdown path.
// NOTE(review): root was also added to v_active; if BinQuery deletes its
// stored objects, `delete root` here would be a double free — confirm
// ownership before relying on this destructor.
Raptor::~Raptor()
{
	delete[] q_max;
	delete[] q_min;
	delete[] c_max;
	delete[] c_min;
	delete v_active;
	delete root;
}
