#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <time.h>
#include "random.h"

#include <cuda_runtime.h>

#include <cuda_runtime.h>
#include <cuda_runtime_api.h>

#include "cu_pso.h"

// Plant parameters for the RLC circuit simulated in calc_fitness()
// (see the i_tot / v_c / i_l update equations there): resistance R,
// capacitance C, inductance L — all normalized to 1.
#define L 1
#define C 1
#define R 1

// Simulation horizon: END_T seconds of simulated time, integrated in
// DELTA_T-second steps, giving STEPS iterations per fitness evaluation.
#ifndef END_T
#define END_T 8
#endif
#define DELTA_T (.001f)
#define STEPS (END_T/DELTA_T)

// Max Control effort (0 is no limit)
#define MAX_CONT 10

// These control the weights for both settling time and overshoot in terms
// of fitness
#ifndef OVER_WEIGHT
#define OVER_WEIGHT .5f
#endif

#ifndef SET_WEIGHT
#define SET_WEIGHT .5f
#endif
// Penalty weight applied to the magnitude of the proportional gain k_p.
#ifndef PROP_WEIGHT
#define PROP_WEIGHT .01f
#endif

// IT: maximum outer-loop iterations in main().
#ifndef IT
#define IT 10000
#endif

// IT_SWAP: PSO movement steps per run_iterations launch, i.e. between
// opportunities for particle migration across swarms.
#ifndef IT_SWAP
#define IT_SWAP 100
#endif

// Entry point. Usage: prog [rng_seed] [cuda_device]
// Seeds the host RNG, selects a CUDA device, initializes N_SWARMS swarms
// of N particles on the GPU, then alternates run_iterations launches with
// random inter-swarm particle migration until IT rounds pass or the
// particles cluster (check_done). Prints the best starting particle per
// swarm and the full final population.
int main(int argc, char * argv[])
{
	unsigned int *seeds;
	int j,k;
	long long int i;
	float best;
	int best_index = 0;	/* fix: was uninitialized; if every fitness is */
				/* >= 20000 the scan below never assigns it */
	float complete = 0;
	dim3 s_swarm(N_SWARMS, 1);	/* grid: one block per swarm */
	dim3 size(N, 1);		/* block: one thread per particle */
	int seed_num;
	int card_num;
	pso_swarm *swarms = NULL;	/* device-side swarm array */
	pso_swarm local_swarms[N_SWARMS] = {{{0}}};

	/* argv[1] (optional): host RNG seed; defaults to the current time. */
	if (argc < 2) {
		printf("Seeding from time\n");
		srand(time(NULL));
	} else {
		sscanf(argv[1], "%d", &seed_num);
		printf("Seeding with %d\n", seed_num);
		srand(seed_num);
	}

	/* argv[2] (optional): CUDA device index; defaults to device 0. */
	if (argc < 3) {
		printf("Using first card\n");
		if (cudaSetDevice(0) != cudaSuccess) {
			printf("Error: Unable to set device\n");
			return 3;
		}
	} else {
		sscanf(argv[2], "%d", &card_num);
		printf("Using card %d\n", card_num);
		if (cudaSetDevice(card_num) != cudaSuccess) {
			printf("Error: Unable to set device\n");
			return 3;
		}
	}

	/* Check the API status: on failure cudaMalloc leaves the pointer
	 * unmodified, so testing the pointer alone reads garbage. */
	if (cudaMalloc((void **) &swarms, N_SWARMS * sizeof(pso_swarm)) != cudaSuccess) {
		printf("Error: Unable to allocate swarms\n");
		return 1;
	}

	seeds = gen_seeds(N_SWARMS * N);

	if (!seeds) {
		printf("Error: Unable to allocate seeds\n");
		cudaFree(swarms);
		return 1;
	}

	/* Single-thread kernel: randomizes every particle of every swarm. */
	init_swarm<<<1, 1>>>(swarms, seeds);
	if (cudaGetLastError() != cudaSuccess) {
		printf("Error: Unable to launch init_swarm\n");
		cudaFree(seeds);
		cudaFree(swarms);
		return 2;
	}

	/* Blocking copy on the default stream; also waits for init_swarm. */
	cudaMemcpy(local_swarms, swarms, N_SWARMS * sizeof(pso_swarm), cudaMemcpyDeviceToHost);

	/* Report the fittest starting particle of each swarm. */
	printf("Printing info\n");
	for (j = 0; j < N_SWARMS; j++) {
		printf("Swarm: %d - ", j + 1);
		best = 20000;
		for (k = 0; k < N; k++) {
			if (local_swarms[j].part_swarm[k].fitness < best) {
				best = local_swarms[j].part_swarm[k].fitness;
				best_index = k;
			}
		}
		for (i = 0; i < DIM; i++) {
			printf("%4.2lf ",(double)(local_swarms[j].part_swarm[best_index].location[i]));
		}
		printf("%4.2lf ",(double)(local_swarms[j].part_swarm[best_index].fitness));
		printf("\n");
	}
	printf("Done\n");


	/* Main optimization loop with a 16-character textual progress meter. */
	printf("               ");
	for (i = 0; i < IT; i++) {
		if (i > (complete * IT)) {
			for (j = 0; j < 16; j++) printf("\b");
			if (complete < .1) printf(" ");
			printf("%3.1lf%% Complete ", complete * 100);
			fflush(stdout);
			complete += .01;
		}
		/* IT_SWAP PSO steps per swarm, one block per swarm. */
		run_iterations<<<s_swarm, size>>>(swarms, seeds, IT_SWAP);
		if (cudaGetLastError() != cudaSuccess) {
			printf("Error: Unable to launch run_iterations\n");
			break;
		}
		/* cudaThreadSynchronize was deprecated and later removed;
		 * cudaDeviceSynchronize is the direct replacement. */
		cudaDeviceSynchronize();

		/* Randomly migrate particles between swarm pairs. */
		for (j = 0; j < N_SWARMS; j++) {
			for (k = 0; k < N_SWARMS; k++) {
				if (frand() < CROSS_RATE) {
					swap_part<<<1, 1>>>(swarms + j, swarms + k, seeds);
				}
			}
		}
		usleep(2000);

		cudaMemcpy(local_swarms, swarms, N_SWARMS * sizeof(pso_swarm), cudaMemcpyDeviceToHost);

		if (check_done(local_swarms)) {
			printf("- particles clustered - ending early\n");
			break;
		}
	}
	for (j = 0; j < 16; j++) printf("\b");
	printf("%3.1lf%% Complete\n", 100.0);

	cudaMemcpy(local_swarms, swarms, N_SWARMS * sizeof(pso_swarm), cudaMemcpyDeviceToHost);

	/* Dump the full final population of every swarm. */
	printf("Printing info\n");
	for (j = 0; j < N_SWARMS; j++) {
		printf("Swarm: %d - ", j + 1);
		for (i = 0; i < N; i++) {
			for (k = 0; k < DIM; k++) {
				printf("%4.2lf ",(double)(local_swarms[j].part_swarm[i].location[k]));
			}
			printf("%4.2lf \n",(double)(local_swarms[j].part_swarm[i].fitness));
		}
	}
	printf("Done\n");

	/* Release device allocations before exit. */
	cudaFree(seeds);
	cudaFree(swarms);

	return 0;
}

// Convergence test: locates the fittest particle across all swarms and
// reports 1 ("done") unless at least 10% of all particles lie outside
// TOL of its location (per part_close), in which case it reports 0.
__host__ int check_done(pso_swarm *swarms)
{
	int s, p;
	int misses = 0;
	int miss_limit = N * N_SWARMS / 10;	/* up to ~10% stragglers allowed */
	pso_part center;

	/* The cluster center is the best particle found anywhere. */
	center.fitness = 20000;
	for (s = 0; s < N_SWARMS; s++) {
		for (p = 0; p < N; p++) {
			if (swarms[s].part_swarm[p].fitness < center.fitness) {
				memcpy(&center, &swarms[s].part_swarm[p], sizeof(pso_part));
			}
		}
	}

	/* Bail out as soon as too many particles sit outside tolerance. */
	for (s = 0; s < N_SWARMS; s++) {
		for (p = 0; p < N; p++) {
			if (!part_close(&swarms[s].part_swarm[p], &center)) {
				misses++;
			}
			if (misses >= miss_limit) {
				return 0;
			}
		}
	}

	return 1;
}

// Computes a fitness-weighted center of mass over every particle in every
// swarm: each location is weighted by 1/fitness, so fitter (lower-fitness)
// particles pull the center harder. Result is written to com[0..DIM-1].
// Returns 0.
// NOTE(review): divides by particle fitness — assumes no fitness is 0.
__host__ int fit_com(pso_type *com, pso_swarm *swarms)
{
	int i,j,k;
	pso_type weights[DIM] = {0};
	pso_type weighted_vals[DIM] = {0};

	for (i = 0; i < N_SWARMS; i++) {
		for (j = 0; j < N; j++) {
			for (k = 0; k < DIM; k++) {
				weighted_vals[k] += swarms[i].part_swarm[j].location[k] / swarms[i].part_swarm[j].fitness;
				weights[k] += 1 / swarms[i].part_swarm[j].fitness;
			}
		}
	}

	// Fix: the final division was previously recomputed inside the inner
	// loop on every particle; it only needs to happen once per dimension.
	for (k = 0; k < DIM; k++) {
		com[k] = weighted_vals[k] / weights[k];
	}

	return 0;
}

// returns true if the particles are close, defined by tolerance TOL:
// every coordinate of the two locations must differ by strictly less
// than TOL for the particles to count as close.
__host__ int part_close(pso_part *part1, pso_part *part2)
{
	int d;

	for (d = 0; d < DIM; d++) {
		if (fabs(part1->location[d] - part2->location[d]) >= TOL) {
			return 0;
		}
	}

	return 1;
}

// This function determines the fitness of the individual.
// Simulates a PID controller (gains k_p, k_d, k_i taken from the
// particle's location) driving an RLC circuit (unit R, L, C per the
// macros above) toward an inductor current of 1, using forward-Euler
// integration with step DELTA_T for STEPS steps.
// Fitness = OVER_WEIGHT * max overshoot + SET_WEIGHT * 2% settling time
//         + PROP_WEIGHT * |k_p|   (lower is better).
__device__ void calc_fitness(pso_part *particle)
{
	pso_type k_p = particle->location[0];	// proportional gain
	pso_type k_d = particle->location[1];	// derivative gain
	pso_type k_i = particle->location[2];	// integral gain
	pso_type err = 1;	// setpoint is 1, i_l starts at 0
	pso_type err_old;
	pso_type err_sum = 0;	// running integral of the error
	pso_type err_sum_old;

	pso_type v_in = 0;	// controller output (control effort)
	pso_type v_c = 0;	// capacitor voltage
	pso_type i_l = 0;	// inductor current (the controlled quantity)
	pso_type i_tot = 0;
	pso_type v_c_old;
	pso_type i_l_old;
	int i;
	float set_time = 0;	// last time the response left the 2% band
	float over_shoot = 0;	// largest excursion above the setpoint

	for (i = 0; i < STEPS; i++) {
		// Move last step values into old locations
		err_old = err;
		err_sum_old = err_sum;
		v_c_old = v_c;
		i_l_old = i_l;

		// Simulate circuit in discrete time quantities
		err = 1 - i_l_old;
		err_sum = err_sum_old + err * DELTA_T;

		// PID control law: P + D (finite difference) + I terms
		v_in = k_p * err + k_d * (err - err_old) / DELTA_T + k_i * err_sum;

		// Saturate the control effort at +/-MAX_CONT (0 disables the limit)
		if (MAX_CONT && (v_in > MAX_CONT)) v_in = MAX_CONT;
		if (MAX_CONT && (-v_in > MAX_CONT)) v_in = -MAX_CONT;

		i_tot = (v_in - v_c_old) / R;
		v_c = DELTA_T * (i_tot - i_l_old) / C + v_c_old;

		i_l = DELTA_T * (v_c / L) + i_l_old;

		// Check 2% settling time
		if (i_l > 1.02f) set_time = i * DELTA_T;
		else if (i_l < .98f) set_time = i * DELTA_T;

		// Check max overshoot
		if ((i_l - 1) > over_shoot) over_shoot = i_l - 1;
	}

	// Save fitness based on defined weights
	particle->fitness = OVER_WEIGHT * over_shoot + SET_WEIGHT * set_time;
	// Add PROP_WEIGHT * |k_p| to discourage large proportional gains
	if (k_p > 0.0f) {
		particle->fitness += PROP_WEIGHT * k_p;
	} else {
		particle->fitness -= PROP_WEIGHT * k_p;
	}

	return;
}

/* Used for Testing*/
/*
// This function determines the fitness of the individual
__device__ void calc_fitness(pso_part *particle)
{
	pso_type sum = 0;
	pso_type center[DIM] = {0, 0, 0};
	int i;

	for (i = 0; i < DIM; i++) {
		if ((particle->location[i] - center[i]) > 0) {
			sum += (particle->location[i] - center[i]);
		} else {
			sum -= (particle->location[i] - center[i]);
		}
	}

	particle->fitness = sum;

	return;
}
*/

// This function moves an individual by a given amount. The particle's new
// inertia is a blend of the requested move and its previous inertia (ratio
// MOVE_RAT), and its location advances by that blended step.
__device__ void move_particle(pso_part *particle, pso_type move[DIM])
{
	int d;

	for (d = 0; d < DIM; d++) {
		pso_type step = (MOVE_RAT * move[d]) + ((1 - MOVE_RAT) * particle->inertia[d]);
		particle->inertia[d] = step;
		particle->location[d] += step;
	}
}

// Calculates the required PSO move for a particle — a random blend of
// attraction toward its personal best and toward the swarm best — and
// applies it via move_particle. seed_rand is the caller's thread-local
// state for grand().
__device__ void calc_move_particle(pso_part *particle, pso_part *best, unsigned int *seed_rand)
{
	pso_type step[DIM];
	int d;

	for (d = 0; d < DIM; d++) {
		pso_type to_own = particle->best_location[d] - particle->location[d];
		pso_type to_swarm = best->best_location[d] - particle->location[d];
		/* grand() call order (personal term first, swarm term second)
		 * is kept so the RNG stream matches the original. */
		step[d] = IND_WEIGHT * grand(seed_rand) * to_own;
		step[d] += BEST_WEIGHT * grand(seed_rand) * to_swarm;
	}

	move_particle(particle, step);
}

// This function will perform a swap between two swarms, only called when
// needed. Single-thread kernel: exchanges one randomly chosen particle
// from swarm1 with one randomly chosen particle from swarm2.
__global__ void swap_part(pso_swarm *swarm1, pso_swarm *swarm2, unsigned int *seed_rand)
{
	pso_part temp;
	int index1, index2;

	index1 = (int)(grand(seed_rand) * N);
	index2 = (int)(grand(seed_rand) * N);

	// Fix: float rounding can make grand() return exactly 1.0f, which
	// would index one element past the end of part_swarm — clamp to N-1.
	if (index1 >= N) index1 = N - 1;
	if (index2 >= N) index2 = N - 1;

	copy_part(&temp, swarm1->part_swarm + index1);
	copy_part(swarm1->part_swarm + index1, swarm2->part_swarm + index2);
	copy_part(swarm2->part_swarm + index2, &temp);

	return;
}

// This function initializes a particle with random startup values between
// -MAX_VAL and MAX_VAL for location. Clears Inertia and calculates initial
// fitness for this individual; the personal best starts at the initial
// location/fitness.
__device__ void init_part(pso_part *particle, unsigned int *seed)
{
	int d;

	for (d = 0; d < DIM; d++) {
		pso_type start = (grand(seed) * 2 * MAX_VAL) - MAX_VAL;
		particle->location[d] = start;
		particle->best_location[d] = start;
		particle->inertia[d] = 0;
	}

	calc_fitness(particle);
	particle->best_fitness = particle->fitness;
}

// Copies particle data from src to dest: both fitness values plus the
// per-dimension location, best_location, and inertia arrays.
__device__ void copy_part(pso_part *dest, pso_part *src)
{
	int d;

	dest->fitness = src->fitness;
	dest->best_fitness = src->best_fitness;
	for (d = 0; d < DIM; d++) {
		dest->location[d] = src->location[d];
		dest->best_location[d] = src->best_location[d];
		dest->inertia[d] = src->inertia[d];
	}
}

// This will run a number of movements for a set of swarms. Each block will
// calculate one swarm, each thread calculating a single particle. When it
// completes it will copy all information back to global memory and return.
//
// Launch layout (see main): grid = (N_SWARMS, 1), block = (N, 1), so
// blockIdx.x selects the swarm and threadIdx.x the particle. One whole
// pso_swarm is statically allocated in shared memory per block.
__global__ void run_iterations(pso_swarm *swarms, unsigned int *seeds, unsigned int iterations)
{
	int i,j;

	// Swarm shared within block
    __shared__ pso_swarm my_swarm;
	// Particle only accessible by individual thread
	pso_part my_particle;
	// Get seed value for grand(); one LCG state per thread, written back
	// to global memory at the end so the sequence continues across launches
	unsigned int seed = seeds[threadIdx.x + blockDim.x * blockIdx.x];
	
	if (threadIdx.x == 0) {
		// Memcpy into shared memory
		for (i = 0; i < N; i++) {
			copy_part(&my_swarm.part_swarm[i], &swarms[blockIdx.x].part_swarm[i]);
		}
		// best is a pointer into this block's shared copy, seeded with the
		// lowest-fitness particle
		my_swarm.best = &my_swarm.part_swarm[0];
		for (j = 1; j < N; j++) {
			if (my_swarm.part_swarm[j].fitness < my_swarm.best->fitness) {
				my_swarm.best = &(my_swarm.part_swarm[j]);
			}
		}
	}
	// Barrier: all threads wait for thread 0's shared-memory load
	__syncthreads();

	// Memcpy into local memory
	copy_part(&my_particle, &my_swarm.part_swarm[threadIdx.x]);
	
	for (i = 0; i < iterations; i++) {
		// Calculate movement and move particle
		calc_move_particle(&(my_particle), my_swarm.best, &seed);
		// Update fitness of particle
		calc_fitness(&(my_particle));

		// Track this particle's personal best
		if (my_particle.best_fitness > my_particle.fitness) {
			for (j = 0; j < DIM; j++) {
				my_particle.best_location[j] = my_particle.location[j];
			}
			my_particle.best_fitness = my_particle.fitness;
		}

		// Copy out of local memory
		copy_part(&my_swarm.part_swarm[threadIdx.x], &my_particle);
		// Barrier: publish all particle updates before thread 0 scans them
		__syncthreads();

		// Check for best particle
		// NOTE(review): best compares against the CURRENT fitness of the
		// particle it points at, which can worsen as that particle keeps
		// moving — so "best" is not monotone across iterations; confirm
		// this is the intended PSO variant.
		if (threadIdx.x == 0) {
			for (j = 0; j < N; j++) {
				if (my_swarm.part_swarm[j].fitness < my_swarm.best->fitness) {
					my_swarm.best = &(my_swarm.part_swarm[j]);
				}
			}
		}
		// Barrier: next iteration reads the updated best pointer
		__syncthreads();
	}


	if (threadIdx.x == 0) {
		// Memcpy from shared memory
		for (i = 0; i < N; i++) {
			copy_part(&swarms[blockIdx.x].part_swarm[i], &my_swarm.part_swarm[i]);
		}
	}
	
	// Update Seed
	seeds[threadIdx.x + blockDim.x * blockIdx.x] = seed;

	return;
}

// Single-thread initialization kernel: randomizes every particle of every
// swarm via init_part and points each swarm's best at its first particle
// (run_iterations recomputes the true best before using it).
__global__ void init_swarm(pso_swarm *swarm, unsigned int *seed)
{
	int s, p;

	for (s = 0; s < N_SWARMS; s++) {
		for (p = 0; p < N; p++) {
			init_part(&swarm[s].part_swarm[p], seed);
		}
		swarm[s].best = swarm[s].part_swarm;
	}
}

// This function allocates memory for seeds on the gpu, generates them,
// and copies them over. Fills a host buffer with num_threads values from
// rand(), copies it to a fresh device allocation, and returns the device
// pointer (caller owns it; release with cudaFree). Returns NULL on any
// failure.
unsigned int *gen_seeds(int num_threads)
{
	unsigned int *s_d = NULL;
	unsigned int *s_h;
	int i;

	s_h = (unsigned int *)malloc(num_threads * sizeof(unsigned int));

	if (!s_h) {
		return NULL;
	}

	// Fix: on failure cudaMalloc leaves s_d unmodified, so the old
	// "if (!s_d)" test read an uninitialized pointer — check the status.
	if (cudaMalloc((void **) &s_d, num_threads * sizeof(unsigned int)) != cudaSuccess) {
		free(s_h);
		return NULL;
	}

	for (i = 0; i < num_threads; i++) {
		s_h[i] = rand();
	}

	// Also check the copy so a half-initialized buffer is never returned.
	if (cudaMemcpy(s_d, s_h, num_threads * sizeof(unsigned int), cudaMemcpyHostToDevice) != cudaSuccess) {
		cudaFree(s_d);
		free(s_h);
		return NULL;
	}

	free(s_h);

	return s_d;
}

// Setup seed for a given thread: loads the seed at index `thread` from the
// global seed table into the caller's local seed storage (*val).
__device__ void gsrand(unsigned int *val, unsigned int *seeds, int thread)
{
	val[0] = seeds[thread];
}

// This will generate a random float in [0, 1), must pass it the local
// location of the seed value for this thread. Advances the per-thread
// linear congruential generator (Numerical Recipes constants) in place.
__device__ float grand(unsigned int *seed)
{
	*seed = ((*seed * 1664525) + 1013904223);

	// Fix: the old "(float)*seed / 4294967295.0f" rounded to exactly 1.0f
	// for seeds near UINT_MAX (4294967295 is not representable in float),
	// which let callers like swap_part() compute an out-of-range index
	// (grand() * N == N). Using the top 24 bits fits the float mantissa
	// exactly and guarantees a result strictly below 1.
	return ((float)(*seed >> 8)) * (1.0f / 16777216.0f);
}
