// BFSTEST : Test breadth-first search in a graph.
// 
// example: cat sample.txt | ./bfstest 1
//
// John R. Gilbert, 17 Feb 2011

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <time.h>
#include <float.h>
#include <omp.h>
#include <assert.h>

#include <cuda.h>
#include "cublas.h"
#include <time.h>

// WARNING: this macro shadows math.h ceil(); for integer x, `x % 1` is always
// 0 so it reduces to x+1, and it would not compile for floating-point x.
// It appears unused in this file -- kept as-is for source compatibility.
#define ceil(x)	( x - (x % 1) + 1 )
// Sentinel level value meaning "vertex not yet reached" in the BFS.
#define MINUSONE -1

// Number of 32-thread warps per thread block (presumably "physical" warps).
#define NUM_PWARPS	16
//#define NUM_VWARPS	(NUM_PWARPS * (32/WARP_SIZE) )
// Virtual warps per block; equals NUM_PWARPS since WARP_SIZE is fixed at 32.
#define NUM_VWARPS	(NUM_PWARPS * (32/32) )
#define MIN(X,Y) (((X)<(Y))?(X):(Y))
#define MAX(X,Y) (((X)>(Y))?(X):(Y))

// Globals shared by the OpenMP code: per-thread id scratch and the OpenMP
// thread count.  NOTE(review): the parallel regions redeclare my_cpu_id
// private, so this global copy is effectively unused storage.
int my_cpu_id,numthreads;
//#define T1 64
//#define T2 

typedef struct graphstruct { // A graph in compressed-adjacency-list (CSR) form
    int nv;            // number of vertices
    int ne;            // number of edges
    int *nbr;          // array of neighbors of all vertices (length ne)
    int *firstnbr;     // index in nbr[] of first neighbor of each vtx (length nv+1)
} graph;

// Per-warp shared-memory staging area used by warp_baseline_kn: one strip of
// WARP_SIZE vertex levels plus the WARP_SIZE+1 CSR row offsets that bound
// those vertices' edge lists.
template<int WARP_SIZE>
struct warp_mem 
{
	int levels[WARP_SIZE];    // BFS levels of the strip's vertices
	int nodes[WARP_SIZE+1];   // CSR offsets; one extra entry bounds the last vertex
	int scratch ;             // NOTE(review): unused in the visible code
};

template<int WARP_SIZE> __global__ void warp_baseline_kn(int, bool *, int *, int , int , int *, int * );

//--------------------------------------------------------------
// For WARP-based SIMD operations
//--------------------------------------------------------------
// Warp-cooperative copy of cnt elements from d_src to d_dst: lane
// `warp_offset` handles elements warp_offset, warp_offset+WARP_SIZE, ...
// Ends with a block-scope memory fence (not a barrier).
template <typename T, int WARP_SIZE>
__device__
inline static void gpu_memcpy_SIMD(T* d_dst, T* d_src, int cnt, int warp_offset)
{
	int idx = warp_offset;
	while (idx < cnt) {
		d_dst[idx] = d_src[idx];
		idx += WARP_SIZE;
	}
	// Publish the stores to the rest of the block.
	__threadfence_block();
}

// Same warp-cooperative copy as gpu_memcpy_SIMD, but converts each element
// from source type T to destination type T2 on the way through.
template <typename T2, typename T, int WARP_SIZE>
__device__ static inline void gpu_memcpy_SIMD2(T2* d_dst, T* d_src, int cnt, int warp_offset)
{
	int idx = warp_offset;
	while (idx < cnt) {
		d_dst[idx] = (T2) (d_src[idx]);
		idx += WARP_SIZE;
	}
	// Publish the stores to the rest of the block (fence, not a barrier).
	__threadfence_block();
}

// Warp-cooperative expansion of one frontier vertex: the warp's lanes stride
// over the vertex's edge list d_edges[from..to) and label any still-unvisited
// neighbor with level+1.  Returns true iff THIS lane labeled no new vertex;
// the caller combines the per-lane answers.
//
// M and d_finished are accepted but unused here (the caller aggregates the
// finished flags itself).  Concurrent lanes/warps may race on the same
// neighbor, but every writer stores the same value, so the race is benign.
template<int WARP_SIZE>
__device__ static bool expand_frontier_gpu_SIMD(int M, int from, int to, int level, int * d_edges, int* d_level, int warp_offset, bool* d_finished)
{
    bool finished = true;
	for(int k = from + warp_offset; k < to; k += (WARP_SIZE)) {
		int nbr = d_edges[k]; // this read is coalesced
		if (d_level[nbr] == MINUSONE) {	// only this final read-write is scattered
			d_level[nbr] = level + 1;
            finished = false;
		}
	}   
    __threadfence_block();
    return finished;
}



// Read whitespace-separated "tail head" integer pairs from stdin until EOF
// or a malformed token.
//
// *tailp and *headp receive newly allocated arrays (caller frees); the
// return value is the number of edges read.  Exits the program if the
// allocations fail or the hard edge limit is exceeded.
int read_edge_list (int **tailp, int **headp) {
    int max_edges = 100000000;   // hard cap: ~800 MB of edge storage
    int nedges, nr, t, h;
    *tailp = (int *) calloc(max_edges, sizeof(int));
    *headp = (int *) calloc(max_edges, sizeof(int));
    if (*tailp == NULL || *headp == NULL) {
	printf("read_edge_list: out of memory allocating edge arrays.\n");
	exit(1);
    }
    nedges = 0;
    nr = scanf("%i %i",&t,&h);
    while (nr == 2) {
	if (nedges >= max_edges) {
	    printf("Limit of %d edges exceeded.\n",max_edges);
	    exit(1);
	}
	(*tailp)[nedges] = t;
	(*headp)[nedges++] = h;
	nr = scanf("%i %i",&t,&h);
    }
    return nedges;
}


// Build a CSR graph from an edge list (edge e runs tail[e] -> head[e]).
//
// Vertices are numbered 0..maxv where maxv is the largest endpoint seen,
// so nv = maxv+1.  The returned graph and its arrays are owned by the
// caller.  Exits the program on allocation failure.
graph * graph_from_edge_list (int *tail, int* head, int nedges) {
    graph *G;
    int i, e, v, maxv;
    G = (graph *) calloc(1, sizeof(graph));
    if (G == NULL) {
	printf("graph_from_edge_list: out of memory.\n");
	exit(1);
    }
    G->ne = nedges;
    maxv = 0;

    // count vertices: nv is one more than the largest endpoint
    for (e = 0; e < G->ne; e++) {
	if (tail[e] > maxv) maxv = tail[e];
	if (head[e] > maxv) maxv = head[e];
    }
    G->nv = maxv+1;
    G->nbr = (int *) calloc(G->ne, sizeof(int));
    G->firstnbr = (int *) calloc(G->nv+1, sizeof(int));
    // calloc(0, ...) may legally return NULL, so only require nbr when ne > 0
    if ((G->ne > 0 && G->nbr == NULL) || G->firstnbr == NULL) {
	printf("graph_from_edge_list: out of memory.\n");
	exit(1);
    }

    // count neighbors of vertex v in firstnbr[v+1]
    for (e = 0; e < G->ne; e++) G->firstnbr[tail[e]+1]++;

    // cumulative sum of neighbors gives firstnbr[] values
    for (v = 0; v < G->nv; v++) G->firstnbr[v+1] += G->firstnbr[v];

    // pass through edges, slotting each one into the CSR structure
    for (e = 0; e < G->ne; e++) {
	i = G->firstnbr[tail[e]]++;   // next free slot for this tail vertex
	G->nbr[i] = head[e];
    }
    // the loop above shifted firstnbr[] left; shift it back right
    for (v = G->nv; v > 0; v--) G->firstnbr[v] = G->firstnbr[v-1];
    G->firstnbr[0] = 0;
    return G;
}


// Print a short summary of a CSR graph: vertex/edge counts and the first
// few entries of firstnbr[] and nbr[], with "..." when truncated.
void print_CSR_graph (graph *G) {
    // Show at most this many prefix entries of each array.
    int vlimit = (G->nv < 20) ? G->nv : 20;
    int elimit = (G->ne < 50) ? G->ne : 50;
    int idx;

    printf("\nGraph has %d vertices and %d edges.\n",G->nv,G->ne);

    printf("firstnbr =");
    for (idx = 0; idx <= vlimit; idx++)
        printf(" %d",G->firstnbr[idx]);
    if (G->nv > vlimit) printf(" ...");
    printf("\n");

    printf("nbr =");
    for (idx = 0; idx < elimit; idx++)
        printf(" %d",G->nbr[idx]);
    if (G->ne > elimit) printf(" ...");
    printf("\n\n");
}


// Breadth-first search from source vertex s over the CSR graph G.
//
// Outputs (arrays allocated here; ownership passes to the caller):
//   *levelp     - per-vertex BFS level; -1 means unreachable
//   *nlevelsp   - number of BFS levels discovered
//   *levelsizep - per-level frontier sizes as tracked during the search
//   *parentp    - NOTE(review): never assigned by this function; callers
//                 must not read it.
//
// The search picks an expansion strategy per level via a small state
// machine keyed on the frontier size (thresholds T1, T2, T3):
//   state 0     - serial queue-based expansion (tiny frontier)
//   states 1,4  - OpenMP queue-based expansion with per-thread next queues
//   states 2,3  - OpenMP scan over all vertices ("read-based"), no queue
//   state 5     - hand off to the GPU kernel and finish the BFS there
//
// Scratch memory (cnodearray, nnodearray, nsize, h_finished and the device
// buffers) is never freed -- tolerable for a one-shot test driver.
void bfs (int s, graph *G, int **levelp, int *nlevelsp, 
	int **levelsizep, int **parentp) {
    int *levelsize, *h_level;
    int thislevel;
    // cnodearray: current-frontier queue; nnodearray[t]: next-frontier
    // buffer of OpenMP thread t; nsize[t]: fill count of nnodearray[t].
    int **nnodearray,*cnodearray,*nsize, lqueuesize=10000000;
    int i, v, w, e,j,k,index;   // k is declared but unused
    int size=0;                 // vertices discovered for the next level
    int state = 0;              // state-machine state (see header comment)
    int T1, T2, T3,alpha,beta;  // strategy-switch thresholds
     
    h_level = *levelp = (int *)calloc(G->nv, sizeof(int));
    levelsize = *levelsizep = (int *)calloc(G->nv, sizeof(int));

    double time_elapsed;
    double start,end;           // end is declared but unused
    cudaError_t err ;

    // Host/device completion flag plus device copies of the CSR graph
    // (d_nodes = row offsets, d_edges = adjacency) and the level array.
    bool *h_finished , *d_finished;
    int *d_nodes, *d_edges, *d_level;

    h_finished = (bool *)malloc( 1* sizeof(bool) );
    // NOTE(review): these asserts test the output pointers, not `err`; on a
    // failed cudaMalloc the pointer may be left unset, so failures are not
    // reliably detected here.
    cudaMalloc( (void **)&d_finished, 1 * sizeof(bool)) ; assert(d_finished != NULL);

    err = cudaMalloc( (void **)&d_nodes,(G->nv+1) * sizeof(int)); assert(d_nodes!=NULL);
    err = cudaMalloc( (void **)&d_edges, (G->ne) * sizeof(int)); assert(d_edges!=NULL);
    err = cudaMalloc( (void **)&d_level, (G->nv) * sizeof(int)); assert(d_level!=NULL);

    int num_warps = NUM_VWARPS;	// 512 threads in TB
    int numthreadsperblock = num_warps * 32 ;
    int numblocks = MIN(16384, (G->nv + numthreadsperblock-1)/ numthreadsperblock); // number of threads blocks 
    numblocks = MAX(numblocks, 1);

    // Thresholds: T1 gates serial -> parallel queue; T2 (1% of nv, at least
    // 262144) gates the read-based scan; T3 with growth factor alpha gates
    // the jump to the GPU; beta gates leaving the scan states.
    T1 = 64;
    if((G->nv)*0.01 > 262144) {
	T2 = (G->nv)*0.01;
    } else {
	T2 = 262144;
    }
    T3 = 2048;
    alpha = 2;
    beta = 2;

    numthreads=omp_get_max_threads();
 
    cnodearray = (int *) malloc(lqueuesize*sizeof(int));
    nnodearray = (int **) malloc(numthreads*sizeof(int *));
    for (i = 0; i < numthreads; i++){
	nnodearray[i] = (int *) malloc(lqueuesize*sizeof(int));
    }
   
    nsize = (int *) calloc(numthreads,sizeof(int));

//Start time before assigning initial levels

    start=omp_get_wtime();

    // Mark every vertex unvisited.
#pragma omp parallel for private(v)
	for (v = 0; v < G->nv; v++) {
	    h_level[v] = -1;
	}

    // assign the starting vertex level 0 and put it on the queue to explore
    thislevel = 0;
    h_level[s] = 0;
    size=0;
    levelsize[0] = 1;
    cnodearray[0] = s;

    // loop over levels, then over vertices at this level, then over neighbors
    printf("T1: %d , T2: %d, T3: %d \n",T1, T2, T3);
    while (levelsize[thislevel] > 0) {
	size=0;
	levelsize[thislevel+1] = 0;
	// Advance the strategy state machine from the current frontier size.
	switch(state) {
	    case 0:
		if (levelsize[thislevel] > T1)
		    state =1;
		break;
	    case 1:
		if (levelsize[thislevel] > T2 )
		    state =2;
		// A fast-growing, already sizeable frontier goes straight to
		// the GPU (this test can override the state=2 above).
		if (levelsize[thislevel] > alpha*levelsize[thislevel-1] && levelsize[thislevel] > T3 )
		    state =5;
		break;
	    case 2:
		state = 3;
		break;
	    case 3:
		if (levelsize[thislevel] < T2)
		    state =4;
		if (levelsize[thislevel] < beta*levelsize[thislevel-1])
		    state =4;
		break;
	    case 4:
		if(levelsize[thislevel] > T1)
		    state = 1;
		else
		    state = 0;
		break;
	    case 5:
		// GPU mode is absorbing: once entered, never left.
	        state = 5;
		break;
	}
	printf("state: %d , levelnum: %d, levelsize: %d \n",state, thislevel, levelsize[thislevel]);

	if (state == 2 || state == 3 ) {
	    // Read-based expansion: scan every vertex, expanding those on
	    // the current frontier.  No queue is maintained; `size` only
	    // counts newly labeled vertices so the loop knows to continue.
#pragma omp parallel for private(i,e,w) reduction(+:size)
		for (i = 0; i < G->nv; i++) {
			if(h_level[i]== thislevel) {
				for (e = G->firstnbr[i]; e < G->firstnbr[i+1]; e++) {
					w = G->nbr[e];          // w is the current neighbor of v
					if (h_level[w] == -1) {   // w has not already been reached
						h_level[w] = thislevel+1;
						size++;
					}
				}
			}
		}
	} 
	else if (state == 4) {
	    // Leaving the read-based scan: one more full scan, but this time
	    // rebuild the frontier queue so queue-based states can resume.
#pragma omp parallel private(i,e,w,my_cpu_id)
		{
			my_cpu_id=omp_get_thread_num();
# pragma omp for schedule(dynamic,100) nowait
			for (i=0; i < G->nv; i++){
				if(h_level[i]== thislevel) {
					for (e = G->firstnbr[i]; e < G->firstnbr[i+1]; e++) {
						w = G->nbr[e];          // w is the current neighbor of v

						if (h_level[w] == -1) {   // w has not already been reached
							// Benign race: two threads may both see -1 and
							// enqueue w twice; both write the same level.
							h_level[w] = thislevel+1;
							nnodearray[my_cpu_id][nsize[my_cpu_id]] = w;
							nsize[my_cpu_id]++;		
						}
					}
				}
			}            
		}
		// Concatenate the per-thread next queues into cnodearray.
		size = 0;
		index=0;
		for (i = 0; i<numthreads ; i++){
			size = size + nsize[i];
			for(j=0;j<nsize[i];j++){
				cnodearray[index] = nnodearray[i][j];
				index++;
			}
			nsize[i] = 0;
		}
	}
	else if (state == 1) {
	    // Parallel queue-based expansion over the current frontier queue.

#pragma omp parallel private(i,e,w,v,my_cpu_id)
		{
		my_cpu_id=omp_get_thread_num();
# pragma omp for schedule(dynamic,100) nowait
		for (i=0; i<levelsize[thislevel]; i++){
		    v = cnodearray[i];
		    //printf("v=%d\n",v);
		    for (e = G->firstnbr[v]; e < G->firstnbr[v+1]; e++) {
			w = G->nbr[e];          // w is the current neighbor of v

			if (h_level[w] == -1) {   // w has not already been reached
			    // Benign race as in state 4: duplicates possible,
			    // but the stored level value is still correct.
			    h_level[w] = thislevel+1;
			    nnodearray[my_cpu_id][nsize[my_cpu_id]] = w;
			    nsize[my_cpu_id]++;		
			}
		    }
		}            
	    }
	    // Concatenate the per-thread next queues into cnodearray.
	    size = 0;
	    index=0;
	    for (i = 0; i<numthreads ; i++){
		size = size + nsize[i];
		for(j=0;j<nsize[i];j++){
		    cnodearray[index] = nnodearray[i][j];
		    index++;
		}
		nsize[i] = 0;
	    }
	}
	else if (state == 5) {
	    // GPU takeover: ship the graph and current levels to the device,
	    // then rerun the level-expansion kernel until one full pass
	    // labels no new vertex.  The rest of the BFS completes here.

	    time_elapsed = omp_get_wtime()- start;

	    //Copy to GPU
	    err = cudaMemcpy(d_nodes,G->firstnbr, (G->nv+1) * sizeof(int) , cudaMemcpyHostToDevice); assert( err == cudaSuccess);
	    err = cudaMemcpy(d_edges,G->nbr, (G->ne) * sizeof(int) ,  cudaMemcpyHostToDevice); assert( err == cudaSuccess);
	    err = cudaMemcpy(d_level,h_level, (G->nv) * sizeof(int) , cudaMemcpyHostToDevice); assert( err == cudaSuccess);

	    start = omp_get_wtime() ;

	    //Start GPU
	    do
	    {
	            *h_finished = true ;

	            // NOTE(review): `err` still holds the previous memcpy's
	            // status; this assert does not check the copy on this line.
	            cudaMemcpy(d_finished,h_finished,1 * sizeof(bool), cudaMemcpyHostToDevice); assert ( err == cudaSuccess);
	            // thislevel advances once per GPU pass; the blocking
	            // device-to-host memcpy below also synchronizes the launch.
	            warp_baseline_kn<32> <<< numblocks, numthreadsperblock >>>(
	        		    thislevel++, d_finished, d_level, G->nv, G->ne, d_edges, d_nodes);
	            cudaMemcpy(h_finished,d_finished,1 * sizeof(bool), cudaMemcpyDeviceToHost);

	    } while(!(*h_finished));

	    time_elapsed += omp_get_wtime()- start;
	    printf("Time to run = %f.\n",time_elapsed);

	    //printf("\n");
	    // *h_finished is true here, so this stores 0; with size also 0,
	    // the outer while terminates after the increment below.
	    levelsize[thislevel] = !(*h_finished) ;
	    printf("Copying result to host ...\n");
	    cudaMemcpy(h_level , d_level, (G->nv) * sizeof(int) , cudaMemcpyDeviceToHost);

	    //time_elapsed = omp_get_wtime()- start;

	}

	//GPU ends here

	else {
	    // state 0: serial queue-based expansion using thread 0's buffer.
	    for (i=0; i<levelsize[thislevel]; i++){
		v = cnodearray[i];
		for (e = G->firstnbr[v]; e < G->firstnbr[v+1]; e++) {
		    w = G->nbr[e];          // w is the current neighbor of v
		    if (h_level[w] == -1) {   // w has not already been reached
			h_level[w] = thislevel+1;
			nnodearray[0][nsize[0]] = w;
			nsize[0]++;		
		    }
		}
	    }            
	    size = 0;
	    index=0;
	    size = size + nsize[0];
	    for(j=0;j<nsize[0];j++){
		cnodearray[index] = nnodearray[0][j];
		index++;
	    }
	    // BUG FIX: reset nsize[0] after draining the buffer (states 1
	    // and 4 already do this).  Without the reset, stale vertices
	    // accumulate in nnodearray[0] across levels, inflating level
	    // sizes and -- on graphs whose frontier never exceeds T1 --
	    // keeping `size` positive forever, so the loop never terminates.
	    nsize[0] = 0;
	}
	thislevel = thislevel+1;
	levelsize[thislevel]=size;
    }
    *nlevelsp = thislevel;
    //end=omp_get_wtime();
   // printf("Time to run = %f.\n",time_elapsed);
    //free(queue);
}


// Driver: read an edge list from stdin, build a CSR graph, run BFS from the
// vertex given on the command line, and report per-level statistics.
int main (int argc, char* argv[]) {
    graph *G;
    int *level, *levelsize, *parent;
    int *tail, *head;
    int nedges;
    int nlevels;
    int startvtx;
    int i, v, reached;

    if (argc == 2) {
	startvtx = atoi (argv[1]);
    } else {
	printf("usage:   bfstest <startvtx> < <edgelistfile>\n");
	printf("example: cat sample.txt | ./bfstest 1\n");
	exit(1);
    }

#ifdef _OPENMP
    numthreads=omp_get_max_threads();
#else
    numthreads=1;
#endif

    nedges = read_edge_list (&tail, &head);
    G = graph_from_edge_list (tail, head, nedges);
    free(tail);
    free(head);
    print_CSR_graph (G);

    printf("numthreads = %d\n", numthreads);
    printf("Starting vertex for BFS is %d.\n\n",startvtx);
    // NOTE: bfs never assigns *parentp, so `parent` stays uninitialized
    // and must not be read.
    bfs (startvtx, G, &level, &nlevels, &levelsize, &parent);

    // Recompute per-level counts from the final level[] labels.
    for (i = 0; i < nlevels; i++) levelsize[i]=0;

    for (v = 0; v < G->nv; v++) {
	// BUG FIX: unreachable vertices carry level -1; counting them
	// blindly wrote out of bounds at levelsize[-1].  Skip them.
	if (level[v] >= 0)
	    levelsize[level[v]]++;
    }

    reached = 0;
    for (i = 0; i < nlevels; i++) reached += levelsize[i];
    printf("Breadth-first search from vertex %d reached %d levels and %d vertices.\n",
	    startvtx, nlevels, reached);
    for (i = 0; i < nlevels; i++) printf("level %d vertices: %d\n", i, levelsize[i]);
    if (G->nv < 20) {
        // For tiny graphs, dump every vertex's level for inspection.
        for (v = 0; v < G->nv; v++) printf("%6d%7d\n", v, level[v]);
    }
    return 0;
}

// One BFS level-expansion pass over all vertices (warp-centric baseline).
//
// Launch shape: 1-D grid; blockDim.x must be NUM_VWARPS * 32 so the
// per-warp shared staging array is fully indexed.  Each group of WARP_SIZE
// consecutive threads cooperates on a WARP_SIZE-wide strip of vertices.
//
// Arguments:
//   level      - frontier level being expanded this pass
//   d_finished - set to false if any vertex was newly labeled
//   d_level    - per-vertex BFS level (-1 == unvisited), read and written
//   N, M       - vertex and edge counts
//   d_edges    - CSR adjacency array
//   d_nodes    - CSR row-offset array, N+1 entries
//
// NOTE(review): relies on implicit warp-synchronous execution between the
// shared-memory staging copies and their consumption (only fences, no
// __syncwarp) -- a pre-Volta assumption; confirm before running on Volta+.
template<int WARP_SIZE>
__global__ void warp_baseline_kn(int level, bool* d_finished,  int* d_level, int N, int M, int* d_edges, int* d_nodes)
{
	// Per-warp staging buffers in shared memory.
	__shared__ warp_mem<WARP_SIZE> local_mem[NUM_VWARPS];

	int i = blockIdx.x * blockDim.x + threadIdx.x;
	const unsigned int grid_size = gridDim.x * blockDim.x;

	int warp_id = i / (WARP_SIZE);               // global warp index
	int warp_offset = threadIdx.x % (WARP_SIZE); // lane within the warp
	int local_warp_id = threadIdx.x / WARP_SIZE; // warp index within the block
	warp_mem<WARP_SIZE>* local = &(local_mem[ local_warp_id ]);

	// Each warp owns a WARP_SIZE-wide strip of vertices starting at n;
	// strips advance by grid_size (== total warps * WARP_SIZE) per round.
	int n = warp_id * (WARP_SIZE);
	bool finished = true;

	while (n < N) {
		// Partial strip at the tail of the vertex range.
		int work_sz = ((N-n) > (WARP_SIZE)) ? (WARP_SIZE) : (N-n);

		// Stage this strip's levels and CSR offsets into shared memory.
		// work_sz+1 offsets are read; d_nodes has N+1 entries, so the
		// extra read stays in bounds.
		gpu_memcpy_SIMD2<int, int , WARP_SIZE> (local->levels, &(d_level[n]), work_sz, warp_offset);
		gpu_memcpy_SIMD<int, WARP_SIZE> (local->nodes, &(d_nodes[n]), work_sz+1, warp_offset);

		for(int j = 0; j < work_sz; j++)
		{

			if (local->levels[j] ==  level)  // strip vertex j is on the frontier
			{
				int from = local->nodes[j];
				int to = local->nodes[j + 1];

				// All lanes of the warp expand this vertex's edges together.
				bool temp = expand_frontier_gpu_SIMD<WARP_SIZE>
					(M, from, to, level, d_edges, d_level, warp_offset, d_finished);
				finished = finished  && temp;

			}
		}

		n += grid_size;
  	}

    // Benign race: many threads may store false; no thread ever stores true.
    if (finished == false)
        *d_finished = false;
}
