#include <iostream>
#include <stdio.h>
#include "BFS.hpp"
#include "CommonGPU.h"
#include <string>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/remove.h>
#include <cub/cub.cuh>
#include <thrust/sort.h>

using namespace std;


// Sorts one tile of 2048 consecutive ints per thread block using
// cub::BlockRadixSort (1024 threads x 2 keys per thread).
// Launch config: <<<numTiles, 1024>>>; d_in/d_out must each hold
// gridDim.x * 2048 ints. Shared memory is the union of the three
// collectives' scratch areas (they run strictly in sequence).
__global__ void BlockSortKernel(int *d_in, int *d_out)
{
     // Collective primitives specialized for 1024 threads, 2 int keys each.
     typedef cub::BlockRadixSort<int, 1024, 2>                          TileSort;
     typedef cub::BlockLoad<int*, 1024, 2, cub::BLOCK_LOAD_TRANSPOSE>   TileLoad;
     typedef cub::BlockStore<int*, 1024, 2, cub::BLOCK_STORE_TRANSPOSE> TileStore;

     // The load/sort/store phases never overlap, so one union serves all three.
     __shared__ union {
         typename TileLoad::TempStorage  load;
         typename TileSort::TempStorage  sort;
         typename TileStore::TempStorage store;
     } smem;

     // First key of the 2048-element tile owned by this block.
     const int tile_base = blockIdx.x * (1024 * 2);

     // Gather this thread's 2 keys (transposed load for coalesced reads).
     int keys[2];
     TileLoad(smem.load).Load(d_in + tile_base, keys);
     __syncthreads();   // barrier before smem.load is reused as smem.sort

     // Collectively radix-sort the tile across the block.
     TileSort(smem.sort).Sort(keys);
     __syncthreads();   // barrier before smem.sort is reused as smem.store

     // Scatter the sorted tile back out (coalesced transposed store).
     TileStore(smem.store).Store(d_out + tile_base, keys);
}

// Per-block inclusive prefix sum over independent 8-int segments:
// block b scans d_in[b*8 .. b*8+7] into d_out at the same offsets.
// Launch config: <<<numSegments, 8>>>.
__global__ void BlockScanKernel(int* d_in, int* d_out){
	typedef cub::BlockScan<int, 8> SegScan;
	__shared__ typename SegScan::TempStorage scan_smem;

	// One element per thread; segments are 8 wide, matching blockDim.x.
	const int idx = blockIdx.x * 8 + threadIdx.x;
	int val = d_in[idx];

	// Collective inclusive sum across the 8 threads of this block.
	SegScan(scan_smem).InclusiveSum(val, val);
	__syncthreads();

	d_out[idx] = val;
	__syncthreads();
}

// Demo driver for sort_within_blks_tuned: builds 3 segments of 10 random
// keys (segment i holds values in [i*10, i*10+9]), sorts each segment on
// the device, and prints the data before and after.
int main(){

	/*BFS_GPU test(123);
	test.print(18);*/


	/*BFS_init(70000,100000000,"adj_mat_7_4_1_8.data");
	BFS(2);*/


	//srand(1);
	//thrust::host_vector<int> hvec(40000*1024*2);
	//for(int i = 0; i < hvec.size(); i++){
	//	hvec[i] = rand() % 10000;
	//}
	//thrust::device_vector<int> dvec(hvec.begin(),hvec.end());
	//thrust::device_vector<int> com_dvec(dvec.begin(),dvec.end());
	//thrust::device_vector<int> result(dvec.size());

	//cudaEvent_t start, stop;
	//float elapsedTime;
	//cudaEventCreate(&start);
	//cudaEventCreate(&stop);
	//cudaEventRecord(start, 0);

	//BlockSortKernel<<<20000,1024>>>(
	//	thrust::raw_pointer_cast(dvec.data()),
	//	thrust::raw_pointer_cast(result.data()));

	//cudaEventRecord(stop, 0);
	//cudaEventSynchronize(stop);
	//cudaEventElapsedTime(&elapsedTime, start, stop); 

	//cout << "this takes time: " << elapsedTime << endl;

	//cudaEventRecord(start, 0);
	//thrust::sort(com_dvec.begin(),com_dvec.end());
	//cudaEventRecord(stop, 0);
	//cudaEventSynchronize(stop);
	//cudaEventElapsedTime(&elapsedTime, start, stop); 
	//cout << "thrust takes time:" << elapsedTime << endl;

	const int NUM_SEGS = 3;   // number of sorted segments / thread blocks
	const int SEG_SIZE = 10;  // keys per segment
	const int TOTAL    = NUM_SEGS * SEG_SIZE;

	// Build the test data on the host first: push_back on a
	// thrust::device_vector issues a host<->device transfer per element.
	thrust::host_vector<int> hvec(TOTAL);
	thrust::host_vector<int> hvec_index(NUM_SEGS);
	int count = 0;
	for(int i = 0; i < NUM_SEGS; i++){
		for(int j = 0; j < SEG_SIZE; j++){
			// Segment i gets keys in [i*10, i*10+9] so segments are
			// already ordered relative to each other.
			hvec[i*SEG_SIZE + j] = rand()%10 + i*10;
		}
		count += SEG_SIZE;
		hvec_index[i] = count;  // exclusive end offset of segment i
	}

	// One bulk copy to the device for each array.
	thrust::device_vector<int> dvec(hvec.begin(), hvec.end());
	thrust::device_vector<int> dvec_index(hvec_index.begin(), hvec_index.end());
	thrust::device_vector<int> result(dvec.size());

	/*BlockScanKernel<<<10,8>>>(
		thrust::raw_pointer_cast(dvec.data()),
		thrust::raw_pointer_cast(result.data()));*/

	// Print the unsorted input (from the host staging copy — reading
	// dvec[i] element-wise would be one D2H transfer per element).
	for(int i = 0; i < NUM_SEGS; i++){
		for(int j = 0; j < SEG_SIZE; j++){
			cout << hvec[i*SEG_SIZE+j] << " ";
		}
		cout << endl;
	}

	cout << "start to compute: -------------------------------" << endl;

	// Bits needed to radix-sort keys in [0, TOTAL): keys max out at
	// (NUM_SEGS-1)*10 + 9 = TOTAL - 1, so ceil(log2(TOTAL)) bits suffice.
	int MAX_BITS = ceil(log((double)TOTAL)/log((double)2));

	sort_within_blks_tuned<<<NUM_SEGS,SORTED_SHARED_SIZE>>>(
		thrust::raw_pointer_cast(dvec.data()),
		thrust::raw_pointer_cast(dvec_index.data()),
		1,thrust::raw_pointer_cast(result.data()),0,MAX_BITS);

	// Launch-configuration errors surface via cudaGetLastError; in-kernel
	// faults surface at the synchronize. Check both — they are silent otherwise.
	cudaError_t err = cudaGetLastError();
	if (err != cudaSuccess) {
		cerr << "kernel launch failed: " << cudaGetErrorString(err) << endl;
		return 1;
	}
	err = cudaDeviceSynchronize();
	if (err != cudaSuccess) {
		cerr << "kernel execution failed: " << cudaGetErrorString(err) << endl;
		return 1;
	}

	cout << "finish compute" << endl;

	// Copy the (sorted) data back in one transfer, then print.
	// NOTE(review): output is read from dvec, not result — presumably the
	// kernel sorts in place; confirm against sort_within_blks_tuned.
	thrust::host_vector<int> hout(dvec.begin(), dvec.end());
	for(int i = 0; i < NUM_SEGS; i++){
		for(int j = 0; j < SEG_SIZE; j++){
			cout << hout[i*SEG_SIZE+j] << " ";
		}
		cout << endl;
	}

}