#include <iostream>
#include <stdio.h>      
#include <stdlib.h>     
#include <time.h> 
#include <thrust/device_vector.h>
#include <math.h>
#include <thrust/sort.h>
#include <climits>
#include <set>
#include <fstream>
#include <cuda_profiler_api.h>
using namespace std;

#define NUM_QUERY 12800
#define NUM_DOCS 12800
#define NUM_KEYWORD 10000

#define MIN_KEY_TO_DOCS 3000 // minimal number documents which contains a particular keyword
#define MAX_KEY_TO_DOCS 8000 // maximum number documents which contains a particular keyword

#define BLOCK_SIZE 500 // threads per block used by every kernel launch below
#define MAX_KEYWORDS_PER_QUERY 5 // upper bound used to size per-block shared arrays

#define MAX_QUERYS_PER_KEYWORD 20 // cap on how many queries may reference the same keyword

#define QUERY_DOC_THRESHOLD 10 // NOTE(review): unused anywhere in this file — confirm before removing

typedef thrust::device_vector<int> dVec; // device vector
typedef thrust::host_vector<int> hVec; // vector on CPU

/* the following memory reside in GPU global memory */
dVec* d_query_to_doc; // query/document count matrix (flattened 2D vector), output of the program

dVec* d_query_to_keyword; // concatenated keyword lists, one sublist per query
dVec* d_query_to_keyword_idx; // starting position for each query in the previous vector
int d_query_to_keyword_size; // total size of d_query_to_keyword

dVec* d_keyword_to_query; // concatenated query lists, one sublist per keyword
dVec* d_keyword_to_query_idx; // starting position for each keyword in the previous vector
int d_keyword_to_query_size; // total size of d_keyword_to_query

dVec* d_keyword_to_doc; // inverted list of keyword, which is the document contains the keyword
dVec* d_keyword_to_doc_idx; // starting position for each keyword in the previous vector
int d_keyword_to_doc_size; // total size of d_keyword_to_doc

// Persist a CSR-style relation to disk as two text files:
//   <filename>Index.txt : offset-vector length, the offsets, then total_size
//   <filename>.txt      : the flattened relation values, space separated
// Vec may be any vector type with size() and operator[] (host or device).
template <class Vec>
void outputRelation(std::string filename, Vec& relation, Vec& relationIdx, int total_size){
	std::ofstream idx_out((filename + "Index.txt").c_str(), std::ios::out);
	idx_out << relationIdx.size() << std::endl;
	for(size_t pos = 0; pos < relationIdx.size(); ++pos)
		idx_out << relationIdx[pos] << " ";
	idx_out << std::endl << total_size << std::endl;
	idx_out.close();

	std::ofstream rel_out((filename + ".txt").c_str(), std::ios::out);
	for(size_t pos = 0; pos < relation.size(); ++pos)
		rel_out << relation[pos] << " ";
	rel_out.close();
}

// Print one CSR relation to stdout: for every row, "<label><row>[v v v ]".
// flat_size is the total length of `values` (used to close the last row).
template <class Vec>
static void printOneRelation(const char* header, const char* label, int rows,
	Vec& values, Vec& offsets, int flat_size){
	cout << header << endl;
	for(int row = 0; row < rows; row++)
	{
		int from = offsets[row];
		int to = (row == rows-1) ? flat_size : offsets[row+1];
		cout << label << row << "[";
		for(int pos = from; pos < to; pos++)
			cout << values[pos] << " ";
		cout << "]" << endl;
	}
}

// Dump the three relations either to disk (outputfile == true, which also
// writes meta_data.txt describing the generating configuration) or to stdout.
template <class Vec>
void printRelation(
	Vec& h_query_to_keyword,Vec& h_query_to_keyword_idx,
	Vec& h_keyword_to_query, Vec& h_keyword_to_query_idx,
	Vec& h_keyword_to_doc,Vec& h_keyword_to_doc_idx,bool outputfile){

	if(outputfile){
		ofstream meta("meta_data.txt",ios::out);
		meta << NUM_QUERY << " " << NUM_KEYWORD << " " << NUM_DOCS
			<< " " << MIN_KEY_TO_DOCS << " " << MAX_KEY_TO_DOCS
			<< " " << MAX_QUERYS_PER_KEYWORD << endl;
		meta.close();

		outputRelation("QueryToKeyword",h_query_to_keyword,h_query_to_keyword_idx,d_query_to_keyword_size);
		outputRelation("KeywordToQuery",h_keyword_to_query,h_keyword_to_query_idx,d_keyword_to_query_size);
		outputRelation("KeywordToDoc",h_keyword_to_doc,h_keyword_to_doc_idx,d_keyword_to_doc_size);
		return;
	}

	printOneRelation("query to keyword ----- ", "query ", NUM_QUERY,
		h_query_to_keyword, h_query_to_keyword_idx, d_query_to_keyword_size);
	printOneRelation("keyword to query ----- ", "keyword ", NUM_KEYWORD,
		h_keyword_to_query, h_keyword_to_query_idx, d_keyword_to_query_size);
	printOneRelation("keyword to documents  ----- ", "keyword ", NUM_KEYWORD,
		h_keyword_to_doc, h_keyword_to_doc_idx, d_keyword_to_doc_size);
}

// Verify that <filename>.txt (the meta_data file written by a previous run)
// matches this build's compile-time configuration.
// Returns false on any mismatch, or if the file is missing/truncated.
bool isMetaDataCorrect(string filename){
	ifstream meta(filename.append(".txt").c_str());
	if(!meta.is_open()) return false; // a missing/unreadable file cannot match

	// expected values, in the order printRelation writes them
	const int expected[] = {
		NUM_QUERY, NUM_KEYWORD, NUM_DOCS,
		MIN_KEY_TO_DOCS, MAX_KEY_TO_DOCS, MAX_QUERYS_PER_KEYWORD
	};
	const int num_fields = sizeof(expected)/sizeof(expected[0]);

	for(int i = 0; i < num_fields; i++){
		int data = 0;
		if(!(meta >> data)) return false; // truncated or corrupt file
		if(data != expected[i]) return false;
	}
	meta.close();
	return true;
}

// Load a CSR-style relation previously written by outputRelation:
//   <filename>Index.txt holds the offset-vector size, the offsets, and total_size;
//   <filename>.txt holds the flattened relation values.
// Fills `relation`, `relationIdx` and `total_size` for the caller.
void constructRelation(string filename, hVec& relation, hVec& relationIdx, int& total_size){
	string idxfilename = filename;
	ifstream idx_file(idxfilename.append("Index.txt").c_str());
	int idxVecSize = 0;
	idx_file >> idxVecSize;
	relationIdx.resize(idxVecSize);
	for(int i = 0; i < idxVecSize; i++) idx_file >> relationIdx[i];
	idx_file >> total_size;
	idx_file.close();
	
	string relfilename = filename;
	// BUG FIX: this input stream was previously opened with ios::out;
	// an ifstream must be opened for reading.
	ifstream rel_file(relfilename.append(".txt").c_str(), ios::in);
	relation.resize(total_size);
	for(int i = 0; i < total_size; i++) rel_file >> relation[i];
	rel_file.close();
}

// Rebuild all device-side relations from the text files produced by a previous
// run (meta_data.txt plus the *Index.txt/*.txt pairs written by printRelation).
// Exits the process if the on-disk meta data does not match this build.
void constructRelationFromFile(){
	if(!isMetaDataCorrect("meta_data")){
		cout << "meta data does not match the program configuration" << endl;
		exit(1);
	}

	d_query_to_doc = new dVec(NUM_QUERY*NUM_DOCS,0);
	// BUG FIX: report the true footprint of the result matrix; elements are
	// int, so use sizeof(int) instead of the hard-coded 8 bytes per element.
	cout << "number of Mbyte allocate: "
		<< (double)NUM_QUERY*NUM_DOCS*sizeof(int)/pow(2.0,20) << endl;

	hVec h_query_to_keyword;
	hVec h_query_to_keyword_idx;
	constructRelation("QueryToKeyword",h_query_to_keyword,h_query_to_keyword_idx,d_query_to_keyword_size);
	// device vectors are copy-constructed directly from the host vectors
	d_query_to_keyword = new dVec(h_query_to_keyword);
	d_query_to_keyword_idx = new dVec(h_query_to_keyword_idx);
	cout << "d_query_to_keyword_size: " << d_query_to_keyword_size << endl;

	hVec h_keyword_to_query;
	hVec h_keyword_to_query_idx;
	constructRelation("KeywordToQuery",h_keyword_to_query,h_keyword_to_query_idx,d_keyword_to_query_size);
	d_keyword_to_query = new dVec(h_keyword_to_query);
	d_keyword_to_query_idx = new dVec(h_keyword_to_query_idx);
	cout << "d_keyword_to_query_size: " << d_keyword_to_query_size << endl;

	hVec h_keyword_to_doc;
	hVec h_keyword_to_doc_idx;
	constructRelation("KeywordToDoc",h_keyword_to_doc,h_keyword_to_doc_idx,d_keyword_to_doc_size);
	d_keyword_to_doc = new dVec(h_keyword_to_doc);
	d_keyword_to_doc_idx = new dVec(h_keyword_to_doc_idx);
	cout << "d_keyword_to_doc_size: " << d_keyword_to_doc_size << endl;
}

// Reorder the queries so those with the smallest total inverted-list workload
// come first; rewrites h_query_to_keyword / h_query_to_keyword_idx in the new
// order. (Did not appear to affect kernel performance in practice.)
void sortQueryBasedOnComputingSize(
	hVec& h_query_to_keyword,hVec& h_query_to_keyword_idx,
	hVec& h_keyword_to_doc,hVec& h_keyword_to_doc_idx){

	// workload[q] = total number of posting-list entries query q touches
	vector<int> workload(NUM_QUERY, 0);
	vector<int> order(NUM_QUERY);
	for(int q = 0; q < NUM_QUERY; q++)
	{
		order[q] = q;
		int kw_begin = h_query_to_keyword_idx[q];
		int kw_end = (q == NUM_QUERY-1) ? d_query_to_keyword_size : h_query_to_keyword_idx[q+1];
		for(int k = kw_begin; k < kw_end; k++)
		{
			int kw = h_query_to_keyword[k];
			int doc_begin = h_keyword_to_doc_idx[kw];
			int doc_end = (kw == NUM_KEYWORD-1) ? d_keyword_to_doc_size : h_keyword_to_doc_idx[kw+1];
			workload[q] += doc_end - doc_begin;
		}
	}

	// ascending by workload; order[] is permuted alongside the keys
	thrust::sort_by_key(workload.begin(), workload.end(), order.begin());

	hVec reordered_map;              // transformed h_query_to_keyword
	hVec reordered_idx(NUM_QUERY);   // transformed h_query_to_keyword_idx
	int running_total = 0;
	for(int slot = 0; slot < NUM_QUERY; slot++)
	{
		int q = order[slot];

		int kw_begin = h_query_to_keyword_idx[q];
		int kw_end = (q == NUM_QUERY-1) ? d_query_to_keyword_size : h_query_to_keyword_idx[q+1];

		reordered_idx[slot] = running_total;
		running_total += kw_end - kw_begin;

		for(int k = kw_begin; k < kw_end; k++)
			reordered_map.push_back(h_query_to_keyword[k]);
	}
	h_query_to_keyword = reordered_map;
	h_query_to_keyword_idx = reordered_idx;
}

// CPU reference implementation of the query-to-document count computation,
// used purely as a timing/correctness benchmark against the GPU kernels.
// Prints the elapsed time and the total number of (query, doc) hits.
void cpuComputeQueryDocMapping(
	hVec& h_query_to_keyword,hVec& h_query_to_keyword_idx,
	hVec& h_keyword_to_doc,hVec& h_keyword_to_doc_idx){
	
	vector<int> query_to_doc(NUM_QUERY*NUM_DOCS,0);
	cout << "start CPU computation" << endl;
	
	// BUG FIX: total_sum was declared and printed but never accumulated, so it
	// always reported 0. It now counts every increment (one per hit), using
	// long long to match the GPU-side counters.
	long long total_sum = 0;
	clock_t start = clock();
	
	for(int i = 0; i < NUM_QUERY; i++){
		int keyword_start_idx = h_query_to_keyword_idx[i];
		int keyword_end_idx = (i == NUM_QUERY-1) ? d_query_to_keyword_size : h_query_to_keyword_idx[i+1];
		
		for(int j = keyword_start_idx; j < keyword_end_idx; j++)
		{
			int keyword = h_query_to_keyword[j];
			int doc_start_idx = h_keyword_to_doc_idx[keyword];
			int doc_end_idx = (keyword == NUM_KEYWORD-1) ? d_keyword_to_doc_size : h_keyword_to_doc_idx[keyword+1];
			
			for(int k = doc_start_idx; k < doc_end_idx; k++){
				int doc = h_keyword_to_doc[k];
				query_to_doc[doc*NUM_QUERY+i]++;
				total_sum++; // each increment is one (query, doc) hit
			}
		}
	}
	
	double delta = ((double)clock()-start)/CLOCKS_PER_SEC;
	cout << "CPU computation time: " << delta << endl;
	cout << "total sum: " << total_sum << endl;
}


/* Randomly generate the sample query/keyword/document relations, upload them
	to the GPU global vectors, and persist them to disk (printRelation with
	outputfile == true). Returns the total number of inverted-list entries the
	GPU must process, for performance analysis. */
double construct_sample(){
	unsigned int seed = 1378995547U; // fixed seed for reproducible instances
	//unsigned int seed = time(NULL);
	srand(seed);
	
	//cudaSetDevice(2);
	
	/* allocation memory for query to document matrix using thrust and pass to global pointer*/
	d_query_to_doc = new dVec(NUM_QUERY*NUM_DOCS,0);
	
	// BUG FIX: elements are int, so report sizeof(int) bytes per element
	// instead of the hard-coded 8.
	cout << "number of Mbyte allocate: " << (double)NUM_QUERY*NUM_DOCS*sizeof(int)/pow(2.0,20) << endl;
	
	/* initialize query to keyword relationship, each query is randomly sampled with 3-5 keywords
		and then pass to global pointer */
	hVec h_query_to_keyword;
	hVec h_query_to_keyword_idx(NUM_QUERY);
	
	vector<vector<int> > keyword_query_mapping(NUM_KEYWORD);
	
	int total_size = 0;
	for(int i = 0; i < NUM_QUERY; i++)
	{
		int num_of_keywords = rand()%3 + 3; // 3..5 keywords per query
		h_query_to_keyword_idx[i] = total_size; // mark the starting index for different query
		total_size += num_of_keywords;
		
		// rejection-sample distinct keywords that still have capacity left
		set<int> distinct_keywords;
		while((int)distinct_keywords.size() != num_of_keywords){
			int keyword = rand()%NUM_KEYWORD;
			
			set<int>::iterator it = distinct_keywords.find(keyword);
			if(it != distinct_keywords.end()) continue;
			if((int)keyword_query_mapping[keyword].size() >= MAX_QUERYS_PER_KEYWORD) continue;
			
			keyword_query_mapping[keyword].push_back(i);
			distinct_keywords.insert(keyword);
		}
		
		for(set<int>::iterator iter = distinct_keywords.begin(); iter != distinct_keywords.end(); iter++) 
			h_query_to_keyword.push_back(*iter);
	}
	d_query_to_keyword_size = total_size;
	d_query_to_keyword = new dVec(total_size);
	*d_query_to_keyword = h_query_to_keyword;
	d_query_to_keyword_idx = new dVec(NUM_QUERY);
	*d_query_to_keyword_idx = h_query_to_keyword_idx;

	cout << "finish query to keyword construction .... " << endl;
	
	// build the reverse (keyword -> query) relation from the mapping above
	hVec h_keyword_to_query;
	hVec h_keyword_to_query_idx(NUM_KEYWORD);
	
	total_size = 0;
	for(int i = 0; i < NUM_KEYWORD; i++)
	{
		h_keyword_to_query_idx[i] = total_size;
		total_size += keyword_query_mapping[i].size();
		if((int)keyword_query_mapping[i].size() > MAX_QUERYS_PER_KEYWORD)
		{
			cout << "construct problem, a keyword associated too much number of queries: " << endl;
			cout << "keyword " << i << " associated with " << keyword_query_mapping[i].size() 
				<< " query where max possible is " << MAX_QUERYS_PER_KEYWORD << endl;
		}
		for(int j = 0; j < (int)keyword_query_mapping[i].size(); j++)
			h_keyword_to_query.push_back(keyword_query_mapping[i][j]);
	}
	d_keyword_to_query_size = total_size;
	d_keyword_to_query = new dVec(total_size);
	*d_keyword_to_query = h_keyword_to_query;
	d_keyword_to_query_idx = new dVec(NUM_KEYWORD);
	*d_keyword_to_query_idx = h_keyword_to_query_idx;
	
	cout << "finish keyword to query construction .... " << endl;
	
	/* initialize keyword to documents relationship and pass to global pointer,
		each keyword associates with MIN_KEY_TO_DOCS..MAX_KEY_TO_DOCS documents */
	hVec h_keyword_to_doc;
	hVec h_keyword_to_doc_idx(NUM_KEYWORD);
	
	total_size = 0;
	for(int i = 0; i < NUM_KEYWORD; i++)
	{
		int num_of_docs = rand()%(MAX_KEY_TO_DOCS-MIN_KEY_TO_DOCS)+MIN_KEY_TO_DOCS;
		h_keyword_to_doc_idx[i] = total_size; // mark the starting index for different keywords
		total_size += num_of_docs;
		
		// sample distinct documents via a set until the target count is reached
		set<int> distinct_docs;
		while((int)distinct_docs.size() != num_of_docs)
			distinct_docs.insert(rand()%NUM_DOCS);
		
		for(set<int>::iterator iter = distinct_docs.begin(); iter != distinct_docs.end(); iter++)
			h_keyword_to_doc.push_back(*iter);
	}
	d_keyword_to_doc_size = total_size;
	d_keyword_to_doc = new dVec(total_size);
	*d_keyword_to_doc = h_keyword_to_doc;
	d_keyword_to_doc_idx = new dVec(NUM_KEYWORD);
	*d_keyword_to_doc_idx = h_keyword_to_doc_idx;
	
	// persist the generated instance so later runs can reload it from disk
	printRelation(
		h_query_to_keyword,h_query_to_keyword_idx,
		h_keyword_to_query,h_keyword_to_query_idx,
		h_keyword_to_doc,h_keyword_to_doc_idx,true);
	
	// total number of integers which need to be processed by GPU
	double total_int_computed = 0;
	for(int i = 0; i < NUM_QUERY; i++)
	{
		int keyword_start_idx = h_query_to_keyword_idx[i];
		int keyword_end_idx = (i == NUM_QUERY-1) ? d_query_to_keyword_size : h_query_to_keyword_idx[i+1];
		
		for(int j = keyword_start_idx; j < keyword_end_idx; j++)
		{
			int keyword = h_query_to_keyword[j];
			int doc_start_idx = h_keyword_to_doc_idx[keyword];
			int doc_end_idx = (keyword == NUM_KEYWORD-1) ? d_keyword_to_doc_size : h_keyword_to_doc_idx[keyword+1];
			total_int_computed += (doc_end_idx - doc_start_idx);
		}		
	}
	cout << "total number of integer need to be computed for full evaluation: " << total_int_computed << endl;
	return total_int_computed;
}

// Each thread evaluates one query: for every keyword of the query, walk the
// keyword's inverted list and bump the (doc, query) cell of the result matrix.
// Expected launch: 1-D grid providing at least NUM_QUERY total threads.
__global__ void simpleThreadMapQueryMethod(
	int* query_to_doc, // output: doc-major matrix, indexed [doc*NUM_QUERY + query]
	int* query_to_keyword, int* query_to_keyword_idx,
	int* keyword_to_doc, int* keyword_to_doc_idx,
	int query_to_keyword_size, int keyword_to_doc_size){
	
	int tid = blockIdx.x * blockDim.x + threadIdx.x;
	
	// BUG FIX: bounds guard. Without it, any launch whose thread count is not
	// exactly NUM_QUERY reads past the index arrays. Safe to return early:
	// this kernel contains no block-wide barriers.
	if(tid >= NUM_QUERY) return;
	
	int keyword_start_idx = query_to_keyword_idx[tid];
	int keyword_end_idx = (tid == NUM_QUERY-1) ? query_to_keyword_size : query_to_keyword_idx[tid+1];
	
	// for every query, check all the keywords
	for(int i = keyword_start_idx; i < keyword_end_idx; i++)
	{
		int keyword = query_to_keyword[i];
		
		int doc_start_idx = keyword_to_doc_idx[keyword];
		int doc_end_idx = (keyword == NUM_KEYWORD-1) ? keyword_to_doc_size : keyword_to_doc_idx[keyword+1];
		
		// for each keyword, find all docs contain this keyword
		for(int j = doc_start_idx; j < doc_end_idx; j++)
		{
			int doc = keyword_to_doc[j];
			// no atomics needed: each thread exclusively owns column `tid`
			query_to_doc[doc*NUM_QUERY+tid]++;
		}
	}
}

// each block takes a query (blockIdx.x == query id); the block's threads
// cooperatively scan the inverted list of every keyword in the query, reading
// at most topK documents per list. Expected launch: <<<NUM_QUERY, blockDim>>>
// with a 1-D or 2-D block shape.
__global__ void simpleBlockMapQueryMethod(
	int* query_to_doc, // output: query-major matrix, indexed [query*NUM_DOCS + doc]
	int* query_to_keyword, int* query_to_keyword_idx,
	int* keyword_to_doc, int* keyword_to_doc_idx,
	int query_to_keyword_size, int keyword_to_doc_size,
	int topK){
	
	int bid = blockIdx.x;
	int tid = threadIdx.x + blockDim.x * threadIdx.y; // flat thread id within the block
	int block_size = blockDim.x * blockDim.y;
	
	int keyword_start_idx = query_to_keyword_idx[bid];
	int keyword_end_idx = (bid == NUM_QUERY-1) ? query_to_keyword_size : query_to_keyword_idx[bid+1];
	
	// loop though all keywords
	for(int i = keyword_start_idx; i < keyword_end_idx; i++)
	{
		int keyword = query_to_keyword[i];
		
		// doc range indices for the entire block
		int doc_start_idx = keyword_to_doc_idx[keyword];
		int doc_end_idx = (keyword == NUM_KEYWORD-1) ? keyword_to_doc_size : keyword_to_doc_idx[keyword+1];
		doc_end_idx = min(topK+doc_start_idx,doc_end_idx); // cap the scan at topK docs
		
		// number of iteration for the block to process this keyword
		int iter = (doc_end_idx - doc_start_idx)/block_size+ ((doc_end_idx - doc_start_idx)%block_size != 0);

		for(int j = 0; j < iter; j++)
		{
			// doc indices for the thread
			int thread_doc_idx = tid + j * block_size + doc_start_idx;
			if(thread_doc_idx < doc_end_idx)
			{
				int doc = keyword_to_doc[thread_doc_idx];
				query_to_doc[bid*NUM_DOCS+doc]++;
				//query_to_doc[tid*NUM_DOCS+doc]++;
			}
		}
		// uniform barrier: every thread of the block runs the same keyword
		// loop bounds, so all threads reach this point together
		__syncthreads();
	}
}

// Intended to mark queries whose evaluation is complete so later passes can
// skip them (used alongside simpleBlockMapQueryMethodWithStop); currently an
// unimplemented stub and never launched anywhere in this file.
__global__ void setFinishedQuery(int* query_to_doc,int* remaining_query){

}


// each block takes a query and only evaluate the query which is needed:
// blockIdx.x indexes into remaining_query to obtain the actual query id, and
// each call processes window number `iteration` (of width topK) of every
// inverted list, enabling incremental evaluation of unfinished queries only.
__global__ void simpleBlockMapQueryMethodWithStop(
	int* query_to_doc, // output: query-major matrix, indexed [query*NUM_DOCS + doc]
	int* query_to_keyword, int* query_to_keyword_idx,
	int* keyword_to_doc, int* keyword_to_doc_idx,
	int query_to_keyword_size, int keyword_to_doc_size,
	int* remaining_query, int iteration,
	int topK){
	
	int bid = remaining_query[blockIdx.x]; // indirect: the query this block evaluates
	int tid = threadIdx.x + blockDim.x * threadIdx.y;
	int block_size = blockDim.x * blockDim.y;
	
	int keyword_start_idx = query_to_keyword_idx[bid];
	int keyword_end_idx = (bid == NUM_QUERY-1) ? query_to_keyword_size : query_to_keyword_idx[bid+1];
	
	// loop though all keywords
	for(int i = keyword_start_idx; i < keyword_end_idx; i++)
	{
		int keyword = query_to_keyword[i];
		
		// doc range indices for the entire block, offset into this call's window;
		// if the window starts past the list end, the guard below makes the
		// inner loop a no-op
		int doc_start_idx = keyword_to_doc_idx[keyword] + topK*iteration;
		int doc_end_idx = (keyword == NUM_KEYWORD-1) ? keyword_to_doc_size : keyword_to_doc_idx[keyword+1];
		doc_end_idx = min(topK+doc_start_idx,doc_end_idx);
		
		// number of iteration for the block to process this keyword
		int iter = (doc_end_idx - doc_start_idx)/block_size+ ((doc_end_idx - doc_start_idx)%block_size != 0);

		for(int j = 0; j < iter; j++)
		{
			// doc indices for the thread
			int thread_doc_idx = tid + j * block_size + doc_start_idx;
			if(thread_doc_idx < doc_end_idx)
			{
				int doc = keyword_to_doc[thread_doc_idx];
				query_to_doc[bid*NUM_DOCS+doc]++;
			}
		}
		// uniform barrier: the keyword loop bounds are identical for all
		// threads of the block
		__syncthreads();
	}
}

// each block takes a query and accumulate query to doc relationship in shared memory
// and write to global memory if possible. Expected launch: <<<NUM_QUERY, BLOCK_SIZE>>>
// with 1-D blocks of exactly BLOCK_SIZE threads (shared arrays are indexed by threadIdx.x).
//
// NOTE(review): docs_array_aggre[k]++ below is a plain (non-atomic) shared-memory
// increment executed by any thread whose current doc matches cache slot k; if two
// threads in the same step hit the same slot, updates can be lost. The cache-reload
// branch similarly overwrites slots that other threads may be scanning between the
// two barriers. Confirm whether the data guarantees (distinct docs within one
// inverted-list chunk) make these collisions impossible, or add atomics.
__global__ void simpleBlockMapQueryUsingSharedMem(
	int* query_to_doc, // output
	int* query_to_keyword, int* query_to_keyword_idx,
	int* keyword_to_doc, int* keyword_to_doc_idx,
	int query_to_keyword_size, int keyword_to_doc_size){
	
	int bid = blockIdx.x;
	int tid = threadIdx.x;
	
	__shared__ int docs_array_cache[BLOCK_SIZE]; // for keeping records (cached doc ids)
	__shared__ unsigned char docs_array_aggre[BLOCK_SIZE]; // for aggregation (per-slot counts)
	
	docs_array_cache[tid] = -1; // -1 marks an empty cache slot
	docs_array_aggre[tid] = 0;
	
	int keyword_start_idx = query_to_keyword_idx[bid];
	int keyword_end_idx = (bid == NUM_QUERY-1) ? query_to_keyword_size : query_to_keyword_idx[bid+1];
	
	__shared__ int inv_list_start[MAX_KEYWORDS_PER_QUERY]; 
	__shared__ int inv_list_end[MAX_KEYWORDS_PER_QUERY]; 
	
	// a master thread to read keyword list information
	__shared__ int iter;
	__shared__ int num_keywords;
	if(tid == 0)
	{
		iter = 0;
		for(int i = keyword_start_idx; i < keyword_end_idx; i++)
		{
			int keyword = query_to_keyword[i];
			int keyword_invlist_start = keyword_to_doc_idx[keyword];
			int keyword_invlist_end = (keyword == NUM_KEYWORD-1) ? keyword_to_doc_size : keyword_to_doc_idx[keyword+1];
			
			inv_list_start[i-keyword_start_idx] = keyword_invlist_start;
			inv_list_end[i-keyword_start_idx] = keyword_invlist_end;
			
			int keyword_inv_list_size = keyword_invlist_end - keyword_invlist_start;
			
			// finding the maximum iteration 
			int temp_iter = keyword_inv_list_size / BLOCK_SIZE + (keyword_inv_list_size % BLOCK_SIZE != 0);
			iter = max(temp_iter,iter);
		}
		num_keywords = keyword_end_idx - keyword_start_idx;
	}
	
	__syncthreads(); // publish iter / num_keywords / inv_list_* to the whole block
	
	for(int i = 0 ; i < iter; i++)
	{
		for(int j = 0; j < num_keywords; j++)
		{
			int thread0pos = i * BLOCK_SIZE;
			int list_size = inv_list_end[j] - inv_list_start[j];
	
			// uniform across the block (depends only on i and j), so this
			// `continue` does not diverge around the barrier below
			if(thread0pos >= list_size) // if the keyword's inverted list is processed
				continue; 
			
			__syncthreads(); // make sure cache status is consistent
			
			int thread_pos = inv_list_start[j] + thread0pos + tid;
			int doc = thread_pos < inv_list_end[j] ? keyword_to_doc[thread_pos] : -1;
			bool doc_cache_hit = false;
			
			// checking possible aggregation
			if(doc != -1)
			{
				for(int k = 0; k < BLOCK_SIZE; k++)
				{
					if(docs_array_cache[k] == doc)
					{
						doc_cache_hit = true;
						docs_array_aggre[k]++; // NOTE(review): non-atomic, see header note
						break;
					}					
				}
			}
			
			__syncthreads();
			
			if(!doc_cache_hit && doc != -1) // reload the cache if it is a miss
			{
				// evict this thread's slot to global memory before reusing it
				if(docs_array_cache[tid] != -1)
				{
					int doc_idx = docs_array_cache[tid];
					query_to_doc[bid*NUM_DOCS+doc_idx] += docs_array_aggre[tid];
				}
				docs_array_cache[tid] = doc; 
				docs_array_aggre[tid] = 1;
			}
		}
	}
	
	__syncthreads();
	
	// clean up the cache: flush every still-occupied slot to global memory
	if(docs_array_cache[tid] != -1)
	{
		int doc_idx = docs_array_cache[tid];
		query_to_doc[bid*NUM_DOCS+doc_idx] += docs_array_aggre[tid];
	}
}

// Each thread owns one keyword: it copies the keyword's query list into its
// slice of shared memory, then walks the keyword's inverted list (capped at
// topK docs), atomically incrementing every (query, doc) cell — atomics are
// required because different keywords may update the same cell concurrently.
// Expected launch: 1-D grid providing at least NUM_KEYWORD total threads.
__global__ void simpleThreadMapKeyword(
	int* query_to_doc, // output: query-major matrix, indexed [query*NUM_DOCS + doc]
	int* keyword_to_query, int* keyword_to_query_idx,
	int* keyword_to_doc, int* keyword_to_doc_idx,
	int keyword_to_query_size, int keyword_to_doc_size,
	int topK // number of docs read per inverted list
	){
	
	int tid = threadIdx.x + blockDim.x*blockIdx.x;
	
	// each thread takes a portion of this memory and store keyword to query mapping
	__shared__ int local_keyword_to_query_map[BLOCK_SIZE*MAX_QUERYS_PER_KEYWORD];
	
	// BUG FIX: bounds guard. Without it, any launch whose thread count is not
	// exactly NUM_KEYWORD reads past the index arrays. Safe to return early:
	// this kernel has no block-wide barriers.
	if(tid >= NUM_KEYWORD) return;
	
	int query_start = keyword_to_query_idx[tid];
	int query_end = (tid == NUM_KEYWORD-1) ? keyword_to_query_size : keyword_to_query_idx[tid+1];
	int query_size = query_end - query_start; // number of queries this keyword is associated with
	
	// store keyword to queries mapping to shared memory
	int shared_mem_idx = threadIdx.x * MAX_QUERYS_PER_KEYWORD;
	for(int i = query_start; i < query_end; i++)
		local_keyword_to_query_map[shared_mem_idx+(i-query_start)] = keyword_to_query[i];
	
	int doc_start = keyword_to_doc_idx[tid];
	int doc_end = (tid == NUM_KEYWORD-1) ? keyword_to_doc_size : keyword_to_doc_idx[tid+1];
	doc_end = min(doc_end,(doc_start+topK)); // cap the scan at topK docs
	
	for(int i = doc_start; i < doc_end; i++)
	{
		int doc = keyword_to_doc[i];
		for(int j = 0; j < query_size; j++)
		{
			int query = local_keyword_to_query_map[shared_mem_idx+j];
			atomicAdd(&query_to_doc[query*NUM_DOCS+doc],1);
		}
	}

}

// each block takes a keyword (blockIdx.x == keyword id): thread 0 stages the
// keyword's query list in shared memory, then all threads stride over the
// keyword's inverted list (capped at topK docs), atomically incrementing every
// (query, doc) cell — atomics are required because different blocks (keywords)
// may update the same cell concurrently.
__global__ void simpleBlockMapKeyword(
	int* query_to_doc, // output: query-major matrix, indexed [query*NUM_DOCS + doc]
	int* keyword_to_query, int* keyword_to_query_idx,
	int* keyword_to_doc, int* keyword_to_doc_idx,
	int keyword_to_query_size, int keyword_to_doc_size,
	int topK // number of docs read per inverted list
	){
	
	int bid = blockIdx.x;
	int tid = threadIdx.x;
	
	__shared__ int local_keyword_to_query_map[MAX_QUERYS_PER_KEYWORD];
	__shared__ int query_start;
	__shared__ int query_end;
	__shared__ int query_size;
	
	// a single master thread loads the (small) query list for this keyword
	if(tid == 0)
	{
		query_start = keyword_to_query_idx[bid];
		query_end = (bid == NUM_KEYWORD-1) ? keyword_to_query_size : keyword_to_query_idx[bid+1];
		query_size = query_end - query_start; // number of queries this keyword is associated with
		
		for(int i = 0; i < query_size; i++)
			local_keyword_to_query_map[i] = keyword_to_query[i+query_start];
	}
	
	__syncthreads(); // publish the staged query list to the whole block
	
	int doc_start = keyword_to_doc_idx[bid];
	int doc_end = (bid == NUM_KEYWORD-1) ? keyword_to_doc_size : keyword_to_doc_idx[bid+1];
	doc_end = min(doc_end,(doc_start+topK)); // cap the scan at topK docs
	int doc_size = doc_end - doc_start;
	
	// ceil(doc_size / blockDim.x) passes over the inverted list
	int iter = doc_size / blockDim.x + (doc_size % blockDim.x != 0);
	
	for(int i = 0; i < iter; i++)
	{
		int thread_pos = doc_start + i * blockDim.x + tid;
		if(thread_pos < doc_end)
		{
			int doc = keyword_to_doc[thread_pos];
			for(int j = 0; j < query_size; j++)
			{
				int query = local_keyword_to_query_map[j];
				atomicAdd(&query_to_doc[query*NUM_DOCS+doc],1);
			}
		}
	}
	
}


// Debug kernel illustrating printf ordering around a block-wide barrier.
// BUG FIX: __syncthreads() was previously called inside the divergent
// `if (tid < 10)` branch; a barrier not reached by every thread of the block
// is undefined behavior (possible hang). The barrier is now executed by all
// threads, while the printf calls remain limited to the first 10 threads.
__global__ void test(){
	int tid = threadIdx.x;
	
	if(tid < 10)
		printf("threadidx %d first\n",tid);
	
	__syncthreads();
	
	if(tid < 10)
		printf("threadidx %d second\n",tid);
}

// Benchmark the three kernel strategies for each candidate top-K value.
// For every k in k_value, runs: (1) block-per-query, (2) thread-per-keyword,
// (3) block-per-keyword. Each elapsed time is appended to elapsed_time[i]
// (caller must size elapsed_time to k_value.size()), and the total hit count
// is printed as a cross-method sanity check.
void topKtest(vector<int>& k_value,vector<vector<float> >& elapsed_time){
	cudaEvent_t start, stop;
	cudaEventCreate(&start);
	cudaEventCreate(&stop);	
	
	// BUG FIX: compute the buffer size as size_t; the previous int expression
	// would overflow for larger problem configurations.
	size_t mem_transfer_size = (size_t)NUM_QUERY*NUM_DOCS*sizeof(int);
	int* result_vec = (int*)malloc(mem_transfer_size);
	long long test_count = 0;
	
	for(int i = 0; i < (int)k_value.size(); i++)
	{
		float elapsedTime;
		
		// ---- method 1: one block per query ----
		cudaEventRecord(start,0);
		cudaProfilerStart();

		simpleBlockMapQueryMethod<<<NUM_QUERY,BLOCK_SIZE>>>(
			thrust::raw_pointer_cast(d_query_to_doc->data()),
			thrust::raw_pointer_cast(d_query_to_keyword->data()),
			thrust::raw_pointer_cast(d_query_to_keyword_idx->data()),
			thrust::raw_pointer_cast(d_keyword_to_doc->data()),
			thrust::raw_pointer_cast(d_keyword_to_doc_idx->data()),
			d_query_to_keyword_size,d_keyword_to_doc_size,
			k_value[i]);			
		
		cudaEventRecord(stop,0);
		cudaEventSynchronize(stop);
		cudaProfilerStop();
		cudaEventElapsedTime(&elapsedTime, start,stop);
		elapsed_time[i].push_back(elapsedTime);
		
		test_count = 0;
		cudaMemcpy(
			result_vec,thrust::raw_pointer_cast(d_query_to_doc->data()),
			mem_transfer_size,cudaMemcpyDeviceToHost);
		for(int m = 0; m < NUM_QUERY*NUM_DOCS; m++) test_count+= result_vec[m];
		cout << "k value: " << k_value[i] << " with method 1:" << test_count << endl;
		// zero the result matrix on-device instead of uploading a zero buffer
		// (replaces a ~NUM_QUERY*NUM_DOCS*4-byte host-to-device copy per reset)
		cudaMemset(thrust::raw_pointer_cast(d_query_to_doc->data()),0,mem_transfer_size);
		
		// ---- method 2: one thread per keyword ----
		cudaEventRecord(start,0);
		
		simpleThreadMapKeyword<<<NUM_KEYWORD/BLOCK_SIZE,BLOCK_SIZE>>>(
		thrust::raw_pointer_cast(d_query_to_doc->data()),
		thrust::raw_pointer_cast(d_keyword_to_query->data()),
		thrust::raw_pointer_cast(d_keyword_to_query_idx->data()),
		thrust::raw_pointer_cast(d_keyword_to_doc->data()),
		thrust::raw_pointer_cast(d_keyword_to_doc_idx->data()),
		d_keyword_to_query_size,d_keyword_to_doc_size,
		k_value[i]);
		
		cudaEventRecord(stop,0);
		cudaEventSynchronize(stop);
		cudaEventElapsedTime(&elapsedTime, start,stop);
		elapsed_time[i].push_back(elapsedTime);
		
		test_count = 0;
		cudaMemcpy(
			result_vec,thrust::raw_pointer_cast(d_query_to_doc->data()),
			mem_transfer_size,cudaMemcpyDeviceToHost);
		for(int m = 0; m < NUM_QUERY*NUM_DOCS; m++) test_count+= result_vec[m];
		cout << "k value: " << k_value[i] << " with method 2:" << test_count << endl;
		cudaMemset(thrust::raw_pointer_cast(d_query_to_doc->data()),0,mem_transfer_size);
		
		// ---- method 3: one block per keyword ----
		cudaEventRecord(start,0);
		
		simpleBlockMapKeyword<<<NUM_KEYWORD,BLOCK_SIZE>>>(
			thrust::raw_pointer_cast(d_query_to_doc->data()),
			thrust::raw_pointer_cast(d_keyword_to_query->data()),
			thrust::raw_pointer_cast(d_keyword_to_query_idx->data()),
			thrust::raw_pointer_cast(d_keyword_to_doc->data()),
			thrust::raw_pointer_cast(d_keyword_to_doc_idx->data()),
			d_keyword_to_query_size,d_keyword_to_doc_size,
			k_value[i]);
				
		cudaEventRecord(stop,0);
		cudaEventSynchronize(stop);
		cudaEventElapsedTime(&elapsedTime, start,stop);
		elapsed_time[i].push_back(elapsedTime);
		
		test_count = 0;
		cudaMemcpy(
			result_vec,thrust::raw_pointer_cast(d_query_to_doc->data()),
			mem_transfer_size,cudaMemcpyDeviceToHost);
		for(int m = 0; m < NUM_QUERY*NUM_DOCS; m++) test_count+= result_vec[m];
		cout << "k value: " << k_value[i] << " with method 3:" << test_count << endl;
		cudaMemset(thrust::raw_pointer_cast(d_query_to_doc->data()),0,mem_transfer_size);
	}
	
	free(result_vec);
	// BUG FIX: events were created but never destroyed (resource leak)
	cudaEventDestroy(start);
	cudaEventDestroy(stop);
}

// Entry point: either synthesizes a fresh random problem instance (writing it
// to disk for later reuse) or reloads a previously generated one, then runs
// the top-K kernel benchmark and reports each method's elapsed time.
int main(){
	double total_int_computed;

	cout << "enter 1 if want to generate a new instance, otherwise enter 0 for reading instance from file" << endl;
	int generate_new_inst = 0;
	cin >> generate_new_inst;

	if(generate_new_inst) total_int_computed = construct_sample();
	else constructRelationFromFile();

	cout << "start kernel code ..... " << endl;

	// single candidate top-K value; raise num_k to sweep several values
	int num_k = 1;
	vector<vector<float> > elapsed_time(num_k);
	vector<int> k_vec(num_k, 10000);

	topKtest(k_vec,elapsed_time);

	// one line per k value: keyword count followed by each method's time
	for(int i = 0; i < num_k; i++)
	{
		cout << NUM_KEYWORD;
		for(int j = 0; j < (int)elapsed_time[i].size(); j++)
			cout << " " << elapsed_time[i][j] << "ms";
		cout << endl;
	}

	return 0;
}

