#include "DataToDevice.cuh"

/*
DataToDevice::DataToDevice(FormulaData data, int number_of_jobs, int number_of_blocks,
			int number_of_threads, int max_implication_per_var, GPUVec<Var> dead_vars)
{
}
*/
// Bundles all solver state that must be visible on the device.
// Members are initialized by value-copy of the caller-supplied handles;
// the member init list below runs in member *declaration* order (see the
// class header), not in the textual order written here.
//
// Parameters:
//   clauses_database        - clause database handle, copied by value into clauses_db
//   number_of_variables     - variable count; also sizes the results container
//   number_of_clauses       - currently unused (only needed by the disabled
//                             watched-clause preallocation code below)
//   number_of_jobs          - capacity passed to the jobs queue
//   number_of_blocks / number_of_threads - launch geometry, stored for later use
//   max_implication_per_var - stored for later use by the solver
//   dead_vars               - vector of eliminated variables, copied by value
DataToDevice::DataToDevice(CUDAClauseVec clauses_database, int number_of_variables,
		int number_of_clauses,
		int number_of_jobs, int number_of_blocks, int number_of_threads,
		int max_implication_per_var,
		GPUVec<Var> dead_vars) :
clauses_db(clauses_database),
#ifdef ENABLE_STATISTICS
statistics(number_of_blocks, number_of_threads),
#endif
dead_vars(dead_vars),
results(number_of_variables, true),
queue(number_of_jobs),
nodes_repository(MAX_NUMBER_OF_NODES)
//,watched_clauses_per_thread(number_of_blocks*number_of_threads)

#ifdef ASSUMPTIONS_USE_DYNAMICALLY_ALLOCATED_VECTOR
,all_assumptions_parallel(number_of_threads*number_of_blocks),
assumptions_sequential(0)
#endif


{
	// Plain scalar members; note the member is named number_of_thread
	// (singular) in the header, while the parameter is plural.
	this->number_of_variables = number_of_variables;
	this->number_of_blocks = number_of_blocks;
	this->number_of_thread = number_of_threads;
	this->max_implication_per_var = max_implication_per_var;


	// Disabled: per-thread watched-clause preallocation. Kept for reference;
	// would carve one GPUVec<WatchedClause> per thread out of a single
	// device allocation.
	/*
	WatchedClause * all_watched_clauses;

	check(cudaMalloc(&all_watched_clauses, sizeof(WatchedClause)*
			number_of_blocks*number_of_threads*number_of_clauses));

	for(int i = 0; i < number_of_blocks*number_of_threads; i++)
	{

		GPUVec <WatchedClause> vec(all_watched_clauses,
				number_of_clauses, 0);

		watched_clauses_per_thread.add(vec);
		all_watched_clauses+=number_of_clauses;
	}

	*/

}

// Placeholder for sequential-mode preparation; intentionally empty.
// (Name spelling matches the declaration in the class header.)
void DataToDevice::prepare_sequencial()
{

}

// Prepares the device-side state for a parallel solve:
//  1. evaluates the job chooser (if not already done) and fills the queue,
//  2. (optionally) allocates one assumptions vector per worker thread,
//  3. allocates and zeroes the device-side "found answer" flag.
//
// Parameters:
//   chooser     - supplies the jobs; evaluated lazily here
//   assumptions - (macro-gated) base assumptions every job starts from
void DataToDevice::prepare_parallel(JobChooser & chooser
#ifdef ASSUMPTIONS_USE_DYNAMICALLY_ALLOCATED_VECTOR
		, GPUVec<Lit> & assumptions
#endif
		)
{
	// Make sure the chooser has computed its jobs before we pull them.
	if(!chooser.is_evaluated())
		chooser.evaluate();

	// Transfer all jobs into the queue and close it so the job count is fixed.
	chooser.getJobs(queue);
	queue.close();

#ifdef ASSUMPTIONS_USE_DYNAMICALLY_ALLOCATED_VECTOR

	int largest_job = queue.largest_job_size();

	// One per-thread assumptions vector, sized for the shared base
	// assumptions plus the largest job's literals.
	// FIX: the previous code heap-allocated each GPUVec with `new` and never
	// freed the pointer after copying it into the container, leaking one
	// host object per thread. GPUVec is copied by value throughout this file
	// (e.g. the constructor's dead_vars parameter), so a stack instance is
	// behaviorally equivalent and leak-free.
	for(int i = 0; i < number_of_thread*number_of_blocks; i++)
	{
		GPUVec<Lit> assump(assumptions.size_of()+largest_job);
		all_assumptions_parallel.add(assump);
	}
#endif


	// Device-side answer flag, initialized to 0 ("no answer found yet").
	// Use unsigned int to match the device allocation exactly.
	unsigned int init_value = 0;
	check(cudaMalloc(&found_answer, sizeof(unsigned int)), "Allocating data to send to SAT Solver.");
	check(cudaMemcpy(found_answer, &init_value, sizeof(unsigned int), cudaMemcpyHostToDevice), "Copying data to send to SAT Solver.");

}

#ifdef ASSUMPTIONS_USE_DYNAMICALLY_ALLOCATED_VECTOR
// Returns (by value) the container holding one assumptions vector per
// worker thread, built in prepare_parallel().
__host__ __device__ GPUVec< GPUVec<Lit> > DataToDevice::get_all_assumptions_parallel()
{
	return this->all_assumptions_parallel;
}
// Returns (by value) the sequential-mode assumptions vector.
// FIX: the original definition was missing the DataToDevice:: qualifier,
// making it a free function that could not resolve the
// assumptions_sequential member (initialized in the constructor's init list).
__device__ GPUVec<Lit> DataToDevice::get_assumptions_sequential()
{
	return assumptions_sequential;
}
#endif

// Returns (by value) the jobs queue filled by prepare_parallel().
__host__ __device__ JobsQueue DataToDevice::get_jobs_queue()
{
	return this->queue;
}
// Returns (by value) the clause database handle.
__host__ __device__ CUDAClauseVec DataToDevice::get_clauses_db()
{
	return this->clauses_db;
}
// Returns the variable count recorded at construction.
__host__ __device__ int DataToDevice::get_number_of_variables()
{
	return this->number_of_variables;
}

// Returns the per-variable implication limit recorded at construction.
__host__ __device__ int DataToDevice::get_max_implication_per_var()
{
	return this->max_implication_per_var;
}

// Returns (by value) the vector of eliminated ("dead") variables.
__host__ __device__ GPUVec<Var> DataToDevice::get_dead_vars()
{
	return this->dead_vars;
}

#ifdef ENABLE_STATISTICS
// Returns (by value) a snapshot of the runtime statistics object.
__host__ __device__ RuntimeStatistics DataToDevice::get_statistics()
{
	return this->statistics;
}

// Returns a pointer to the statistics member so callers can update it
// in place; valid only while this DataToDevice instance is alive.
__host__ __device__ RuntimeStatistics * DataToDevice::get_statistics_ptr()
{
	return &(this->statistics);
}

#endif

// Returns the device pointer to the "found answer" flag allocated in
// prepare_parallel() (0 = not found). Dereference on the device only.
__host__ __device__ unsigned int * DataToDevice::get_found_answer_ptr()
{
	return this->found_answer;
}

// Returns (by value) the results container.
__host__ __device__ Results DataToDevice::get_results()
{
	return this->results;
}

// Returns a pointer to the results member for in-place updates;
// valid only while this DataToDevice instance is alive.
__host__ __device__ Results* DataToDevice::get_results_ptr()
{
	return &(this->results);
}

// Returns a pointer to the linked-list node repository used by the
// watched-clause lists; valid only while this instance is alive.
__host__ __device__ NodesRepository<GPULinkedList<WatchedClause*>::Node>*
DataToDevice::get_nodes_repository_ptr()
{
	return &(this->nodes_repository);
}

// Returns a pointer to the clause database member for in-place access;
// valid only while this DataToDevice instance is alive.
__host__ __device__ CUDAClauseVec * DataToDevice::get_clauses_db_ptr()
{
	return &(this->clauses_db);
}

/*
__device__ GPUVec <WatchedClause> DataToDevice::get_watched_clauses(int thread_block_index)
{
	return watched_clauses_per_thread.get(thread_block_index);
}
*/
