#include <iostream>
#include <vector>

//#include "curand.h"
#include "cnf_data.h"
#include "cuda_utils.h"
//#include "curand_utils.h"
#include "device_utils.h"
#include "stopwatch.h"
#include "utils.h"
#include "solver.h"
#include "init_kernel.h"
#include "restart_kernel.h"
#include "solver_kernel.h"
#include "thrust/sort.h"
#include "RandomLib/Random.hpp"

extern CNFData* __data;

RandomLib::Random* mtPtr;

// Descending-order comparator for float sort keys (used with
// thrust::sort_by_key to bring the most active learnt clauses first).
// Usable from both host and device code.
struct compare_greater_float
{
	// const-qualified: thrust copies the comparator by value and invokes it
	// through a const object in device code, so a non-const operator() can
	// fail to compile with newer thrust releases.
	__host__ __device__
	bool operator() ( float a, float b ) const
	{ return a > b; }
} comp;

// Build the per-block variable decision orders on the host.
//
// For each of the __blocksPerGrid solver blocks, fills one pitched row of
// varOrder with the permutation [0, varNumber).  Unless
// DEBUG_STATIC_VARIABLE_ORDER is defined, the row is shuffled with a
// Fisher-Yates pass driven by the shared MT19937 generator (mtPtr), so each
// block explores the search space in a different order.  The variables of
// the problem's unit clauses are then swapped to the front of every row so
// they are decided first.
//
// Parameters:
//   unitClaNumber - number of unit clauses in __data->_unitClaArr.
//   varNumber     - number of variables, i.e. entries per row of varOrder.
//   varOrder      - host buffer laid out as __blocksPerGrid rows of
//                   varOrderPitch bytes each (pitch obtained from
//                   cudaMallocPitch so it can be copied with cudaMemcpy2D).
//   varOrderPitch - row pitch in bytes.
void init_varOrder( int unitClaNumber, int varNumber, int* varOrder, unsigned int varOrderPitch )
{
	std::vector <int> varInitVec;

	for ( int i = 0; i < varNumber; ++i )
	{ varInitVec.push_back(i); }

	// Degenerate problem: nothing to order, and &varInitVec[0] below would
	// be undefined behavior on an empty vector.
	if ( varInitVec.empty() )
	{ return; }

	for ( int i = 0; i < __blocksPerGrid; ++i )
	{
		int* tVarOrder = (int*)( (char*)varOrder + i * varOrderPitch );
#ifndef DEBUG_STATIC_VARIABLE_ORDER
		size_t range = varInitVec.size();
#endif
		memcpy( tVarOrder, &varInitVec[0], sizeof(int) * varInitVec.size() );

// Undefine DEBUG_STATIC_VARIABLE_ORDER to use random variable order.
#ifndef DEBUG_STATIC_VARIABLE_ORDER
		// Fisher-Yates shuffle: pick a random element from the unshuffled
		// prefix [0, range) and swap it to the end of that prefix.
		for ( int j = 0; j < varNumber; ++j )
		{
			size_t index = mtPtr->Integer( range );
			int temp = 0;

			temp = tVarOrder[index];
			tVarOrder[index] = tVarOrder[range - 1];
			tVarOrder[range - 1] = temp;
			--range;
		}
#endif
	}

	// Swap unit clauses to the front.
	for ( int i = 0; i < unitClaNumber; ++i )
	{
		int v = (__data->_unitClaArr[i]).var();

		for ( int j = 0; j < __blocksPerGrid; ++j )
		{
			int* tVarOrder = (int*)( (char*)varOrder + j * varOrderPitch );

			for ( int k = 0; k < varNumber; ++k )
			{
				if ( tVarOrder[k] == v )
				{
					int temp = tVarOrder[i];

					tVarOrder[i] = tVarOrder[k];
					tVarOrder[k] = temp;
					// Each row is a permutation, so v occurs exactly once:
					// stop scanning (also avoids a redundant self-swap when
					// the scan later reaches position i).
					break;
				}
			}
		}
	}

#ifdef ASSERT
	// Verify every unit-clause variable really sits in the first
	// unitClaNumber slots of every block's order.
	for ( int i = 0; i < __blocksPerGrid; ++i )
	{
		int* tVarOrder = (int*)( (char*)varOrder + i * varOrderPitch );

		for ( int j = 0; j < unitClaNumber; ++j )
		{
			int v = (__data->_unitClaArr[j]).var();
			bool found = false;

			for ( int k = 0; k < unitClaNumber; ++k )
			{
				if ( tVarOrder[k] == v )
				{ found = true; break; }
			}

			if ( found == false )
			{ printf( "Assertion failed: [init_varOrder] unit clause not in the front of variable order.\n" ); }
		}
	}
#endif

#ifdef DEBUG_DPLL_INIT
	std::cout << "varOrder:" << std::endl;

	for ( int i = 0; i < __blocksPerGrid; ++i )
	{
		int* tVarOrder = (int*)( (char*)varOrder + i * varOrderPitch );
		std::cout << "Block #" << i << std::endl;

		for ( int j = 0; j < varNumber; ++j )
		{
			std::cout << tVarOrder[j] << ", ";
			if ( (j % 10) == 9 )
			{ std::cout << std::endl; }
		}
		std::cout << std::endl;
	}
#endif
/*
#ifdef ASSERT
	for ( int i = 0; i < __blocksPerGrid; ++i )
	{
		int* tVarOrder = (int*)( (char*)varOrder + i * varOrderPitch );

		for ( int j = 0; j < varNumber; ++j )
		{
			for ( int k = 0; k < varNumber; ++k )
			{
				if ( j == k )
				{ continue; }

				if ( tVarOrder[j] > varNumber )
				{ std::cout << "Assertion failed: [init_varOrder] varOrder " << tVarOrder[j] << " in block " << i << ", position " << j << " > varNumber = " << varNumber << std::endl; }
				if ( tVarOrder[j] == tVarOrder[k] )
				{ std::cout << "Assertion failed: [init_varOrder] Duplicated varOrder " << tVarOrder[j] << " in block " << i << ", position " << j << std::endl; }
			}
		}
	}
#endif
*/
	return;
}
/*
void init_varOrder( int varNumber, int* varOrder, unsigned int varOrderPitch )
{
	curandGenerator_t rng;
	unsigned int* dev_randomArr = NULL;
	unsigned int* randomArr = NULL;
	size_t n = varOrderPitch * __blocksPerGrid;

	HANDLE_HOST_NULL( randomArr = (unsigned int*)calloc( n, sizeof(unsigned int) ) );
	HANDLE_CUDA_ERROR( cudaMalloc( (void**)&dev_randomArr, sizeof(unsigned int) * n ) );
	HANDLE_CURAND_ERROR( curandCreateGenerator( &rng, CURAND_RNG_PSEUDO_DEFAULT ) );
	HANDLE_CURAND_ERROR( curandSetPseudoRandomGeneratorSeed( rng, 777ULL ) );
	HANDLE_CURAND_ERROR( curandGenerate( rng, dev_randomArr, n ) );
	HANDLE_CUDA_ERROR( cudaMemcpy( randomArr, dev_randomArr, sizeof(unsigned int) * n, cudaMemcpyDeviceToHost ) );

	std::vector <int> varInitVec;

	for ( int i = 0; i < varNumber; ++i )
	{ varInitVec.push_back(i); }

	for ( unsigned int i = 0; i < __blocksPerGrid; ++i )
	{
		unsigned int* tRandomKey = (unsigned int*)( (char*)randomArr + i * varOrderPitch );
		int* tVarOrder = (int*)( (char*)varOrder + i * varOrderPitch );

		memcpy( tVarOrder, &varInitVec[0], sizeof(int) * varInitVec.size() );

		thrust::sort_by_key( tRandomKey, tRandomKey + varNumber, tVarOrder );
	}

#ifdef DEBUG_CUDASAT
	std::cout << "varOrder:" << std::endl;

	for ( int i = 0; i < __blocksPerGrid; ++i )
	{
		int* tVarOrder = (int*)( (char*)varOrder + i * varOrderPitch );
		std::cout << "Block #" << i << std::endl;

		for ( int j = 0; j < varNumber; ++j )
		{
			std::cout << tVarOrder[j] << ", ";
			if ( (j % 10) == 9 )
			{ std::cout << std::endl; }
		}
		std::cout << std::endl;
	}
#endif

#ifdef ASSERT
	std::cout << "Verifying generated random order for duplicated ones...\n" << endl;

	for ( int i = 0; i < __blocksPerGrid; ++i )
	{
		int* tVarOrder = (int*)( (char*)varOrder + i * varOrderPitch );

		for ( int j = 0; j < varNumber; ++j )
		{
			for ( int k = 0; k < varNumber; ++k )
			{
				if ( j == k )
				{ continue; }

				if ( tVarOrder[j] > varNumber )
				{ std::cout << "Assertion failed: [init_varOrder] varOrder " << tVarOrder[j] << " in block " << i << ", position " << j << " > varNumber = " << varNumber << std::endl; }
				if ( tVarOrder[j] == tVarOrder[k] )
				{ std::cout << "Assertion failed: [init_varOrder] Duplicated varOrder " << tVarOrder[j] << " in block " << i << ", position " << j << std::endl; }
			}
		}
	}
#endif

	cudaFree( dev_randomArr );
	HANDLE_HOST_FREE( randomArr );
}
*/
void solver( void )
{
	int litNumber = __data->_litArrIndexSize - 1;
	int varNumber = litNumber >> 1;
	int unitClaNumber = __data->_unitClaArrSize;
	int claNumber = __data->_claArrIndexSize - 1;
	int watchNumber = __clausePtrIndexCapacity << 1;
	size_t estimatedMemory = 0;
	size_t deviceFreeMemory = 0;

	init_device( 2, 0 );
	print_device_limit();
	print_device_memory_usage();
	std::cout << std::endl;

	RandomLib::Random mt19937( __seed );
	mtPtr = &mt19937;
	std::cout << "MT19937 Random Generator: " << mt19937.Name() << " with seed = " << mt19937.SeedString() << std::endl;
#ifdef RANDOM_POLARITY
	std::cout << "CURAND seed = " << __curandSeed << std::endl;
#endif
	std::cout << "threadsPerBlock = " << __threadsPerBlock << ", blocksPerGrid = " << __blocksPerGrid << std::endl;
	std::cout << "Copy and initializing data..." << std::endl;

	cuda_stopwatch_init();
	cuda_stopwatch_start();

	int				initialLearntClauseLimit = claNumber >> 1;
	float			claActivityIncreaseRate = 1.0f / __claActivityDecayRate;

	Lit*			dev_unitClaArr = NULL;
	Lit*			dev_claArr = NULL;
	int*			dev_claArrIndex = NULL;
	Lbool*			dev_varAssignment = NULL;
	Lbool*			varAssignment = NULL;
	int*			dev_varOrder = NULL;
	int*			varOrder = NULL;
	Lit**			dev_watchPointer = NULL;
	Lit*			dev_trail = NULL;
	int*			dev_decisionLevel = NULL;
	bool*			dev_varSeenMarker = NULL;
	Clause**		dev_antecedentPointer = NULL;

	unsigned int*	dev_conflictCount = NULL;
	unsigned int*	conflictCount = NULL;
	unsigned int*	dev_decisionCount = NULL;
	unsigned int*	decisionCount = NULL;
	unsigned int*	dev_propagationCount = NULL;
	unsigned int*	propagationCount = NULL;
	unsigned int*	dev_conflictLitCount = NULL;
	unsigned int*	conflictLitCount = NULL;
	Lbool*			dev_satResult = NULL;
	Lbool*			satResult = NULL;

#ifdef RANDOM_POLARITY
	curandState*	dev_randState = NULL;
#endif
	Lit*			dev_unitClauseLit = NULL;
	int*			dev_unitVar = NULL;
	Clause**		dev_clausePtrIndex = NULL;
	float*			dev_clauseActivityKey = NULL; // for sorting.
	int*			dev_clausePtrIndexSize = NULL; // for copying to host.

	Vec <Lit>**		dev_localUnitClausePtr;
	Vec <int>**		dev_decisionLevelVarOrderPtr; // For back-tracking.
	Vec <int>**		dev_decisionLevelTrailIdPtr; // For back-tracking.
	Vec <Lit>**		dev_propagationQueuePtr;
	Vec <Lit>**		dev_tempQueuePtr;
	Vec <Clause*>**	dev_antecedentQueuePtr;
	Vec <Clause*>**	dev_tempAntecedentPtr;
	Vec <Clause*>**	dev_tempWatchListPtr;
	Vec <Lit>**		dev_learntClauseLitPtr;

	Vec <Clause*>**	dev_watchList = NULL;

	unsigned int	varAssignmentPitch = 0;
	unsigned int	varOrderPitch = 0;
	unsigned int	watchPointerPitch = 0;
	unsigned int	trailPitch = 0;
	unsigned int	decisionLevelPitch = 0;
	unsigned int	varSeenMarkerPitch = 0;
	unsigned int	antecedentPointerPitch = 0;

	HANDLE_CUDA_ERROR( cudaMalloc( (void**)&dev_unitClaArr, sizeof(Lit) * __data->_unitClaArrSize ) );
	HANDLE_CUDA_ERROR( cudaMemcpyAsync( dev_unitClaArr, __data->_unitClaArr,
							sizeof(Lit) * __data->_unitClaArrSize, cudaMemcpyHostToDevice ) );

	HANDLE_CUDA_ERROR( cudaMalloc( (void**)&dev_claArr, sizeof(int) * __data->_claArrSize ) );
	HANDLE_CUDA_ERROR( cudaMemcpyAsync( dev_claArr, __data->_claArr,
							sizeof(Lit) * __data->_claArrSize, cudaMemcpyHostToDevice ) );

	HANDLE_CUDA_ERROR( cudaMalloc( (void**)&dev_claArrIndex, sizeof(int) * __data->_claArrIndexSize ) );
	HANDLE_CUDA_ERROR( cudaMemcpyAsync( dev_claArrIndex, __data->_claArrIndex,
							sizeof(int) * __data->_claArrIndexSize, cudaMemcpyHostToDevice ) );

	HANDLE_CUDA_ERROR( cudaMallocPitch( (void**)&dev_varAssignment, &varAssignmentPitch,
							sizeof(Lbool) * varNumber, __blocksPerGrid ) );
	HANDLE_CUDA_ERROR( cudaMemset2DAsync( dev_varAssignment, varAssignmentPitch, 0,
							sizeof(Lbool) * varNumber, __blocksPerGrid ) );
	// Use page locked memory for double memory copy speed.
	HANDLE_CUDA_ERROR( cudaHostAlloc( (void**)&varAssignment,
							sizeof(Lbool) * varAssignmentPitch * __blocksPerGrid, cudaHostAllocDefault ) );
	memset( varAssignment, 0, sizeof(Lbool) * varAssignmentPitch * __blocksPerGrid );

	HANDLE_CUDA_ERROR( cudaMallocPitch( (void**)&dev_varOrder, &varOrderPitch,
							sizeof(int) * varNumber, __blocksPerGrid ) );
	HANDLE_CUDA_ERROR( cudaHostAlloc( (void**)&varOrder,
							sizeof(int) * varOrderPitch * __blocksPerGrid, cudaHostAllocDefault ) );
	std::cout << "Initializing random variable order..." << std::endl;
	cuda_stopwatch_stop();
	init_varOrder( unitClaNumber, varNumber, varOrder, varOrderPitch );
	std::cout << "Random variable order generation complete." << std::endl;
	cuda_stopwatch_stop();
	HANDLE_CUDA_ERROR( cudaMemcpy2DAsync( dev_varOrder, varOrderPitch, varOrder, varOrderPitch,
							sizeof(int) * varNumber, __blocksPerGrid, cudaMemcpyHostToDevice ) );

	HANDLE_CUDA_ERROR( cudaMallocPitch( (void**)&dev_watchPointer, &watchPointerPitch,
							sizeof(Lit*) * watchNumber, __blocksPerGrid ) );
	HANDLE_CUDA_ERROR( cudaMemset2DAsync( dev_watchPointer, watchPointerPitch, 0,
							sizeof(Lit*) * watchNumber, __blocksPerGrid ) );

	HANDLE_CUDA_ERROR( cudaMallocPitch( (void**)&dev_trail, &trailPitch,
							sizeof(Lit) * varNumber, __blocksPerGrid ) );
	HANDLE_CUDA_ERROR( cudaMemset2DAsync( dev_trail, trailPitch, 0,
							sizeof(Lit) * varNumber, __blocksPerGrid ) );

	HANDLE_CUDA_ERROR( cudaMallocPitch( (void**)&dev_decisionLevel, &decisionLevelPitch,
							sizeof(int) * varNumber, __blocksPerGrid ) );
	HANDLE_CUDA_ERROR( cudaMemset2DAsync( dev_decisionLevel, decisionLevelPitch, -1,
							sizeof(int) * varNumber, __blocksPerGrid ) );

	HANDLE_CUDA_ERROR( cudaMallocPitch( (void**)&dev_varSeenMarker, &varSeenMarkerPitch,
							sizeof(bool) * varNumber, __blocksPerGrid ) );
	HANDLE_CUDA_ERROR( cudaMemset2DAsync( dev_varSeenMarker, varSeenMarkerPitch, 0,
							sizeof(bool) * varNumber, __blocksPerGrid ) );

	HANDLE_CUDA_ERROR( cudaMallocPitch( (void**)&dev_antecedentPointer, &antecedentPointerPitch,
							sizeof(Clause*) * varNumber, __blocksPerGrid ) );
	HANDLE_CUDA_ERROR( cudaMemset2DAsync( dev_antecedentPointer, antecedentPointerPitch, 0,
							sizeof(Clause*) * varNumber, __blocksPerGrid ) );

	HANDLE_CUDA_ERROR( cudaMalloc( (void**)&dev_conflictCount, sizeof(unsigned int) * __blocksPerGrid ) );
	HANDLE_CUDA_ERROR( cudaMemsetAsync( (void*)dev_conflictCount, 0, sizeof(unsigned int) * __blocksPerGrid ) );
	HANDLE_CUDA_ERROR( cudaHostAlloc( (void**)&conflictCount, sizeof(unsigned int) * __blocksPerGrid, cudaHostAllocDefault ) );
	memset( conflictCount, 0, sizeof(unsigned int) * __blocksPerGrid );

	HANDLE_CUDA_ERROR( cudaMalloc( (void**)&dev_decisionCount, sizeof(unsigned int) * __blocksPerGrid ) );
	HANDLE_CUDA_ERROR( cudaMemsetAsync( (void*)dev_decisionCount, 0, sizeof(unsigned int) * __blocksPerGrid ) );
	HANDLE_CUDA_ERROR( cudaHostAlloc( (void**)&decisionCount, sizeof(unsigned int) * __blocksPerGrid, cudaHostAllocDefault ) );
	memset( decisionCount, 0, sizeof(unsigned int) * __blocksPerGrid );

	HANDLE_CUDA_ERROR( cudaMalloc( (void**)&dev_propagationCount, sizeof(unsigned int) * __blocksPerGrid ) );
	HANDLE_CUDA_ERROR( cudaMemsetAsync( (void*)dev_propagationCount, 0, sizeof(unsigned int) * __blocksPerGrid ) );
	HANDLE_CUDA_ERROR( cudaHostAlloc( (void**)&propagationCount, sizeof(unsigned int) * __blocksPerGrid, cudaHostAllocDefault ) );
	memset( propagationCount, 0, sizeof(unsigned int) * __blocksPerGrid );

	HANDLE_CUDA_ERROR( cudaMalloc( (void**)&dev_conflictLitCount, sizeof(unsigned int) * __blocksPerGrid ) );
	HANDLE_CUDA_ERROR( cudaMemsetAsync( (void*)dev_conflictLitCount, 0, sizeof(unsigned int) * __blocksPerGrid ) );
	HANDLE_CUDA_ERROR( cudaHostAlloc( (void**)&conflictLitCount, sizeof(unsigned int) * __blocksPerGrid, cudaHostAllocDefault ) );
	memset( conflictLitCount, 0, sizeof(unsigned int) * __blocksPerGrid );

	HANDLE_CUDA_ERROR( cudaMalloc( (void**)&dev_satResult, sizeof(Lbool) * __blocksPerGrid ) );
	HANDLE_CUDA_ERROR( cudaMemsetAsync( (void*)dev_satResult, 0, sizeof(Lbool) * __blocksPerGrid ) );
	// Use page locked memory for double memory copy speed.
	HANDLE_CUDA_ERROR( cudaHostAlloc( (void**)&satResult, sizeof(Lbool) * __blocksPerGrid, cudaHostAllocDefault ) );
	memset( satResult, 0, sizeof(Lbool) * __blocksPerGrid );

#ifdef RANDOM_POLARITY
	HANDLE_CUDA_ERROR( cudaMalloc( (void**)&dev_randState, sizeof(curandState) * __blocksPerGrid ) );
#endif
	// Allocate memory for clause pointers.
	// TODO: a memory manager for clause data storage by a large clause pool memory.
	// Thus the clause index size is also determined by the free memory available.
	// Some free memory is used by device to handle print buffer and memory management, etc.
	// It's about 100M per 512M total mamory from others' test in nVidia forum.
	//size_t freeDeviceMemory = get_device_memory_free();
	//size_t totalDeviceMemory = get_device_memory_total();
	//size_t clausePoolLimit = (freeDeviceMemory / sizeof(void*)) >> 1;

	// Static size of clause pointer index now.
	HANDLE_CUDA_ERROR( cudaMalloc( (void**)&dev_unitClauseLit, sizeof(Lit) * varNumber ) );
	HANDLE_CUDA_ERROR( cudaMemsetAsync( (void*)dev_unitClauseLit, 0, sizeof(Lit) * varNumber ) );

	HANDLE_CUDA_ERROR( cudaMalloc( (void**)&dev_unitVar, sizeof(int) * varNumber ) );
	HANDLE_CUDA_ERROR( cudaMemsetAsync( (void*)dev_unitVar, 0, sizeof(int) * varNumber ) );

	HANDLE_CUDA_ERROR( cudaMalloc( (void**)&dev_clausePtrIndex, sizeof(Clause*) * __clausePtrIndexCapacity ) );
	HANDLE_CUDA_ERROR( cudaMemsetAsync( (void*)dev_clausePtrIndex, NULL, sizeof(Clause*) * __clausePtrIndexCapacity ) );

	HANDLE_CUDA_ERROR( cudaMalloc( (void**)&dev_clauseActivityKey, sizeof(float) * __clausePtrIndexCapacity ) );
	HANDLE_CUDA_ERROR( cudaMemsetAsync( (void*)dev_clauseActivityKey, 0, sizeof(float) * __clausePtrIndexCapacity ) );

	HANDLE_CUDA_ERROR( cudaMalloc( (void**)&dev_clausePtrIndexSize, sizeof(int) ) );
	HANDLE_CUDA_ERROR( cudaMemsetAsync( (void*)dev_clausePtrIndexSize, 0, sizeof(int) ) );

	HANDLE_CUDA_ERROR( cudaMalloc( (void**)&dev_localUnitClausePtr, sizeof(Vec <Lit>*) * __blocksPerGrid ) );
	HANDLE_CUDA_ERROR( cudaMemsetAsync( (void*)dev_localUnitClausePtr, NULL, sizeof(Vec <Lit>*) * __blocksPerGrid ) );

	HANDLE_CUDA_ERROR( cudaMalloc( (void**)&dev_decisionLevelVarOrderPtr, sizeof(Vec <int>*) * __blocksPerGrid ) );
	HANDLE_CUDA_ERROR( cudaMemsetAsync( (void*)dev_decisionLevelVarOrderPtr, NULL, sizeof(Vec <int>*) * __blocksPerGrid ) );

	HANDLE_CUDA_ERROR( cudaMalloc( (void**)&dev_decisionLevelTrailIdPtr, sizeof(Vec <int>*) * __blocksPerGrid ) );
	HANDLE_CUDA_ERROR( cudaMemsetAsync( (void*)dev_decisionLevelTrailIdPtr, NULL, sizeof(Vec <int>*) * __blocksPerGrid ) );

	HANDLE_CUDA_ERROR( cudaMalloc( (void**)&dev_propagationQueuePtr, sizeof(Vec <Lit>*) * __blocksPerGrid ) );
	HANDLE_CUDA_ERROR( cudaMemsetAsync( (void*)dev_propagationQueuePtr, NULL, sizeof(Vec <Lit>*) * __blocksPerGrid ) );

	HANDLE_CUDA_ERROR( cudaMalloc( (void**)&dev_tempQueuePtr, sizeof(Vec <Lit>*) * __blocksPerGrid ) );
	HANDLE_CUDA_ERROR( cudaMemsetAsync( (void*)dev_tempQueuePtr, NULL, sizeof(Vec <Lit>*) * __blocksPerGrid ) );

	HANDLE_CUDA_ERROR( cudaMalloc( (void**)&dev_antecedentQueuePtr, sizeof(Vec <Clause*>*) * __blocksPerGrid ) );
	HANDLE_CUDA_ERROR( cudaMemsetAsync( (void*)dev_antecedentQueuePtr, NULL, sizeof(Vec <Clause*>*) * __blocksPerGrid ) );

	HANDLE_CUDA_ERROR( cudaMalloc( (void**)&dev_tempAntecedentPtr, sizeof(Vec <Clause*>*) * __blocksPerGrid ) );
	HANDLE_CUDA_ERROR( cudaMemsetAsync( (void*)dev_tempAntecedentPtr, NULL, sizeof(Vec <Clause*>*) * __blocksPerGrid ) );

	HANDLE_CUDA_ERROR( cudaMalloc( (void**)&dev_tempWatchListPtr, sizeof(Vec <Clause*>*) * __blocksPerGrid ) );
	HANDLE_CUDA_ERROR( cudaMemsetAsync( (void*)dev_tempWatchListPtr, NULL, sizeof(Vec <Clause*>*) * __blocksPerGrid ) );

	HANDLE_CUDA_ERROR( cudaMalloc( (void**)&dev_learntClauseLitPtr, sizeof(Vec <Lit>*) * __blocksPerGrid ) );
	HANDLE_CUDA_ERROR( cudaMemsetAsync( (void*)dev_learntClauseLitPtr, NULL, sizeof(Vec <Lit>*) * __blocksPerGrid ) );

	HANDLE_CUDA_ERROR( cudaMalloc( (void**)&dev_watchList, sizeof(Vec <Clause*>*) * __blocksPerGrid ) );
	HANDLE_CUDA_ERROR( cudaMemsetAsync( (void*)dev_watchList, claNumber, sizeof(Vec<Clause*>*) * __blocksPerGrid ) );

	cudaDeviceSynchronize();

	// Load constants into constant memory.
	HANDLE_CUDA_ERROR( cudaMemcpyToSymbol( __dev_initialLearntClauseLimit, &initialLearntClauseLimit, sizeof(int) ) );
	HANDLE_CUDA_ERROR( cudaMemcpyToSymbol( __dev_claActivityIncreaseRate, &claActivityIncreaseRate, sizeof(float) ) );

	HANDLE_CUDA_ERROR( cudaMemcpyToSymbol( __dev_unitClaArrSize, &__data->_unitClaArrSize, sizeof(int) ) );
	HANDLE_CUDA_ERROR( cudaMemcpyToSymbol( __dev_claArrSize, &__data->_claArrSize, sizeof(int) ) );
    HANDLE_CUDA_ERROR( cudaMemcpyToSymbol( __dev_claArrIndexSize, &__data->_claArrIndexSize, sizeof(int) ) );
    HANDLE_CUDA_ERROR( cudaMemcpyToSymbol( __dev_varNumber, &varNumber, sizeof(int) ) );
    HANDLE_CUDA_ERROR( cudaMemcpyToSymbol( __dev_litNumber, &litNumber, sizeof(int) ) );
    HANDLE_CUDA_ERROR( cudaMemcpyToSymbol( __dev_unitClaNumber, &unitClaNumber, sizeof(int) ) );
    HANDLE_CUDA_ERROR( cudaMemcpyToSymbol( __dev_claNumber, &claNumber, sizeof(int) ) );
    HANDLE_CUDA_ERROR( cudaMemcpyToSymbol( __dev_watchNumber, &watchNumber, sizeof(int) ) );

    HANDLE_CUDA_ERROR( cudaMemcpyToSymbol( __dev_unitClaArr, &dev_unitClaArr, sizeof(Lit*) ) );
    HANDLE_CUDA_ERROR( cudaMemcpyToSymbol( __dev_claArr, &dev_claArr, sizeof(Lit*) ) );
    HANDLE_CUDA_ERROR( cudaMemcpyToSymbol( __dev_claArrIndex, &dev_claArrIndex, sizeof(int*) ) );
    HANDLE_CUDA_ERROR( cudaMemcpyToSymbol( __dev_varAssignment, &dev_varAssignment, sizeof(Lbool*) ) );
    HANDLE_CUDA_ERROR( cudaMemcpyToSymbol( __dev_varOrder, &dev_varOrder, sizeof(int*) ) );
    HANDLE_CUDA_ERROR( cudaMemcpyToSymbol( __dev_watchPointer, &dev_watchPointer, sizeof(Lit**) ) );
    HANDLE_CUDA_ERROR( cudaMemcpyToSymbol( __dev_trail, &dev_trail, sizeof(Lit*) ) );
    HANDLE_CUDA_ERROR( cudaMemcpyToSymbol( __dev_decisionLevel, &dev_decisionLevel, sizeof(int*) ) );
    HANDLE_CUDA_ERROR( cudaMemcpyToSymbol( __dev_varSeenMarker, &dev_varSeenMarker, sizeof(bool*) ) );
    HANDLE_CUDA_ERROR( cudaMemcpyToSymbol( __dev_antecedentPointer, &dev_antecedentPointer, sizeof(Clause**) ) );

	HANDLE_CUDA_ERROR( cudaMemcpyToSymbol( __dev_conflictCount, &dev_conflictCount, sizeof(unsigned int*) ) );
	HANDLE_CUDA_ERROR( cudaMemcpyToSymbol( __dev_decisionCount, &dev_decisionCount, sizeof(unsigned int*) ) );
	HANDLE_CUDA_ERROR( cudaMemcpyToSymbol( __dev_propagationCount, &dev_propagationCount, sizeof(unsigned int*) ) );
	HANDLE_CUDA_ERROR( cudaMemcpyToSymbol( __dev_conflictLitCount, &dev_conflictLitCount, sizeof(unsigned int*) ) );
    HANDLE_CUDA_ERROR( cudaMemcpyToSymbol( __dev_satResult, &dev_satResult, sizeof(Lbool*) ) );
#ifdef RANDOM_POLARITY
    HANDLE_CUDA_ERROR( cudaMemcpyToSymbol( __dev_randState, &dev_randState, sizeof(curandState*) ) );
#endif
	HANDLE_CUDA_ERROR( cudaMemcpyToSymbol( __dev_varAssignmentPitch, &varAssignmentPitch, sizeof(unsigned int) ) );
	HANDLE_CUDA_ERROR( cudaMemcpyToSymbol( __dev_varOrderPitch, &varOrderPitch, sizeof(unsigned int) ) );
    HANDLE_CUDA_ERROR( cudaMemcpyToSymbol( __dev_watchPointerPitch, &watchPointerPitch, sizeof(unsigned int) ) );
    HANDLE_CUDA_ERROR( cudaMemcpyToSymbol( __dev_trailPitch, &trailPitch, sizeof(unsigned int) ) );
    HANDLE_CUDA_ERROR( cudaMemcpyToSymbol( __dev_decisionLevelPitch, &decisionLevelPitch, sizeof(unsigned int) ) );
    HANDLE_CUDA_ERROR( cudaMemcpyToSymbol( __dev_varSeenMarkerPitch, &varSeenMarkerPitch, sizeof(unsigned int) ) );
    HANDLE_CUDA_ERROR( cudaMemcpyToSymbol( __dev_antecedentPointerPitch, &antecedentPointerPitch, sizeof(unsigned int) ) );

	// Clause memory.
    HANDLE_CUDA_ERROR( cudaMemcpyToSymbol( __dev_unitClauseLit, &dev_unitClauseLit, sizeof(Lit*) ) );
    HANDLE_CUDA_ERROR( cudaMemcpyToSymbol( __dev_unitVar, &dev_unitVar, sizeof(int*) ) );
    HANDLE_CUDA_ERROR( cudaMemcpyToSymbol( __dev_clausePtrIndex, &dev_clausePtrIndex, sizeof(Clause**) ) );
    HANDLE_CUDA_ERROR( cudaMemcpyToSymbol( __dev_clausePtrIndexSize, &dev_clausePtrIndexSize, sizeof(int*) ) );
    HANDLE_CUDA_ERROR( cudaMemcpyToSymbol( __dev_clauseActivityKey, &dev_clauseActivityKey, sizeof(float*) ) );

    HANDLE_CUDA_ERROR( cudaMemcpyToSymbol( __dev_localUnitClausePtr, &dev_localUnitClausePtr, sizeof(Vec <Lit>**) ) );
    HANDLE_CUDA_ERROR( cudaMemcpyToSymbol( __dev_decisionLevelVarOrderPtr, &dev_decisionLevelVarOrderPtr, sizeof(Vec <int>**) ) );
    HANDLE_CUDA_ERROR( cudaMemcpyToSymbol( __dev_decisionLevelTrailIdPtr, &dev_decisionLevelTrailIdPtr, sizeof(Vec <int>**) ) );
    HANDLE_CUDA_ERROR( cudaMemcpyToSymbol( __dev_propagationQueuePtr, &dev_propagationQueuePtr, sizeof(Vec <Lit>**) ) );
    HANDLE_CUDA_ERROR( cudaMemcpyToSymbol( __dev_tempQueuePtr, &dev_tempQueuePtr, sizeof(Vec <Lit>**) ) );
    HANDLE_CUDA_ERROR( cudaMemcpyToSymbol( __dev_antecedentQueuePtr, &dev_antecedentQueuePtr, sizeof(Vec <Clause*>**) ) );
    HANDLE_CUDA_ERROR( cudaMemcpyToSymbol( __dev_tempAntecedentPtr, &dev_tempAntecedentPtr, sizeof(Vec <Clause*>**) ) );
    HANDLE_CUDA_ERROR( cudaMemcpyToSymbol( __dev_tempWatchListPtr, &dev_tempWatchListPtr, sizeof(Vec <Clause*>**) ) );
    HANDLE_CUDA_ERROR( cudaMemcpyToSymbol( __dev_learntClauseLitPtr, &dev_learntClauseLitPtr, sizeof(Vec <Lit>**) ) );

    HANDLE_CUDA_ERROR( cudaMemcpyToSymbol( __dev_watchList, &dev_watchList, sizeof(Vec <Clause*>**) ) );

	// Use 16k shared memory, 48k L1 cache configuration.
#ifdef RANDOM_POLARITY
	HANDLE_CUDA_ERROR( cudaFuncSetCacheConfig( kernel_init_rand, cudaFuncCachePreferL1 ) );
#endif
	HANDLE_CUDA_ERROR( cudaFuncSetCacheConfig( kernel_global_init_dpll_sat, cudaFuncCachePreferL1 ) );
	HANDLE_CUDA_ERROR( cudaFuncSetCacheConfig( kernel_block_init_dpll_sat, cudaFuncCachePreferL1 ) );
	HANDLE_CUDA_ERROR( cudaFuncSetCacheConfig( kernel_reduce_learnt_clause, cudaFuncCachePreferL1 ) );
	HANDLE_CUDA_ERROR( cudaFuncSetCacheConfig( kernel_restart_global_settings, cudaFuncCachePreferL1 ) );
	HANDLE_CUDA_ERROR( cudaFuncSetCacheConfig( kernel_restart_block_settings, cudaFuncCachePreferL1 ) );
	HANDLE_CUDA_ERROR( cudaFuncSetCacheConfig( kernel_dpll_sat, cudaFuncCachePreferL1 ) );

	cudaDeviceSynchronize();
	cuda_stopwatch_stop();

	print_device_memory_usage();

	set_device_stack_size_limit( __deviceStackSizeLimit );

	deviceFreeMemory = get_device_memory_free();
	set_device_heap_size_limit( (size_t)((float)deviceFreeMemory * __deviceHeapSizeFraction) );

	estimatedMemory += __clausePtrIndexCapacity * 80;
	estimatedMemory += (litNumber * (20 + claNumber)) * 4 * __blocksPerGrid;
	estimatedMemory += ((varNumber * 8) + (claNumber) + (20 * 9)) * 4 * __blocksPerGrid;

	std::cout << "Estimated maximal memory need for kernel = " << estimatedMemory << " bytes / " << (estimatedMemory / (1024*1024)) << " MB." << std::endl;
	if ( estimatedMemory > deviceFreeMemory )
	{ std::cout << "Possible insufficient device memory." << std::endl; }

	dim3 dimGridOne( 1, 1, 1 );
	dim3 dimGrid( __blocksPerGrid, 1, 1 );
	dim3 dimBlockOne( 1, 1, 1 );
	dim3 dimBlock( __threadsPerBlock, 1, 1 );
	dim3 dimBlock2( __threadsPerBlock2, 1, 1 );

	float time1 = 0.0f;
	float time2 = 0.0f;
	float time3 = 0.0f;
	float initTime = 0.0f;
	float procTime = 0.0f;
	float solverTime = 0.0f;
	unsigned int restart = 0;

	printf( "Initilizing CUDA DPLL SAT solver...\n" );

#ifdef RANDOM_POLARITY
	kernel_init_rand <<< dimGridOne, dimGrid >>> ();
	cudaDeviceSynchronize();
#endif
	kernel_global_init_dpll_sat <<< dimGridOne, dimBlock2 >>> ();
	cudaDeviceSynchronize();

	kernel_block_init_dpll_sat <<< dimGrid, dimBlock >>> ();
	cudaDeviceSynchronize();

	print_device_memory_usage();

	time1 = cuda_stopwatch_stop();
	initTime = time1;
	printf( "Initialization Time = %.3f ms.\n", initTime );

	printf( "Starting CUDA DPLL SAT solver...\n" );

	for ( restart = 0; restart < __maxRestart; ++restart )
	{
		if ( restart > 0 )
		{
			printf( "Restart #%d...\n", restart );

			int clausePtrIndexSizePtr [1];
			HANDLE_CUDA_ERROR( cudaMemcpy( (void*)clausePtrIndexSizePtr, (const void*)dev_clausePtrIndexSize, sizeof(int), cudaMemcpyDeviceToHost ) );
#ifdef DEBUG_CRITICAL
			std::cout << "Host solver(): *clausePtrIndexSizePtr = " << (*clausePtrIndexSizePtr) << ", claNumber = " << claNumber << std::endl; 
#endif
			if ( (*clausePtrIndexSizePtr) > claNumber ) {
				printf( "Sorting...\n", restart );
				// Use thrust sorting algorithm. It's only available on host.
				// But thrust sorting may require additional card memory.
				thrust::device_ptr <float> thrustKeyPtr( dev_clauseActivityKey );
				// thrust has compiler errors with pointer array, thus I have to cast the type to unsigned int here.
				thrust::device_ptr <unsigned int> thrustValuePtr( (unsigned int*)dev_clausePtrIndex );
#ifdef DEBUG_CUDASAT
				std::cout << "Raw activity:" << std::endl;
				for ( int i = claNumber; i < (*clausePtrIndexSizePtr); ++i )
				{
					std::cout << "["<< i << "] = " << thrustKeyPtr[i] << " / " << thrustValuePtr[i] << ", ";
					if ( (i % 5) == 0 )
					{ printf("\n"); }
				}
				std::cout << std::endl;
#endif
				thrust::sort_by_key( (thrustKeyPtr + claNumber), (thrustKeyPtr + (size_t)(*clausePtrIndexSizePtr)), (thrustValuePtr + claNumber), comp );
#ifdef DEBUG_CUDASAT
				std::cout << "Sorted activity:" << std::endl;
				for ( int i = claNumber; i < (*clausePtrIndexSizePtr); ++i )
				{
					std::cout << "["<< i << "] = " << thrustKeyPtr[i] << " / " << thrustValuePtr[i] << ", ";
					if ( (i % 5) == 0 )
					{ printf("\n"); }
				}
				std::cout << std::endl;
#endif
				cudaDeviceSynchronize();
				//time3 = cuda_stopwatch_stop();
			}
			printf( "kernel_restart_global_settings()...\n", restart );

			printf( "Removing duplicated clauses...\n", restart );

			kernel_reduce_learnt_clause <<< dimGridOne, dimBlock2 >>> ();
			cudaDeviceSynchronize();
			time3 = cuda_stopwatch_stop();
			procTime += (time3 - time2);
			printf( "Processing Time = %.3f / %.3f sec.\n", ((time3 - time2) / 1000), (procTime / 1000) );

			kernel_restart_global_settings <<< dimGridOne, dimBlock2 >>> ();
			cudaDeviceSynchronize();
			cuda_stopwatch_stop();

			printf( "kernel_restart_block_settings()...\n");

			kernel_restart_block_settings <<< dimGrid, dimBlock >>> ();
			cudaDeviceSynchronize();
			time1 = cuda_stopwatch_stop();
		}
		
		printf( "kernel_dpll_sat()...\n");

		// Run the DPLL search kernel for this round, then block until it finishes
		// so the timing below measures the whole launch.
		kernel_dpll_sat <<< dimGrid, dimBlock >>> ();
		cudaDeviceSynchronize();
		time2 = cuda_stopwatch_stop();

		print_device_memory_usage();
		solverTime += (time2 - time1);
		printf( "Solver Time = %.3f / %.3f sec.\n", ((time2 - time1) / 1000), (solverTime / 1000) );

		// Pull the per-instance verdicts back to the host to decide whether to
		// keep iterating (restart) or leave the outer loop.
		HANDLE_CUDA_ERROR( cudaMemcpy( (void*)satResult, (const void*)dev_satResult, sizeof(Lbool) * __blocksPerGrid, cudaMemcpyDeviceToHost ) );

		// DEBUG_ALL_SAT: keep restarting until EVERY instance is decided.
		// Otherwise: stop as soon as ANY single instance is decided.
#ifdef DEBUG_ALL_SAT
		bool stop = true;
#else
		bool stop = false;
#endif
		for ( unsigned int i = 0; i < __blocksPerGrid; ++i )
		{
#ifdef DEBUG_ALL_SAT
			if ( satResult[i] == __Lbool_Undef )
			{ stop = false; break; }
#else
			if ( satResult[i] != __Lbool_Undef )
			{ stop = true; break; }
#endif
		}

		if ( stop == true )
		{ break; }
	}

	// Copy the final per-instance assignments back to the host.
	// NOTE(review): the 'width' argument of cudaMemcpy2D is the full pitch, so
	// row padding is copied too — this is only correct while the host buffer
	// uses the same pitch as the device buffer; confirm at the allocation site.
	HANDLE_CUDA_ERROR( cudaMemcpy2D( (void*)varAssignment, varAssignmentPitch, (const void*)dev_varAssignment, varAssignmentPitch,
		varAssignmentPitch, __blocksPerGrid, cudaMemcpyDeviceToHost ) );

	// Per-instance statistics counters.
	HANDLE_CUDA_ERROR( cudaMemcpy( (void*)conflictCount, (const void*)dev_conflictCount, sizeof(unsigned int) * __blocksPerGrid, cudaMemcpyDeviceToHost ) );
	HANDLE_CUDA_ERROR( cudaMemcpy( (void*)decisionCount, (const void*)dev_decisionCount, sizeof(unsigned int) * __blocksPerGrid, cudaMemcpyDeviceToHost ) );
	HANDLE_CUDA_ERROR( cudaMemcpy( (void*)propagationCount, (const void*)dev_propagationCount, sizeof(unsigned int) * __blocksPerGrid, cudaMemcpyDeviceToHost ) );
	HANDLE_CUDA_ERROR( cudaMemcpy( (void*)conflictLitCount, (const void*)dev_conflictLitCount, sizeof(unsigned int) * __blocksPerGrid, cudaMemcpyDeviceToHost ) );

	cudaDeviceSynchronize();

	// Aggregate the per-instance counters into grid-wide totals.
	unsigned int conflictTotal = 0;
	unsigned int decisionTotal = 0;
	unsigned int propagationTotal = 0;
	unsigned int conflictLitTotal = 0;

	for ( unsigned int i = 0; i < __blocksPerGrid; ++i )
	{
		conflictTotal += conflictCount[i];
		decisionTotal += decisionCount[i];
		propagationTotal += propagationCount[i];
		conflictLitTotal += conflictLitCount[i];
	}

	// NOTE(review): 'conflictTotal * 1000' (and the parallel expressions below)
	// is evaluated in unsigned int BEFORE the float cast and wraps for totals
	// above ~4.29M; cast to float (or use a 64-bit type) first if that range
	// is reachable.
	std::cout << "Restarts            = " << restart << std::endl;
	std::cout << "Conflicts           = " << conflictTotal << " [average: " << (conflictTotal / __blocksPerGrid ) << "] (" << (unsigned int)((float)(conflictTotal * 1000) / solverTime) << " /sec)" << std::endl;
	std::cout << "Decisions           = " << decisionTotal << " [average: " << (decisionTotal / __blocksPerGrid ) << "] (" << (unsigned int)((float)(decisionTotal * 1000) / solverTime) << " /sec)" << std::endl;
	std::cout << "Propagations        = " << propagationTotal << " [average: " << (propagationTotal / __blocksPerGrid ) << "] (" << (unsigned int)((float)(propagationTotal * 1000) / solverTime) << " /sec)" << std::endl;
	std::cout << "Conflict literals   = " << conflictLitTotal << " [average: " << (conflictLitTotal / __blocksPerGrid ) << "]" << std::endl;
	std::cout << "Solver time         = " << (solverTime / 1000) << " sec." << std::endl;
	std::cout << "Processing time     = " << (procTime / 1000) << " sec." << std::endl;
	std::cout << "Initialization time = " << (initTime / 1000) << " sec." << std::endl;

	// Host-side verification: re-evaluate each finished instance's assignment
	// against the original clause database and compare with the device verdict.
	std::vector <unsigned int> satBlockId;
	std::vector <unsigned int> unsatBlockId;
	std::vector <unsigned int> undefBlockId;

	unsigned int blockFailed = 0;

	for ( unsigned int i = 0; i < __blocksPerGrid; ++i )
	{
		// Skip unfinished instances — there is nothing to verify for them.
		// (Was 'break', which silently skipped verification of every instance
		// after the first undecided one; instances are independent, so later
		// ones may well be decided.)
		if ( satResult[i] == __Lbool_Undef )
		{ continue; }

		Lbool* tVarAssignment = (Lbool*)((char*)varAssignment + (varAssignmentPitch * i));
		Lbool blockSat = __Lbool_True;

		// (A former loop scanning tVarAssignment for __Lbool_Undef produced no
		// observable result and was removed as dead code.)

		// Evaluate every clause under this instance's assignment.  A variable
		// that is not __Lbool_True is treated the same as 'false' below.
		for ( int j = 0; j < claNumber; ++j )
		{
			unsigned int startIndex = __data->_claArrIndex[j];
			unsigned int stopIndex = __data->_claArrIndex[j+1];
			bool sat = false;

			for ( unsigned int k = startIndex; k < stopIndex; ++k )
			{
				bool polarity = (__data->_claArr[k]).sign() ? false : true;
				int varIndex = (__data->_claArr[k]).var();
				bool varAss = (tVarAssignment[varIndex] == __Lbool_True) ? true : false;

				// A literal satisfies its clause when the assignment matches
				// the literal's polarity.
				if ( (varAss != polarity) == false )
				{
					sat = true;
					break;
				}
			}

			// One unsatisfied clause falsifies the whole instance.
			if ( sat == false )
			{
				blockSat = __Lbool_False;
				break;
			}
		}

		// Report and repair any disagreement between device and host verdicts.
		if ( satResult[i] != blockSat )
		{
			printf ( "Instance #%d is inconsistent. satResult[%d] = %d, blockSat = %d.\n", i, i, satResult[i].convert_to_int(), blockSat.convert_to_int() );
			satResult[i] = blockSat;
			++blockFailed;
		}
	}

	// Record every verdict and bucket the instance ids by outcome.
	for ( unsigned int i = 0; i < __blocksPerGrid; ++i )
	{
		__data->_result.push_back( satResult[i] );

		if ( satResult[i] == __Lbool_True )
		{ satBlockId.push_back(i); }
		else if ( satResult[i] == __Lbool_False )
		{ unsatBlockId.push_back(i); }
		else
		{ undefBlockId.push_back(i); }
	}

	std::cout << std::endl;
	std::cout << "Total   instances = " << __blocksPerGrid << std::endl;
	std::cout << "SAT     instances = " << satBlockId.size() << std::endl;
	std::cout << "UNSAT   instances = " << unsatBlockId.size() << std::endl;
	std::cout << "Unknown instances = " << undefBlockId.size() << std::endl;
	std::cout << std::endl;

	if ( satBlockId.empty() == false )
	{
		std::cout << "SAT instances List:" << std::endl;

		unsigned int i = 0;
		std::vector <unsigned int>::iterator it;

		// For each satisfiable instance, save its full assignment for output
		// and print the instance id, 10 ids per line.
		for ( it = satBlockId.begin(); it != satBlockId.end(); ++it )
		{
			Lbool* tVarAss = (Lbool*)((char*)varAssignment + (varAssignmentPitch * (*it)));

			std::vector < Lbool > varAss ( tVarAss, tVarAss + varNumber );

			__data->_output.push_back( varAss );

			printf( "%5d ", (*it) );
			++i;
			if ( (unsigned int)(i % 10) == 0 )
			{ printf("\n") ; }
		}
	}
	std::cout << std::endl;

	if ( unsatBlockId.empty() == false )
	{
		std::cout << "UNSAT instances List:" << std::endl;

		unsigned int i = 0;
		std::vector <unsigned int>::iterator it;

		// Print the unsatisfiable instance ids, 10 per line.
		for ( it = unsatBlockId.begin(); it != unsatBlockId.end(); ++it )
		{
			printf( "%5d ", (*it) );
			++i;
			if ( (unsigned int)(i % 10) == 0 )
			{ printf("\n"); }
		}
	}
	std::cout << std::endl;

	printf( "Total inconsistant instance(s): %d.\n", blockFailed );

	// Release all device-side solver state.
	HANDLE_CUDA_ERROR( cudaFree( dev_unitClaArr ) );
	HANDLE_CUDA_ERROR( cudaFree( dev_claArr ) );
	HANDLE_CUDA_ERROR( cudaFree( dev_claArrIndex ) );
	HANDLE_CUDA_ERROR( cudaFree( dev_varAssignment ) );
	HANDLE_CUDA_ERROR( cudaFree( dev_varOrder ) );
	HANDLE_CUDA_ERROR( cudaFree( dev_watchPointer ) );
	HANDLE_CUDA_ERROR( cudaFree( dev_trail ) );
	HANDLE_CUDA_ERROR( cudaFree( dev_decisionLevel ) );
	HANDLE_CUDA_ERROR( cudaFree( dev_varSeenMarker ) );
	HANDLE_CUDA_ERROR( cudaFree( dev_antecedentPointer ) );

	// Statistics counters and result buffer.
	HANDLE_CUDA_ERROR( cudaFree( dev_conflictCount ) );
	HANDLE_CUDA_ERROR( cudaFree( dev_decisionCount ) );
	HANDLE_CUDA_ERROR( cudaFree( dev_propagationCount ) );
	HANDLE_CUDA_ERROR( cudaFree( dev_conflictLitCount ) );
	HANDLE_CUDA_ERROR( cudaFree( dev_satResult ) );

#ifdef RANDOM_POLARITY
	HANDLE_CUDA_ERROR( cudaFree( dev_randState ) );
#endif
	HANDLE_CUDA_ERROR( cudaFree( dev_unitClauseLit ) );
	HANDLE_CUDA_ERROR( cudaFree( dev_unitVar ) );
	HANDLE_CUDA_ERROR( cudaFree( dev_clausePtrIndex ) );
	HANDLE_CUDA_ERROR( cudaFree( dev_clausePtrIndexSize ) );
	HANDLE_CUDA_ERROR( cudaFree( dev_clauseActivityKey ) );

	HANDLE_CUDA_ERROR( cudaFree( dev_localUnitClausePtr ) );
	HANDLE_CUDA_ERROR( cudaFree( dev_decisionLevelVarOrderPtr ) );
	HANDLE_CUDA_ERROR( cudaFree( dev_decisionLevelTrailIdPtr ) );
	HANDLE_CUDA_ERROR( cudaFree( dev_propagationQueuePtr ) );
	HANDLE_CUDA_ERROR( cudaFree( dev_tempQueuePtr ) );
	HANDLE_CUDA_ERROR( cudaFree( dev_antecedentQueuePtr ) );
	HANDLE_CUDA_ERROR( cudaFree( dev_tempAntecedentPtr ) );
	HANDLE_CUDA_ERROR( cudaFree( dev_tempWatchListPtr ) );
	HANDLE_CUDA_ERROR( cudaFree( dev_learntClauseLitPtr ) );

	HANDLE_CUDA_ERROR( cudaFree( dev_watchList ) );

	// These host buffers were pinned (cudaFreeHost counterpart of
	// cudaMallocHost/cudaHostAlloc).
	// NOTE(review): the host statistic arrays (conflictCount, decisionCount,
	// propagationCount, conflictLitCount) are not released here — confirm they
	// are freed/owned elsewhere.
	HANDLE_CUDA_ERROR( cudaFreeHost( varAssignment ) );
	HANDLE_CUDA_ERROR( cudaFreeHost( varOrder ) );
	HANDLE_CUDA_ERROR( cudaFreeHost( satResult ) );
}
