/*
 * GPUVec.h
 *
 *  Created on: Jul 25, 2013
 *      Author: jaime
 */

#ifndef GPUVEC_H_
#define GPUVEC_H_

#include "../SATSolver/SolverTypes.cuh"

#include <assert.h>
#include <stdio.h>   // printf (host path of GPUVec::remove)
#include <stdlib.h>  // malloc/free (device-heap allocation, host-side get_ptr)

/**
 * Fixed-capacity vector usable from both host and device code.
 *
 * When compiled for the host, 'elements' is a pointer into device global
 * memory (cudaMalloc) and every element access goes through cudaMemcpy.
 * When compiled for the device, 'elements' lives on the device heap (malloc)
 * and is accessed directly.
 *
 * NOTE(review): the destructor is virtual; an object constructed on the host
 * carries a host vtable, so such objects must not be destroyed (or have
 * virtuals invoked) from device code — TODO confirm call sites respect this.
 */
template<class T>
class GPUVec {

private:
	unsigned int capacity; // number of slots allocated in 'elements'
	unsigned int size;     // number of slots currently in use
	T* elements;           // device-global pointer (host builds) / device-heap pointer (device builds)
	bool owner;            // true iff this object allocated 'elements' and must free it

public:

	/**
	 * Allocates storage for up to 'capacity' elements (device global memory
	 * from the host, device heap from the device).
	 * On allocation failure — or a non-positive capacity — the vector is left
	 * empty and unallocated (capacity == 0, elements == NULL).
	 */
	__host__ __device__ GPUVec(int capacity){
		this->capacity = (capacity > 0) ? (unsigned int) capacity : 0;
		this->size = 0;
		this->elements = NULL; // was left uninitialized for capacity <= 0
		this->owner = true;

		if(this->capacity > 0)
		{
#ifndef __CUDA_ARCH__
			if(cudaMalloc(&(this->elements), this->capacity*sizeof(T)) != cudaSuccess)
			{
				this->elements = NULL;
				this->capacity = 0;
			}
#else
			this->elements = (T*) malloc(this->capacity*sizeof(T));
			if(this->elements == NULL)
				this->capacity = 0;
#endif
		}
	}

	/**
	 * Wraps an already allocated buffer to store the elements.
	 * The buffer is NOT owned by this object: the destructor will not free it
	 * (previously it did, freeing caller-owned memory).
	 */
	__host__ __device__ GPUVec(T* pointer, int capacity, int size)
	{
		this->elements = pointer;
		this->capacity = capacity;
		this->size = size;
		this->owner = false;
	}

	/** Releases the backing store, but only if this object allocated it. */
	__host__ __device__ virtual ~GPUVec(){
		if(owner && capacity > 0 && elements != NULL)
		{
#ifndef __CUDA_ARCH__
			cudaFree(elements);
#else
			free(elements);
#endif
		}
	}

	/** Device-only accessor kept for backward compatibility with capacity_of(). */
	__device__ unsigned int get_capacity()
	{
		return capacity;
	}

	/** Number of elements currently stored. */
	__host__ __device__ unsigned int size_of(){
		return size;
	}

	/** Total number of slots allocated. */
	__host__ __device__ unsigned int capacity_of(){
		return capacity;
	}

	/**
	 * Appends a copy of 'c'. Returns false when the vector is full.
	 * Not thread-safe: the size increment is unsynchronized (the original
	 * atomicInc variant is left commented out below, as before).
	 */
	__host__ __device__ bool add(const T &c){
		if(size >= capacity)
			return false;

#ifndef __CUDA_ARCH__
		// On the host we copy to the device (the elements live only there).
		cudaMemcpy((elements+size), &(c), sizeof(T), cudaMemcpyHostToDevice);
		size++;
#else
		// On the device, we simply store the value.
		elements[size] = c;
		size++;
		//atomicInc(&(size), capacity);
#endif

		return true;
	}

	/**
	 * Removes the element at 'pos', shifting the tail down by one.
	 * Returns false for an out-of-range position.
	 * Host path is intentionally unimplemented (would need a device-side
	 * shift or a round-trip copy) and asserts.
	 */
	__host__ __device__ bool remove(int pos){
		if(pos < 0 || (unsigned int) pos >= size)
			return false;

#ifndef __CUDA_ARCH__
		printf("This is not implemented yet!\n");
		assert(false);
		size--;
#else
		// Shift the tail left over the removed slot.
		for(unsigned int i = pos; i + 1 < size; i++)
		{
			elements[i] = elements[i+1];
		}
		//atomicDec(&size, capacity);
		size--;
#endif
		return true;
	}

	/**
	 * Removes the first element equal to 'element' (requires T::operator==).
	 * Returns true if one was found and removed.
	 */
	__device__ bool remove_obj(const T &element)
	{
		for(unsigned int i = 0; i < size; i++)
		{
			if(elements[i] == element)
			{
				remove(i);
				return true;
			}
		}

		return false;
	}

	/**
	 * Returns a copy of the element at 'pos'.
	 * The host path copies through a stack temporary — the previous
	 * implementation malloc'd a T on every call and leaked it.
	 */
	__host__ __device__ T get(int pos){
#ifdef USE_ASSERTIONS
		assert(pos >= 0 && (unsigned int) pos < size);
#endif

#ifndef __CUDA_ARCH__
		T element;
		cudaMemcpy(&element, (elements+pos), sizeof(T), cudaMemcpyDeviceToHost);
		return element;
#else
		return elements[pos];
#endif
	}

	/**
	 * Returns a pointer to the element at 'pos'.
	 * Device: a live pointer into the backing store.
	 * Host: a heap-allocated *copy* of the element — the caller owns it and
	 * must free() it; writes through it do NOT affect the vector.
	 */
	__host__ __device__ T* get_ptr(int pos){

#ifdef USE_ASSERTIONS
		assert(pos >= 0 && (unsigned int) pos < size);
#endif

#ifndef __CUDA_ARCH__
		T* element = (T*) malloc(sizeof(T));
		cudaMemcpy(element, (elements+pos), sizeof(T), cudaMemcpyDeviceToHost);
		return element;
#else
		return &(elements[pos]);
#endif
	}

	/**
	 * Resets the value of a position that already contained an element.
	 * element: the new value. pos: the position. Returns the old value.
	 * The host path goes through cudaMemcpy — the previous implementation
	 * dereferenced the device pointer directly on the host, which is invalid.
	 */
	__host__ __device__ T reset(T element, int pos)
	{
#ifdef USE_ASSERTIONS
		assert(pos >= 0 && (unsigned int) pos < size);
#endif
#ifndef __CUDA_ARCH__
		T old;
		cudaMemcpy(&old, (elements+pos), sizeof(T), cudaMemcpyDeviceToHost);
		cudaMemcpy((elements+pos), &element, sizeof(T), cudaMemcpyHostToDevice);
		return old;
#else
		T old = elements[pos];
		elements[pos] = element;
		return old;
#endif
	}

	/** Logically empties the vector; storage is kept. */
	__host__ __device__ void clear()
	{
		size = 0;
	}

	__host__ __device__ bool empty()
	{
		return size == 0;
	}

	__host__ __device__ bool full()
	{
		return size == capacity;
	}

	/** Linear search for 'element' (requires T::operator==). */
	__device__ bool contains(const T &element)
	{
		for(unsigned int i = 0; i < size; i++)
		{
			if(elements[i] == element)
				return true;
		}

		return false;
	}

	/**
	 * Checks whether "element" is in this list between first_position
	 * (inclusive) and after_position (exclusive). On success 'position' holds
	 * the index of the found element; otherwise it is set to -1.
	 * Returns true iff the element was found in the range.
	 */
	__device__ bool contains_between(const T &element,
			int first_position, int after_position, int & position)
	{

#ifdef USE_ASSERTIONS
		assert(first_position >= 0 && (unsigned int) after_position <= size);
#endif

		for(int i = first_position; i < after_position; i++)
		{
			if(elements[i] == element)
			{
				position = i;
				return true;
			}
		}
		position = -1;
		return false;
	}

	/** Swaps the elements at positions 'first' and 'second'. */
	__device__ void switch_positions(int first, int second)
	{
#ifdef USE_ASSERTIONS
		assert(first >= 0 && (unsigned int) first < size
				&& second >= 0 && (unsigned int) second < size);
#endif
		T tmp = elements[first];
		elements[first] = elements[second];
		elements[second] = tmp;
	}

	/**
	 * Copies all stored elements to a freshly malloc'd host buffer.
	 * The caller owns (and must free()) the returned buffer.
	 */
	__host__ T* copyToHost()
	{
		T* destination = (T*) malloc(sizeof(T)*size);
		check(cudaMemcpy(destination, elements, sizeof(T)*size, cudaMemcpyDeviceToHost), "Copying to host (GPUVec)");

		return destination;
	}

	/** Returns a copy of the last element (undefined on an empty vector). */
	__host__ __device__ T last()
	{
		return get(size-1);
	}

	/**
	 * Removes the n last elements of this list (or all of them if n > size).
	 */
	__host__ __device__ void remove_n_last(int n)
	{
		if(size >= (unsigned int) n)
		{
#ifndef __CUDA_ARCH__
			size -= n;
#else
			//atomicSub(&size, n);
			size -= n;
#endif
		}
		else
		{
			size = 0;
		}
	}

	/**
	 * Appends all elements of this vector to 'vec'.
	 * The destination is taken by reference: the previous by-value signature
	 * appended into a temporary copy (losing the size updates) and the copy's
	 * destructor then freed the destination's storage — a double free.
	 */
	__host__ __device__ void copy_to(GPUVec<T> &vec)
	{
		for(unsigned int i = 0; i < size; i++)
		{
			vec.add(elements[i]);
		}
	}

};


#endif /* GPUVEC_H_ */
