#ifndef __THREADING_H_
#define __THREADING_H_

// OPTIONS AVAILABLE:
// * NO_ATOMIC
// * SEMAPHORE_QUEUE (only if NO_ATOMIC is not defined)

#include <pthread.h>
#include <semaphore.h>
#include <exception>
#include <memory.h>
#include <errno.h>
#include <memory>

#if !defined(NO_ATOMIC)

// All operations on the variable are atomic.
// Write operations offer full barrier.
// Consequently, reads cannot be reordered around writes.
template<class T>
class Atomic
{
	// Be careful of the ABA problem:
	// Processor #1:
	//  loop:
	// 1:  load the value in location f, if it is 0 goto loop
	// 2: print the value in location x
	//
	// Processor #2:
	// 3: store the value 42 into location x
	// 4: store the value 1 into location f

	// For an x86-only atomic 64-bit store trick using the FPU
	// (fild/fistp), see http://www.niallryan.com/node/137
	// Reference implementation of gcc's atomics:
	// http://dandelion-patch.mit.edu/afs/sipb/project/gcc-4.0/include/c++/4.4.0/bits/atomic_2.h

private:
	// Remember kids: 'usually' volatile prevents reordering of instructions
	// at the COMPILER level (not CPU level), and forces loads and stores
	// at the compiler level, rather than caching in a register.
	// CPU-level atomicity and ordering come from the __sync builtins below;
	// the alignment keeps the value from straddling a cache line.
	volatile T mValue __attribute__ ((aligned (sizeof(T))));

	// Atomic-to-Atomic assignment cannot be performed atomically as a
	// whole, so it is private and intentionally a no-op.
	inline Atomic &operator =(const Atomic &original) {return *this;}

public:
	// Construction is not expected to race with other threads.
	inline Atomic(T value = T())
	{
		mValue = value;
	}

	inline Atomic(const Atomic &original)
	{
		// Read the source's volatile storage directly: the T conversion
		// operator is non-const and cannot be invoked through a const
		// reference (the previous 'mValue = original;' failed to compile
		// whenever this copy constructor was instantiated).
		mValue = original.mValue;
	}

	// Atomic read; uses a full-barrier RMW unless a plain aligned load
	// is declared atomic for this size via LOAD32/64_IS_ATOMIC.
	inline operator T()
	{
		// "According to Chapter 7 of Part 3A - System Programming Guide of Intel's processor manuals,
		// quadword accesses will be carried out atomically if aligned on a 64-bit boundary,
		// on a Pentium or newer, and unaligned (if still within a cache line) on a P6 or newer."
		// -- Mike Dimmick, on StackOverflow.com

#if defined(LOAD32_IS_ATOMIC)
		if(sizeof(T) <= 4) return mValue;
#elif defined(LOAD64_IS_ATOMIC)
		if(sizeof(T) <= 8) return mValue;
#endif
		// XOR with 0 is an atomic read-modify-write that leaves the value
		// unchanged. Previously this statement was compiled out whenever
		// LOAD32/64_IS_ATOMIC was defined, so a T larger than the atomic
		// load size fell off the end of the function (undefined behavior);
		// it is now the unconditional fallback.
		return __sync_xor_and_fetch(&mValue, 0);
	}

	inline bool operator ==(const T &operand)
	{
		return operator T() == operand;
	}

	inline bool operator !=(const T &operand)
	{
		return operator T() != operand;
	}

	// The opAndFetch family returns the NEW value after the operation.

	inline T addAndFetch(T operand)
	{
		return __sync_add_and_fetch(&mValue, operand);
	}

	inline T subAndFetch(T operand)
	{
		return __sync_sub_and_fetch(&mValue, operand);
	}

	inline T orAndFetch(T operand)
	{
		return __sync_or_and_fetch(&mValue, operand);
	}

	inline T andAndFetch(T operand)
	{
		return __sync_and_and_fetch(&mValue, operand);
	}

	inline T nandAndFetch(T operand)
	{
		return __sync_nand_and_fetch(&mValue, operand);
	}

	inline T xorAndFetch(T operand)
	{
		return __sync_xor_and_fetch(&mValue, operand);
	}

	// The fetchAndOp family returns the PREVIOUS value.

	inline T fetchAndAdd(T operand)
	{
		return __sync_fetch_and_add(&mValue, operand);
	}

	inline T fetchAndSub(T operand)
	{
		return __sync_fetch_and_sub(&mValue, operand);
	}

	inline T fetchAndOr(T operand)
	{
		return __sync_fetch_and_or(&mValue, operand);
	}

	inline T fetchAndAnd(T operand)
	{
		return __sync_fetch_and_and(&mValue, operand);
	}

	inline T fetchAndNand(T operand)
	{
		return __sync_fetch_and_nand(&mValue, operand);
	}

	inline T fetchAndXor(T operand)
	{
		return __sync_fetch_and_xor(&mValue, operand);
	}

	// Returns the PREVIOUS value; the swap happened iff it equals compareValue.
	inline T compareAndSwap(T compareValue, T swapValue)
	{
		return __sync_val_compare_and_swap(&mValue, compareValue, swapValue);
	}

	// Returns true iff the value was compareValue and has been replaced.
	inline bool compareAndSet(T compareValue, T setValue)
	{
		return __sync_bool_compare_and_swap(&mValue, compareValue, setValue);
	}
};

// Wrapper around a pthread; subclasses implement run().
class Thread
{
private:
	// State shared with the static C entry point. mRunning is a small
	// bit-state: bit 0 = start() was called, bit 1 = run() has been entered,
	// so 3 means "started and running", 0 means "never started or joined".
	struct Internal // to be passed to static C function
	{
		Thread *mInstance;
		pthread_t mThread;
		Atomic<int> mRunning;

		Internal(Thread *t) : mInstance(t), mRunning(0) {}
	} internal;

	// pthread entry point: marks the thread as running, then dispatches
	// to the virtual run(); its return value is the thread's exit status.
	static void *_run(void *argument)
	{
		Internal *i = static_cast<Internal*>(argument);
		i->mRunning.fetchAndOr(2);
		return i->mInstance->run();
	}

	// Non-copyable: a running pthread cannot be duplicated meaningfully.
	Thread(const Thread &original) : internal(this) {}
	Thread &operator =(const Thread &original) {return *this;}

public:
	inline Thread()
		: internal(this) { }

	virtual ~Thread(); // defined out of line

	// Starts the thread once; returns EAGAIN if already started, otherwise
	// the pthread_create() result (0 on success).
	// NOTE(review): 'argument' is currently unused -- run() takes no
	// parameter; kept for interface compatibility.
	int start(void *argument = NULL)
	{
		if(internal.mRunning.fetchAndOr(1))
			return EAGAIN;
		__sync_synchronize();
		return pthread_create(&internal.mThread, NULL, _run, static_cast<void*>(&internal));
	}

	// Requests cancellation; returns 0 when there is nothing to cancel.
	inline int cancel()
	{
		// Note: a thread will be cancelled in a cancellation point only
		if(internal.mRunning)
			return pthread_cancel(internal.mThread);
		// Previously control fell off the end here (undefined behavior
		// for a value-returning function); not-started is not an error.
		return 0;
	}

	// Waits for the thread to finish; returns 0 on success or a pthread
	// error code.
	int join()
	{
		int ret;
		if(internal.mRunning == 0) return 0;
		do
		{
			ret = pthread_join(internal.mThread, NULL); // Note: no concurrent join on same target
			if(ret) return ret;
		} while(!internal.mRunning.compareAndSet(3, 0));
		// The previous condition 'compareAndSwap(3, 0)' returned the OLD
		// value (3 on success), so a SUCCESSFUL join looped and re-joined
		// an already-joined thread, yielding an error on the success path.
		// We now exit once the started+running state is reset to 0.
		return 0;
	}

	// As join(), additionally retrieving the value returned by run().
	int join(void *&status)
	{
		int ret;
		if(internal.mRunning == 0) return 0;
		do
		{
			ret = pthread_join(internal.mThread, &status); // Note: no concurrent join on same target
			if(ret) return ret;
		} while(!internal.mRunning.compareAndSet(3, 0)); // see note in join() above
		return 0;
	}

	// Yields the processor to another runnable thread.
	// NOTE(review): pthread_yield() is non-portable; sched_yield() is the
	// POSIX equivalent -- confirm target platforms.
	inline static void yield()
	{
		pthread_yield();
	}

	// Terminates the calling thread; pthread_exit() does not return,
	// so no value ever flows out of this function.
	inline static int exit(void *status)
	{
		pthread_exit(status);
	}

	// Thread body supplied by subclasses; the returned pointer becomes
	// the exit status retrievable via join(void*&).
	virtual void *run() = 0;
};

#endif // if !defined(NO_ATOMIC)

// Locks granted are owned by calling thread.
// Only the owner can release lock.
// Locks are re-entrant.
class Mutex
{
private:
	pthread_mutex_t mMutex;

	// Non-copyable: a pthread mutex must not be duplicated.
	inline Mutex(const Mutex &original) {}
	inline Mutex &operator =(const Mutex &original) {return *this;}

	/*static void cleanup_mutex(void *state)
	{
		pthread_mutex_t *mutex = static_cast<pthread_mutex_t*>(state);
		if(mutex) pthread_mutex_unlock(mutex);
	}*/

public:
	inline Mutex()
	{
		// The class contract above promises re-entrant locking, but the
		// default pthread mutex type is NOT recursive (relocking it from
		// the owner thread deadlocks). Create the mutex explicitly
		// recursive so nested lock()/unlock() pairs work as documented.
		pthread_mutexattr_t attr;
		pthread_mutexattr_init(&attr);
		pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
		pthread_mutex_init(&mMutex, &attr);
		pthread_mutexattr_destroy(&attr);
	}

	inline ~Mutex()
	{
		pthread_mutex_destroy(&mMutex);
	}

	// Blocks until the lock is acquired; returns false on error.
	inline bool lock()
	{
		bool ret = pthread_mutex_lock(&mMutex) == 0;
		//if(ret) pthread_cleanup_push(cleanup_mutex, &mMutex);
		return ret;
	}

	// Timed variant, defined out of line.
	bool lock(int timeout_ms);

	// Acquires the lock only if it is immediately available.
	inline bool tryLock()
	{
		bool ret = pthread_mutex_trylock(&mMutex) == 0;
		//if(ret) pthread_cleanup_push(cleanup_mutex, &mMutex);
		return ret;
	}

	// Releases one level of locking held by the calling thread.
	inline bool unlock()
	{
		bool ret = pthread_mutex_unlock(&mMutex) == 0;
		//if(ret) pthread_cleanup_pop(1);
		return ret;
	}
};

// Exception thrown by Lock when a mutex cannot be acquired.
// Constructor and what() are defined out of line.
class LockException : public std::exception
{
private:
	// Description returned by what(); presumably points at a string
	// literal with static storage (ownership not visible here -- confirm
	// in the out-of-line constructor).
	const char *mMessage;

public:
	LockException(const char *message);
	virtual const char *what() const throw();
};

// Acquires a lock on a mutex. The lock is automatically release
// when the object is destructed.
class Lock
{
private:
	Mutex *mMutex;

	inline Lock(const Lock &original) {}
	inline Lock & operator =(const Lock &original) {return *this;}

public:
	inline Lock(Mutex &mutex)
	{
		if(!mutex.lock())
			throw LockException("Could not acquire lock.");
		mMutex = &mutex;
	}

	inline Lock(Mutex &mutex, int timeout_ms)
	{
		if(!mutex.lock(timeout_ms))
			throw LockException("Could not acquire lock.");
		mMutex = &mutex;
	}

	inline ~Lock()
	{
		if(mMutex) { mMutex->unlock(); mMutex = 0; }
	}

	inline void dispose()
	{
		if(mMutex) { mMutex->unlock(); mMutex = 0; }
	}
};

// If multiple threads are waiting concurrently, they are all unblocked at the same time.
// Abstract interface implemented by the event classes below.
class ResetEvent
{
public:
	// Virtual destructor: implementations may be deleted through this
	// interface (previously absent, which made such deletion undefined
	// behavior).
	virtual ~ResetEvent() {}

	// Blocks until the event is signaled; returns false on error.
	virtual bool wait() = 0;

	// Blocks at most timeout_ms milliseconds; returns false on timeout
	// or error.
	virtual bool wait(int timeout_ms) = 0;
};

#if !defined(NO_ATOMIC)

class AutoResetEvent
{
private:
	pthread_mutex_t mMutex;
	pthread_cond_t mCond;
	Atomic<int> mState, mWaiters;

public:
	AutoResetEvent(bool initialState = false);
	~AutoResetEvent();

	bool set();
	virtual bool wait();
	virtual bool wait(int timeout_ms);
};

class ManualResetEvent
{
private:
	pthread_mutex_t mMutex;
	pthread_cond_t mCond;
	Atomic<int> mState, mWaiters;

public:
	ManualResetEvent(bool initialState = false);
	~ManualResetEvent();

	bool set()
	{
		mState.fetchAndAnd(1);
		if(mWaiters)
			return pthread_cond_broadcast(&mCond) == 0;
		return true;
	}

	inline void reset()
	{
		mState.fetchAndAnd(0);
	}

	virtual bool wait();
	virtual bool wait(int timeout_ms);
};

// Counting semaphore wrapping POSIX sem_t, extended with a close()
// mechanism; take() and close() are defined out of line.
class Semaphore // Warning: semaphore is rather slow
{
private:
	sem_t mSem;
	// mIsClosed is checked by every inline method below; mWaiting is not
	// touched by the visible code -- presumably maintained by the
	// out-of-line take()/close(), verify there.
	Atomic<int> mIsClosed, mWaiting;

public:
	// initialState = true creates the semaphore with one token available.
	inline Semaphore(bool initialState = false)
		: mIsClosed(0), mWaiting(0)
	{
		sem_init(&mSem, 0, initialState ? 1 : 0);
	}

	inline ~Semaphore()
	{
		sem_destroy(&mSem);
	}

	inline bool isClosed()
	{
		return mIsClosed;
	}

	// Current token count, or -1 if closed or on error.
	inline operator int()
	{
		if(mIsClosed) return -1;
		int ret;
		if(sem_getvalue(&mSem, &ret) != 0) return -1;
		return ret;
	}

	// Releases one token; refused once the semaphore is closed.
	inline bool give()
	{
		return !mIsClosed && sem_post(&mSem) == 0;
	}

	bool take();
	bool take(int timeout_ms);

	// Non-blocking take. NOTE(review): sem_trywait consumes a token even
	// when mIsClosed then forces a false return -- confirm intended.
	inline bool tryTake()
	{
		return sem_trywait(&mSem) == 0 && !mIsClosed;
	}

	bool close();
};

#if !defined(SEMAPHORE_QUEUE)

// Lock-free bounded multi-producer/multi-consumer queue.
// Each slot carries an occupancy flag in mSet; producers claim slots by
// advancing mNextWrite with CAS, consumers claim items by atomically
// clearing the slot's flag. mEvent wakes consumers blocked on empty.
template<class T>
class FixedSizeQueue
{
private:
	int mSize; // number of slots
	AutoResetEvent mEvent; // signaled on enqueue and close
	Atomic<char> *mSet; // per-slot flag: 1 = slot holds an item, 0 = empty
	T volatile *mItems; // slot storage
	Atomic<int> mNextRead, mNextWrite, mIsClosed;

	// Non-copyable.
	FixedSizeQueue(const FixedSizeQueue &original) {}
	FixedSizeQueue &operator =(const FixedSizeQueue &original) {return *this;}

public:
	// NOTE(review): operator!() below suggests allocation failure was
	// expected to yield NULL, but plain 'new' throws std::bad_alloc.
	FixedSizeQueue(int size)
		: mSize(size), mSet(NULL), mItems(NULL), mNextRead(0), mNextWrite(0), mIsClosed(0)
	{
		mSet = new Atomic<char>[mSize]; // Atomic<char> default-constructs to 0
		mItems = new T volatile[mSize];
	}
	
	~FixedSizeQueue()
	{
		if(mSet) delete[] mSet;
		if(mItems) delete[] mItems;
	}
	
	// True if the internal buffers are missing (see NOTE above).
	inline bool operator !()
	{
		return !(mSet && mItems);
	}
	
	inline bool isClosed()
	{
		return mIsClosed;
	}

	// Closes the queue and wakes any consumer blocked in dequeue().
	inline bool close()
	{
		mIsClosed.fetchAndOr(1);
		return mEvent.set();
	}

	// Empty when both cursors meet and the slot at the read cursor is
	// unset. Only a snapshot under concurrent access.
	inline bool isEmpty()
	{
		int rdr = mNextRead;
		return rdr == (int)mNextWrite && (char)mSet[rdr] == 0;
	}

	// Adds an item; returns false if the queue is closed or full.
	bool enqueue(const T &item)
	{
		if(mIsClosed) return false;

		int nxt, nxt2;
		do {
	retry:
			nxt = mNextWrite;
			if(mSet[nxt]) // slot at the write cursor still occupied
			{
				if(nxt == mNextWrite) // queue is full
					return false;
				else
					goto retry; // cursor moved meanwhile: re-read it
			}
			nxt2 = nxt + 1;
			if(nxt2 == mSize) nxt2 = 0; // wrap around
		} while(!mNextWrite.compareAndSet(nxt, nxt2)); // claim slot nxt
		
		mItems[nxt] = item;
		mSet[nxt].fetchAndOr(1); // publish; the RMW's barrier orders the item store first
		
		return mEvent.set(); // wake a blocked consumer
	}

	// Removes an item, blocking while the queue is empty and still open;
	// returns false once closed-and-empty or if the wait fails.
	bool dequeue(T &item)
	{
		int nxt, nxt2;
		
		for(;;) // search an item
		{
			int wtr = mNextWrite;
			nxt = mNextRead;
			if(mSet[nxt].compareAndSet(1, 0)) // atomically claim the slot's item
				break;
			// either queue is empty, or another consumer just took it
			if(nxt == wtr) { // empty
				if(mIsClosed || !mEvent.wait())
					return false;
			}
		}
		nxt2 = nxt + 1;
		if(nxt2 == mSize) nxt2 = 0;
		item = mItems[nxt];
		mNextRead.compareAndSet(nxt, nxt2); // advance cursor; a failed CAS means a peer already did
		
		return true;
	}

	// Non-blocking dequeue: returns false immediately when empty.
	bool tryDequeue(T &item)
	{
		int nxt, nxt2;
		
		for(;;)
		{
			int wtr = mNextWrite;
			nxt = mNextRead;
			if(mSet[nxt].compareAndSet(1, 0)) // claim the slot's item
				break;
			// either queue is empty, or another consumer just took it
			if(nxt == wtr) // empty
				return false;
		}
		
		nxt2 = nxt + 1;
		if(nxt2 == mSize) nxt2 = 0;
		item = mItems[nxt];
		mNextRead.compareAndSet(nxt, nxt2); // advance cursor (may already be advanced)
		
		return true;
	}
};

#else // if !defined(SEMAPHORE_QUEUE)

// Semaphore-based variant of the bounded MPMC queue: mSem counts the
// items available, so consumers block in mSem.take() instead of waiting
// on an event; slot flags are plain volatile chars with explicit fences.
template<class T>
class FixedSizeQueue
{
private:
	int mSize; // number of slots
	Semaphore mSem; // counts enqueued items; closed by close()
	char volatile *mSet; // per-slot flag: 1 = slot holds an item, 0 = empty
	T volatile *mItems; // slot storage
	Atomic<int> mNextRead, mNextWrite, mIsClosed;

	// Non-copyable.
	FixedSizeQueue(const FixedSizeQueue &original) {}
	FixedSizeQueue &operator =(const FixedSizeQueue &original) {return *this;}

public:
	// NOTE(review): the 'if(mSet)' guard assumes nothrow allocation, but
	// plain 'new' throws std::bad_alloc on failure.
	FixedSizeQueue(int size)
		: mSize(size), mSet(NULL), mItems(NULL), mNextRead(0), mNextWrite(0), mIsClosed(0)
	{
		mSet = new char volatile[mSize];
		mItems = new T volatile[mSize];
		if(mSet) memset((void*)mSet, 0, sizeof(char) * mSize); // all slots start empty
	}
	
	~FixedSizeQueue()
	{
		if(mSet) delete[] mSet;
		if(mItems) delete[] mItems;
	}
	
	// True if the internal buffers are missing (see NOTE above).
	inline bool operator !()
	{
		return !(mSet && mItems);
	}
	
	inline bool isClosed()
	{
		return mIsClosed;
	}

	// Closes the queue and the semaphore, releasing blocked consumers.
	inline bool close()
	{
		mIsClosed.fetchAndOr(1);
		return mSem.close();
	}

	// Adds an item; returns false if the queue is closed or full.
	bool enqueue(const T &item)
	{
		if(mIsClosed) return false;

		int nxt, nxt2;
		do {
	retry:
			nxt = mNextWrite;
			if(mSet[nxt]) // slot at the write cursor still occupied
			{
				if(nxt == mNextWrite) // queue is full
					return false;
				else
					goto retry; // cursor moved meanwhile: re-read it
			}
			nxt2 = nxt + 1;
			if(nxt2 == mSize) nxt2 = 0; // wrap around
		} while(!mNextWrite.compareAndSet(nxt, nxt2)); // claim slot nxt
		
		mItems[nxt] = item;
		__sync_synchronize(); // order the item store before publishing the flag
		mSet[nxt] = 1;
		return mSem.give(); // one more item available to takers
	}
	
	// Removes an item, blocking in the semaphore while none is available;
	// returns false once the queue is closed and drained.
	bool dequeue(T &item)
	{
		if(!mSem.take() && !mIsClosed) return false;

		int nxt, nxt2;
		do {
			// NOTE(review): this inner loop busy-spins if the producer has
			// claimed a slot but not yet published its flag.
			for(;;)
			{
				nxt = mNextRead;
				if(mSet[nxt] == 0) { // either queue is empty, or another consumer just took it
					if(mIsClosed && nxt == mNextWrite)
						return false;
				} else {
					break;
				}
			}
			nxt2 = nxt + 1;
			if(nxt2 == mSize) nxt2 = 0; // wrap around
		} while(!mNextRead.compareAndSet(nxt, nxt2)); // claim slot nxt
		
		item = mItems[nxt];
		__sync_synchronize(); // order the item read before releasing the slot
		mSet[nxt] = 0;
		return true;
	}
};

#endif // if !defined(SEMAPHORE_QUEUE)

#else // if !defined(NO_ATOMIC)

// NO_ATOMIC fallback: mutex/condvar-based event, no Atomic<> available.
// All methods are defined out of line.
class AutoResetEvent : public ResetEvent
{
private:
	pthread_mutex_t mMutex; // presumably guards mState and mWaiters -- confirm in the .cpp
	pthread_cond_t mCond; // waiters block here
	int volatile mState, mWaiters;

public:
	AutoResetEvent(bool initialState = false);
	~AutoResetEvent();

	// Signals the event; "auto-reset" presumably clears the state after
	// releasing waiters -- behavior lives in the out-of-line definition.
	bool set();
	virtual bool wait();
	virtual bool wait(int timeout_ms);
};

// NO_ATOMIC fallback: event that stays signaled until reset(), built on
// a mutex/condvar pair. All methods are defined out of line.
class ManualResetEvent : public ResetEvent
{
private:
	pthread_mutex_t mMutex; // presumably guards mState -- confirm in the .cpp
	pthread_cond_t mCond; // waiters block here
	int volatile mState;

public:
	ManualResetEvent(bool initialState = false);
	~ManualResetEvent();

	bool set();
	bool reset();
	virtual bool wait();
	virtual bool wait(int timeout_ms);
};

#endif // if !defined(NO_ATOMIC)

#endif
