//
//	(c) 2009 Andrew Kalmacky
//

#ifndef _AK_ATOMIC_H_
#define _AK_ATOMIC_H_

//
// TODO: relax memory-order rules on most operations
// TODO: support other platforms
//

#define WIN32_LEAN_AND_MEAN
#include <windows.h>

namespace std
{
	inline void relax_cpu()
	{
		__asm rep nop;
	}

	namespace this_thread
	{
		// Give up the remainder of the current time slice to the scheduler.
		// NOTE(review): Sleep(0) only yields to ready threads of equal or
		// higher priority; SwitchToThread() yields more broadly — confirm
		// which behavior is intended here.
		inline void yield() throw()
		{
			Sleep(0);
		}
	}

	// Subset of the C++0x memory_order enumeration.  The weaker orderings
	// this implementation does not distinguish are aliased to the stronger
	// enumerators they are mapped to (see the TODO at the top of the file).
	enum memory_order
	{ 
		memory_order_relaxed,
		memory_order_acquire,
		memory_order_consume = memory_order_acquire,	// consume is promoted to acquire
		memory_order_release, 
		memory_order_acq_rel,
		memory_order_seq_cst = memory_order_acq_rel,	// seq_cst is demoted to acq_rel
	};

	// Scale factor between one logical unit of atomic arithmetic and the
	// raw byte-wise Interlocked addition.  Primary template: arithmetic
	// types advance by 1.
	template<typename T> struct atomic_ptr_step
	{
		enum { value = 1 };
	};
	// Pointer specialization: one unit is the size of the pointee, so the
	// byte-wise Interlocked arithmetic matches pointer arithmetic.
	// Fixed: `sizeof T` on a type name is a non-conforming MSVC extension;
	// standard C++ requires parentheses — `sizeof(T)`.
	template<typename T> struct atomic_ptr_step<T*>
	{
		enum { value = sizeof(T) };
	};
	// void* has no pointee size; leaving this specialization without a
	// 'value' member makes arithmetic on atomic<void*> a compile error.
	template<> struct atomic_ptr_step<void*>
	{
	};

	// Types returned by atomic<T>::operator-> and operator*.
	// Primary template (non-pointer T): both are plain T.
	template<typename T> struct atomic_ptr_traits
	{
		typedef T ptr_t;
		typedef T ref_t;
	};
	// Pointer specialization: operator-> yields the pointer itself,
	// operator* a reference to the pointee.
	template<typename T> struct atomic_ptr_traits<T*>
	{
		typedef T* ptr_t;
		typedef T& ref_t;
	};
	// void* cannot be dereferenced to a reference, so both accessors keep
	// the raw pointer type.
	template<> struct atomic_ptr_traits<void*>
	{
		typedef void* ptr_t;
		typedef void* ref_t;
	};

	//
	// std::atomic<T> work-alike built on the Win32 Interlocked* API.
	// T is stored by value and reinterpreted as a LONG for the interlocked
	// calls, so only types whose size matches LONG are supported.  Plain
	// loads and non-seq_cst stores are ordinary memory accesses; only
	// seq_cst stores go through a full interlocked exchange.
	//
	template<typename T> struct atomic
	{
		T val;				// the stored value; must be LONG-sized
		typedef LONG subst_t;		// raw type accepted by the Interlocked* functions

		// Reinterpret T <-> the raw LONG the Interlocked* API operates on.
		static subst_t subst(T v) { return subst_t(v); }
		subst_t* subst_this() { return reinterpret_cast<subst_t*>(&val); }
		static T unsubst(subst_t v) { return T(v); }

		atomic()
			: val()
		{
		}
		~atomic()
		{
		}
		// Copy construction is a plain (non-interlocked) read of the source.
		atomic(const atomic<T>& s)
			: val(s.val)
		{
		}
		atomic(const T& v)
			: val(v)
		{
		}
		// Copy assignment goes through the atomic load/store pair.
		atomic& operator= (const atomic<T>& s)
		{
			store(s.load(memory_order_seq_cst), memory_order_seq_cst);
			return *this;
		}

		//
		// ----- load & store -------
		//
		operator T () const { return load(); }
		atomic<T>& operator= (const T& v)
		{ 
			// memory_order_acq_rel aliases memory_order_seq_cst in this
			// header, so assignment takes the interlocked path in store().
			store(v, memory_order_acq_rel); 
			return *this; 
		}
		void store(const T& v, memory_order mo = memory_order_release) throw()
		{
			if (mo == memory_order_seq_cst)
				InterlockedExchange(subst_this(), subst(v));	// full barrier
			else
				val = v;	// plain store for the weaker orderings
		}
		T load(memory_order mo = memory_order_acquire) const
		{
			// Plain read; `mo` is accepted for interface compatibility only.
			return val;
		}

		// Atomically replace the value; returns the previous value.
		T exchange(const T& v, memory_order mo = memory_order_acq_rel) throw()
		{
			return unsubst(InterlockedExchange(subst_this(), subst(v)));
		}

		// On failure the value actually observed is written back into
		// expected_and_result; returns true on successful exchange.
		bool compare_exchange_strong(T& expected_and_result, const T& new_val, memory_order order = memory_order_seq_cst) throw()
		{
			if (order == memory_order_acq_rel)
				return compare_exchange_strong(expected_and_result, new_val, order, memory_order_acquire);
			else if (order == memory_order_release)
				return compare_exchange_strong(expected_and_result, new_val, order, memory_order_relaxed);
			else 
				return compare_exchange_strong(expected_and_result, new_val, order, order);
		}
		bool compare_exchange_strong(T& expected_and_result, const T& new_val, memory_order success_order, memory_order failure_order) throw()
		{
			// failure_order is accepted for interface compatibility only;
			// the InterlockedCompareExchange* intrinsics do not distinguish it.
			subst_t old_val;
			if (success_order == memory_order_acquire)
				old_val = InterlockedCompareExchangeAcquire(subst_this(), subst(new_val), subst(expected_and_result));
			else if (success_order == memory_order_release)
				old_val = InterlockedCompareExchangeRelease(subst_this(), subst(new_val), subst(expected_and_result));
			else
				old_val = InterlockedCompareExchange(subst_this(), subst(new_val), subst(expected_and_result));
			if (old_val == subst(expected_and_result))
				return true;
			expected_and_result = unsubst(old_val);
			return false;
		}
		// InterlockedCompareExchange never fails spuriously, so the weak
		// forms simply forward to the strong ones.
		bool compare_exchange_weak(T& expected_and_result, const T& new_val, memory_order order = memory_order_seq_cst) throw()
		{
			return compare_exchange_strong(expected_and_result, new_val, order);
		}
		bool compare_exchange_weak(T& expected_and_result, const T& new_val, memory_order success_order, memory_order failure_order = memory_order_seq_cst) throw()
		{
			return compare_exchange_strong(expected_and_result, new_val, success_order, failure_order);
		}

		//
		// ----- x_op -------
		//
		// The compound operators return the NEW value (fetch_* return the old).
		T operator+= (int d) { return fetch_add(d) + d; }
		T operator-= (int d) { return fetch_sub(d) - d;	}
		T operator&= (int d) { return fetch_and(d) & d;	}
		T operator|= (int d) { return fetch_or(d) | d;	}
		T operator^= (int d) { return fetch_xor(d) ^ d;	}

		// Atomically add t units; returns the value BEFORE the addition.
		// For pointer T one unit is one element (atomic_ptr_step scales
		// the raw byte-wise addition accordingly).
		T fetch_add(int t, memory_order order = memory_order_seq_cst) throw()
		{
			return unsubst(InterlockedExchangeAdd(subst_this(), static_cast<subst_t>(t) * atomic_ptr_step<T>::value));
		}
		T fetch_sub(int t, memory_order order = memory_order_seq_cst) throw()
		{
			return fetch_add(-t, order);
		}
		// The bit operations have no interlocked primitive, so they loop
		// on compare_exchange_strong, pausing the CPU between attempts.
		// Fixed: relax_cpu() lives in this namespace; the previous
		// `ak::relax_cpu()` qualification referenced a namespace that does
		// not exist and failed to compile on instantiation.
		T fetch_or(const T& t, memory_order order = memory_order_seq_cst) throw()
		{
			T old = val;
			while (!compare_exchange_strong(old, old | t, order))
				relax_cpu();
			return old;
		}
		T fetch_and(const T& t, memory_order order = memory_order_seq_cst) throw()
		{
			T old = val;
			while (!compare_exchange_strong(old, old & t, order))
				relax_cpu();
			return old;
		}
		T fetch_xor(const T& t, memory_order order = memory_order_seq_cst) throw()
		{
			T old = val;
			while (!compare_exchange_strong(old, old ^ t, order))
				relax_cpu();
			return old;
		}

		//
		// ----- inc -------
		//
		// Fixed: the previous code adjusted the post-fix results with
		// `atomic_ptr_step<T>::value`, which over-corrects for pointers to
		// multi-byte types (pointer arithmetic already scales by element),
		// and relied on _inc() returning the new value even though its
		// InterlockedExchangeAdd branch returned the OLD one.  _inc() now
		// always returns the new value and the operators adjust by one unit.
		T operator++ (int) { return _inc(1) - 1; }
		T operator++ () { return _inc(1); }
		T operator-- (int) { return _inc(-1) + 1; }
		T operator-- () { return _inc(-1); }

		//
		// ----- ptrs -------
		//
		// Smart-pointer style access for atomic<T*>.
		typename atomic_ptr_traits<T>::ptr_t operator-> () 
		{
			return load();
		}
		typename atomic_ptr_traits<T>::ref_t operator* ()
		{
			return *load();
		}

		//
		// ----- type casts -----
		//
		// Reinterpret atomic<T> as atomic<BASE>/atomic<DESCENDANT>.  The
		// dummy initialization is a compile-time check that the underlying
		// conversion is actually legal before the reinterpret_cast.
		template<typename BASE> operator atomic<BASE>&()
		{
			BASE t_should_be_convertible_to_base = static_cast<T>(NULL);
			return reinterpret_cast<atomic<BASE>&>(*this);
		}
		template<typename DESCENDANT> atomic<DESCENDANT>& cast()
		{
			T* descendant_should_be_convertible_to_t = static_cast<DESCENDANT*>(NULL);
			return reinterpret_cast<atomic<DESCENDANT>&>(*this);
		}

	private:
		// Atomically add `delta` units and return the NEW value (matching
		// InterlockedIncrement/Decrement semantics on every branch).
		T _inc(int delta, memory_order order = memory_order_seq_cst)
		{
			const subst_t raw_delta = delta * atomic_ptr_step<T>::value;
			if (raw_delta == 1)
			{
				if (order == memory_order_acquire)
					return unsubst(InterlockedIncrementAcquire(subst_this()));
				if (order == memory_order_release)
					return unsubst(InterlockedIncrementRelease(subst_this()));
				return unsubst(InterlockedIncrement(subst_this()));
			}
			else if (raw_delta == -1)
			{
				if (order == memory_order_acquire)
					return unsubst(InterlockedDecrementAcquire(subst_this()));
				if (order == memory_order_release)
					return unsubst(InterlockedDecrementRelease(subst_this()));
				return unsubst(InterlockedDecrement(subst_this()));
			}
			else
				// InterlockedExchangeAdd returns the value BEFORE the
				// addition; add raw_delta to report the new value.
				return unsubst(InterlockedExchangeAdd(subst_this(), raw_delta) + raw_delta);
		}
	};
}

#endif // _AK_ATOMIC_H_
