//
// RS Game Framework
// Copyright © 2009 Jedd Haberstro
// jhaberstro@gmail.com
// 
// $Id:
//

// memory_order_acquire: guarantees that subsequent loads are not moved before the current load or any preceding loads.
// memory_order_release: preceding stores are not moved past the current store or any subsequent stores.
// memory_order_acq_rel: combines the two previous guarantees.
// memory_order_consume: potentially weaker form of memory_order_acquire that enforces ordering of the current load before other operations that are data-dependent on it (for instance, when a load of a pointer is marked memory_order_consume, subsequent operations that dereference this pointer won't be moved before it — and yes, even that is not guaranteed on all platforms!).
// memory_order_relaxed: all reorderings are okay.

// TODO/WARNING - ALL MEMORY ORDERING HAS BEEN MADE TO WORK CORRECTLY ONLY WITH X86/X64.
//				  OTHER ARCHITECTURES HAVE NOT BEEN CONSIDERED (yet).

#ifndef RS_ATOMIC_HPP
#define RS_ATOMIC_HPP

#include "rs/Assert.hpp"
#include "rs/Portability.hpp"

// NOTE: this check must come AFTER the includes.  If RS_ARCHITECTURE is
// defined by Portability.hpp (rather than on the compiler command line),
// testing it first would compare undefined macros (all 0), silently turning
// the warning into a no-op.
#if RS_ARCHITECTURE != RS_ARCHITECTURE_X86 && RS_ARCHITECTURE != RS_ARCHITECTURE_X64
#	warning "Atomic class has not been written to work correctly on this platform!"
#endif

namespace rs
{
	// Memory-ordering constraints accepted by the Atomic operations; a subset
	// of the C++0x std::memory_order enumerators (see the notes at the top of
	// this file for the full std semantics).
	namespace MemoryOrder
	{
		enum Enum
		{
			Acquire,                // later memory operations may not be moved before this load
			Release,                // earlier memory operations may not be moved after this store
			Relaxed,                // atomicity only; any reordering is permitted
			SequentiallyConsistent  // default: single total order over all SC operations
		};
	}
	
    namespace atomic
    {	
		void MemoryBarrier();
		
		void ReadBarrier();
		
		void WriteBarrier();
		
		void CompilerBarrier();
		
		// 8 bit
		Int8 AddFetch(Int8 volatile* address, Int8 value);
		
		UInt8 AddFetch(UInt8 volatile* address, UInt8 value);
		
		Boolean CompareSwap(Int8 volatile* address, Int8 oldValue, Int8 newValue);
		
		Boolean CompareSwap(UInt8 volatile* address, UInt8 oldValue, UInt8 newValue);
		
		Int8 Exchange(Int8 volatile* address, Int8 value);
		
		UInt8 Exchange(UInt8 volatile* address, UInt8 value);
		
		
		// 16 bit
		Int16 AddFetch(Int16 volatile* address, Int16 value);
        
		UInt16 AddFetch(UInt16 volatile* address, UInt16 value);
        
		Boolean CompareSwap(Int16 volatile* address, Int16 oldValue, Int16 newValue);
        
		Boolean CompareSwap(UInt16 volatile* address, UInt16 oldValue, UInt16 newValue);
        
		Int16 Exchange(Int16 volatile* address, Int16 value);
        
		UInt16 Exchange(UInt16 volatile* address, UInt16 value);
		
		// 32 bit
		Int32 AddFetch(Int32 volatile* address, Int32 value);
        
		UInt32 AddFetch(UInt32 volatile* address, UInt32 value);
        
		Boolean CompareSwap(Int32 volatile* address, Int32 oldValue, Int32 newValue);
        
		Boolean CompareSwap(UInt32 volatile* address, UInt32 oldValue, UInt32 newValue);
        
		Int32 Exchange(Int32 volatile* address, Int32 value);
        
		UInt32 Exchange(UInt32 volatile* address, UInt32 value);
		
		// 64 bit
		Int64 AddFetch(Int64 volatile* address, Int64 value);
        
		UInt64 AddFetch(UInt64 volatile* address, UInt64 value);
        
		Boolean CompareSwap(Int64 volatile* address, Int64 oldValue, Int64 newValue);
        
		Boolean CompareSwap(UInt64 volatile* address, UInt64 oldValue, UInt64 newValue);
        
		Int64 Exchange(Int64 volatile* address, Int64 value);
        
		UInt64 Exchange(UInt64 volatile* address, UInt64 value);
    };

	namespace internal
	{
		// Implementation backend for rs::Atomic.  The second template
		// parameter selects a specialization: only fundamental and pointer
		// types are supported, and the primary template is intentionally left
		// undefined so that unsupported types fail at compile time.
		template< typename T, Boolean IsFundamentalOrPointer >
		class AtomicHelper;

		// Specialization for fundamental and pointer types.  The interface
		// loosely mirrors C++0x std::atomic< T >.
		template< typename T >
		class AtomicHelper< T, true >
		{
		public:

			typedef T ValueType;

		public:
			
			// Default construction leaves data_ uninitialized (no Store).
			AtomicHelper() { }
			
			// Initializing construction; performs an atomic Store of 'value'.
			explicit AtomicHelper(ValueType value);

			// Atomic assignment; stores and returns 'value'.
			ValueType operator=(ValueType value);

			// Atomically replaces the stored value.  MemoryOrder::Acquire is
			// rejected (asserted in the implementation).
			void Store(ValueType value, MemoryOrder::Enum memoryOrder = MemoryOrder::SequentiallyConsistent);

			// Atomically reads the stored value.  MemoryOrder::Release is
			// rejected (asserted in the implementation).
			ValueType Load(MemoryOrder::Enum memoryOrder = MemoryOrder::SequentiallyConsistent) const;

			// Atomically swaps in 'value' and returns the previous value.
			ValueType Exchange(ValueType value, MemoryOrder::Enum memoryOrder = MemoryOrder::SequentiallyConsistent);

			// Compare-exchange family: atomically replaces the stored value
			// with newValue iff it equals oldValue.  MemoryOrder::Release is
			// rejected for both the success and failure orderings.
			Boolean CompareExchangeStrong(ValueType& oldValue, ValueType newValue, MemoryOrder::Enum memoryOrder = MemoryOrder::SequentiallyConsistent);

			Boolean CompareExchangeStrong(ValueType& oldValue, ValueType newValue, MemoryOrder::Enum success, MemoryOrder::Enum failure);

			Boolean CompareExchangeWeak(ValueType& oldValue, ValueType newValue, MemoryOrder::Enum memoryOrder = MemoryOrder::SequentiallyConsistent);

			Boolean CompareExchangeWeak(ValueType& oldValue, ValueType newValue, MemoryOrder::Enum success, MemoryOrder::Enum failure);

			// Implicit read conversion; equivalent to Load() with the default
			// (sequentially consistent) ordering.
			operator T() const;
			
		protected:
			
			// Non-copyable: declared but never defined.
			AtomicHelper(AtomicHelper const&);
			AtomicHelper& operator=(AtomicHelper const&);
			
		private:
			
            ValueType data_;
		};
	}
	
	// Public atomic value wrapper.  For fundamental and pointer types the
	// base class provides the full Load/Store/Exchange/compare-exchange
	// interface; for any other T the base resolves to the undefined primary
	// AtomicHelper template, so instantiation fails to compile.
	// Non-copyable: copy operations are declared protected and left undefined.
	template< typename T >
	class Atomic : public internal::AtomicHelper< T, meta::LogicalOr< meta::IsFundamentalType< T >::Value, meta::IsPointer< T >::Value >::Value >
	{
	public:
		
		typedef T ValueType;
		
	private:
		
		typedef internal::AtomicHelper< T, meta::LogicalOr< meta::IsFundamentalType< T >::Value, meta::IsPointer< T >::Value >::Value > BaseType;
		
	public:
		
		// Default construction leaves the value uninitialized.
		Atomic() : BaseType() { }
		
		// Initializing construction; atomically stores 'value'.
		explicit Atomic(ValueType value)
		: BaseType(value) {
		}
		
	protected:
		
		Atomic(Atomic const&);
		Atomic& operator=(Atomic const&);
	};
	
	
	namespace internal
	{
		// Maps bool to int so bool values can be routed through the
		// integer-only rs::atomic primitives; every other type maps to itself.
		// NOTE(review): reinterpreting &data_ (1 byte when T == bool) as an
		// int* makes the RMW touch sizeof(int) bytes -- looks like it can
		// read/write past the object; confirm sizeof assumptions.
		template< typename T > struct BoolToInt { typedef T Type; };
		template< > struct BoolToInt< bool > { typedef int Type; };
		
		
		// Initializing constructor: publishes 'value' with a sequentially
		// consistent Store.
		template< typename T >
		inline AtomicHelper< T, true >::AtomicHelper(ValueType value) {
			Store(value);
		}
		
		// Atomic assignment with sequentially consistent ordering.  Returns
		// the assigned value (not the previous contents), mirroring the
		// std::atomic assignment convention.
		template< typename T >
		inline typename AtomicHelper< T, true >::ValueType AtomicHelper< T, true >::operator=(typename AtomicHelper< T, true >::ValueType value) {
			Store(value);
			return value;
		}

		// Atomically writes 'value' into data_.  MemoryOrder::Acquire is
		// meaningless for a store and is rejected.
		template< typename T >
		inline void AtomicHelper< T, true >::Store(typename AtomicHelper< T, true >::ValueType value, MemoryOrder::Enum memoryOrder) {
			RS_ASSERT(memoryOrder != MemoryOrder::Acquire, "Unable to store atomic variable with MemoryOrder::Acquire");
			if (memoryOrder != MemoryOrder::SequentiallyConsistent) {
				// Relaxed/Release: x86 does not reorder a store with earlier
				// stores (see the note at the top of this file), so preventing
				// compiler reordering is sufficient.
				atomic::CompilerBarrier();
				data_ = value;
			}
			else {
				// Sequential consistency needs a full barrier paired with the
				// store; Exchange is presumed to be a locked RMW (full fence
				// on x86) -- see the per-architecture .ipp implementations.
				Exchange(value, memoryOrder);
			}
		}
		

		// Atomically reads data_.  Relies on naturally aligned loads of
		// fundamental types being atomic on x86; fences are added around a
		// plain read.  MemoryOrder::Release is meaningless for a load and is
		// rejected.
		template< typename T >
		inline typename AtomicHelper< T, true >::ValueType AtomicHelper< T, true >::Load(MemoryOrder::Enum memoryOrder) const {
			RS_ASSERT(memoryOrder != MemoryOrder::Release, "Unable to load atomic variable with MemoryOrder::Release");
			if (memoryOrder != MemoryOrder::SequentiallyConsistent) {
				// Acquire/Relaxed path.
				// NOTE(review): a full hardware fence is stronger than an
				// acquire load needs on x86 (a compiler barrier would do);
				// the two branches look swapped in strength -- confirm intent.
				T copy = data_;
				atomic::MemoryBarrier();
				return copy;
			}
			else {
				// Sequentially consistent path: given SC stores go through a
				// full-barrier Exchange (see Store), an SC load on x86 only
				// needs the compiler not to reorder around it.
				T copy = data_;
				atomic::CompilerBarrier();
				return copy;
			}
		}

		// Atomically swaps 'value' into data_ and returns the previous value.
		// 'memoryOrder' is intentionally ignored: the underlying
		// atomic::Exchange is presumed to be a locked RMW, which is a full
		// barrier on x86 -- TODO confirm against the .ipp implementations.
		template< typename T >
		inline typename AtomicHelper< T, true >::ValueType AtomicHelper< T, true >::Exchange(typename AtomicHelper< T, true >::ValueType value, MemoryOrder::Enum memoryOrder) {
			// bool has no atomic::Exchange overload, so route it through int.
			// NOTE(review): for T == bool this reinterprets a 1-byte object as
			// an int and performs a wider RMW on it -- verify this cannot
			// touch memory outside data_.
			typedef typename BoolToInt< T >::Type NewType;
			return atomic::Exchange(reinterpret_cast< NewType* >(&data_), static_cast< NewType >(value));
		}

		// Strong compare-exchange with one ordering constraint applied to both
		// the success and the failure case.  This library forbids Release here
		// (asserted); when asserts are compiled out, the Release branch below
		// degrades gracefully by relaxing the failure ordering (a failed
		// compare-exchange performs no store, so Release cannot apply to it).
		template< typename T >
		inline Boolean AtomicHelper< T, true >::CompareExchangeStrong(typename AtomicHelper< T, true >::ValueType& oldValue, typename AtomicHelper< T, true >::ValueType newValue, MemoryOrder::Enum memoryOrder) {
			RS_ASSERT(memoryOrder != MemoryOrder::Release, "Unable to compare-and-exchange atomic variable with MemoryOrder::Release");
			// Unreachable when RS_ASSERT is fatal; defensive fallback for
			// builds where the assert is compiled out.
			if (memoryOrder == MemoryOrder::Release) {
				return CompareExchangeStrong(oldValue, newValue, memoryOrder, MemoryOrder::Relaxed);
			}
			
			return CompareExchangeStrong(oldValue, newValue, memoryOrder, memoryOrder);
		}
		
		// Strong compare-exchange: atomically replaces data_ with newValue iff
		// it currently equals oldValue.  Returns true on success; on failure,
		// oldValue is updated with the value that was observed, mirroring
		// C++0x compare_exchange_strong.  Release ordering is forbidden by
		// this library (asserted below); no extra fences are emitted because
		// the underlying CompareSwap is presumed to be a locked RMW (full
		// barrier on x86).
		//
		// BUGFIX: the previous revision contained only the two asserts and
		// fell off the end of a value-returning function (undefined
		// behaviour); the compare-swap itself was never performed.
		template< typename T >
		inline Boolean AtomicHelper< T, true >::CompareExchangeStrong(typename AtomicHelper< T, true >::ValueType& oldValue, typename AtomicHelper< T, true >::ValueType newValue, MemoryOrder::Enum success, MemoryOrder::Enum failure) {
			RS_ASSERT(success != MemoryOrder::Release, "Unable to compare-and-exchange atomic variable with MemoryOrder::Release");
			RS_ASSERT(failure != MemoryOrder::Release, "Unable to compare-and-exchange atomic variable with MemoryOrder::Release");
			// Route bool through int, matching the Exchange implementation.
			typedef typename BoolToInt< T >::Type NewType;
			Boolean swapped = atomic::CompareSwap(reinterpret_cast< NewType volatile* >(&data_), static_cast< NewType >(oldValue), static_cast< NewType >(newValue));
			if (!swapped) {
				// NOTE(review): plain re-read of data_; assumed sufficient on
				// x86 for naturally aligned fundamental types -- TODO confirm.
				oldValue = data_;
			}
			return swapped;
		}

		// Weak compare-exchange with one ordering constraint applied to both
		// the success and the failure case.  Release is forbidden (asserted);
		// when asserts are compiled out, the Release branch below degrades
		// gracefully by relaxing the failure ordering (a failed
		// compare-exchange performs no store, so Release cannot apply to it).
		template< typename T >
		inline Boolean AtomicHelper< T, true >::CompareExchangeWeak(typename AtomicHelper< T, true >::ValueType& oldValue, typename AtomicHelper< T, true >::ValueType newValue, MemoryOrder::Enum memoryOrder) {
			RS_ASSERT(memoryOrder != MemoryOrder::Release, "Unable to compare-and-exchange atomic variable with MemoryOrder::Release");
			// Unreachable when RS_ASSERT is fatal; defensive fallback for
			// builds where the assert is compiled out.
			if (memoryOrder == MemoryOrder::Release) {
				return CompareExchangeWeak(oldValue, newValue, memoryOrder, MemoryOrder::Relaxed);
			}
			
			return CompareExchangeWeak(oldValue, newValue, memoryOrder, memoryOrder);
		}
		
		// Weak compare-exchange.  A weak CAS is permitted to fail spuriously,
		// so implementing it with the same compare-swap as the strong form is
		// always valid (x86 LOCK CMPXCHG does not fail spuriously anyway).
		// Returns true on success; on failure, oldValue receives the observed
		// value.  Release ordering is forbidden by this library.
		//
		// BUGFIX: the previous revision contained only the two asserts and
		// fell off the end of a value-returning function (undefined
		// behaviour); no compare-swap was ever performed.
		template< typename T >
		inline Boolean AtomicHelper< T, true >::CompareExchangeWeak(typename AtomicHelper< T, true >::ValueType& oldValue, typename AtomicHelper< T, true >::ValueType newValue, MemoryOrder::Enum success, MemoryOrder::Enum failure) {
			RS_ASSERT(success != MemoryOrder::Release, "Unable to compare-and-exchange atomic variable with MemoryOrder::Release");
			RS_ASSERT(failure != MemoryOrder::Release, "Unable to compare-and-exchange atomic variable with MemoryOrder::Release");
			// Route bool through int, matching the Exchange implementation.
			typedef typename BoolToInt< T >::Type NewType;
			Boolean swapped = atomic::CompareSwap(reinterpret_cast< NewType volatile* >(&data_), static_cast< NewType >(oldValue), static_cast< NewType >(newValue));
			if (!swapped) {
				// NOTE(review): plain re-read of data_; assumed sufficient on
				// x86 for naturally aligned fundamental types -- TODO confirm.
				oldValue = data_;
			}
			return swapped;
		}

		// Implicit read conversion; equivalent to Load() with the default
		// (sequentially consistent) ordering.
		template< typename T >
		inline AtomicHelper< T, true >::operator T() const {
			return Load();
		}
		
#		if RS_ARCHITECTURE == RS_ARCHITECTURE_X86
		// On 32-bit x86 an ordinary 64-bit access is not atomic, so Load,
		// Exchange and Store are specialized for the 64-bit integer types and
		// routed through the atomic RMW primitives instead of plain accesses.
		//
		// NOTE(review): the Load specialization exchanges into the *local*
		// 'result' and passes data_ by value -- that argument read is a plain
		// (potentially torn) 64-bit load, so the exchange does not actually
		// make reading data_ atomic.  The author's comment inside the macro
		// shares this doubt; the usual fix is a CompareSwap-validated read
		// loop against &data_.  TODO confirm and repair.
#		define DECLARE_X86_64BIT_SPECIALIZATIONS(T) \
			template< > \
			inline AtomicHelper< T, true >::ValueType AtomicHelper< T, true >::Load(MemoryOrder::Enum memoryOrder) const { \
				RS_ASSERT(memoryOrder != MemoryOrder::Release, "Unable to load atomic variable with MemoryOrder::Release"); \
				/* I'm not really sure if this is implemented correctly*/ \
				Boolean beforeFence = (memoryOrder == MemoryOrder::SequentiallyConsistent || memoryOrder == MemoryOrder::Release); \
				Boolean afterFence = (memoryOrder == MemoryOrder::SequentiallyConsistent || memoryOrder == MemoryOrder::Acquire); \
				T result; \
				if (beforeFence) atomic::CompilerBarrier(); \
				atomic::Exchange(&result, data_); \
				if (afterFence) atomic::CompilerBarrier(); \
				return result; \
			} \
			\
			template< > \
			inline AtomicHelper< T, true >::ValueType AtomicHelper< T, true >::Exchange(AtomicHelper< T, true >::ValueType value, MemoryOrder::Enum memoryOrder) { \
				/* Implemented like a CompareExchangeStrong */ \
				/* Internally, Atomic::Exchange with 64-bit integers on x86 uses a CompareSwap */ \
				Boolean beforeFence = (memoryOrder == MemoryOrder::SequentiallyConsistent || memoryOrder == MemoryOrder::Release); \
				Boolean afterFence = (memoryOrder == MemoryOrder::SequentiallyConsistent || memoryOrder == MemoryOrder::Acquire); \
				if (beforeFence) atomic::CompilerBarrier(); \
				T result = atomic::Exchange(&data_, value); \
				if (afterFence) atomic::CompilerBarrier(); \
				return result; \
			} \
			\
			template< > \
			inline void AtomicHelper< T, true >::Store(AtomicHelper< T, true >::ValueType value, MemoryOrder::Enum memoryOrder) { \
				RS_ASSERT(memoryOrder != MemoryOrder::Acquire, "Unable to store atomic variable with MemoryOrder::Acquire"); \
				Exchange(value, memoryOrder); \
			}
			
			
		DECLARE_X86_64BIT_SPECIALIZATIONS(UInt64)
		DECLARE_X86_64BIT_SPECIALIZATIONS(Int64)
#		undef DECLARE_X86_64BIT_SPECIALIZATIONS
#		endif
	}
}

#define RS_ATOMIC_IMPLEMENTATION_GUARD
#if RS_ARCHITECTURE == RS_ARCHITECTURE_X86
#	include "rs/portability/architecture/x86/Atomic.ipp"
#elif RS_ARCHITECTURE == RS_ARCHITECTURE_X64
#	include "rs/portability/architecture/x64/Atomic.ipp"
#else
#	warning "Unsupported platform for Atomic operations. Defaulting to generic implementation."
#endif
#undef RS_ATOMIC_IMPLEMENTATION_GUARD

#endif //  RS_ATOMIC_HPP
