//==============================================================================
// Copyright (c) 2008-2013 Niall Ryan. All Rights Reserved.
//==============================================================================

#ifndef HYDRA_ATOMIC_INTERNAL_ATOMIC_INTERNAL_H
#define HYDRA_ATOMIC_INTERNAL_ATOMIC_INTERNAL_H 1

#include <assert.h>
#include <intrin.h>
#include <string.h>		//memcpy

#pragma intrinsic(_ReadWriteBarrier)

namespace Hydra
{
	namespace Internal
	{
		//Atomic 8-bit compare-and-swap.
		//If the byte at *p equals 'compare', stores 'value' there; otherwise *p is untouched.
		//Returns the byte previously held at *p (== 'compare' exactly when the swap happened).
		//MSVC x86 returns char values in AL, which is where cmpxchg leaves the old value,
		// so the function deliberately falls off the end of the __asm block with the result in place.
		inline char compareAndSwap8(void* p, char value, char compare)
		{
			__asm
			{
				mov edx, p				//edx = target address
				mov cl, value			//cl = replacement value
				mov al, compare			//cmpxchg compares AL against [edx]
				lock cmpxchg [edx], cl	//on match [edx]=cl, else AL=[edx]; either way AL = old value
			}
		}

		//Atomic 16-bit compare-and-swap; see compareAndSwap8 for the pattern.
		//Returns the short previously held at *p via AX, the MSVC x86 short-return register.
		inline short compareAndSwap16(void* p, short value, short compare)
		{
			__asm
			{
				mov edx, p				//edx = target address
				mov cx, value			//cx = replacement value
				mov ax, compare			//cmpxchg compares AX against [edx]
				lock cmpxchg [edx], cx	//on match [edx]=cx, else AX=[edx]; either way AX = old value
			}
		}

		//Atomic 32-bit compare-and-swap; see compareAndSwap8 for the pattern.
		//Returns the int previously held at *p via EAX, the MSVC x86 int-return register.
		inline int compareAndSwap32(void* p, int value, int compare)
		{
			__asm
			{
				mov edx, p				//edx = target address
				mov ecx, value			//ecx = replacement value
				mov eax, compare		//cmpxchg compares EAX against [edx]
				lock cmpxchg [edx], ecx	//on match [edx]=ecx, else EAX=[edx]; either way EAX = old value
			}
		}

		//Atomic 64-bit compare-and-swap on 32-bit x86 via cmpxchg8b.
		//cmpxchg8b compares EDX:EAX with the qword at [esi]; on match it stores ECX:EBX,
		// otherwise it loads the current qword into EDX:EAX.
		//Returns the previous value at *p in EDX:EAX, the MSVC x86 __int64 return convention.
		//The compiler saves/restores EBX and ESI around an __asm block, so using them is safe.
		inline __int64 compareAndSwap64(void* p, __int64 value, __int64 compare)
		{
			//split the 64-bit arguments into register-sized halves for the asm below
			int testLow = static_cast<int>(compare);
			int testHigh = static_cast<int>(compare>>32);
			int valueLow = static_cast<int>(value);
			int valueHigh = static_cast<int>(value>>32);

			__asm 
			{
				mov edx, testHigh		//EDX:EAX = value we expect to find at *p
				mov eax, testLow
				mov ecx, valueHigh		//ECX:EBX = replacement value
				mov ebx, valueLow
				mov esi, p
				lock cmpxchg8b qword ptr [esi]	//on match stores ECX:EBX; else EDX:EAX = current value
			}
		}

		//Atomically stores 'value' at *p and returns the byte previously there.
		//Note: xchg with a memory operand asserts the bus lock implicitly, so the
		// explicit 'lock' prefix is redundant (but harmless).
		//Old value ends up in AL, the MSVC x86 char-return register.
		inline char getAndSet8(void* p, char value)
		{
			__asm
			{
				mov edx, p			//edx = target address
				mov al, value		//al = new value, xchg swaps it with [edx]
				lock xchg [edx], al	//al now holds the previous contents = return value
			}
		}

		//Atomically stores 'value' at *p and returns the short previously there.
		//xchg is implicitly locked; the 'lock' prefix is redundant but harmless.
		//Old value ends up in AX, the MSVC x86 short-return register.
		inline short getAndSet16(void* p, short value)
		{
			__asm
			{
				mov edx, p			//edx = target address
				mov ax, value		//ax = new value, xchg swaps it with [edx]
				lock xchg [edx], ax	//ax now holds the previous contents = return value
			}
		}

		//Atomically stores 'value' at *p and returns the int previously there.
		//xchg is implicitly locked; the 'lock' prefix is redundant but harmless.
		//Old value ends up in EAX, the MSVC x86 int-return register.
		inline int getAndSet32(void* p, int value)
		{
			__asm
			{
				mov edx, p				//edx = target address
				mov eax, value			//eax = new value, xchg swaps it with [edx]
				lock xchg [edx], eax	//eax now holds the previous contents = return value
			}
		}

		//Atomically stores a 64-bit 'value' at *p and returns the previous contents.
		//Implemented as a cmpxchg8b loop: guess the current value with a plain
		// (non-atomic) read, then attempt the swap; on failure cmpxchg8b has
		// refreshed EDX:EAX with the real current value, so simply retry.
		//Old value is returned in EDX:EAX, the MSVC x86 __int64 return convention.
		inline __int64 getAndSet64(void* p, __int64 value)
		{
			int valueLow = static_cast<int>(value);
			int valueHigh = static_cast<int>(value>>32);
			__asm
			{
				mov edi, p
				mov ecx, valueHigh	//ECX:EBX = value to store
				mov ebx, valueLow
				mov eax, [edi]		//read current value non-atomically here... it's just a guess, if it's wrong we'll try again
				mov edx, [edi+4]
			tryAgain:
				lock cmpxchg8b qword ptr [edi]	//succeeds only while EDX:EAX still matches *p
				jnz tryAgain		//on failure EDX:EAX already holds the fresh value; retry
			}
		}

		//Atomically adds 'addValue' to the byte at *p; returns the value before the add.
		inline char getAndAdd8(void* p, char addValue)
		{
			__asm
			{
				mov edx, p			//edx = target address
				mov al, addValue
				lock xadd [edx], al	//[edx] += al, and al = previous value = return value
			}
		}

		//Atomically adds 'addValue' to the short at *p; returns the value before the add.
		inline short getAndAdd16(void* p, short addValue)
		{
			__asm
			{
				mov edx, p			//edx = target address
				mov ax, addValue
				lock xadd [edx], ax	//[edx] += ax, and ax = previous value = return value
			}
		}

		//Atomically adds 'addValue' to the int at *p; returns the value before the add.
		inline int getAndAdd32(void* p, int addValue)
		{
			__asm
			{
				mov edx, p				//edx = target address
				mov eax, addValue
				lock xadd [edx], eax	//[edx] += eax, and eax = previous value = return value
			}
		}

		//Atomically adds 'addValue' to the 64-bit value at *p; returns the pre-add value.
		//Same cmpxchg8b retry loop as getAndSet64, except the replacement value ECX:EBX
		// (old value + addValue) must be recomputed on every attempt.
		//Old value is returned in EDX:EAX, the MSVC x86 __int64 return convention.
		inline __int64 getAndAdd64(void* p, __int64 addValue)
		{
			int addValueLow = static_cast<int>(addValue);
			int addValueHigh = static_cast<int>(addValue>>32);
			__asm
			{
				mov edi, p
				mov eax, [edi]		//read current value non-atomically here... it's just a guess, if it's wrong we'll try again
				mov edx, [edi+4]
			tryAgain:
				mov ebx, addValueLow	//ECX:EBX = (guessed) old value + addValue
				mov ecx, addValueHigh
				add ebx, eax			//64-bit add via the add/adc carry chain
				adc ecx, edx
				lock cmpxchg8b qword ptr [edi]	//store the sum if EDX:EAX still matches *p
				jnz tryAgain			//else EDX:EAX = fresh value; recompute the sum and retry
			}
		}

		//Atomic 64-bit load on 32-bit x86; requires an 8-byte-aligned address (asserted).
		//fild/fistp move the full qword through the x87 unit in one memory access each;
		// the 80-bit x87 format carries a 64-bit mantissa, so every __int64 round-trips exactly.
		inline __int64 atomicRead64(void* p)
		{
			assert((reinterpret_cast<intptr_t>(p)&7)==0);
			__int64 result;
			__asm
			{
				mov eax, p				//inline assembler syntax limitation, can't write "fild qword ptr [*p]"
				fild qword ptr [eax]	//single 64-bit load from *p
				fistp qword ptr [result]	//spill to the local (unshared) result
			}
			return result;
		}

		//Atomic 64-bit store on 32-bit x86; requires an 8-byte-aligned address (asserted).
		//Mirror of atomicRead64: fild pulls the value through the x87 unit and fistp
		// performs a single 64-bit store to the destination, losslessly.
		inline void atomicWrite64(void* p, __int64 value)
		{
			assert((reinterpret_cast<intptr_t>(p)&7)==0);
			__asm
			{
				mov eax, p				//inline assembler syntax limitation, can't write "fistp qword ptr [*p]"
				fild qword ptr [value]	//load the 64-bit source value
				fistp qword ptr [eax]	//single 64-bit store to *p
			}
		}

		//
		//Internal storage types for different sized atomics, include padding as necessary.
		// Also force alignment, especially useful since VS2005 fails to align __int64s correctly without a declspec(align(8))
		//
		//Primary template is intentionally empty: only the explicit size specializations
		// below (1 through 8 bytes) are usable, so an unsupported sizeof(T) fails to
		// compile instead of silently misbehaving.
		template<typename T, int Size>
		struct AtomicStorage
		{
		};

		//Storage for 1-byte types; char is the primitive the atomic ops operate on.
		//No alignment declspec required: single bytes are always suitably aligned.
		template<typename T>
		struct AtomicStorage<T, 1>
		{
			typedef char AtomicType;

			//View the user's value as the primitive atomic type.
			static AtomicType toAtomicType(const T& value)
			{
				const AtomicType* aliased = reinterpret_cast<const AtomicType*>(&value);
				return *aliased;
			}

			//View the primitive atomic type back as the user's type.
			static const T fromAtomicType(const AtomicType& value)
			{
				const T* aliased = reinterpret_cast<const T*>(&value);
				return *aliased;
			}

			//Address of the stored value, typed as the primitive the atomic ops expect.
			AtomicType* getPtr() const
			{
				T* mutableValue = const_cast<T*>(&m_value);
				return reinterpret_cast<AtomicType*>(mutableValue);
			}

			T m_value;
		};

		//Storage for 2-byte types; short is the primitive the atomic ops operate on.
		//The declspec forces 2-byte alignment (see the note above about VS2005).
		template<typename T>
		struct AtomicStorage<T, 2>
		{
			typedef short AtomicType;

			//View the user's value as the primitive atomic type.
			static AtomicType toAtomicType(const T& value)
			{
				const AtomicType* aliased = reinterpret_cast<const AtomicType*>(&value);
				return *aliased;
			}

			//View the primitive atomic type back as the user's type.
			static const T fromAtomicType(const AtomicType& value)
			{
				const T* aliased = reinterpret_cast<const T*>(&value);
				return *aliased;
			}

			//Address of the stored value, typed as the primitive the atomic ops expect.
			AtomicType* getPtr() const
			{
				T* mutableValue = const_cast<T*>(&m_value);
				return reinterpret_cast<AtomicType*>(mutableValue);
			}

			__declspec(align(2)) T m_value;
		};

		//Storage for 3-byte types, widened to a 4-byte primitive with one byte of padding.
		template<typename T>
		struct AtomicStorage<T, 3>
		{
			typedef int AtomicType;
			//Widen into the primitive; the unused high byte is zero.
			//memcpy of exactly sizeof(T) bytes replaces the old full-width int read + mask,
			// which read one byte past the end of the caller's 3-byte object; on
			// little-endian x86 the result is identical, without the out-of-bounds read.
			static AtomicType toAtomicType(const T& value)
			{
				AtomicType result = 0;
				memcpy(&result, &value, sizeof(T));
				return result;
			}
			//Narrow back: reads only the low 3 bytes of the (in-bounds) primitive.
			static const T fromAtomicType(const AtomicType& value)	{ return *reinterpret_cast<const T*>(&value); }
			//Address of the stored value as the primitive; m_pad makes the full 4 bytes valid.
			AtomicType* getPtr() const { return reinterpret_cast<AtomicType*>(const_cast<T*>(&m_value)); }
			__declspec(align(4)) T m_value;
			char m_pad;
		};

		//Storage for 4-byte types; int is the primitive the atomic ops operate on.
		//The declspec forces 4-byte alignment (see the note above about VS2005).
		template<typename T>
		struct AtomicStorage<T, 4>
		{
			typedef int AtomicType;

			//View the user's value as the primitive atomic type.
			static AtomicType toAtomicType(const T& value)
			{
				const AtomicType* aliased = reinterpret_cast<const AtomicType*>(&value);
				return *aliased;
			}

			//View the primitive atomic type back as the user's type.
			static const T fromAtomicType(const AtomicType& value)
			{
				const T* aliased = reinterpret_cast<const T*>(&value);
				return *aliased;
			}

			//Address of the stored value, typed as the primitive the atomic ops expect.
			AtomicType* getPtr() const
			{
				T* mutableValue = const_cast<T*>(&m_value);
				return reinterpret_cast<AtomicType*>(mutableValue);
			}

			__declspec(align(4)) T m_value;
		};

		//Storage for 5-byte types, widened to an 8-byte primitive with 3 bytes of padding.
		template<typename T>
		struct AtomicStorage<T, 5>
		{
			typedef __int64 AtomicType;
			//Widen into the primitive; the unused high bytes are zero.
			//memcpy of exactly sizeof(T) bytes replaces the old full-width read + mask,
			// which read 3 bytes past the end of the caller's 5-byte object; on
			// little-endian x86 the result is identical, without the out-of-bounds read.
			static AtomicType toAtomicType(const T& value)
			{
				AtomicType result = 0;
				memcpy(&result, &value, sizeof(T));
				return result;
			}
			//Narrow back: reads only the low 5 bytes of the (in-bounds) primitive.
			static const T fromAtomicType(const AtomicType& value)	{ return *reinterpret_cast<const T*>(&value); }
			//Address of the stored value as the primitive; m_pad makes the full 8 bytes valid.
			AtomicType* getPtr() const { return reinterpret_cast<AtomicType*>(const_cast<T*>(&m_value)); }
			__declspec(align(8)) T m_value;
			char m_pad[3];
		};

		//Storage for 6-byte types, widened to an 8-byte primitive with 2 bytes of padding.
		template<typename T>
		struct AtomicStorage<T, 6>
		{
			typedef __int64 AtomicType;
			//Widen into the primitive; the unused high bytes are zero.
			//memcpy of exactly sizeof(T) bytes replaces the old full-width read + mask,
			// which read 2 bytes past the end of the caller's 6-byte object; on
			// little-endian x86 the result is identical, without the out-of-bounds read.
			static AtomicType toAtomicType(const T& value)
			{
				AtomicType result = 0;
				memcpy(&result, &value, sizeof(T));
				return result;
			}
			//Narrow back: reads only the low 6 bytes of the (in-bounds) primitive.
			static const T fromAtomicType(const AtomicType& value)	{ return *reinterpret_cast<const T*>(&value); }
			//Address of the stored value as the primitive; m_pad makes the full 8 bytes valid.
			AtomicType* getPtr() const { return reinterpret_cast<AtomicType*>(const_cast<T*>(&m_value)); }
			__declspec(align(8)) T m_value;
			short m_pad;
		};

		//Storage for 7-byte types, widened to an 8-byte primitive with one byte of padding.
		template<typename T>
		struct AtomicStorage<T, 7>
		{
			typedef __int64 AtomicType;
			//Widen into the primitive; the unused high byte is zero.
			//memcpy of exactly sizeof(T) bytes replaces the old full-width read + mask,
			// which read one byte past the end of the caller's 7-byte object; on
			// little-endian x86 the result is identical, without the out-of-bounds read.
			static AtomicType toAtomicType(const T& value)
			{
				AtomicType result = 0;
				memcpy(&result, &value, sizeof(T));
				return result;
			}
			//Narrow back: reads only the low 7 bytes of the (in-bounds) primitive.
			static const T fromAtomicType(const AtomicType& value)	{ return *reinterpret_cast<const T*>(&value); }
			//Address of the stored value as the primitive; m_pad makes the full 8 bytes valid.
			AtomicType* getPtr() const { return reinterpret_cast<AtomicType*>(const_cast<T*>(&m_value)); }
			__declspec(align(8)) T m_value;
			char m_pad;
		};

		//Storage for 8-byte types; __int64 is the primitive the atomic ops operate on.
		//The declspec forces 8-byte alignment (VS2005 fails to align __int64 without it).
		template<typename T>
		struct AtomicStorage<T, 8>
		{
			typedef __int64 AtomicType;

			//View the user's value as the primitive atomic type.
			static AtomicType toAtomicType(const T& value)
			{
				const AtomicType* aliased = reinterpret_cast<const AtomicType*>(&value);
				return *aliased;
			}

			//View the primitive atomic type back as the user's type.
			static const T fromAtomicType(const AtomicType& value)
			{
				const T* aliased = reinterpret_cast<const T*>(&value);
				return *aliased;
			}

			//Address of the stored value, typed as the primitive the atomic ops expect.
			AtomicType* getPtr() const
			{
				T* mutableValue = const_cast<T*>(&m_value);
				return reinterpret_cast<AtomicType*>(mutableValue);
			}

			__declspec(align(8)) T m_value;
		};

		//
		//Helper templates for performing loads and stores using different memory semantics.
		//
		//The dest parameter is volatile to ensure that the compiler cannot optimize away the load/store.
		//Better to have the volatile on a parameter, instead of having the actual variable be volatile, because when on
		// a parameter the volatile behaviour only exists during this function.
		//Using a _ReadWriteBarrier to enforce the acquire/release semantics in the compiler. This is perhaps not
		// necessary on VS 2005, since volatile should have acquire and release semantics there automatically.
		//x86 re-ordering rules: stores ordered relative to stores, loads ordered relative to loads, loads can be re-ordered before stores.
		//Primary template is intentionally empty; the FenceType specializations below
		// provide the real implementations (with dedicated Size==8 variants further down).
		template<typename T, int Size, FenceType F>
		struct LoadStoreHelper
		{
		};

		//Acquire semantics: no later memory operation may be re-ordered before the access.
		template<typename T, int Size>
		struct LoadStoreHelper<T, Size, Acquire>
		{
			static inline T load(volatile T* dest)
			{
				T temp = *dest;			//x86 rules: loads can't move before this load, stores can't move before a load
				_ReadWriteBarrier();	//compiler barrier: keep later accesses after the load
				return temp;
			}

			static inline void store(volatile T* dest, T value)
			{
				*dest = value;			//x86 rules: stores can't move before this store, but loads can move before it, so we need...
				MemoryBarrier();		//prevents loads from moving before this store
				_ReadWriteBarrier();	//compiler barrier: keep later accesses after the store
			}
		};

		//Release semantics: no earlier memory operation may be re-ordered after the access.
		template<typename T, int Size>
		struct LoadStoreHelper<T, Size, Release>
		{
			static inline T load(volatile T* dest)
			{
				_ReadWriteBarrier();	//compiler barrier: keep earlier accesses before the load
				MemoryBarrier();		//prevent stores moving after this load, because...
				return *dest;			//x86 rules: loads can't move after this load, stores can move after a load
			}

			static inline void store(volatile T* dest, T value)
			{
				_ReadWriteBarrier();	//compiler barrier: keep earlier accesses before the store
				*dest = value;			//x86 rules: stores can't move after this store, loads can't move after a store
			}
		};

		//Full fence: the access may not be re-ordered with any other memory operation.
		template<typename T, int Size>
		struct LoadStoreHelper<T, Size, FullFence>
		{
			static inline T load(volatile T* dest)
			{
				_ReadWriteBarrier();	//compiler barrier: keep earlier accesses before the load
				MemoryBarrier();		//prevent stores moving after this load, because...
				T temp = *dest;			//x86 rules: loads can't move before or after this load, but stores can move after a load
				_ReadWriteBarrier();	//compiler barrier: keep later accesses after the load
				return temp;
			}

			static inline void store(volatile T* dest, T value)
			{
				_ReadWriteBarrier();	//compiler barrier: keep earlier accesses before the store
				*dest = value;			//x86 rules: stores can't move before or after this store, but loads can move before a store, so we need...
				MemoryBarrier();		//prevents loads from moving before this store
				_ReadWriteBarrier();	//compiler barrier: keep later accesses after the store
			}
		};

		//No fence: plain load/store with no ordering guarantees beyond the hardware default.
		//The volatile parameter still stops the compiler optimizing the access away.
		template<typename T, int Size>
		struct LoadStoreHelper<T, Size, NoFence>
		{
			static inline T load(volatile T* dest)
			{
				T current = *dest;
				return current;
			}

			static inline void store(volatile T* dest, T value)
			{
				*dest = value;
			}
		};

		//Specialization for 64 bit types, as loads and stores will not be atomic automatically even for aligned types.
		//
		//No explicit compiler barrier or volatile parameter needed, the __asm block provides this behaviour automatically.
		//The regular memory barriers are still necessary however
		//64-bit acquire: atomicRead64/atomicWrite64 supply the atomicity, and their
		// __asm blocks double as compiler barriers, so no _ReadWriteBarrier is needed.
		template<typename T>
		struct LoadStoreHelper<T,8,Acquire>
		{
			static inline T load(T* dest)
			{
				return atomicRead64(dest);		//x86 rules: later loads can't move before this load
			}
			static inline void store(T* dest, T value)
			{
				atomicWrite64(dest, value);
				MemoryBarrier();				//prevent loads moving before this store
			}
		};

		//64-bit release: mirror of the acquire specialization above, with the hardware
		// barrier on the load side instead of the store side.
		template<typename T>
		struct LoadStoreHelper<T,8,Release>
		{
			static inline T load(T* dest)
			{
				MemoryBarrier();			//prevent stores moving after this load
				return atomicRead64(dest);
			}
			static inline void store(T* dest, T value)
			{
				atomicWrite64(dest, value);	//x86 rules: earlier stores can't move after this store
			}
		};

		//64-bit full fence: combines the barriers of the acquire and release
		// specializations so the access can't be re-ordered in either direction.
		template<typename T>
		struct LoadStoreHelper<T,8,FullFence>
		{
			static inline T load(T* dest)
			{
				MemoryBarrier();			//prevent stores moving after this load
				return atomicRead64(dest);
			}
			static inline void store(T* dest, T value)
			{
				atomicWrite64(dest, value);
				MemoryBarrier();			//prevent loads moving before this store
			}
		};

		//64-bit no fence: just the atomic load/store, no extra barriers.
		//atomicRead64/atomicWrite64 take a non-volatile void*, so the volatile qualifier
		// must be cast away explicitly; the previous code passed the volatile pointer
		// straight through, which fails to compile when this specialization is
		// instantiated (volatile T* does not implicitly convert to void*).
		template<typename T>
		struct LoadStoreHelper<T,8,NoFence>
		{
			static inline T load(volatile T* dest)
			{
				return atomicRead64(const_cast<T*>(dest));
			}
			static inline void store(volatile T* dest, T value)
			{
				atomicWrite64(const_cast<T*>(dest), value);
			}
		};

		//
		//Helper template for atomic operations.
		//Type T is the primitive atomic type, type U is the original type specified by the user.
		//The primary template is intentionally empty: only the size specializations
		// below are usable, so an unsupported size fails to compile.
		//
		template<typename T, typename U, size_t Size, FenceType F>
		struct AtomicOpHelper
		{
		};

		//Dispatches the atomic operations for 1-byte primitives to the 8 bit intrinsics.
		template<typename T, typename U, FenceType F>
		struct AtomicOpHelper<T, U, 1, F>
		{
			static T compareAndSwap(T* p, T value, T compare)
			{
				return Internal::compareAndSwap8(p, value, compare);
			}
			static T getAndSet(T* p, T value)
			{
				return Internal::getAndSet8(p, value);
			}
			static T getAndAdd(T* p, T value)
			{
				return Internal::getAndAdd8(p, value);
			}
			static T getAndIncrement(T* p)
			{
				return Internal::getAndAdd8(p, 1);		//increment is an add of +1
			}
			static T getAndDecrement(T* p)
			{
				return Internal::getAndAdd8(p, -1);		//decrement is an add of -1
			}
		};

		//Dispatches the atomic operations for 2-byte primitives to the 16 bit intrinsics.
		template<typename T, typename U, FenceType F>
		struct AtomicOpHelper<T, U, 2, F>
		{
			static T compareAndSwap(T* p, T value, T compare)
			{
				return Internal::compareAndSwap16(p, value, compare);
			}
			static T getAndSet(T* p, T value)
			{
				return Internal::getAndSet16(p, value);
			}
			static T getAndAdd(T* p, T value)
			{
				return Internal::getAndAdd16(p, value);
			}
			static T getAndIncrement(T* p)
			{
				return Internal::getAndAdd16(p, 1);		//increment is an add of +1
			}
			static T getAndDecrement(T* p)
			{
				return Internal::getAndAdd16(p, -1);	//decrement is an add of -1
			}
		};

		//Dispatches the atomic operations for 4-byte primitives to the 32 bit intrinsics.
		template<typename T, typename U, FenceType F>
		struct AtomicOpHelper<T, U, 4, F>
		{
			static T compareAndSwap(T* p, T value, T compare)
			{
				return Internal::compareAndSwap32(p, value, compare);
			}
			static T getAndSet(T* p, T value)
			{
				return Internal::getAndSet32(p, value);
			}
			static T getAndAdd(T* p, T value)
			{
				return Internal::getAndAdd32(p, value);
			}
			static T getAndIncrement(T* p)
			{
				return Internal::getAndAdd32(p, 1);		//increment is an add of +1
			}
			static T getAndDecrement(T* p)
			{
				return Internal::getAndAdd32(p, -1);	//decrement is an add of -1
			}
		};

		//Dispatches the atomic operations for 8-byte primitives to the 64 bit intrinsics.
		template<typename T, typename U, FenceType F>
		struct AtomicOpHelper<T, U, 8, F>
		{
			static T compareAndSwap(T* p, T value, T compare)
			{
				return Internal::compareAndSwap64(p, value, compare);
			}
			static T getAndSet(T* p, T value)
			{
				return Internal::getAndSet64(p, value);
			}
			static T getAndAdd(T* p, T value)
			{
				return Internal::getAndAdd64(p, value);
			}
			static T getAndIncrement(T* p)
			{
				return Internal::getAndAdd64(p, 1);		//increment is an add of +1
			}
			static T getAndDecrement(T* p)
			{
				return Internal::getAndAdd64(p, -1);	//decrement is an add of -1
			}
		};

		//specialization for pointer types, uses the size of the pointed type for add/subtract
		// operations (matching ordinary pointer arithmetic: advancing a U* moves sizeof(U) bytes).
		//sizeof(U) is explicitly cast to the signed primitive type before any arithmetic, so
		// decrement passes a genuine negative offset instead of relying on the
		// implementation-defined unsigned->signed wraparound of the old 0-sizeof(U), and the
		// add no longer silently promotes 'value' to an unsigned type.
		template<typename T, typename U, FenceType F>
		struct AtomicOpHelper<T, U*, 4, F>
		{
			static T compareAndSwap(T* p, T value, T compare) { return Internal::compareAndSwap32(p, value, compare); }
			static T getAndSet(T* p, T value)                 { return Internal::getAndSet32(p, value); }
			static T getAndAdd(T* p, T value)                 { return Internal::getAndAdd32(p, value * static_cast<T>(sizeof(U))); }
			static T getAndIncrement(T* p)                    { return Internal::getAndAdd32(p, static_cast<T>(sizeof(U))); }
			static T getAndDecrement(T* p)                    { return Internal::getAndAdd32(p, -static_cast<T>(sizeof(U))); }
		};
	}
}

#endif
