/**
 * This module defines some basic atomic operations.
 * License: BSD
 * Authors: Lucifer (786325481@QQ.com)
 * Copyright: Copyright (C) 2008 Lucifer. All rights reserved.
 */

/**
 * The atomic module is intended to provide some basic support for lock-free
 * concurrent programming.  Some common operations are defined, each of which
 * may be performed using the specified memory barrier or a less granular
 * barrier if the hardware does not support the version requested.  This
 * model is based on a design by Alexander Terekhov as outlined in
 * <a href="http://groups.google.com/groups?threadm=3E4820EE.6F408B25%40web.de">
 * this thread</a>.  Another useful reference for memory ordering on modern
 * architectures is <a href="http://www.linuxjournal.com/article/8211">this
 * article by Paul McKenney</a>.
 *
 * Copyright: Copyright (C) 2005-2006 Sean Kelly.  All rights reserved.
 * License:   BSD style: $(LICENSE)
 * Authors:   Sean Kelly
 */
module system.threading.Atomic;

import system.Traits;

/*
version(Windows)
{
    private extern(Windows) int InterlockedIncrement(ref int);
    private extern(Windows) int InterlockedDecrement(ref int);
    private extern(Windows) int InterlockedExchange(ref int, int);
    private extern(Windows) int InterlockedExchangeAdd(ref int, int);
    private extern(C) void* InterlockedExchangePointer(ref void*, void*);
	private extern(Windows) int InterlockedCompareExchange(ref int, int, int);
    private extern(C) void* InterlockedCompareExchangePointer(ref void*, void*, void*);
}
*/

/**
 * Memory synchronization flag.  If the supplied option is not available on the
 * current platform then a stronger method will be used instead.
 *
 * See the IsHoistOperation/IsSinkOperation templates below for how these
 * values map onto barrier requirements on x86.
 */
public enum MemorySemantics
{
    Raw,                        /// not sequenced
    HoistLoadBarrier,     		/// hoist-load barrier
    HoistStoreBarrier,    		/// hoist-store barrier
    SinkLoadBarrier,      		/// sink-load barrier
    SinkStoreBarrier,     		/// sink-store barrier
    Acquire,                  	/// hoist-load + hoist-store barrier
    Release,                  	/// sink-load + sink-store barrier
    FullFence                	/// fully sequenced (acquire + release)
}

//Internal Type Checking

private template IsValidAtomicType(T)
{
    // An atomic operand must occupy exactly 1, 2, 4, or 8 bytes; in D these
    // are the guaranteed sizes of byte, short, int, and long respectively.
    const bool IsValidAtomicType = T.sizeof == 1 ||
                                   T.sizeof == 2 ||
                                   T.sizeof == 4 ||
                                   T.sizeof == 8;
}

private template IsValidNumericType(T)
{
    // The arithmetic operations (increment/decrement/add) only make sense
    // for integral and pointer types.
    const bool IsValidNumericType = IsPointerType!(T) || IsIntegerType!(T);
}

private template IsHoistOperation(MemorySemantics ms)
{
    // Acquire and FullFence imply hoist semantics, alongside the two
    // explicit hoist barriers.
    const bool IsHoistOperation = ms == MemorySemantics.FullFence ||
                                  ms == MemorySemantics.Acquire ||
                                  ms == MemorySemantics.HoistLoadBarrier ||
                                  ms == MemorySemantics.HoistStoreBarrier;
}

private template IsSinkOperation(MemorySemantics ms)
{
    // Release and FullFence imply sink semantics, alongside the two
    // explicit sink barriers.
    const bool IsSinkOperation = ms == MemorySemantics.FullFence ||
                                 ms == MemorySemantics.Release ||
                                 ms == MemorySemantics.SinkLoadBarrier ||
                                 ms == MemorySemantics.SinkStoreBarrier;
}

// x86 Value Requirements

// NOTE: Strictly speaking, the x86 supports atomic operations on
//       unaligned values.  However, this is far slower than the
//       common case, so such behavior should be prohibited.
private template AtomicValueIsProperlyAligned(T)
{
    /// Returns true when 'address' is an exact multiple of T's size,
    /// i.e. naturally aligned for an atomic access of that width.
    bool AtomicValueIsProperlyAligned(size_t address)
    {
        size_t misalignment = address % T.sizeof;
        return misalignment == 0;
    }
}

//x86 Synchronization Requirements

// NOTE: While x86 loads have acquire semantics for stores, it appears
//       that independent loads may be reordered by some processors
//       (notably the AMD64).  This implies that the hoist-load barrier
//       op requires an ordering instruction, which also extends this
//       requirement to acquire ops (though hoist-store should not need
//       one if support is added for this later).  However, since no
//       modern architectures will reorder dependent loads to occur
//       before the load they depend on (except the Alpha), raw loads
//       are actually a possible means of ordering specific sequences
//       of loads in some instances.  The original atomic<>
//       implementation provides a 'ddhlb' ordering specifier for
//       data-dependent loads to handle this situation, but as there
//       are no plans to support the Alpha there is no reason to add
//       that option here.
//
//       For reference, the old behavior (acquire semantics for loads)
//       required a memory barrier if: ms == MemorySemantics.FullFence || IsSinkOperation!(ms)
private template NeedLoadBarrier(MemorySemantics ms)
{
    // Per the note above, every semantics level except Raw requires an
    // ordering instruction on the load path.
    const bool NeedLoadBarrier = !( ms == MemorySemantics.Raw );
}

// NOTE: x86 stores implicitly have release semantics so a membar is only
//       necessary on acquires.
private template NeedStoreBarrier(MemorySemantics ms)
{
    // IsHoistOperation already covers MemorySemantics.FullFence, so the
    // hoist test alone is sufficient here.
    const bool NeedStoreBarrier = IsHoistOperation!(ms);
}

version(D_InlineAsm_X86)
{
    version(X86)
    {
        pragma( msg, "system.threading.Atomic: using IA-32 inline asm" );

        version = Has32BitOps;
        version = Has64BitCAS;
    }
    version(X86_64)
    {
        pragma( msg, "system.threading.Atomic: using AMD64 inline asm" );

        version = Has64BitOps;
    }

    /**
	 * Supported MemorySemantics values:
	 * MemorySemantics.Raw
	 * MemorySemantics.HoistLoadBarrier
	 * MemorySemantics.Acquire
	 * MemorySemantics.FullFence
	 */
	template atomicLoad( T, MemorySemantics ms = MemorySemantics.FullFence )
	{
		static assert( ms == MemorySemantics.Raw ||
					ms == MemorySemantics.HoistLoadBarrier ||
					ms == MemorySemantics.Acquire ||
					ms == MemorySemantics.FullFence,
					"ms must be one of: MemorySemantics.Raw, MemorySemantics.HoistLoadBarrier, MemorySemantics.Acquire, MemorySemantics.FullFence.");

		/**
		 * Refreshes the contents of 'value' from main memory.  This operation is
		 * both lock-free and atomic.
		 *
		 * Params:
		 *  value = The value to load.  This value must be properly aligned.
		 *
		 * Returns:
		 *  The loaded value.
		 */
		T atomicLoad(ref T value)
		in
		{
			assert( AtomicValueIsProperlyAligned!(T)(cast(size_t)(&value)) );
		}
		body
		{
			// NOTE: The fenced paths implement the load as a locked CMPXCHG
			// whose compare and exchange operands are the same arbitrary
			// constant (42): if the target happens to hold 42 it is rewritten
			// with 42 (no visible change), otherwise its contents are copied
			// into the accumulator.  Either way the accumulator ends up
			// holding the loaded value, and the LOCK prefix supplies the
			// ordering guarantee.
			//
			// NOTE(review): the unfenced (Raw) paths take a global lock via
			// 'synchronized', which is heavier than a plain read — confirm
			// this is intentional.
			static if( T.sizeof == byte.sizeof )
			{
				//1 Byte Load
				static if( NeedLoadBarrier!(ms) )
				{
					asm
					{
						mov DL, 42;
						mov AL, 42;
						mov ECX, value;
						lock;
						cmpxchg [ECX], DL;
					}
				}
				else
				{
					synchronized
					{
						return value;
					}
				}
			}//static if( T.sizeof == byte.sizeof )
			else static if( T.sizeof == short.sizeof )
			{
				//2 Byte Load
				static if( NeedLoadBarrier!(ms) )
				{
					asm
					{
						mov DX, 42;
						mov AX, 42;
						mov ECX, value;
						lock;
						cmpxchg [ECX], DX;
					}
				}
				else
				{
					synchronized
					{
						return value;
					}
				}
			}//else static if( T.sizeof == short.sizeof )
			else static if( T.sizeof == int.sizeof )
			{
				//4 Byte Load
				static if( NeedLoadBarrier!(ms) )
				{
					asm
					{
						mov EDX, 42;
						mov EAX, 42;
						mov ECX, value;
						lock;
						cmpxchg [ECX], EDX;
					}
				}
				else
				{
					synchronized
					{
						return value;
					}
				}
			}//else static if( T.sizeof == int.sizeof )
			else static if( T.sizeof == long.sizeof )
			{
				//8 Byte Load
				version(Has64BitOps)
				{
					//8 Byte Load on 64-Bit Processor
					static if( NeedLoadBarrier!(ms) )
					{
						// FIX: the original emitted "lock; mov RAX, [RAX]",
						// but the LOCK prefix is only legal on read-modify-
						// write instructions and raises #UD when applied to
						// MOV.  Use the same locked-CMPXCHG idiom as the
						// narrower loads instead.
						asm
						{
							mov RDX, 42;
							mov RAX, 42;
							mov RCX, value;
							lock;
							cmpxchg [RCX], RDX;
						}
					}
					else
					{
						synchronized
						{
							return value;
						}
					}
				}//version(Has64BitOps)
				else
				{
					//8 Byte Load on 32-Bit Processor
					static assert(false, "This operation is only available on 64-bit platforms.");
				}
			}//else static if( T.sizeof == long.sizeof )
			else
			{
				//Not a 1, 2, 4, or 8 Byte Type
				static assert(false, "Invalid template type specified.");
			}
		}
	}

	/**
	 * Supported MemorySemantics values:
	 *  MemorySemantics.Raw
	 *  MemorySemantics.SinkStoreBarrier
	 *  MemorySemantics.Acquire
	 *  MemorySemantics.Release
	 *  MemorySemantics.FullFence
	 */
	template atomicStore(T, MemorySemantics ms = MemorySemantics.FullFence )
	{
		static assert( ms == MemorySemantics.Raw ||
					ms == MemorySemantics.SinkStoreBarrier ||
					ms == MemorySemantics.Acquire ||
					ms == MemorySemantics.Release ||
					ms == MemorySemantics.FullFence,
					"ms must be one of: MemorySemantics.Raw, MemorySemantics.SinkStoreBarrier, MemorySemantics.Acquire, MemorySemantics.Release, MemorySemantics.FullFence." );
		/**
		 * Stores 'newValue' to the memory referenced by 'value'.  This operation
		 * is both lock-free and atomic.
		 *
		 * Params:
		 *  value     = The destination variable.
		 *  newValue  = The value to store.
		 */
		void atomicStore(ref T value, T newValue)
		in
		{
			assert( AtomicValueIsProperlyAligned!(T)(cast(size_t)(&value)) );
		}
		body
		{
			// NOTE: In the fenced paths, XCHG with a memory operand
			// implicitly asserts LOCK, which provides the required ordering.
			// The unfenced paths use a plain MOV; an aligned store of these
			// widths is atomic on x86 without any prefix.
			static if( T.sizeof == byte.sizeof )
			{
				//1 Byte Store
				static if( NeedStoreBarrier!(ms) )
				{
					asm
					{
						mov EAX, value;
						mov DL, newValue;
						lock;
						xchg [EAX], DL;
					}
				}
				else
				{
					// FIX: this branch referenced the undeclared names
					// 'val'/'newval' (a compile error); use the actual
					// parameter names.
					asm
					{
						mov EAX, value;
						mov DL, newValue;
						mov [EAX], DL;
					}
				}
			}
			else static if( T.sizeof == short.sizeof )
			{
				//2 Byte Store
				static if( NeedStoreBarrier!(ms) )
				{
					asm
					{
						mov EAX, value;
						mov DX, newValue;
						lock;
						xchg [EAX], DX;
					}
				}
				else
				{
					// FIX: this branch was missing its asm { } wrapper (a
					// compile error) and used XCHG — which is implicitly
					// locked — for a raw store; use a plain MOV like the
					// 1-byte path.
					asm
					{
						mov EAX, value;
						mov DX, newValue;
						mov [EAX], DX;
					}
				}
			}
			else static if( T.sizeof == int.sizeof )
			{
				//4 Byte Store
				static if( NeedStoreBarrier!(ms) )
				{
					asm
					{
						mov EAX, value;
						mov EDX, newValue;
						lock;
						xchg [EAX], EDX;
					}
				}
				else
				{
					// FIX: XCHG implicitly locks the bus, defeating the
					// purpose of the unfenced path; a raw store is a MOV.
					asm
					{
						mov EAX, value;
						mov EDX, newValue;
						mov [EAX], EDX;
					}
				}
			}
			else static if( T.sizeof == long.sizeof )
			{
				//8 Byte Store
				version( Has64BitOps )
				{
					//8 Byte Store on 64-Bit Processor
					static if( NeedStoreBarrier!(ms) )
					{
						// FIX: use RDX as the scratch register; the original
						// clobbered RBX, which is callee-saved in both the
						// System V and Win64 ABIs.
						asm
						{
							mov RAX, value;
							mov RDX, newValue;
							lock;
							xchg [RAX], RDX;
						}
					}
					else
					{
						// FIX: plain MOV for the unfenced store (XCHG is
						// implicitly locked).
						asm
						{
							mov RAX, value;
							mov RDX, newValue;
							mov [RAX], RDX;
						}
					}
				}
				else
				{
					//8 Byte Store on 32-Bit Processor
					static assert(false, "This operation is only available on 64-bit platforms.");
				}
			}
			else
			{
				//Not a 1, 2, 4, or 8 Byte Type
				static assert(false, "Invalid template type specified.");
			}
		}
	}

	/**
	 * Supported MemorySemantics values:
	 *  MemorySemantics.Raw
	 *  MemorySemantics.SinkStoreBarrier
	 *  MemorySemantics.Acquire
	 *  MemorySemantics.Release
	 *  MemorySemantics.FullFence
	 */
	template atomicCAS(T, MemorySemantics ms = MemorySemantics.FullFence)
	{
		static assert( ms == MemorySemantics.Raw ||
					ms == MemorySemantics.SinkStoreBarrier ||
					ms == MemorySemantics.Acquire ||
					ms == MemorySemantics.Release ||
					ms == MemorySemantics.FullFence,
					"ms must be one of: MemorySemantics.Raw, MemorySemantics.SinkStoreBarrier, MemorySemantics.Acquire, MemorySemantics.Release, MemorySemantics.FullFence." );
		/**
		 * Stores 'newValue' to the memory referenced by 'value' if value is equal
		 * to 'equalTo'.  This operation is both lock-free and atomic.
		 *
		 * Params:
		 *  value     = The destination variable.
		 *  newValue  = The value to store.
		 *  equalTo = The comparison value.
		 *
		 * Returns:
		 *  true if the store occurred, false if not.
		 */
		bool atomicCAS(ref T value, T newValue, T equalTo)
		in
		{
			// NOTE: 32 bit x86 systems support 8 byte CAS, which only requires
			//       4 byte alignment, so use size_t as the align type here.
			static if( T.sizeof > size_t.sizeof )
				assert( AtomicValueIsProperlyAligned!(size_t)( cast(size_t) (&value) ) );
			else
				assert( AtomicValueIsProperlyAligned!(T)( cast(size_t) (&value) ) );
		}
		body
		{
			// NOTE: The CMPXCHG family sets ZF when the exchange happened;
			// SETZ materializes that flag in AL, which carries the bool
			// return value.
			static if( T.sizeof == byte.sizeof )
			{
				//1 Byte CAS
				asm
				{
					mov DL, newValue;
					mov AL, equalTo;
					mov ECX, value;
					lock;
					cmpxchg [ECX], DL;
					setz AL;
				}
			}
			else static if( T.sizeof == short.sizeof )
			{
				//2 Byte CAS
				asm
				{
					mov DX, newValue;
					mov AX, equalTo;
					mov ECX, value;
					lock;
					cmpxchg [ECX], DX;
					setz AL;
				}
			}
			else static if( T.sizeof == int.sizeof )
			{
				//4 Byte CAS
				asm
				{
					mov EDX, newValue;
					mov EAX, equalTo;
					mov ECX, value;
					lock;
					cmpxchg [ECX], EDX;
					setz AL;
				}
			}
			else static if( T.sizeof == long.sizeof )
			{
				//8 Byte CAS
				version(Has64BitOps)
				{
					//8 Byte CAS on 64-bit Processor
					asm
					{
						mov RDX, newValue;
						mov RAX, equalTo;
						mov RCX, value;
						lock;
						cmpxchg [RCX], RDX;
						setz AL;
					}
				}
				else version(Has64BitCAS)
				{
					//8 Byte CAS on 32-Bit Processor: CMPXCH8B compares
					//against EDX:EAX and exchanges with ECX:EBX, so both
					//operands are loaded piecewise.  EBX/EDI are callee-saved
					//and therefore preserved around the sequence.
					asm
					{
						push EDI;
						push EBX;
						lea EDI, newValue;
						mov EBX, [EDI];
						mov ECX, 4[EDI];
						lea EDI, equalTo;
						mov EAX, [EDI];
						mov EDX, 4[EDI];
						mov EDI, value;
						lock;
						cmpxch8b [EDI];
						setz AL;
						pop EBX;
						pop EDI;
					}
				}
				else
				{
					// FIX: previously this case fell through silently and
					// returned garbage when neither version flag was set.
					static assert(false, "8 byte CAS is not supported on this platform.");
				}
			}
			else
			{
				//Not a 1, 2, 4, or 8 Byte Type
				static assert(false, "Invalid template type specified.");
			}
		}
	}

	/**
	 * Supported MemorySemantics values:
	 *  MemorySemantics.Raw
	 *  MemorySemantics.SinkStoreBarrier
	 *  MemorySemantics.Acquire
	 *  MemorySemantics.Release
	 *  MemorySemantics.FullFence
	 */
	template atomicIncrement(T, MemorySemantics ms = MemorySemantics.FullFence)
	{
		// NOTE: This operation is only valid for integer or pointer types
		static assert( IsValidNumericType!(T) );

		static assert( ms == MemorySemantics.Raw ||
					ms == MemorySemantics.SinkStoreBarrier ||
					ms == MemorySemantics.Acquire ||
					ms == MemorySemantics.Release ||
					ms == MemorySemantics.FullFence,
					"ms must be one of: MemorySemantics.Raw, MemorySemantics.SinkStoreBarrier, MemorySemantics.Acquire, MemorySemantics.Release, MemorySemantics.FullFence." );

		/**
		 * This operation is only legal for built-in value and pointer types,
		 * and is equivalent to an atomic "value = value + 1" operation.  This
		 * function exists to facilitate use of the optimized increment
		 * instructions provided by some architecures.  If no such instruction
		 * exists on the target platform then the behavior will perform the
		 * operation using more traditional means.  This operation is both
		 * lock-free and atomic.
		 *
		 * Params:
		 *  value = The value to increment.
		 *
		 * Returns:
		 *  The result of an atomicLoad of 'value' immediately following the
		 *  increment operation.  This value is not required to be equal to the
		 *  newly stored value.  Thus, competing writes are allowed to occur
		 *  between the increment and successive load operation.
		 */
		T atomicIncrement(ref T value)
		in
		{
			// FIX: originally called the undeclared name
			// 'AtomicTypeIsProperlyAligned'.
			assert( AtomicValueIsProperlyAligned!(T)(cast(size_t)(&value)) );
		}
		body
		{
			static if( T.sizeof == byte.sizeof )
			{
				//1 Byte Increment
				// FIX: "inc [EAX]" had no operand size; make the byte width
				// explicit like the other branches.
				asm
				{
					mov EAX, value;
					lock;
					inc byte ptr [EAX];
					mov AL, [EAX];
				}
			}
			else static if( T.sizeof == short.sizeof )
			{
				//2 Byte Increment
				asm
				{
					mov EAX, value;
					lock;
					inc short ptr [EAX];
					mov AX, [EAX];
				}
			}
			else static if( T.sizeof == int.sizeof )
			{
				//4 Byte Increment
				asm
				{
					mov EAX, value;
					lock;
					inc int ptr [EAX];
					mov EAX, [EAX];
				}
			}
			else static if( T.sizeof == long.sizeof )
			{
				//8 Byte Increment
				version(Has64BitOps)
				{
					//8 Byte Increment on 64-Bit Processor
					asm
					{
						mov RAX, value;
						lock;
						inc qword ptr [RAX];
						mov RAX, [RAX];
					}
				}
				else
				{
					//8 Byte Increment on 32-Bit Processor
					static assert(false, "This operation is only available on 64-bit platforms.");
				}
			}
			else
			{
				//Not a 1, 2, 4, or 8 Byte Type
				static assert(false, "Invalid template type specified.");
			}
		}
	}

	/**
	 * Supported MemorySemantics values:
	 *  MemorySemantics.Raw
	 *  MemorySemantics.SinkStoreBarrier
	 *  MemorySemantics.Acquire
	 *  MemorySemantics.Release
	 *  MemorySemantics.FullFence
	 */
	template atomicDecrement(T, MemorySemantics ms = MemorySemantics.FullFence)
	{
		//NOTE: This operation is only valid for integer or pointer types
		static assert( IsValidNumericType!(T) );

		static assert( ms == MemorySemantics.Raw ||
					ms == MemorySemantics.SinkStoreBarrier ||
					ms == MemorySemantics.Acquire ||
					ms == MemorySemantics.Release ||
					ms == MemorySemantics.FullFence,
					"ms must be one of: MemorySemantics.Raw, MemorySemantics.SinkStoreBarrier, MemorySemantics.Acquire, MemorySemantics.Release, MemorySemantics.FullFence." );

		/**
		 * This operation is only legal for built-in value and pointer types,
		 * and is equivalent to an atomic "value = value - 1" operation.  This
		 * function exists to facilitate use of the optimized decrement
		 * instructions provided by some architecures.  If no such instruction
		 * exists on the target platform then the behavior will perform the
		 * operation using more traditional means.  This operation is both
		 * lock-free and atomic.
		 *
		 * Params:
		 *  value = The value to decrement.
		 *
		 * Returns:
		 *  The result of an atomicLoad of 'value' immediately following the
		 *  decrement operation.  This value is not required to be equal to the
		 *  newly stored value.  Thus, competing writes are allowed to occur
		 *  between the decrement and successive load operation.
		 */
		T atomicDecrement(ref T value)
		in
		{
			assert( AtomicValueIsProperlyAligned!(T)(cast(size_t)(&value)) );
		}
		body
		{
			static if( T.sizeof == byte.sizeof )
			{
				//1 Byte Decrement
				// FIX: "dec [EAX]" had no operand size; make the byte width
				// explicit like the other branches.
				asm
				{
					mov EAX, value;
					lock;
					dec byte ptr [EAX];
					mov AL, [EAX];
				}
			}
			else static if( T.sizeof == short.sizeof )
			{
				//2 Byte Decrement
				asm
				{
					mov EAX, value;
					lock;
					dec short ptr [EAX];
					mov AX, [EAX];
				}
			}
			else static if( T.sizeof == int.sizeof )
			{
				//4 Byte Decrement
				asm
				{
					mov EAX, value;
					lock;
					dec int ptr [EAX];
					mov EAX, [EAX];
				}
			}
			else static if( T.sizeof == long.sizeof )
			{
				//8 Byte Decrement
				version(Has64BitOps)
				{
					// 8 Byte Decrement on 64-Bit Processor
					asm
					{
						mov RAX, value;
						lock;
						dec qword ptr [RAX];
						mov RAX, [RAX];
					}
				}
				else
				{
					//8 Byte Decrement on 32-Bit Processor
					static assert(false, "This operation is only available on 64-bit platforms.");
				}
			}
			else
			{
				//Not a 1, 2, 4, or 8 Byte Type
				static assert(false, "Invalid template type specified.");
			}
		}
	}

	/**
	 * Supported MemorySemantics values:
	 *  MemorySemantics.Raw
	 *  MemorySemantics.SinkStoreBarrier
	 *  MemorySemantics.Acquire
	 *  MemorySemantics.Release
	 *  MemorySemantics.FullFence
	 */
	template atomicAdd(T, MemorySemantics ms = MemorySemantics.FullFence)
	{
		// NOTE: This operation is only valid for integer or pointer types
		static assert( IsValidNumericType!(T) );

		static assert( ms == MemorySemantics.Raw ||
					ms == MemorySemantics.SinkStoreBarrier ||
					ms == MemorySemantics.Acquire ||
					ms == MemorySemantics.Release ||
					ms == MemorySemantics.FullFence,
					"ms must be one of: MemorySemantics.Raw, MemorySemantics.SinkStoreBarrier, MemorySemantics.Acquire, MemorySemantics.Release, MemorySemantics.FullFence." );

		/**
		 * Adds two integers and replaces the first integer with the sum, as an
		 * atomic operation (value = value + newValue).
		 *
		 * Params:
		 *  value     = The destination variable.
		 *  newValue  = The amount to add to 'value'.
		 */
		void atomicAdd(ref T value, T newValue)
		in
		{
			assert( AtomicValueIsProperlyAligned!(T)(cast(size_t)(&value)) );
		}
		body
		{
			// NOTE: XADD leaves the prior contents in the scratch register,
			// but this function is void so that value is simply discarded.
			static if( T.sizeof == byte.sizeof )
			{
				//1 Byte Add
				asm
				{
					mov EAX, value;
					mov DL, newValue;
					lock;
					xadd [EAX], DL;
				}
			}
			else static if( T.sizeof == short.sizeof )
			{
				//2 Byte Add
				asm
				{
					mov EAX, value;
					mov DX, newValue;
					lock;
					xadd [EAX], DX;
				}
			}
			else static if( T.sizeof == int.sizeof )
			{
				//4 Byte Add
				asm
				{
					mov EAX, value;
					mov EDX, newValue;
					lock;
					xadd [EAX], EDX;
				}
			}
			else static if( T.sizeof == long.sizeof )
			{
				//8 Byte Add
				version(Has64BitOps)
				{
					//8 Byte Add on 64-Bit Processor
					asm
					{
						mov RAX, value;
						mov RDX, newValue;
						lock;
						xadd [RAX], RDX;
					}
				}
				else
				{
					//8 Byte Add on 32-Bit Processor
					static assert(false, "This operation is only available on 64-bit platforms.");
				}
			}
			else
			{
				//Not a 1, 2, 4, or 8 Byte Type
				static assert(false, "Invalid template type specified.");
			}
		}
	}
}
else version(LDC)
{
	/*******************************
	 * LDC Atomics Implementation
	 *******************************/
    import ldc.intrinsics;

	/*******************************
	 * Atomic Load
	 *******************************/

	// FIX: this branch previously re-declared 'public enum MemorySemantics',
	// which collides with the identical module-level declaration near the
	// top of this file and would not compile under version(LDC).  The LDC
	// code path now simply reuses the single module-scope enum.

    template atomicLoad(T, MemorySemantics ms = MemorySemantics.FullFence)
    {
        /**
         * Refreshes the contents of 'val' from main memory.  The load is
         * implemented as an atomic add of zero, preceded by the memory
         * barrier requested via 'ms'.
         *
         * Params:
         *  val = The value to load.
         *
         * Returns:
         *  The loaded value.
         */
        T atomicLoad(ref T val)
        {
            // FIX: the hoist-store term previously referenced the
            // non-existent enum member "Acquir" (a compile error); it now
            // reads MemorySemantics.Acquire.
            llvm_memory_barrier(
								ms == MemorySemantics.HoistLoadBarrier || ms == MemorySemantics.Acquire || ms == MemorySemantics.FullFence,
								ms == MemorySemantics.HoistStoreBarrier || ms == MemorySemantics.Acquire || ms == MemorySemantics.FullFence,
								ms == MemorySemantics.SinkLoadBarrier || ms == MemorySemantics.Release || ms == MemorySemantics.FullFence,
								ms == MemorySemantics.SinkStoreBarrier || ms == MemorySemantics.Release || ms == MemorySemantics.FullFence,
								false);
            static if (IsPointerType!(T))
            {
                // Pointers are loaded through a size_t-wide add-of-zero.
                return cast(T)llvm_atomic_load_add!(size_t)(cast(size_t*)&val, 0);
            }
            else static if (is(T == bool))
            {
                // bool is accessed through its ubyte representation.
                return llvm_atomic_load_add!(ubyte)(cast(ubyte*)&val, cast(ubyte)0) ? 1 : 0;
            }
            else
            {
                return llvm_atomic_load_add!(T)(&val, cast(T)0);
            }
        }
    }


    /***************
     * Atomic Store
	 ***************/


    template atomicStore(T, MemorySemantics ms = MemorySemantics.FullFence)
    {
        /**
         * Stores 'newval' to the memory referenced by 'val' with an atomic
         * swap (the previous contents are discarded), preceded by the memory
         * barrier requested via 'ms'.
         *
         * Params:
         *  val    = The destination variable.
         *  newval = The value to store.
         */
        void atomicStore( ref T val, T newval )
        {
            // FIX: the hoist-store term previously referenced the
            // non-existent enum member "Acquir" (a compile error); it now
            // reads MemorySemantics.Acquire.
            llvm_memory_barrier(
								ms == MemorySemantics.HoistLoadBarrier || ms == MemorySemantics.Acquire || ms == MemorySemantics.FullFence,
								ms == MemorySemantics.HoistStoreBarrier || ms == MemorySemantics.Acquire || ms == MemorySemantics.FullFence,
								ms == MemorySemantics.SinkLoadBarrier || ms == MemorySemantics.Release || ms == MemorySemantics.FullFence,
								ms == MemorySemantics.SinkStoreBarrier || ms == MemorySemantics.Release || ms == MemorySemantics.FullFence,
								false);
            static if (IsPointerType!(T))
            {
                llvm_atomic_swap!(size_t)(cast(size_t*)&val, cast(size_t)newval);
            }
            else static if (is(T == bool))
            {
                // bool is accessed through its ubyte representation.
                llvm_atomic_swap!(ubyte)(cast(ubyte*)&val, newval?1:0);
            }
            else
            {
                llvm_atomic_swap!(T)(&val, newval);
            }
        }
    }


    /********************
     * Atomic CAS
     ********************/


    template atomicCAS(T, MemorySemantics ms = MemorySemantics.FullFence)
    {
        /**
         * Stores 'newval' to the memory referenced by 'val' if val is equal
         * to 'equalTo'.  This operation is both lock-free and atomic.
         *
         * Params:
         *  val     = The destination variable.
         *  newval  = The value to store.
         *  equalTo = The comparison value.
         *
         * Returns:
         *  true if the store occurred, false if not.
         */
        bool atomicCAS( ref T val, T newval, T equalTo )
        {
            // FIX: this function was previously named 'atomicStoreIf', which
            // broke the eponymous-template call atomicCAS!(T)(...) and
            // disagreed with the x86 implementation; it is now 'atomicCAS'.
            // FIX: the hoist-store term previously referenced the
            // non-existent enum member "Acquir".
            llvm_memory_barrier(
								ms == MemorySemantics.HoistLoadBarrier || ms == MemorySemantics.Acquire || ms == MemorySemantics.FullFence,
								ms == MemorySemantics.HoistStoreBarrier || ms == MemorySemantics.Acquire || ms == MemorySemantics.FullFence,
								ms == MemorySemantics.SinkLoadBarrier || ms == MemorySemantics.Release || ms == MemorySemantics.FullFence,
								ms == MemorySemantics.SinkStoreBarrier || ms == MemorySemantics.Release || ms == MemorySemantics.FullFence,
								false);
            T oldval = void;
            static if (IsPointerType!(T))
            {
                oldval = cast(T)llvm_atomic_cmp_swap!(size_t)(cast(size_t*)&val, cast(size_t)equalTo, cast(size_t)newval);
            }
            else static if (is(T == bool))
            {
                // FIX: the intrinsic returns the prior ubyte contents; the
                // original mapped it with "?0:1", inverting the old value
                // and therefore the comparison below.
                oldval = llvm_atomic_cmp_swap!(ubyte)(cast(ubyte*)&val, equalTo?1:0, newval?1:0) != 0;
            }
            else
            {
                oldval = llvm_atomic_cmp_swap!(T)(&val, equalTo, newval);
            }
            // The swap succeeded iff the prior contents matched 'equalTo'.
            return oldval == equalTo;
        }
    }
    
    
    /*****************************
     * Atomic Increment
     *****************************/


    template atomicIncrement(T, MemorySemantics ms = MemorySemantics.FullFence)
    {
        // NOTE: This operation is only valid for integer or pointer types
        static assert( IsValidNumericType!(T) );

        /**
         * Atomically adds one to 'val' via the LLVM load-add intrinsic and
         * then returns a fresh read of 'val'.  The returned value is not
         * guaranteed to equal the stored result, since competing writes may
         * occur between the increment and the subsequent read.
         *
         * Params:
         *  val = The value to increment.
         *
         * Returns:
         *  A read of 'val' performed after the increment.
         */
        T atomicIncrement( ref T val )
        {
            static if (IsPointerType!(T))
                llvm_atomic_load_add!(size_t)(cast(size_t*)&val, 1);
            else
                llvm_atomic_load_add!(T)(&val, cast(T)1);
            return val;
        }
    }
    
    
    /***************************
     * Atomic Decrement
     ***************************/


    template atomicDecrement(T, MemorySemantics ms = MemorySemantics.FullFence)
    {
        //
        // NOTE: This operation is only valid for integer or pointer types
        //
        static assert( IsValidNumericType!(T) );

        /**
         * Atomically subtracts one from 'val' and returns a fresh read of
         * 'val'.  The returned value is not guaranteed to equal the stored
         * result, since competing writes may occur between the decrement and
         * the subsequent read.
         *
         * FIX: the template signature previously read
         * "( msync ms = msync.seq, T )" — 'msync' does not exist in this
         * module (the enum is MemorySemantics) and the parameter order
         * disagreed with every other template here.
         *
         * Params:
         *  val = The value to decrement.
         *
         * Returns:
         *  A read of 'val' performed after the decrement.
         */
        T atomicDecrement( ref T val )
        {
            static if (IsPointerType!(T))
            {
                llvm_atomic_load_sub!(size_t)(cast(size_t*)&val, 1);
            }
            else
            {
                llvm_atomic_load_sub!(T)(&val, cast(T)1);
            }
            return val;
        }
    }
}