﻿/*
 * atomics.h
 */
#ifndef __GDK_ATOMICS_H__
#define __GDK_ATOMICS_H__


#include <gdk/core/Types.h>

#if defined(_WIN32)
#	if (_MSC_VER > 1200)
#		include <intrin.h>
#	endif
#elif defined(__ANDROID__)
#	include <sys/atomics.h>
#elif defined(__IOS__)
#	include <libkern/OSAtomic.h>
#endif





namespace gdk
{



//////////////////////////////////////////////////////////////////
/// Atomically increments *target by 1 with a full barrier.
/// @param target  word to modify; must be 4-byte aligned.
/// @return the NEW (post-increment) value.
__INLINE__ int32_t atomic_increment(volatile int32_t *target)
{
	int32_t ret;

#if defined(_MSC_VER)

#	if _MSC_VER > 1200
		ret = ::_InterlockedIncrement((LONG*)target);
#	else
		ret = ::InterlockedIncrement((LONG*)target);
#	endif

#elif defined(__ANDROID__)

	// Bionic's __atomic_inc returns the OLD value, so add 1 for the new one.
	ret = __atomic_inc(target) + 1;

#elif defined(__IOS__)

	ret = OSAtomicIncrement32Barrier(target);

#else

	// GCC/Clang builtin, full barrier, returns the new value.  Replaces the
	// hand-written x86 "lock xaddl" asm whose clobber list used the invalid
	// entry "2" (clobbers must be register names) and omitted "memory".
	ret = __sync_add_and_fetch(target, 1);

#endif

	return ret;
}
/// Unsigned overload: forwards to the signed version.  The bit pattern is
/// reinterpreted, so unsigned wrap-around (0xFFFFFFFF -> 0) behaves as
/// callers expect on two's-complement targets.
__INLINE__ uint32_t atomic_increment(volatile uint32_t *target)
{
	// Named casts instead of C-style casts: greppable and intent-revealing.
	return static_cast<uint32_t>(atomic_increment(reinterpret_cast<volatile int32_t*>(target)));
}


/// Atomically decrements *target by 1 with a full barrier.
/// @param target  word to modify; must be 4-byte aligned.
/// @return the NEW (post-decrement) value.
__INLINE__ int32_t atomic_decrement(volatile int32_t *target)
{
	int32_t ret;

#if defined(_MSC_VER)

#	if _MSC_VER > 1200
		ret = ::_InterlockedDecrement((LONG*)target);
#	else
		ret = ::InterlockedDecrement((LONG*)target);
#	endif

#elif defined(__ANDROID__)

	// Bionic's __atomic_dec returns the OLD value, so subtract 1 for the new one.
	ret = __atomic_dec(target) - 1;

#elif defined(__IOS__)

	ret = OSAtomicDecrement32Barrier(target);

#else

	// GCC/Clang builtin, full barrier, returns the new value.  Replaces the
	// hand-written x86 "lock xaddl" asm whose clobber list used the invalid
	// entry "2" (clobbers must be register names) and omitted "memory".
	ret = __sync_sub_and_fetch(target, 1);

#endif

	return ret;
}
/// Unsigned overload: forwards to the signed version.  The bit pattern is
/// reinterpreted, so unsigned wrap-around (0 -> 0xFFFFFFFF) behaves as
/// callers expect on two's-complement targets.
__INLINE__ uint32_t atomic_decrement(volatile uint32_t *target)
{
	// Named casts instead of C-style casts: greppable and intent-revealing.
	return static_cast<uint32_t>(atomic_decrement(reinterpret_cast<volatile int32_t*>(target)));
}


/// Atomically stores `value` into *target with a full barrier.
/// @param target  word to modify; must be 4-byte aligned.
/// @param value   new value to store.
/// @return the PREVIOUS value of *target.
__INLINE__ int32_t atomic_swap(volatile int32_t *target, int32_t value)
{
	int32_t ret;

#if defined(_MSC_VER)

#	if _MSC_VER > 1200
		ret = ::_InterlockedExchange((LONG*)target, value);
#	else
		ret = ::InterlockedExchange((LONG*)target, value);
#	endif

#elif defined(__ANDROID__)

	// Bionic's __atomic_swap returns the previous value.
	ret = __atomic_swap(value, target);

#elif defined(__IOS__)

	// OSAtomic has no plain exchange; emulate it with a CAS retry loop.
	int32_t oldval;
	do {
		oldval = *target;
	} while (!OSAtomicCompareAndSwap32Barrier(oldval, value, target));
	ret = oldval;

#else

	// Full-barrier CAS retry loop.  Replaces the hand-written x86 "cmpxchg"
	// asm that had NO clobber list at all: it silently modified flags and
	// memory, so the compiler was free to cache *target across the asm.
	do {
		ret = *target;
	} while (__sync_val_compare_and_swap(target, ret, value) != ret);

#endif

	return ret;
}
/// Unsigned overload: forwards to the signed version with the same
/// bit pattern; returns the previous value of *target.
__INLINE__ uint32_t atomic_swap(volatile uint32_t *target, uint32_t value)
{
	// Named casts instead of C-style casts: greppable and intent-revealing.
	return static_cast<uint32_t>(atomic_swap(reinterpret_cast<volatile int32_t*>(target), static_cast<int32_t>(value)));
}


/// Atomic compare-and-swap with a full barrier: if *target == oldval,
/// stores newval.
/// @param target  word to modify; must be 4-byte aligned.
/// @param oldval  expected current value.
/// @param newval  value to store on success.
/// @return the PREVIOUS value of *target (== oldval exactly when the
///         swap took place), on every platform.
__INLINE__ int32_t atomic_compare_and_swap(volatile int32_t *target, int32_t oldval, int32_t newval)
{
	int32_t ret;

#if defined(_MSC_VER)

#	if _MSC_VER > 1200
		ret = ::_InterlockedCompareExchange((LONG*)target, newval, oldval);
#	else
		ret = ::InterlockedCompareExchange((LONG*)target, newval, oldval);
#	endif

#elif defined(__ANDROID__)

	ret = __sync_val_compare_and_swap(target, oldval, newval);

#elif defined(__IOS__)

	// OSAtomicCompareAndSwap32Barrier only reports success/failure, so the
	// conflicting value has to be recovered by a re-read.  (The previous
	// code returned `newval` on failure, which falsely signals success
	// whenever newval == oldval and never exposes the conflicting value.)
	for (;;) {
		if (OSAtomicCompareAndSwap32Barrier(oldval, newval, target)) {
			ret = oldval;             // swap happened: previous value was oldval
			break;
		}
		ret = *target;                // swap failed: report what we saw
		if (ret != oldval) break;     // value raced back to oldval -> retry CAS
	}

#else

	// GCC/Clang builtin, full barrier, returns the previous value.  Replaces
	// the hand-written x86 "cmpxchg" asm whose clobber list used the invalid
	// entry "2" (clobbers must be register names) and omitted "memory".
	ret = __sync_val_compare_and_swap(target, oldval, newval);

#endif

	return ret;
}
/// Unsigned overload: forwards to the signed version with the same bit
/// patterns; returns the previous value of *target.
__INLINE__ uint32_t atomic_compare_and_swap(volatile uint32_t *target, uint32_t oldval, uint32_t newval)
{
	// Named casts instead of C-style casts: greppable and intent-revealing.
	return static_cast<uint32_t>(atomic_compare_and_swap(reinterpret_cast<volatile int32_t*>(target), static_cast<int32_t>(oldval), static_cast<int32_t>(newval)));
}



//////////////////////////////////////////////////////////////////
/// RAII spin-lock guard over a caller-owned mutex word.
/// The word must start at 0 (unlocked); 1 means locked.  The constructor
/// busy-waits, so only use this to protect very short critical sections.
class Exclusive
{
private:
	int32_t	volatile  *_mutex;

private:
	// Non-copyable: a copied guard would release the lock twice.
	// Declared private and left undefined (pre-C++11 idiom, matching the
	// blocked assignment operator below).
	Exclusive(const Exclusive &);

	Exclusive& operator =(const Exclusive &)
	{
		GDK_ASSERT(false);

		return *this;
	}

public:
	/// Spins until CAS 0 -> 1 succeeds; the CAS returns the previous value,
	/// which is 0 exactly when this thread acquired the lock.
	__INLINE__ Exclusive(int32_t volatile *mutex)
		: _mutex(mutex)
	{
		while (atomic_compare_and_swap(_mutex, 0, 1));
	}

	/// Releases the lock via CAS 1 -> 0; asserts if the lock was not held
	/// (previous value != 1), which indicates a corrupted mutex word.
	__INLINE__ ~Exclusive(void)
	{
		if (!atomic_compare_and_swap(_mutex, 1, 0)) GDK_ASSERT(false);
	}
};


// Scoped critical-section sugar built on gdk::Exclusive; usage:
//   BEGIN_EXCLUSIVE(mutex)  ...protected code...  END_EXCLUSIVE()
// The extra brace pair bounds the guard's lifetime so the lock is released
// at END_EXCLUSIVE().  NOTE(review): DECLARE_FUTILITY_VARIABLE is defined
// elsewhere in the project -- presumably it declares an anonymous local
// without an unused-variable warning; confirm against its definition.
#define BEGIN_EXCLUSIVE(mutex)	{ DECLARE_FUTILITY_VARIABLE(const gdk::Exclusive, &mutex) {
#define END_EXCLUSIVE()		    } }



}//namespace gdk



#endif //__GDK_ATOMICS_H__
