/* interlocked.h */

#ifndef __AURA_ATOMIC_H__
#define __AURA_ATOMIC_H__

#ifdef _x86

inline static long
interlocked_inc (
        long volatile* addend)
{
    long ret;

    __asm__ __volatile__ (
        "lock; xaddl %0, (%1)"
        : "=r" (ret)
        : "r" (addend), "0" (1)
        : "memory");
    return ret + 1;
}

inline static long
interlocked_dec (
        long volatile* addend)
{
    long ret;

    __asm__ __volatile__ (
        "lock; xaddl %0, (%1)"
        : "=r" (ret)
        : "r" (addend), "0" (-1)
        : "memory");
    return ret - 1;
}

/*
 * Atomically store val into *dest.
 * Returns the previous value of *dest.
 *
 * XCHG with a memory operand asserts the bus lock implicitly; the
 * explicit "lock" prefix is redundant but harmless.  val enters in the
 * same register as ret (the "0" tie) and XCHG swaps it with *dest.
 */
inline static long
interlocked_xchg(
        long volatile* dest,
        long val)
{
    long ret;
    __asm__ __volatile__ (
        "lock; xchgl %0,(%1)"
        : "=r" (ret) : "r" (dest), "0" (val) : "memory");
    return ret;
}

/*
 * Atomically add addend to *dest.
 * Returns the OLD value of *dest (fetch-and-add semantics), unlike
 * interlocked_inc/interlocked_dec which return the new value.
 *
 * Lock'd XADD leaves the pre-add contents of *dest in ret.
 */
inline static long
interlocked_xchg_add (
        long volatile* dest,
        int addend)
{
    long ret;
    __asm__ __volatile__ (
        "lock; xaddl %0, (%1)"
        : "=r"(ret) : "r"(dest), "0"(addend) : "memory");
    return ret;
}

inline static long
interlocked_cmpxchg (
        long volatile *dest,
        long xchg,
        long compare)
{
    long ret;

    __asm__ __volatile__(
        "lock; cmpxchgl %2,(%1)"
        : "=a" (ret)
        : "r" (dest), "r" (xchg), "0" (compare)
        : "memory");

    return ret;
}

#elif defined(_arm)

/*
 * Atomically add val to *mem using a SWP-based retry loop.
 * Returns the new (post-add) value.
 *
 * Algorithm: load the old value, compute old+val, SWP the sum into
 * *mem.  SWP hands back what was in memory at the swap; if that no
 * longer matches the value we loaded, another thread got in between --
 * SWP its value back into *mem and retry.
 * NOTE(review): SWP is deprecated from ARMv6 onward and this retry
 * scheme is only sound on pre-LDREX/uniprocessor cores -- confirm the
 * target hardware.
 *
 * Fixes: the asm is now __volatile__.  Without it, an asm with output
 * operands may be deleted by GCC when its result is unused (e.g. a
 * caller ignoring interlocked_inc's return) or merged with an
 * identical asm, silently dropping the atomic update.  The temporaries
 * are also widened from int to long to match the operand width and the
 * sibling exchange_and_add (identical on 32-bit ARM, but correctly
 * typed).
 */
inline static long atomic_add (volatile long *mem, long val)
{
  long tmp1;   /* old value loaded from *mem */
  long tmp2;   /* old + val: the value we try to install */
  long tmp3;   /* what SWP actually found in *mem */
  __asm__ __volatile__ ("\n"
	   "0: ldr      %0, [%3]\n\t"
	   "   add      %1, %0, %4\n\t"
	   "   swp      %2, %1, [%3]\n\t"
	   "   cmp      %0, %2\n\t"
	   "   swpne    %1, %2, [%3]\n\t"
	   "   bne      0b"
	   : "=&r" (tmp1), "=&r" (tmp2), "=&r" (tmp3)
	   : "r" (mem), "r"(val)
	   : "cc", "memory");
  return tmp2;
}

/*
 * Atomically add val to *mem using a SWP-based retry loop.
 * Returns the OLD (pre-add) value -- fetch-and-add semantics, in
 * contrast to atomic_add which returns the new value.
 *
 * Same load / add / SWP / verify / retry scheme as atomic_add; only
 * the operand returned differs (%0, the loaded old value).
 * NOTE(review): SWP is deprecated from ARMv6 onward -- confirm the
 * target hardware.
 *
 * Fix: the asm is now __volatile__ so GCC cannot delete it when the
 * return value is unused, or merge two identical calls; either would
 * drop an atomic update.
 */
inline static long
exchange_and_add (volatile long *mem, long val)
{
  long tmp1;    /* old + val: the value we try to install */
  long tmp2;    /* what SWP actually found in *mem */
  long result;  /* old value loaded from *mem -- the return value */
  __asm__ __volatile__ ("\n"
	   "0: ldr      %0, [%3]\n\t"
	   "   add      %1, %0, %4\n\t"
	   "   swp      %2, %1, [%3]\n\t"
	   "   cmp      %0, %2\n\t"
	   "   swpne    %1, %2, [%3]\n\t"
	   "   bne      0b"
	   : "=&r" (result), "=&r" (tmp1), "=&r" (tmp2)
	   : "r" (mem), "r"(val)
	   : "cc", "memory");
  return result;
}

/*
 * Atomically compare *p with oldval and, if equal, store newval.
 * Returns the value read from *p: equal to oldval on success,
 * something else on failure.
 *
 * If the loaded value differs from oldval we bail out immediately
 * (branch to 1:).  Otherwise SWP installs newval; if the value SWP
 * displaced is not the one we loaded, another thread intervened, so
 * SWP it back and retry from the load.
 * NOTE(review): SWP is deprecated from ARMv6 onward -- confirm the
 * target hardware.
 *
 * Fix: the asm is now __volatile__ so GCC cannot delete it when the
 * return value is unused or merge identical invocations; either would
 * drop the atomic store.
 */
inline static long
compare_and_swap (volatile long *p, long oldval, long newval)
{
  long result, tmp;
  __asm__ __volatile__ ("\n"
	   "0: ldr      %0, [%2]\n\t"
	   "   cmp      %0,%4\n\t"
	   "   bne      1f\n\t"
	   "   swp      %1,%3,[%2]\n\t"
	   "   cmp      %0,%1\n\t"
	   "   swpne    %0,%1,[%2]\n\t"
	   "   bne      0b\n\t"
	   "1:"
	   : "=&r" (result), "=&r" (tmp)
	   : "r" (p), "r" (newval), "r" (oldval)
	   : "cc", "memory");
  return result;
}

/*
 * Atomically increment *addend by one.
 * Returns the new (post-increment) value, matching the x86 variant
 * (atomic_add reports the post-add result).
 */
inline static long
interlocked_inc (
        long volatile* addend)
{
    long new_value;

    new_value = atomic_add (addend, 1);
    return new_value;
}

/*
 * Atomically decrement *addend by one.
 * Returns the new (post-decrement) value, matching the x86 variant
 * (atomic_add of -1 reports the post-add result).
 */
inline static long
interlocked_dec (
        long volatile* addend)
{
    long new_value;

    new_value = atomic_add (addend, -1);
    return new_value;
}

/*
 * Atomically store val into *dest.
 * Returns the previous value of *dest (SWP hands back the displaced
 * memory contents).
 * NOTE(review): SWP is deprecated from ARMv6 onward -- confirm the
 * target hardware.
 *
 * Fix: the asm is now __volatile__, consistent with the x86 variants.
 * Without it GCC may delete the statement when the return value is
 * unused, dropping the store entirely, or CSE two identical exchanges.
 */
inline static long
interlocked_xchg(
        long volatile* dest,
        long val)
{
    long result;

    __asm__ __volatile__ ("swp   %0, %2, [%1]"
        : "=&r" (result)
        : "r" (dest), "r" (val)
	: "cc", "memory");

    return result;
}

/*
 * Atomically add addend to *dest.
 * Returns the OLD value of *dest (fetch-and-add semantics), matching
 * the x86 variant; exchange_and_add reports the pre-add value.
 */
inline static long
interlocked_xchg_add (
        long volatile* dest,
        int addend)
{
    long old_value;

    old_value = exchange_and_add (dest, addend);
    return old_value;
}

/*
 * Atomically compare *dest with compare and, if equal, store xchg.
 * Returns the original value of *dest; callers detect success by
 * checking (return == compare), matching the x86 variant.
 */
inline static long
interlocked_cmpxchg (
        long volatile *dest,
        long xchg,
        long compare)
{
    /* compare_and_swap's argument order is (ptr, expected, replacement). */
    long prior;

    prior = compare_and_swap (dest, compare, xchg);
    return prior;
}

#else
#error Implement interlocked XXX for your architecture
#endif

#endif /* __AURA_ATOMIC_H__ */
