#ifndef _ATOMIC_X86_64_H_
#define _ATOMIC_X86_64_H_

#include <stdint.h>
#include <stdbool.h>

#ifdef __APPLE__ 
#include <libkern/OSAtomic.h>
#endif

#define LOCK_PREFIX "lock ; "

/*
 * Make sure gcc doesn't try to be clever and move things around
 * on us. We need to use _exactly_ the address the user gave us,
 * not some alias that contains the same information.
 */
typedef struct {
    volatile int32_t counter;
} atomic_t;

/*
 * Atomically adds `increment` to ptr->counter with a full memory barrier
 * and returns the NEW value of the counter.
 *
 * An unsupported platform is a compile-time error instead of silently
 * falling off the end of a non-void function (undefined behavior).
 */
static __inline__ int32_t Barrier_AtomicIncrement32(volatile atomic_t *ptr,
                                                    int32_t increment) {
#if defined(__APPLE__)
  /* OSAtomicAdd32Barrier takes a volatile int32_t*, so no cast is needed
   * (the old const_cast was C++-only and stripped volatile for no reason). */
  return OSAtomicAdd32Barrier(increment, &(ptr->counter));
#elif defined(__linux__)
  int32_t temp = increment;
  /* xaddl exchanges and adds: afterwards `temp` holds the OLD value. */
  __asm__ __volatile__("lock; xaddl %0,%1"
      : "+r" (temp), "+m" (ptr->counter)
      : : "memory");
  return temp + increment;
#else
#error "Barrier_AtomicIncrement32: unsupported platform"
#endif
}

// Historical note (from the Linux asm headers this was derived from):
// "the guaranteed useful range of an atomic_t is only 24 bits".
// On x86/x86-64 the full 32 bits are usable.

// Static initializer, e.g.  atomic_t a = ATOMIC_INIT(0);
#define ATOMIC_INIT(i)	{ (i) }

/**
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 * Plain volatile load; implies no memory barrier.
 */
#define atomic_read(v)		((v)->counter)

/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 * Plain volatile store; implies no memory barrier.
 */
#define atomic_set(v,i)		(((v)->counter) = (i))

/**
 * atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v (full memory barrier).
 */
static __inline__ void atomic_add(int32_t i, atomic_t *v) {
  Barrier_AtomicIncrement32(v, i);
}

/**
 * atomic_sub - subtract the atomic variable
 * @i: integer value to subtract; must not be INT32_MIN (negating it
 *     would overflow)
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v (full memory barrier).
 */
static __inline__ void atomic_sub(int32_t i, atomic_t *v) {
  Barrier_AtomicIncrement32(v, -i);
}

/**
 * atomic_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract; must not be INT32_MIN (negating it
 *     would overflow)
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v (full memory barrier) and returns
 * true if the result is zero, or false for all other cases.
 */
static __inline__ bool atomic_sub_and_test(int32_t i, atomic_t *v) {
  return Barrier_AtomicIncrement32(v, -i) == 0;
}

/**
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1 (full memory barrier).
 */
static __inline__ void atomic_inc(atomic_t *v) {
  Barrier_AtomicIncrement32(v, 1);
}

/**
 * atomic_dec - decrement atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 (full memory barrier).
 */
static __inline__ void atomic_dec(atomic_t *v) {
  Barrier_AtomicIncrement32(v, -1);
}

/**
 * atomic_dec_and_test - decrement and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 (full memory barrier) and
 * returns true if the result is 0, or false for all other
 * cases.
 */
static __inline__ bool atomic_dec_and_test(atomic_t *v) {
  return Barrier_AtomicIncrement32(v, -1) == 0;
}

/**
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1 (full memory barrier)
 * and returns true if the result is zero, or false for all
 * other cases.
 */
static __inline__ bool atomic_inc_and_test(atomic_t *v) {
  return Barrier_AtomicIncrement32(v, 1) == 0;
}

#ifdef __x86_64__
/* A 64-bit atomic type */

typedef struct {
    volatile int64_t counter;
} atomic64_t;

/*
 * Atomically adds `increment` to ptr->counter with a full memory barrier
 * and returns the NEW value of the counter.
 *
 * An unsupported platform is a compile-time error instead of silently
 * falling off the end of a non-void function (undefined behavior).
 */
static __inline__ int64_t Barrier_AtomicIncrement64(volatile atomic64_t* ptr,
                                                    int64_t increment) {
#if defined(__APPLE__)
  /* OSAtomicAdd64Barrier takes a volatile int64_t*, so no cast is needed. */
  return OSAtomicAdd64Barrier(increment, &(ptr->counter));
#elif defined(__linux__)
  int64_t temp = increment;
  /* xaddq exchanges and adds: afterwards `temp` holds the OLD value. */
  __asm__ __volatile__("lock; xaddq %0,%1"
      : "+r" (temp), "+m" (ptr->counter)
      : : "memory");
  return temp + increment;
#else
#error "Barrier_AtomicIncrement64: unsupported platform"
#endif
}

// Static initializer, e.g.  atomic64_t a = ATOMIC64_INIT(0);
#define ATOMIC64_INIT(i)	{ (i) }

/**
 * atomic64_read - read atomic64 variable
 * @v: pointer of type atomic64_t
 *
 * Atomically reads the value of @v.
 * Doesn't imply a read memory barrier.
 */
#define atomic64_read(v)		((v)->counter)

/**
 * atomic64_set - set atomic64 variable
 * @v: pointer to type atomic64_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 * Plain volatile store; implies no memory barrier.
 */
#define atomic64_set(v,i)		(((v)->counter) = (i))

/**
 * atomic64_add - add integer to atomic64 variable
 * @i: integer value to add
 * @v: pointer to type atomic64_t
 *
 * Atomically adds @i to @v (full memory barrier).
 */
static __inline__ void atomic64_add(int64_t i, atomic64_t *v) {
  Barrier_AtomicIncrement64(v, i);
}

/**
 * atomic64_sub - subtract the atomic64 variable
 * @i: integer value to subtract; must not be INT64_MIN (negating it
 *     would overflow)
 * @v: pointer to type atomic64_t
 *
 * Atomically subtracts @i from @v (full memory barrier).
 */
static __inline__ void atomic64_sub(int64_t i, atomic64_t *v) {
  Barrier_AtomicIncrement64(v, -i);
}

/**
 * atomic64_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract; must not be INT64_MIN (negating it
 *     would overflow)
 * @v: pointer to type atomic64_t
 *
 * Atomically subtracts @i from @v (full memory barrier) and returns
 * true if the result is zero, or false for all
 * other cases.
 */
static __inline__ bool atomic64_sub_and_test(int64_t i, atomic64_t *v) {
  return Barrier_AtomicIncrement64(v, -i) == 0;
}

/**
 * atomic64_inc - increment atomic64 variable
 * @v: pointer to type atomic64_t
 *
 * Atomically increments @v by 1 (full memory barrier).
 */
static __inline__ void atomic64_inc(atomic64_t *v) {
  Barrier_AtomicIncrement64(v, 1);
}

/**
 * atomic64_dec - decrement atomic64 variable
 * @v: pointer to type atomic64_t
 *
 * Atomically decrements @v by 1 (full memory barrier).
 */
static __inline__ void atomic64_dec(atomic64_t *v) {
  Barrier_AtomicIncrement64(v, -1);
}

/**
 * atomic64_dec_and_test - decrement and test
 * @v: pointer to type atomic64_t
 *
 * Atomically decrements @v by 1 (full memory barrier) and
 * returns true if the result is 0, or false for all other
 * cases.
 */
static __inline__ bool atomic64_dec_and_test(atomic64_t *v) {
  return Barrier_AtomicIncrement64(v, -1) == 0;
}

/**
 * atomic64_inc_and_test - increment and test
 * @v: pointer to type atomic64_t
 *
 * Atomically increments @v by 1 (full memory barrier)
 * and returns true if the result is zero, or false for all
 * other cases.
 */
static __inline__ bool atomic64_inc_and_test(atomic64_t *v) {
  return Barrier_AtomicIncrement64(v, 1) == 0;
}

#endif  // defined(__x86_64__)
#endif
