/* Definitions for atomic operations.

   This file is part of khipu.

   khipu is free software: you can redistribute it and/or modify
   it under the terms of the GNU Lesser General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public License
   along with this program.  If not, see <https://www.gnu.org/licenses/>.  */

#ifndef __KP_ATOMIC__
#define __KP_ATOMIC__

#include "../defs.hpp"

#ifdef KP_NO_THREADS

KP_DECLS_BEGIN

typedef intptr_t atomic_t;

// Fetch-and-add for the single-threaded build: add VAL to *PTR and
// return the value it held before the addition.
inline atomic_t
atomic_add (atomic_t *ptr, atomic_t val)
{
  atomic_t prev = *ptr;
  *ptr = prev + val;
  return (prev);
}

// Compare-and-swap for the single-threaded build: if *PTR equals EXP,
// store NVAL into it.  Always returns the value *PTR held on entry,
// so callers detect success by comparing the result against EXP.
inline atomic_t
atomic_cas (atomic_t *ptr, atomic_t exp, atomic_t nval)
{
  atomic_t prev = *ptr;
  if (prev == exp)
    *ptr = nval;
  return (prev);
}

inline atomic_t
atomic_cas_bool (atomic_t *ptr, atomic_t exp, atomic_t nval)
{
  if (*ptr != exp)
    return (false);

  *ptr = nval;
  return (true);
}

// Exchange for the single-threaded build: store VAL into *PTR and
// return the previous contents.
inline atomic_t
atomic_swap (atomic_t *ptr, atomic_t val)
{
  atomic_t prev = *ptr;
  *ptr = val;
  return (prev);
}

// Fetch-and-OR for the single-threaded build: OR VAL into *PTR and
// return the previous value.
inline atomic_t
atomic_or (atomic_t *ptr, atomic_t val)
{
  atomic_t prev = *ptr;
  *ptr = prev | val;
  return (prev);
}

inline atomic
atomic_and (atomic_t *ptr, atomic_t val)
{
  atomic_t ret = *ptr;
  *ptr &= val;
  return (ret);
}

// Full memory fence: nothing to do in the single-threaded build.
inline void
atomic_mfence ()
{
}

// Acquire fence: nothing to do in the single-threaded build.
inline void
atomic_mfence_acq ()
{
}

// Release fence: nothing to do in the single-threaded build.
inline void
atomic_mfence_rel ()
{
}

#elif (defined (__GNUC__) && (__GNUC__ > 4 ||   \
    (__GNUC__ == 4 && __GNUC_MINOR__ >= 7))) || (defined (__clang__) &&   \
     defined (__clang_major__) && (__clang_major__ >= 4 ||   \
     (__clang_major__ == 3 && __clang_minor__ >= 8)))

KP_DECLS_BEGIN

typedef intptr_t atomic_t;

// Atomic fetch-and-add via the compiler builtin; returns the value
// *PTR held before VAL was added.  Acquire-release ordering.
inline atomic_t
atomic_add (atomic_t *ptr, atomic_t val)
{
  atomic_t prev = __atomic_fetch_add (ptr, val, __ATOMIC_ACQ_REL);
  return (prev);
}

// Atomic compare-and-swap: if *PTR equals EXP, store NVAL.  Returns
// the value observed in *PTR (the builtin writes it back into the
// expected slot on failure), so a return equal to EXP means success.
inline atomic_t
atomic_cas (atomic_t *ptr, atomic_t exp, atomic_t nval)
{
  atomic_t expected = exp;
  __atomic_compare_exchange_n (ptr, &expected, nval, 0,
    __ATOMIC_ACQ_REL, __ATOMIC_RELAXED);
  return (expected);
}

// Atomic compare-and-swap reporting whether the exchange happened.
inline bool
atomic_cas_bool (atomic_t *ptr, atomic_t exp, atomic_t nval)
{
  bool done = __atomic_compare_exchange_n (ptr, &exp, nval, 0,
    __ATOMIC_ACQ_REL, __ATOMIC_RELAXED);
  return (done);
}

// Atomically replace *PTR with VAL; returns the previous contents.
inline atomic_t
atomic_swap (atomic_t *ptr, atomic_t val)
{
  atomic_t prev = __atomic_exchange_n (ptr, val, __ATOMIC_ACQ_REL);
  return (prev);
}

// Atomic fetch-and-OR; returns the value *PTR held beforehand.
inline atomic_t
atomic_or (atomic_t *ptr, atomic_t val)
{
  atomic_t prev = __atomic_fetch_or (ptr, val, __ATOMIC_ACQ_REL);
  return (prev);
}

// Atomic fetch-and-AND; returns the value *PTR held beforehand.
inline atomic_t
atomic_and (atomic_t *ptr, atomic_t val)
{
  atomic_t prev = __atomic_fetch_and (ptr, val, __ATOMIC_ACQ_REL);
  return (prev);
}

// Acquire fence via the compiler builtin.
inline void
atomic_mfence_acq ()
{
  __atomic_thread_fence (__ATOMIC_ACQUIRE);
}

// Release fence via the compiler builtin.
inline void
atomic_mfence_rel ()
{
  __atomic_thread_fence (__ATOMIC_RELEASE);
}

// Full (sequentially consistent) fence via the compiler builtin.
inline void
atomic_mfence ()
{
  __atomic_thread_fence (__ATOMIC_SEQ_CST);
}


#else

#include <atomic>

KP_DECLS_BEGIN

typedef intptr_t atomic_t;

static_assert (sizeof (atomic_t) == sizeof (std::atomic_intptr_t) &&
  alignof (atomic_t) == alignof (std::atomic_intptr_t),
  "unsupported compiler (atomic_t and atomic_intptr_t mismatch)");

#define AS_ATOMIC(x)   ((std::atomic_intptr_t *)(x))

inline atomic_t
xatomic_cas (atomic_t *ptr, atomic_t exp, atomic_t nval)
{
  AS_ATOMIC(ptr)->compare_exchange_weak (exp, nval,
    std::memory_order_acq_rel, std::memory_order_relaxed);
  return (exp);
}

// Atomically replace *PTR with VAL; returns the previous contents.
// Fixed: renamed from xatomic_swap to atomic_swap so this fallback
// branch exports the same interface as the other branches of this
// header.
inline atomic_t
atomic_swap (atomic_t *ptr, atomic_t val)
{
  return (AS_ATOMIC(ptr)->exchange (val, std::memory_order_acq_rel));
}

// Atomic fetch-and-add; returns the value *PTR held beforehand.
// Fixed: renamed from xatomic_add to atomic_add so this fallback
// branch exports the same interface as the other branches of this
// header.
inline atomic_t
atomic_add (atomic_t *ptr, atomic_t val)
{
  return (AS_ATOMIC(ptr)->fetch_add (val, std::memory_order_acq_rel));
}

// Atomic fetch-and-OR; returns the value *PTR held beforehand.
// Fixed: renamed from xatomic_or to atomic_or so this fallback branch
// exports the same interface as the other branches of this header.
inline atomic_t
atomic_or (atomic_t *ptr, atomic_t val)
{
  return (AS_ATOMIC(ptr)->fetch_or (val, std::memory_order_acq_rel));
}

// Atomic fetch-and-AND; returns the value *PTR held beforehand.
// Fixed: renamed from xatomic_and to atomic_and so this fallback
// branch exports the same interface as the other branches of this
// header.
inline atomic_t
atomic_and (atomic_t *ptr, atomic_t val)
{
  return (AS_ATOMIC(ptr)->fetch_and (val, std::memory_order_acq_rel));
}

#undef AS_ATOMIC

// Full (sequentially consistent) memory fence.
inline void
atomic_mfence ()
{
  std::atomic_thread_fence (std::memory_order_seq_cst);
}

// Acquire fence.
inline void
atomic_mfence_acq ()
{
  std::atomic_thread_fence (std::memory_order_acquire);
}

// Release fence.
inline void
atomic_mfence_rel ()
{
  std::atomic_thread_fence (std::memory_order_release);
}

#endif

// Now define additional atomic operations.
#ifdef __GNUC__
#  if defined (KP_ARCH_I386)

#    define KP_HAS_ATOMIC_CASX

#    if defined (__PIC__) && __GNUC__ < 5

    // Double-word (64-bit) compare-and-swap for i386 using the locked
    // cmpxchg8b instruction - PIC variant for GCC < 5, where %ebx is
    // reserved (GOT pointer) and cannot be named in asm constraints,
    // so it is saved/restored by hand around the instruction.
    // The expected value is the pair EHI:ELO (in %edx:%eax), the
    // replacement is NHI:NLO (in %ecx:%ebx).  Returns true when the
    // exchange was performed (ZF from cmpxchg8b, captured by setz).
    template <class T>
    bool atomic_casx (T *ptr, atomic_t elo, atomic_t ehi,
                      atomic_t nlo, atomic_t nhi)
      {
        atomic_t s;   // spill slot preserving the caller's %ebx
        char r;       // receives the success flag from setz
        __asm__ __volatile__
          (
            "movl %%ebx, %2\n\t"            // save %ebx into S
            "leal %0, %%edi\n\t"            // %edi = address of *PTR
            "movl %7, %%ebx\n\t"            // %ebx = NLO (new low word)
            "lock; cmpxchg8b (%%edi)\n\t"   // the 64-bit CAS proper
            "movl %2, %%ebx\n\t"            // restore the saved %ebx
            "setz %1"                       // R = 1 iff the CAS succeeded
            : "=m" (*ptr), "=a" (r), "=m" (s)
            : "m" (*ptr), "d" (ehi), "a" (elo),
              "c" (nhi), "m" (nlo)
            : "%edi", "memory"
          );

        return ((bool)r);
      }

#    else

    // Double-word (64-bit) compare-and-swap for i386 using the locked
    // cmpxchg8b instruction.  Expected value is EHI:ELO in %edx:%eax,
    // replacement is NHI:NLO in %ecx:%ebx.  Returns true when the
    // exchange was performed (ZF from cmpxchg8b, captured by setz).
    template <class T>
    bool atomic_casx (T *ptr, atomic_t elo, atomic_t ehi,
                      atomic_t nlo, atomic_t nhi)
      {
        char r;   // receives the success flag from setz
        __asm__ __volatile__
          (
            "lock; cmpxchg8b %0\n\t"
            "setz %1"
            : "+m" (*ptr), "=a" (r)
            : "d" (ehi), "a" (elo),
              "c" (nhi), "b" (nlo)
            : "memory"
          );

        return ((bool)r);
      }

#    endif

#  elif defined (KP_ARCH_X32)

#    define KP_HAS_ATOMIC_CASX

  // Double-word (64-bit) compare-and-swap for the x32 ABI, where
  // atomic_t is 32 bits wide: the two halves are packed into plain
  // 64-bit values and handed to the compiler's CAS builtin.
  // Fixed: mask the low halves to 32 bits before packing.  atomic_t
  // is signed, so a bare (uint64_t) conversion sign-extends a
  // negative low word into the high 32 bits, corrupting the packed
  // expected/replacement values after the OR.
  template <class T>
  bool atomic_casx (T *ptr, atomic_t elo, atomic_t ehi,
                    atomic_t nlo, atomic_t nhi)
    {
      uint64_t exp = ((uint64_t)(uint32_t)ehi << 32) | (uint32_t)elo;
      uint64_t nval = ((uint64_t)(uint32_t)nhi << 32) | (uint32_t)nlo;

      return (__atomic_compare_exchange_n ((uint64_t *)ptr,
        &exp, nval, 0, __ATOMIC_ACQ_REL, __ATOMIC_RELAXED));
    }

#  elif defined (KP_ARCH_X8664)

#    define KP_HAS_ATOMIC_CASX

  // Double-word (128-bit) compare-and-swap for x86-64 using the
  // locked cmpxchg16b instruction.  Expected value is EHI:ELO in
  // %rdx:%rax, replacement is NHI:NLO in %rcx:%rbx.  Returns true
  // when the exchange was performed (ZF captured by setz).
  // NOTE(review): cmpxchg16b faults on an operand that is not
  // 16-byte aligned - confirm callers always pass a suitably
  // aligned T.
  template <class T>
  bool atomic_casx (T *ptr, atomic_t elo, atomic_t ehi,
                    atomic_t nlo, atomic_t nhi)
    {
      char r;   // receives the success flag from setz
      __asm__ __volatile__
        (
          "lock; cmpxchg16b %0\n\t"
          "setz %1"
          : "+m" (*ptr), "=q" (r)
          : "d" (ehi), "a" (elo),
            "c" (nhi), "b" (nlo)
          : "memory"
        );

      return ((bool)r);
    }

#  elif defined (KP_ARCH_ARM32) && ((!defined (__thumb__) ||   \
      (defined (__thumb2__) && !defined (__ARM_ARCH_7__)) &&   \
        !defined (__ARM_ARCH_7M__) && !defined (__ARM_ARCH_7EM__)) &&   \
      (!defined (__clang__) || __clang_major__ > 3 ||   \
        (__clang_major__ == 3 && __clang_minor__ >= 3)))

#    define KP_HAS_ATOMIC_CASX

  // Double-word (64-bit) compare-and-swap for 32-bit ARM built from
  // an LDREXD/STREXD exclusive pair.  Returns false as soon as the
  // loaded value differs from the packed expected value; otherwise
  // retries until the exclusive store succeeds and returns true.
  template <class T>
  bool atomic_casx (T *ptr, atomic_t elo, atomic_t ehi,
                    atomic_t nlo, atomic_t nhi)
    {
      // Fixed: mask the low halves to 32 bits before packing.
      // atomic_t is signed, so a bare (uint64_t) conversion
      // sign-extends a negative low word into the high 32 bits,
      // corrupting the packed expected/replacement values.
      uint64_t qv = ((uint64_t)(uint32_t)ehi << 32) | (uint32_t)elo;
      uint64_t nv = ((uint64_t)(uint32_t)nhi << 32) | (uint32_t)nlo;

      while (true)
        {
          uint64_t tmp;
          // Exclusive load of the 64-bit value at *PTR.
          __asm__ __volatile__
            (
              "ldrexd %0, %H0, [%1]"
              : "=&r" (tmp) : "r" (ptr)
            );

          if (tmp != qv)
            return (false);

          int r;
          // Exclusive store; R is 0 on success, nonzero when the
          // reservation was lost and the operation must retry.
          __asm__ __volatile__
            (
              "strexd %0, %3, %H3, [%2]"
              : "=&r" (r), "+m" (*ptr)
              : "r" (ptr), "r" (nv)
              : "cc"
            );

          if (r == 0)
            return (true);
        }
    }

#  elif defined (KP_ARCH_ARM64)

#    define KP_HAS_ATOMIC_CASX

  // Double-word (128-bit) compare-and-swap for AArch64 built from an
  // LDAXP/STXP exclusive pair.  Returns false as soon as the loaded
  // pair differs from ELO/EHI; otherwise retries until the exclusive
  // store of NLO/NHI succeeds and returns true.
  // NOTE(review): neither asm statement lists a "memory" clobber, so
  // the compiler may keep memory cached across them - verify callers
  // pair this with explicit fences, or that this is intentional.
  template <class T>
  bool atomic_casx (T *ptr, atomic_t elo, atomic_t ehi,
                    atomic_t nlo, atomic_t nhi)
    {
      while (true)
        {
          atomic_t t1, t2;
          // Load-acquire exclusive of the 16-byte pair at *PTR.
          __asm__ __volatile__
            (
              "ldaxp %0, %1, %2"
              : "=&r" (t1), "=&r" (t2)
              : "Q" (*ptr)
            );

          if (t1 != elo || t2 != ehi)
            return (false);

          int r;
          // Store-exclusive of the new pair; R is 0 on success,
          // nonzero when exclusivity was lost and we must retry.
          __asm__ __volatile__
            (
              "stxp %w0, %2, %3, %1"
              : "=&r" (r), "=Q" (*ptr)
              : "r" (nlo), "r" (nhi)
            );

          if (r == 0)
            return (true);
        }
    }

#  endif

#  if defined (KP_ARCH_I386) || defined (KP_ARCH_X32) ||   \
       defined (KP_ARCH_X8664)

#   define KP_HAS_ATOMIC_SPIN_NOP

// Spin-wait hint for x86: the "pause" instruction tells the CPU the
// caller is busy-waiting, reducing power use and avoiding the memory
// order mis-speculation penalty on loop exit.  The "memory" clobber
// keeps the compiler from caching values across iterations.
inline void
atomic_spin_nop ()
{
  __asm__ __volatile__ ("pause" : : : "memory");
}

#  elif defined (KP_ARCH_ARM32) || defined (KP_ARCH_ARM64)

#   define KP_HAS_ATOMIC_SPIN_NOP

// Spin-wait hint for ARM.
// NOTE(review): "wfe" suspends the core until an event is signalled;
// unlike the usual "yield" spin hint it can stall the spinner if no
// SEV/global event arrives - confirm this choice is intentional.
inline void
atomic_spin_nop ()
{
  __asm__ __volatile__ ("wfe" : : : "memory");
}


#  endif

#endif   // defined (__GNUC__)

#ifndef KP_HAS_ATOMIC_SPIN_NOP

// Generic spin-wait hint: no architecture-specific pause instruction
// is available, so fall back to an acquire fence, which at least
// forces the spinning load to observe other threads' stores.
inline void
atomic_spin_nop ()
{
  atomic_mfence_acq ();
}

#else

#  undef KP_HAS_ATOMIC_SPIN_NOP

#endif   // !defined KP_HAS_ATOMIC_SPIN_NOP

KP_DECLS_END

#endif
