﻿#ifndef ATOMICOPS_20COPY_H_
#define ATOMICOPS_20COPY_H_

#include <cassert>
#include <cerrno>
#include <cstdint>
#include <ctime>
#include <atomic>
#include <utility>

// AE_UNUSED
// Marks a variable as deliberately unused, so release builds (where the
// asserts that consume it compile away) don't emit unused-variable warnings.
#define AE_UNUSED(x) ((void)x)

// Portable atomic fences implemented below:
namespace moodycamel {

// Portable memory-ordering constants mirroring std::memory_order (minus
// "consume"). They let queue code name an ordering without committing to
// <atomic> directly; fence()/compiler_fence() below translate each value to
// the corresponding std:: fence. Do not reorder: values are matched in
// switches below.
enum memory_order {
  memory_order_relaxed,   // no ordering constraint
  memory_order_acquire,   // loads after this see writes published by releasers
  memory_order_release,   // writes before this are published to acquirers
  memory_order_acq_rel,   // both acquire and release semantics
  memory_order_seq_cst,   // single total order across all seq_cst operations
  memory_order_sync = memory_order_seq_cst  // legacy alias for seq_cst
};

} // end namespace moodycamel

// Use standard library of atomics
namespace moodycamel {

template <typename T>
using atomic = std::atomic<T>;

// Issues a compiler-only fence (std::atomic_signal_fence) for the requested
// ordering. memory_order_relaxed is a no-op; any value outside the known set
// is a programming error and trips the assert in debug builds.
// (memory_order_sync is an alias of memory_order_seq_cst, so it is covered.)
inline void compiler_fence(moodycamel::memory_order order) {
  if (order == moodycamel::memory_order_relaxed) {
    return;  // nothing to emit
  }
  if (order == moodycamel::memory_order_acquire) {
    std::atomic_signal_fence(std::memory_order_acquire);
  } else if (order == moodycamel::memory_order_release) {
    std::atomic_signal_fence(std::memory_order_release);
  } else if (order == moodycamel::memory_order_acq_rel) {
    std::atomic_signal_fence(std::memory_order_acq_rel);
  } else if (order == moodycamel::memory_order_seq_cst) {
    std::atomic_signal_fence(std::memory_order_seq_cst);
  } else {
    assert(false);  // unknown ordering value
  }
}

// Issues a full hardware/thread fence (std::atomic_thread_fence) for the
// requested ordering. memory_order_relaxed emits nothing; an unknown value
// asserts in debug builds and emits no fence.
inline void fence(moodycamel::memory_order order) {
  std::memory_order stdOrder;
  switch (order) {
    case moodycamel::memory_order_relaxed:
      return;  // relaxed needs no fence at all
    case moodycamel::memory_order_acquire:
      stdOrder = std::memory_order_acquire;
      break;
    case moodycamel::memory_order_release:
      stdOrder = std::memory_order_release;
      break;
    case moodycamel::memory_order_acq_rel:
      stdOrder = std::memory_order_acq_rel;
      break;
    case moodycamel::memory_order_seq_cst:
      stdOrder = std::memory_order_seq_cst;
      break;
    default:
      assert(false);  // unknown ordering value
      return;
  }
  std::atomic_thread_fence(stdOrder);
}

} // end namespace moodycamel

namespace moodycamel {

// A deliberately "weak" atomic wrapper: all plain loads/stores use
// std::memory_order_relaxed, with explicit acquire/release read-modify-write
// variants provided only where callers need ordering
// (fetch_add_acquire / fetch_add_release).
template <typename T>
class weak_atomic {
 public:
  weak_atomic() : value() {}

  // Implicit construction from anything convertible to T.
  template <typename U>
  weak_atomic(U&& x) : value(std::forward<U>(x)) {}

  // Copy and move both snapshot the source with a relaxed load.
  // load() returns a prvalue, so there is nothing to std::move from —
  // the previous std::move(other.load()) was a no-op cast.
  weak_atomic(const weak_atomic& other) : value(other.load()) {}
  weak_atomic(weak_atomic&& other) : value(other.load()) {}

  // Relaxed read via implicit conversion.
  operator T() const {
    return load();
  }

  // Relaxed store of anything convertible to T.
  template <typename U>
  const weak_atomic& operator=(U&& x) {
    value.store(std::forward<U>(x), std::memory_order_relaxed);
    return *this;
  }

  // Relaxed copy-assignment (relaxed load, relaxed store).
  const weak_atomic& operator=(const weak_atomic& other) {
    value.store(other.value.load(std::memory_order_relaxed), std::memory_order_relaxed);
    return *this;
  }

  // Relaxed read.
  T load() const {
    return value.load(std::memory_order_relaxed);
  }

  // Atomic add with acquire ordering; returns the value held before the add.
  T fetch_add_acquire(T increment) {
    return value.fetch_add(increment, std::memory_order_acquire);
  }

  // Atomic add with release ordering; returns the value held before the add.
  T fetch_add_release(T increment) {
    return value.fetch_add(increment, std::memory_order_release);
  }

 private:
  std::atomic<T> value;
};

} // end namespace moodycamel

#include <semaphore.h>

namespace moodycamel {
namespace spsc_sema {

// Thin RAII wrapper over a POSIX unnamed semaphore (sem_t).
// All waits retry on EINTR, so callers never observe spurious wakeups caused
// by signal delivery.
class Semaphore {
 private:
  sem_t m_sema;

 public:
  // initialCount must be non-negative (asserted in debug builds).
  Semaphore(int initialCount = 0) : m_sema() {
    assert(initialCount >= 0);
    int rc = sem_init(&m_sema, 0, static_cast<unsigned int>(initialCount));
    assert(rc == 0);
    static_cast<void>(rc);  // silence unused-variable warning in release builds
  }

  ~Semaphore() {
    sem_destroy(&m_sema);
  }

  // A sem_t cannot be meaningfully copied (copying the raw struct is
  // undefined behavior), so copying is disallowed outright.
  Semaphore(const Semaphore&) = delete;
  Semaphore& operator=(const Semaphore&) = delete;

  // Blocks until the count is positive, then decrements it.
  // Returns false only if sem_wait fails for a reason other than EINTR.
  bool wait() {
    int rc;
    do {
      rc = sem_wait(&m_sema);
    } while (rc == -1 && errno == EINTR);
    return rc == 0;
  }

  // Non-blocking decrement attempt; true on success, false if count was zero.
  bool try_wait() {
    int rc;
    do {
      rc = sem_trywait(&m_sema);
    } while (rc == -1 && errno == EINTR);
    return rc == 0;
  }

  // Waits at most `usecs` microseconds. Returns true if the semaphore was
  // decremented, false on timeout (or any non-EINTR error).
  bool timed_wait(std::uint64_t usecs) {
    struct timespec ts;
    const int usecs_in_1_sec = 1000000;
    const int nsecs_in_1_sec = 1000000000;
    // sem_timedwait takes an absolute CLOCK_REALTIME deadline.
    clock_gettime(CLOCK_REALTIME, &ts);
    ts.tv_sec += static_cast<time_t>(usecs / usecs_in_1_sec);
    ts.tv_nsec += static_cast<long>(usecs % usecs_in_1_sec) * 1000;
    // tv_nsec can exceed one second by at most one extra second here,
    // so a single carry is sufficient.
    if (ts.tv_nsec >= nsecs_in_1_sec) {
      ts.tv_nsec -= nsecs_in_1_sec;
      ++ts.tv_sec;
    }

    int rc;
    do {
      rc = sem_timedwait(&m_sema, &ts);
    } while (rc == -1 && errno == EINTR);
    return rc == 0;
  }

  // Increments the count, waking one waiter. Retries if sem_post fails.
  void signal() {
    while (sem_post(&m_sema) == -1);
  }

  // Increments the count `count` times.
  void signal(int count) {
    while (count-- > 0) {
      while (sem_post(&m_sema) == -1);
    }
  }
};

// A semaphore that avoids kernel calls on the fast path: an atomic count
// tracks available signals, and the OS semaphore (m_sema) is only touched
// when a waiter actually has to block. A negative m_count means that many
// threads are waiting (or about to wait) on m_sema.
// NOTE(review): this lives in the spsc_sema namespace — presumably intended
// for single-producer/single-consumer use (the assert in signal() tolerates
// at most one pending waiter); confirm before using it more widely.
class LightweightSemaphore {
 public:
  // Signed counterpart of size_t. Shadows POSIX ::ssize_t inside this class.
  typedef std::make_signed<std::size_t>::type ssize_t;

 private:
  weak_atomic<ssize_t> m_count;  // >0: signals available; <0: waiters pending
  Semaphore m_sema;              // kernel semaphore, used only when blocking

  // Slow path: spin briefly hoping a signal arrives, then fall back to the
  // OS semaphore. timeout_usecs < 0 means wait forever.
  bool waitWithPartialSpinning(std::int64_t timeout_usecs = -1) {
    ssize_t oldCount;
    // Bounded spin before paying for a kernel-level wait.
    int spin = 1024;
    while (--spin >= 0) {
      if (m_count.load() > 0) {
        m_count.fetch_add_acquire(-1);
        return true;
      }
      // Compiler-only fence: keeps the load from being hoisted out of the loop.
      compiler_fence(memory_order_acquire);
    }
    // Register as a waiter by decrementing. A positive previous value means
    // a signal was already available — take it without blocking.
    oldCount = m_count.fetch_add_acquire(-1);
    if (oldCount > 0) return true;
    if (timeout_usecs < 0) {
      if (m_sema.wait()) return true;
    }
    if (timeout_usecs > 0 && m_sema.timed_wait(static_cast<uint64_t>(timeout_usecs))) return true;
    // Timed out (or the wait failed): undo our decrement. signal() may race
    // with the undo; if it saw our negative count it also posted m_sema on
    // our behalf, so we must consume that post to keep the counts in sync.
    while (true) {
      oldCount = m_count.fetch_add_release(1);
      if (oldCount < 0) return false;  // undo landed first; no signal to consume
      oldCount = m_count.fetch_add_acquire(-1);
      if (oldCount > 0 && m_sema.try_wait()) return true;
    }
  }

 public:
  // initialCount must be non-negative (asserted in debug builds).
  LightweightSemaphore(ssize_t initialCount = 0) : m_count(initialCount), m_sema() {
    assert(initialCount >= 0);
  }

  // Non-blocking: succeeds only if a signal is already available.
  bool tryWait() {
    if (m_count.load() > 0) {
      m_count.fetch_add_acquire(-1);
      return true;
    }
    return false;
  }

  // Blocks until a signal is available. Returns false only if the underlying
  // OS wait fails.
  bool wait() {
    return tryWait() || waitWithPartialSpinning();
  }

  // Waits at most timeout_usecs microseconds; false on timeout.
  bool wait(std::int64_t timeout_usecs) {
    return tryWait() || waitWithPartialSpinning(timeout_usecs);
  }

  // Makes `count` signals available. Posts the OS semaphore only if the
  // previous count was negative, i.e. a waiter was registered.
  void signal(ssize_t count = 1) {
    assert(count >= 0);
    ssize_t oldCount = m_count.fetch_add_release(count);
    assert(oldCount >= -1);  // at most one pending waiter expected
    if (oldCount < 0) {
      m_sema.signal(1);
    }
  }

  // Approximate number of available signals (racy by design; never negative).
  std::size_t availableApprox() const {
    ssize_t count = m_count.load();
    return count > 0 ? static_cast<std::size_t>(count) : 0;
  }
};

} // end namespace spsc_sema
} // end namespace moodycamel


#endif // ATOMICOPS_20COPY_H_
