#include "tbcore/base/mutex.hpp"

#include <boost/smart_ptr/detail/yield_k.hpp>

#include "tbcore/base/basic_types.hpp"
#include "tbcore/base/logging.hpp"

TB_NAMESPACE_BEGIN

// Hidden state for SpinMutex (pimpl): a single atomic flag.
// state_ == true while some thread holds the lock.
struct SpinMutex::SpinMutexImpl {
  SpinMutexImpl() : state_(false) {}  // starts unlocked
  boost::atomic_bool state_;
};

// Allocates the pimpl state; the mutex starts out unlocked.
SpinMutex::SpinMutex() : impl_(new SpinMutexImpl()) {}

// Frees the pimpl state. Destroying a mutex that is still held is a
// caller error (no check is made here).
SpinMutex::~SpinMutex() { delete impl_; }

// Attempts to acquire the lock without spinning.
// Returns true iff this call flipped the flag from false to true.
//
// Uses compare_exchange_strong: the weak form is allowed to fail
// spuriously, which would make TryLock() report contention on a mutex
// that is actually free. acquire ordering pairs with the release store
// in Unlock().
bool SpinMutex::TryLock() {
  bool expected = false;
  return impl_->state_.compare_exchange_strong(expected, true,
                                               boost::memory_order_acquire);
}

// Acquires the lock, spinning (with progressive backoff via
// boost::detail::yield) until it is free.
//
// BUG FIX: the original CAS loop never reset |expected| after a failed
// compare_exchange_weak. On failure the CAS stores the observed value
// (true) into |expected|, so the next iteration compared true against
// true and "succeeded" while another thread still held the lock.
// A plain test-and-set via exchange() avoids the expected-value
// bookkeeping entirely: exchange returns the previous value, so the loop
// exits only when we transition the flag from false to true.
void SpinMutex::Lock() {
  int k = 0;
  while (impl_->state_.exchange(true, boost::memory_order_acquire)) {
    boost::detail::yield(k++);
  }
}

// Releases the lock. The release store publishes all writes made in the
// critical section to the next acquirer.
void SpinMutex::Unlock() {
  impl_->state_.store(false, boost::memory_order_release);
}

namespace {

// Layout of the SpinRWMutex state word (impl_->state_):
//   WRITE_INCREMENT (1) is the writer flag; READ_INCREMENT (2) is added
//   once per shared holder. So state == 1 means write-locked and
//   state == 2*k means k readers hold the lock.
// NOTE(review): this assumes TB_TESTBIT(state, WRITE_INCREMENT) tests
// the bit *value* 1, not bit *index* 1 — confirm against the macro.
enum SpinRWMutexIncrement {  // typo fixed: was "SpinRWMutexIncement"
  WRITE_INCREMENT = 1,
  READ_INCREMENT = 2,
};

}  // namespace

// Hidden state for SpinRWMutex (pimpl): one atomic int encoding the
// writer flag and the reader count (see SpinRWMutexIncement above).
struct SpinRWMutex::SpinRWMutexImpl {
  SpinRWMutexImpl() : state_(0) {}  // 0 == unlocked
  boost::atomic_int state_;
};

// Allocates the pimpl state; the lock starts out unheld.
SpinRWMutex::SpinRWMutex() : impl_(new SpinRWMutexImpl()) {}

// Frees the pimpl state. Destroying a lock that is still held (shared
// or exclusive) is a caller error.
SpinRWMutex::~SpinRWMutex() { delete impl_; }

// Attempts to take a shared (reader) lock without blocking.
// Returns true iff the reader count was incremented.
//
// BUG FIX: the original did a plain load followed by an unconditional
// fetch_add — a classic check-then-act race. A writer could acquire the
// lock between the two operations, leaving a reader increment stacked on
// top of a write lock. The CAS loop below makes the writer-bit check and
// the increment a single atomic step.
bool SpinRWMutex::TryLockShared() {
  int state = impl_->state_.load(boost::memory_order_relaxed);
  while (!TB_TESTBIT(state, WRITE_INCREMENT)) {
    // On failure compare_exchange_weak refreshes |state|, so the writer
    // bit is re-checked before every retry.
    if (impl_->state_.compare_exchange_weak(state, state + READ_INCREMENT,
                                            boost::memory_order_acquire)) {
      return true;
    }
  }
  return false;  // a writer holds (or just grabbed) the lock
}

// Attempts to take the exclusive (writer) lock without blocking.
// Succeeds only from the fully-unlocked state (state == 0).
//
// Uses compare_exchange_strong: the weak form may fail spuriously, which
// would make TryLockExclusive() report contention on an unheld lock.
bool SpinRWMutex::TryLockExclusive() {
  int expected = 0;
  return impl_->state_.compare_exchange_strong(expected, WRITE_INCREMENT,
                                               boost::memory_order_acquire);
}

// Acquires a shared (reader) lock, spinning while a writer holds it.
//
// BUG FIX: the original loaded |state| once, then spun on that stale
// snapshot (never reloading it — so it either spun forever or fell
// straight through), and finally fetch_add'ed unconditionally, racing
// with writers. The CAS loop below re-checks the writer bit on every
// iteration and registers the reader atomically.
void SpinRWMutex::LockShared() {
  int k = 0;
  int state = impl_->state_.load(boost::memory_order_relaxed);
  for (;;) {
    if (TB_TESTBIT(state, WRITE_INCREMENT)) {
      // Writer present: back off, then refresh the snapshot.
      boost::detail::yield(k++);
      state = impl_->state_.load(boost::memory_order_relaxed);
    } else if (impl_->state_.compare_exchange_weak(
                   state, state + READ_INCREMENT,
                   boost::memory_order_acquire)) {
      return;  // reader registered
    }
    // CAS failure already refreshed |state|; loop re-evaluates the bit.
  }
}

// Releases one shared (reader) hold.
void SpinRWMutex::UnlockShared() {
#if defined(TB_DEBUG)
  // A valid shared-locked state has the writer bit CLEAR and at least one
  // reader increment (state > 0). BUG FIX: the original condition was the
  // exact inverse — it passed when the writer bit was set and fired the
  // DCHECK on every correctly-paired shared unlock.
  int state = impl_->state_.load(boost::memory_order_relaxed);
  DCHECK(!TB_TESTBIT(state, WRITE_INCREMENT) && (state > 0))
    << "previous lock state is not shared";
#endif
  impl_->state_.fetch_sub(READ_INCREMENT, boost::memory_order_release);
}

// Acquires the exclusive (writer) lock, spinning until the state word
// transitions 0 -> WRITE_INCREMENT.
//
// BUG FIXES vs the original:
//  1. The loop condition was missing the negation: it spun after a
//     *successful* CAS and exited on failure — the opposite of intended.
//  2. |expected| must be reset to 0 each iteration, because a failed
//     compare_exchange overwrites it with the currently observed value.
void SpinRWMutex::LockExclusive() {
  int k = 0;
  int expected = 0;
  while (!impl_->state_.compare_exchange_weak(expected, WRITE_INCREMENT,
                                              boost::memory_order_acquire)) {
    expected = 0;
    boost::detail::yield(k++);
  }
}

// Releases the exclusive (writer) lock.
void SpinRWMutex::UnlockExclusive() {
  // The valid exclusive-locked state is exactly state == WRITE_INCREMENT.
  // BUG FIX: the original used != , so the DCHECK fired on every
  // correctly-paired exclusive unlock and passed on invalid states.
  DCHECK(impl_->state_.load(boost::memory_order_relaxed) == WRITE_INCREMENT)
    << "previous lock state is not exclusive";
  impl_->state_.store(0, boost::memory_order_release);
}

TB_NAMESPACE_END