/*
 * =====================================================================================
 *
 *       Filename:  CStack.cpp
 *
 *    Description:  CStack.cpp
 *
 *         Author:  Younghyun Jo
 *   Organization:  Master Student
 *                  Computer Systems and Platforms Laboratory
 *                  Department of Computer Science and Engineering
 *                  Seoul National University
 *
 * =====================================================================================
 */

#include <cstdio>
#include <cstdlib>
#include "stack.h"

#include <immintrin.h>

// TSX helper code is adapted from glibc-2.18

#define _XA_EXPLICIT		0
#define _XA_RETRY		1
#define _XA_CONFLICT		2
#define _XA_CAPACITY		3

#define _XBEGIN_STARTED		(~0u)
#define _XABORT_EXPLICIT	(1 << _XA_EXPLICIT)
#define _XABORT_RETRY		(1 << _XA_RETRY)
#define _XABORT_CONFLICT	(1 << _XA_CONFLICT)
#define _XABORT_CAPACITY	(1 << _XA_CAPACITY)
#define _XABORT_DEBUG		(1 << 4)
#define _XABORT_NESTED		(1 << 5)

#define _XABORT_CODE(x)		(((x) >> 24) & 0xff)

#define _ABORT_LOCK_BUSY	0xff

#define __force_inline __attribute__((__always_inline__)) inline

/*
 * _xbegin: start an RTM transaction (XBEGIN encoded as raw bytes so no
 * TSX-aware assembler is required).  Returns _XBEGIN_STARTED when the
 * transaction began; on abort, execution resumes right after this call
 * with the abort status delivered in EAX (hence the "+a" constraint).
 */
static __force_inline int _xbegin(void)
{
	int ret = _XBEGIN_STARTED;
	asm volatile (".byte 0xc7,0xf8 ; .long 0"
			: "+a" (ret) :: "memory");
	return ret;
}

/*
 * _xend: commit the innermost RTM transaction (raw XEND opcode).
 * Must only be executed inside a transaction.
 */
static __force_inline void _xend(void)
{
	asm volatile (".byte 0x0f,0x01,0xd5"
			::: "memory");
}

/*
 * _xabort: explicitly abort the current transaction (raw XABORT opcode).
 * 'status' is an 8-bit immediate that the aborted _xbegin() caller can
 * recover with _XABORT_CODE().
 */
static __force_inline void _xabort(const unsigned int status)
{
	asm volatile (".byte 0xc6,0xf8,%P0"
			:: "i" (status) : "memory");
}

/*
 * _xtest: return non-zero when currently executing inside an RTM
 * transaction (raw XTEST opcode sets ZF=0 in that case).
 */
static __force_inline int _xtest(void)
{
	unsigned char out;
	asm volatile (".byte 0x0f,0x01,0xd6 ; setnz %0"
			: "=r" (out) :: "memory");
	return out;
}

//--------------------------------------------------------------------------------------
// Node
//
// Note: this unbounded stack uses doubly linked nodes
//

/* Default constructor: an unused (-1) payload with no links yet. */
Node::Node()
{
  _value = -1;
  _next = NULL;
  _prev = NULL;
}

/* Value constructor: stores the payload; the owning stack sets the links. */
Node::Node(const item_t value)
{
  _value = value;
  _next = NULL;
  _prev = NULL;
}

// Nodes own no resources; the containing stack manages the links.
Node::~Node()
{
}

//--------------------------------------------------------------------------------------
// CStack
//
// CStack: Basic implementation of CStack
//         It only supports one sequential task
//

/* An empty stack has neither a bottom (_head) nor a top node. */
CStack::CStack()
{
  _head = NULL;
  _top = NULL;
}

/* Destructor: walk the list from the bottom and release every node
 * still linked into the stack. */
CStack::~CStack(void)
{
  for (Node* cur = _head; cur != NULL; ) {
    Node* victim = cur;
    cur = cur->_next;
    delete victim;
  }
}

bool CStack::Push(int value, int thread_id)
{
  Node* new_node = new Node(value);

  /* When the stack is empty */
  if (!_head) {
    // _head points to first node 
    // or we can use a sentinel node as a first node
    _head = new_node;
    new_node->_prev = _head;
  } else {
    _top->_next = new_node;
    new_node->_prev = _top;
  }

  _top = new_node;

  return true;
}

/*
 * Pop: unlink and return the top node (caller takes ownership).
 * Returns NULL when the stack is empty.
 *
 * BUG FIX: popping the last node used to leave _top pointing at the
 * popped node (whose _prev refers to itself), so the stack never became
 * empty again and further Pops returned an already-popped node.  The
 * popped node was also never detached, so the destructor could still
 * reach it through _next and delete it twice.
 */
Node* CStack::Pop(int thread_id)
{
  /* When the stack is empty */
  if (!_head) {
    return NULL; // Exception
  }

  Node* pop_node = _top;
  if (pop_node == _head) {
    /* Last element: reset to the empty state. */
    _head = NULL;
    _top = NULL;
  } else {
    _top = pop_node->_prev;
    _top->_next = NULL; /* detach so the list no longer references pop_node */
  }
  return pop_node;
}

// Peek: return the current top node without removing it (NULL when empty).
Node* CStack::Peek(void)
{
  return _top;
}

//--------------------------------------------------------------------------------------
// CCoarseGrainedLockStack
//
// CCoarseGrainedLockStack: It uses a coarse grained lock to preserve sequential order
//                          between a set of tasks
//

// Initialize the spin lock that serializes all Push/Pop operations.
CCoarseGrainedLockStack::CCoarseGrainedLockStack()
{
  lock_init(&_lock); // init the basic spin lock
}

// Tear down the spin lock; node cleanup is done by the CStack destructor.
CCoarseGrainedLockStack::~CCoarseGrainedLockStack()
{
  lock_destruct(&_lock); // destruct the spin lock
}

bool CCoarseGrainedLockStack::Push(int value, int thread_id)
{
  Node* new_node = new Node(value);

  /* acquire the spin lock */
  lock_acquire(&_lock);

  /* When the stack is empty */
  if (!_head) {
    _head = new_node;
    new_node->_prev = _head;
  } else {
    _top->_next = new_node;
    new_node->_prev = _top;
  }

  _top = new_node;

  /* release the spin lock */
  lock_release(&_lock);

  return true;
}

/*
 * Pop: unlink and return the top node under the spin lock; NULL when empty.
 *
 * BUG FIX: popping the last node used to leave _top pointing at the popped
 * node (self-referential _prev), so the stack never emptied and a stale
 * node kept being returned; the popped node was also left linked via the
 * old top's _next, which the base destructor could follow into freed memory.
 */
Node* CCoarseGrainedLockStack::Pop(int thread_id)
{
  /* acquire the spin lock */
  lock_acquire(&_lock);

  /* When the stack is empty */
  if (!_head) {
    lock_release(&_lock); // Exception
    return NULL;
  }

  Node* pop_node = _top;
  if (pop_node == _head) {
    /* Last element: reset to the empty state. */
    _head = NULL;
    _top = NULL;
  } else {
    _top = pop_node->_prev;
    _top->_next = NULL; /* detach the popped node from the list */
  }

  /* release the spin lock */
  lock_release(&_lock);
  return pop_node;
}

//--------------------------------------------------------------------------------------
// CTransactionalMemory
//
// CTransactionalMemory: It uses Intel TSX (RTM) hardware transactions to
//                       preserve atomicity between a set of tasks
//

// No extra state beyond the base CStack; transactions need no setup.
CTransactionalMemory::CTransactionalMemory()
{
}

// Node cleanup is handled by the CStack destructor.
CTransactionalMemory::~CTransactionalMemory()
{
}

/*
 * Push: perform the linked-list push inside an RTM transaction; on abort,
 * count the failure and retry forever (no software fallback in this
 * variant, so a persistently aborting transaction can livelock).
 *
 * BUG FIX: the status returned by _xbegin() was discarded and _xbegin()
 * was called a *second* time in the if-condition, starting a nested
 * transaction.  Test the saved status instead.
 */
bool CTransactionalMemory::Push(int value, int thread_id)
{
  Node* new_node = new Node(value); /* allocate outside the transaction */

  while (1) {
    unsigned status = _xbegin();

    if (status == _XBEGIN_STARTED) {
      /* we're in transactional context */
      /* When the stack is empty */
      if (!_head) {
        _head = new_node;
        new_node->_prev = _head;
      } else {
        _top->_next = new_node;
        new_node->_prev = _top;
      }

      _top = new_node;

      _xend();
      return true;
    } else {
      /* transaction aborted: record the failure and retry */
      __sync_fetch_and_add(&(this->failures), 1);
    }
  }
}

/*
 * Pop: unlink and return the top node inside an RTM transaction; NULL
 * when empty.  Aborts are counted and the operation is retried.
 *
 * BUG FIXES:
 *  - _xbegin() was called twice (the first status was ignored), starting
 *    a nested transaction; use the saved status.
 *  - the inner 'Node* pop_node = _top;' declared a NEW local that
 *    shadowed the outer pop_node, so a non-empty Pop always returned NULL
 *    (and still advanced _top, losing the node).
 *  - popping the last node now resets _head/_top, and the popped node is
 *    detached so the destructor cannot reach it.
 */
Node* CTransactionalMemory::Pop(int thread_id)
{
  Node* pop_node = NULL;

  while (1) {
    unsigned status = _xbegin();

    if (status == _XBEGIN_STARTED) {
      /* we're in transactional context */
      if (!_head) {
        pop_node = NULL; /* empty stack */
      } else {
        pop_node = _top;
        if (pop_node == _head) {
          /* last element: stack becomes empty again */
          _head = NULL;
          _top = NULL;
        } else {
          _top = pop_node->_prev;
          _top->_next = NULL; /* detach the popped node */
        }
      }

      _xend();
      return pop_node;
    } else {
      /* transaction aborted: record the failure and retry */
      __sync_fetch_and_add(&(this->failures), 1);
    }
  }
}


//--------------------------------------------------------------------------------------
// CCoarseGrainedTransactionalMemory
//
// CCoarseGrainedTransactionalMemory: It uses TSX hardware transactions with a
//                          coarse grained lock as the abort fallback path
//

// Initialize the spin lock used as the fallback path after a TSX abort.
CCoarseGrainedTransactionalMemory::CCoarseGrainedTransactionalMemory()
{
  lock_init(&_lock); // init the basic spin lock
}

// Tear down the fallback spin lock; nodes are freed by the CStack destructor.
CCoarseGrainedTransactionalMemory::~CCoarseGrainedTransactionalMemory()
{
  lock_destruct(&_lock); // destruct the spin lock
}

/*
 * Push: try the update inside an RTM transaction; on abort, fall back to
 * the coarse grained spin lock.
 *
 * BUG FIX: the status returned by _xbegin() was discarded and _xbegin()
 * was called a second time in the if-condition, starting a nested
 * transaction.  Test the saved status instead.
 *
 * NOTE(review): for correct lock elision the transaction should read
 * _lock and _xabort() when it is held (the "hacky check" that was
 * commented out here); without it the transactional path is not isolated
 * from a thread running the lock fallback.  Left unchanged because the
 * lock's representation is not visible in this file — TODO confirm.
 */
bool CCoarseGrainedTransactionalMemory::Push(int value, int thread_id)
{
  Node* new_node = new Node(value); /* allocate outside both paths */

  while (1) {
    unsigned status = _xbegin();

    if (status == _XBEGIN_STARTED) {
      /* transactional fast path */
      if (!_head) {
        _head = new_node;
        new_node->_prev = _head;
      } else {
        _top->_next = new_node;
        new_node->_prev = _top;
      }

      _top = new_node;

      _xend();
      return true;
    } else {
      /* abort: record the failure, then push under the spin lock */
      __sync_fetch_and_add(&(this->failures), 1);

      lock_acquire(&_lock);

      if (!_head) {
        _head = new_node;
        new_node->_prev = _head;
      } else {
        _top->_next = new_node;
        new_node->_prev = _top;
      }

      _top = new_node;

      lock_release(&_lock);
      return true;
    }
  }
}

/*
 * Pop: try inside an RTM transaction; on abort, fall back to the spin lock.
 * Returns the unlinked top node, or NULL when empty.
 *
 * BUG FIXES:
 *  - _xbegin() was called twice (first status ignored) — use the saved
 *    status instead of starting a nested transaction.
 *  - the inner 'Node* pop_node = _top;' shadowed the outer variable, so
 *    the transactional path always returned NULL for a non-empty stack.
 *  - popping the last node now resets _head/_top and the popped node is
 *    detached, so the stack can empty and the destructor stays safe.
 */
Node* CCoarseGrainedTransactionalMemory::Pop(int thread_id)
{
  Node* pop_node = NULL;

  while (1) {
    unsigned status = _xbegin();

    if (status == _XBEGIN_STARTED) {
      /* transactional fast path */
      if (!_head) {
        pop_node = NULL;
      } else {
        pop_node = _top;
        if (pop_node == _head) {
          _head = NULL; /* stack is empty again */
          _top = NULL;
        } else {
          _top = pop_node->_prev;
          _top->_next = NULL; /* detach the popped node */
        }
      }

      _xend();
      return pop_node;
    } else {
      /* abort: record the failure, then pop under the spin lock */
      __sync_fetch_and_add(&(this->failures), 1);

      lock_acquire(&_lock);

      if (!_head) {
        lock_release(&_lock); // Exception
        return NULL;
      }

      pop_node = _top;
      if (pop_node == _head) {
        _head = NULL;
        _top = NULL;
      } else {
        _top = pop_node->_prev;
        _top->_next = NULL;
      }

      lock_release(&_lock);
      return pop_node;
    }
  }
}

//--------------------------------------------------------------------------------------
// CLockFreeStack
//
// CLockFreeStack: The implementation of Lock free stack by using compare and exch
//

// No extra state; the lock-free paths operate on the inherited _top.
CLockFreeStack::CLockFreeStack()
{
}

// Node cleanup is handled by the CStack destructor.
CLockFreeStack::~CLockFreeStack()
{
}

/*
 * Push: Treiber-style lock-free push — read the top, link behind it, and
 * CAS it in; on contention, back off exponentially and retry.
 *
 * BUG FIX: the empty backoff loop had no side effects, so the compiler
 * optimized it away entirely and the backoff was a no-op; 'volatile'
 * forces the delay to actually happen.
 */
bool CLockFreeStack::Push(int value, int thread_id)
{
  Node* newNode = new Node(value);
  Node* oldTop = NULL; // it is for CompareAndSet in LockFreeStack
  int backoff = 2; // MIN_DELAY

  while (true) {
    oldTop = Peek();
    newNode->_prev = oldTop;

    /* TryPush(): CAS Lock free */
    if (__sync_bool_compare_and_swap(&_top, oldTop, newNode)) {
      return true;
    } else {
      __sync_fetch_and_add(&(this->failures), 1);
      /* Back off to avoid spamming memory networks. */
      for (volatile int i = backoff; i > 0; i--);

      if (backoff < 256) // MAX_DELAY
          backoff *= 2;
    }
  }
}

/*
 * Pop: lock-free pop — read the top and CAS it to its predecessor.
 * Returns the old top (caller takes ownership), or NULL when empty.
 *
 * BUG FIX: the empty backoff loop was optimized away by the compiler;
 * 'volatile' makes the delay real.
 *
 * NOTE(review): dereferencing oldTop->_prev before the CAS is the classic
 * ABA/use-after-free hazard of Treiber pops if popped nodes are freed
 * concurrently — documented here, not changed, since fixing it needs
 * hazard pointers or stamped pointers.
 */
Node* CLockFreeStack::Pop(int thread_id)
{
  Node* oldTop = NULL; // it is for CompareAndSet in LockFreeStack
                       // and, it will be also returning pop node
  int backoff = 2; // MIN_DELAY

  while (true) {
    oldTop = Peek();
    if (oldTop == NULL) return NULL; // Exception

    /* TryPop(): CAS Lock free */
    if (__sync_val_compare_and_swap(&_top, oldTop, oldTop->_prev)
        == oldTop) {
      return oldTop;
    } else {
      __sync_fetch_and_add(&(this->failures), 1);
      /* Back off to avoid spamming memory networks. */
      for (volatile int i = backoff; i > 0; i--);

      if (backoff < 256) // MAX_DELAY
          backoff *= 2;
    }
  }
}

//--------------------------------------------------------------------------------------
// CLockFreeTransactionalMemory
//
// CLockFreeTransactionalMemory: It uses TSX hardware transactions with a
//                          lock-free CAS push/pop as the abort fallback path
//

// No extra state; TSX fast path with a lock-free CAS fallback.
CLockFreeTransactionalMemory::CLockFreeTransactionalMemory()
{
}

// Node cleanup is handled by the CStack destructor.
CLockFreeTransactionalMemory::~CLockFreeTransactionalMemory()
{
}

/*
 * Push: try the doubly-linked push inside an RTM transaction; on abort,
 * fall back to a single lock-free CAS push attempt, retrying the whole
 * loop if the CAS also fails.
 *
 * BUG FIXES:
 *  - _xbegin() was called twice (the first status was ignored), starting
 *    a nested transaction; test the saved status.
 *  - an unconditional 'return true' after a FAILED fallback CAS reported
 *    success without pushing anything, silently dropping the element.
 *    Removed so the loop retries.
 *  - 'volatile' keeps the backoff delay loop from being optimized away.
 *
 * NOTE(review): the fallback CAS path updates only _top/_prev while the
 * transactional path also maintains _head/_next, so mixed-mode runs leave
 * the _next chain incomplete — TODO confirm intended.
 */
bool CLockFreeTransactionalMemory::Push(int value, int thread_id)
{
  Node* new_node = new Node(value);
  Node* oldTop = NULL; // it is for CompareAndSet in LockFreeStack
  int backoff = 2; // MIN_DELAY

  while (1) {
    unsigned status = _xbegin();

    if (status == _XBEGIN_STARTED) {
      /* transactional fast path */
      if (!_head) {
        _head = new_node;
        new_node->_prev = _head;
      } else {
        _top->_next = new_node;
        new_node->_prev = _top;
      }

      _top = new_node;

      _xend();
      return true;
    } else {
      __sync_fetch_and_add(&(this->failures), 1);

      /* fallback: one lock-free CAS push attempt */
      oldTop = Peek();
      new_node->_prev = oldTop;

      if (__sync_bool_compare_and_swap(&_top, oldTop, new_node)) {
        return true;
      } else {
        /* Back off to avoid spamming memory networks. */
        for (volatile int i = backoff; i > 0; i--);

        if (backoff < 256) // MAX_DELAY
          backoff *= 2;
      }
    }
  }
}

/*
 * Pop: try inside an RTM transaction; on abort, fall back to a lock-free
 * CAS pop attempt.  Returns the unlinked top node, or NULL when empty.
 *
 * BUG FIXES:
 *  - _xbegin() was called twice (first status ignored); use the saved
 *    status instead of starting a nested transaction.
 *  - the inner 'Node* pop_node = _top;' shadowed the outer variable, so
 *    the transactional path always returned NULL for a non-empty stack.
 *  - popping the last node now resets _head/_top and detaches the node.
 *  - 'volatile' keeps the backoff delay loop from being optimized away.
 */
Node* CLockFreeTransactionalMemory::Pop(int thread_id)
{
  Node* pop_node = NULL;
  Node* oldTop = NULL; // it is for CompareAndSet in LockFreeStack
                       // and, it will be also returning pop node
  int backoff = 2; // MIN_DELAY

  while (1) {
    unsigned status = _xbegin();

    if (status == _XBEGIN_STARTED) {
      /* transactional fast path */
      if (!_head) {
        pop_node = NULL;
      } else {
        pop_node = _top;
        if (pop_node == _head) {
          _head = NULL; /* stack is empty again */
          _top = NULL;
        } else {
          _top = pop_node->_prev;
          _top->_next = NULL; /* detach the popped node */
        }
      }

      _xend();
      return pop_node;
    } else {
      __sync_fetch_and_add(&(this->failures), 1);

      /* fallback: one lock-free CAS pop attempt */
      oldTop = Peek();
      if (oldTop == NULL) return NULL; // Exception

      if (__sync_val_compare_and_swap(&_top, oldTop, oldTop->_prev)
          == oldTop) {
        return oldTop;
      } else {
        /* Back off to avoid spamming memory networks. */
        for (volatile int i = backoff; i > 0; i--);

        if (backoff < 256) // MAX_DELAY
            backoff *= 2;
      }
    }
  }
}



//--------------------------------------------------------------------------------------
// CEliminationBackoffStack
//
// CEliminationBackoffStack: 
//

//--------------------------------------------------------------------------------------
// Fixed-range policy: visitors may choose among 'range' elimination slots;
// success/timeout counters start at zero.
CRangePolicy::CRangePolicy(int range)
  : _range(range), _success(0), _timeout(0)
{
}

// Report the elimination statistics gathered over the policy's lifetime.
CRangePolicy::~CRangePolicy()
{
  printf("  CEliminationBackoffStack Results::\n");
  printf("    Elimination Success: %lu\n", _success);
  printf("    Elimination TimeOut: %lu\n", _timeout);
}

// Number of elimination-array slots a visiting thread may choose from.
int CRangePolicy::GetRange()
{
  return _range;
}

/*
 * Count one successful elimination.
 * BUG FIX: plain '_success++' is a data race when several threads
 * eliminate concurrently; use the same GCC atomic builtin the rest of
 * this file relies on.
 */
void CRangePolicy::RecordEliminationSuccess()
{
  __sync_fetch_and_add(&_success, 1);
}

/*
 * Count one elimination timeout.
 * BUG FIX: '_timeout++' raced between threads; made atomic to match the
 * file's other shared counters.
 */
void CRangePolicy::RecordEliminationTimeout()
{
  __sync_fetch_and_add(&_timeout, 1);
}

//--------------------------------------------------------------------------------------
// Start with an empty slot: zero payload, EMPTY (0) stamp in the top bits.
CAtomicStampedReference::CAtomicStampedReference()
  : _item(0)
{
}

// Owns only the packed word; nothing to release.
CAtomicStampedReference::~CAtomicStampedReference()
{
}

bool CAtomicStampedReference::CompareAndSet(item_t yrItem, item_t myItem,
                                            int beforeStamp, int nextStamp)
{
  /*
   * Mimics Java's AtomicStampedReference: the 2-bit stamp (EMPTY/WAITING/
   * BUSY) is packed into the top two bits of the 64-bit word, so item and
   * stamp change together in one CAS.  This assumes items never exceed
   * 2^62 so the top bits are free; the same trick could also be used to
   * defeat ABA, though that is not the goal here.  Readers strip the
   * stamp bits off again before using the item as a pointer/value.
   */
  const item_t expected = yrItem | ((item_t)(beforeStamp & 0x03) << 62);
  const item_t desired  = myItem | ((item_t)(nextStamp  & 0x03) << 62);

  //printf("_item: %x before: %x next: %x\n", _item, expected, desired);
  return __sync_bool_compare_and_swap(&_item, expected, desired);
}

/*
 * Get: return the payload and deposit the 2-bit stamp into stampHolder[0].
 *
 * BUG FIX: _item was read twice (once for the stamp, once for the value),
 * so a concurrent Set/CompareAndSet could deliver a stamp and a payload
 * taken from two different states.  Take one snapshot and decode both
 * fields from it.
 */
item_t CAtomicStampedReference::Get(int* stampHolder)
{
  const item_t snapshot = _item;
  stampHolder[0] = (int)(snapshot>>62)&0x03; // Get _state from the top two bits
  return snapshot & 0x3FFFFFFFFFFFFFFF; // Get _item, give the real Virtual Address
}

/* Set: store payload and 2-bit stamp together as one packed word
 * (plain store — callers rely on CompareAndSet for contended updates). */
void CAtomicStampedReference::Set(item_t item, int newStamp)
{
  const item_t stampBits = (item_t)(newStamp & 0x03) << 62;
  _item = item | stampBits;
}

//--------------------------------------------------------------------------------------
/* GetTimeStamp: current wall-clock time in microseconds as a long long.
 * A plain stack buffer suffices — the old static __thread scratch
 * variable bought nothing since it was fully overwritten on every call. */
long long CTimeUnit::GetTimeStamp(void)
{
  struct timeval now;
  gettimeofday(&now, NULL);
  return (long long)now.tv_sec * 1000000LL + now.tv_usec;
}

//--------------------------------------------------------------------------------------
/* Each exchanger owns one stamped slot through which two threads swap items. */
CLockFreeExchanger::CLockFreeExchanger()
  : _slot(new CAtomicStampedReference())
{
}

CLockFreeExchanger::~CLockFreeExchanger()
{
  /* BUG FIX: _slot was allocated in the constructor but never released. */
  delete _slot;
}

/*
 * Exchange: rendezvous protocol (Herlihy & Shavit style).  Two threads
 * meeting at the same slot swap items: the first CASes its item in with
 * state WAITING; the second takes it and leaves its own with state BUSY;
 * the first then collects that item and resets the slot to EMPTY.
 * Throws TimeoutException when no partner arrives within 'timeout'
 * (measured with unit->GetTimeStamp(), i.e. microseconds).
 */
item_t CLockFreeExchanger::Exchange(item_t myItem, long long timeout, CTimeUnit* unit)
{
  /* timeBound = currentTimeStamp + timeOut */
  long long timeBound = unit->GetTimeStamp() + timeout;
  int stampHolder[] = {EMPTY};

  while (true) {
    if (unit->GetTimeStamp() > timeBound)
      throw TimeoutException();
    item_t yrItem = _slot->Get(stampHolder);
    int stamp = stampHolder[0];

    switch (stamp) {
      case EMPTY:
        /* Try to place the item in the slot and set the state to WAITING */
        if (_slot->CompareAndSet(yrItem, myItem, EMPTY, WAITING)) {
          /* We are the first arriver: spin until a partner flips the
           * slot to BUSY or the deadline passes. */
          while (unit->GetTimeStamp() < timeBound) { // within timeBound
            yrItem = _slot->Get(stampHolder);
            if (stampHolder[0] == BUSY) { // Another task shows up
              _slot->Set(NOITEM, EMPTY);
              return yrItem;
            }
          }
          // No other thread shows up, reset the state
          if (_slot->CompareAndSet(myItem, NOITEM, WAITING, EMPTY)) {
            throw TimeoutException();
          } else {
            /* CAS lost: a partner arrived at the last moment and already
             * set BUSY — collect its item and reset the slot. */
            yrItem = _slot->Get(stampHolder);
            _slot->Set(NOITEM, EMPTY);
            return yrItem;
          }
        }
        break;
      case WAITING:
        /* take the item, and try to replace it with its own by changing the state */
        if (_slot->CompareAndSet(yrItem, myItem, WAITING, BUSY))
          return yrItem;
        break;
      case BUSY:
        /* Slot occupied by a completing pair; retry elsewhere/later. */
        break;
      default: // impossible
        break;
    }
  }
}

//--------------------------------------------------------------------------------------
/* Build 'capacity' independent exchangers; each visit() waits at most
 * 'timeout' time units for a partner. */
CEliminationArray::CEliminationArray(int capacity, long long timeout)
  : _duration(timeout), _random(new CRandom()), _timeUnit(new CTimeUnit()),
    _capacity(capacity)
{
  exchanger = (CLockFreeExchanger**)malloc(capacity * sizeof(CLockFreeExchanger*));
  for (int slot = 0; slot < capacity; ++slot)
    exchanger[slot] = new CLockFreeExchanger();
}

CEliminationArray::~CEliminationArray()
{
  for (int i=0; i<_capacity; i++)
    delete exchanger[i];
  /* BUG FIX: the malloc'd pointer array and the helper objects allocated
   * in the constructor were never released. */
  free(exchanger);
  delete _random;
  delete _timeUnit;
}

/* visit: try to swap 'item' with a partner at a randomly chosen slot in
 * [0, range); propagates TimeoutException when no partner shows up. */
item_t CEliminationArray::visit(item_t item, int range)
{
  const int slot = _random->nextInt(range);
  CLockFreeExchanger* ex = exchanger[slot];
  return ex->Exchange(item, _duration, _timeUnit);
}

//--------------------------------------------------------------------------------------
// Defaults: 4 exchanger slots, 100-microsecond elimination timeout.
// (The old comment claimed 16 entries / 1 us, contradicting the code.)
CEliminationBackoffStack::CEliminationBackoffStack()
  : _capacity(4), _timeout(100LL) // 4 slots, 100 us timeout
{
  _rangePolicy = new CRangePolicy(_capacity);
  _eliminationArray = new CEliminationArray(_capacity, _timeout);
}

// Configurable variant: 'capacity' exchanger slots, 'timeout' microseconds.
CEliminationBackoffStack::CEliminationBackoffStack(int capacity, long long timeout)
  : _capacity(capacity), _timeout(timeout)
{
  _rangePolicy = new CRangePolicy(_capacity);
  _eliminationArray = new CEliminationArray(_capacity, _timeout);
}

// Release the helpers allocated in the constructors; the policy's
// destructor prints the elimination statistics.
CEliminationBackoffStack::~CEliminationBackoffStack()
{
  delete _eliminationArray;
  delete _rangePolicy;
}

bool CEliminationBackoffStack::Push(int value, int thread_id)
{
  Node* node = new Node(value);

  while (true) {
    Node* oldTop = Peek();
    node->_prev = oldTop;

    /* Try Push */
    if (__sync_bool_compare_and_swap(&_top, oldTop, node)) {
      return true;
    } else {
      /* If it fails, visit the elimination array */
      try {
        item_t item = _eliminationArray->visit(node->_value, _rangePolicy->GetRange());
        if (item == NOITEM) {
          //printf("Elimination success! push: %lu\n", node->_value);
          _rangePolicy->RecordEliminationSuccess();
          return true; // exchanged with pop
        }
      } catch (TimeoutException ex) {
        //printf("Elimination timeout! push: %lu\n", node->_value);
        _rangePolicy->RecordEliminationTimeout();
      }
    }
  }
}

/*
 * Pop: lock-free CAS pop; on contention, visit the elimination array and
 * try to take a value directly from a concurrent Push.
 *
 * BUG FIXES:
 *  - the CAS stored 'oldTop->_prev == oldTop' — a bool (0/1) — into _top
 *    instead of the predecessor node, corrupting the stack; and the
 *    returned OLD VALUE was used as a plain truth test, so the branch
 *    taken had nothing to do with whether the CAS succeeded.  Swap in
 *    oldTop->_prev and compare the result against oldTop.
 *  - TimeoutException caught by const reference instead of by value.
 */
Node* CEliminationBackoffStack::Pop(int thread_id)
{
  while (true) {
    Node* oldTop = Peek();
    if (oldTop == NULL) return NULL;

    /* Try Pop */
    if (__sync_val_compare_and_swap(&_top, oldTop, oldTop->_prev) == oldTop) {
      return oldTop;
    } else {
      try {
        /* If it fails, visit the elimination array */
        item_t item = _eliminationArray->visit(NOITEM, _rangePolicy->GetRange());
        if (item != NOITEM) {
          _rangePolicy->RecordEliminationSuccess();
          return new Node(item); // exchanged with push
        }
      } catch (const TimeoutException& ex) {
        _rangePolicy->RecordEliminationTimeout();
      }
    }
  }
}

//--------------------------------------------------------------------------------------
// Defaults: 16 exchanger slots, 1000-microsecond elimination timeout.
// (The old comment said "1 micro second", contradicting the 1000LL value.)
CEliminationBackoffTransactionalMemory::CEliminationBackoffTransactionalMemory()
  : _capacity(16), _timeout(1000LL) // 16 slots, 1000 us timeout
{
  _rangePolicy = new CRangePolicy(_capacity);
  _eliminationArray = new CEliminationArray(_capacity, _timeout);
}

// Configurable variant: 'capacity' exchanger slots, 'timeout' microseconds.
CEliminationBackoffTransactionalMemory::CEliminationBackoffTransactionalMemory(int capacity, long long timeout)
  : _capacity(capacity), _timeout(timeout)
{
  _rangePolicy = new CRangePolicy(_capacity);
  _eliminationArray = new CEliminationArray(_capacity, _timeout);
}

// Release the helpers allocated in the constructors.
CEliminationBackoffTransactionalMemory::~CEliminationBackoffTransactionalMemory()
{
  delete _eliminationArray;
  delete _rangePolicy;
}

bool CEliminationBackoffTransactionalMemory::Push(int value, int thread_id)
{
  Node* new_node = new Node(value);

  while (1) {
    unsigned status = _xbegin();

    //if (__builtin_expect(status == _XBEGIN_STARTED, 1)) {
    if (_xbegin() == _XBEGIN_STARTED) {
      // we're in transactional context
      /* When the stack is empty */
      if (!_head) {
        _head = new_node;
        new_node->_prev = _head;
      } else {
        _top->_next = new_node;
        new_node->_prev = _top;
      }

      _top = new_node;

      // Hacky check whether spinlock is locked.
      // See glibc/nptl/sysdeps/x86_64/pthread_spin_unlock.S
      //if (__builtin_expect((int)_lock != 1, 0))
      //	_xabort(_ABORT_LOCK_BUSY);

      _xend();

      return true;
    } else {
      __sync_fetch_and_add(&(this->failures), 1);
      /* If it fails, visit the elimination array */
      try {
        item_t item = _eliminationArray->visit(new_node->_value, _rangePolicy->GetRange());
        if (item == NOITEM) {
          //printf("Elimination success! push: %lu\n", node->_value);
          _rangePolicy->RecordEliminationSuccess();
          return true; // exchanged with pop
        }
      } catch (TimeoutException ex) {
        //printf("Elimination timeout! push: %lu\n", node->_value);
        _rangePolicy->RecordEliminationTimeout();
      }
    }
  }
}

/*
 * Pop: try inside an RTM transaction; on abort, try to eliminate against
 * a concurrent Push.  Returns the unlinked top node, or NULL when empty.
 *
 * BUG FIXES:
 *  - _xbegin() was called twice (first status ignored); use the saved
 *    status instead of starting a nested transaction.
 *  - the inner 'Node* pop_node = _top;' shadowed the outer variable, so
 *    the transactional path always returned NULL for a non-empty stack.
 *  - popping the last node now resets _head/_top and detaches the node.
 *  - TimeoutException caught by const reference instead of by value.
 */
Node* CEliminationBackoffTransactionalMemory::Pop(int thread_id)
{
  Node* pop_node = NULL;

  while (1) {
    unsigned status = _xbegin();

    if (status == _XBEGIN_STARTED) {
      /* transactional fast path */
      if (!_head) {
        pop_node = NULL;
      } else {
        pop_node = _top;
        if (pop_node == _head) {
          _head = NULL; /* stack is empty again */
          _top = NULL;
        } else {
          _top = pop_node->_prev;
          _top->_next = NULL; /* detach the popped node */
        }
      }

      _xend();
      return pop_node;
    } else {
      __sync_fetch_and_add(&(this->failures), 1);
      try {
        /* abort: try to take a value directly from a concurrent Push */
        item_t item = _eliminationArray->visit(NOITEM, _rangePolicy->GetRange());
        if (item != NOITEM) {
          _rangePolicy->RecordEliminationSuccess();
          return new Node(item); // exchanged with push
        }
      } catch (const TimeoutException& ex) {
        _rangePolicy->RecordEliminationTimeout();
      }
    }
  }
}

