#pragma once
#include "../Utils/PlatTypeUtil.h"
#include <atomic>
#include <cstddef>

/**
 *	LockFreeStack
 *
 *	Fixed-capacity, lock-free Treiber-style stack of T* pointers.
 *	Node indices (1..capacity; 0 means "null") live in the low bits of the
 *	two atomic heads (mAbaFree / mAbaPushed); the bits above the index form
 *	an ABA tag that is bumped by mAbaOffset on every successful pop, so a
 *	recycled index is never mistaken for a stale snapshot of the head.
 *
 *	The stack does NOT own the T* payloads it stores; callers manage their
 *	lifetime. Push fails (returns false) when all capacity slots are in use,
 *	Pop fails when the stack is empty.
 */
template <typename T, uint32 capacity = 128> class LockFreeStack
{
public:
	explicit LockFreeStack()
		: mCapacity(capacity)
	{
		// Smear the highest set bit of `capacity` downwards so mIndexMask
		// ends up with every bit below it set (i.e. next_pow2 - 1), wide
		// enough to hold any index in 0..capacity. Shifts of 1,2,4,8,16
		// cover all 32 bits of a uint32.
		// NOTE: the previous bound of sizeof(void*) * 4 produced a
		// `>> 32` on a 32-bit integer on 64-bit builds, which is
		// undefined behavior per [expr.shift].
		mIndexMask = capacity;
		for (uint32 i = 1; i < 32; i <<= 1)
			mIndexMask |= mIndexMask >> i;
		// First bit above the index field: adding it to a head value
		// bumps the ABA tag without disturbing the index bits.
		mAbaOffset = mIndexMask + 1;

		// Slot 0 is reserved as the "null" index. Nodes 1..capacity are
		// chained into the initial free list: 1 -> 2 -> ... -> capacity -> 0.
		mQueue = new Node[capacity + 1];
		for (uint32 i = 1; i < capacity;)
		{
			Node& node = mQueue[i];
			node.data = nullptr;
			node.abaNextFree.store(++i, std::memory_order_relaxed);
		}
		mQueue[capacity].abaNextFree.store(0, std::memory_order_relaxed);

		mAbaFree.store(1, std::memory_order_relaxed);   // free head: node 1, tag 0
		mAbaPushed.store(0, std::memory_order_relaxed); // pushed list empty
	}

	~LockFreeStack()
	{
		// The stack does not own the stored T* payloads, so there is
		// nothing to release except the node array. (An earlier version
		// walked the pushed list here, but the walk had no side effects
		// and has been removed.)
		delete[] mQueue;
	}

	// Maximum number of elements the stack can hold.
	uint32 Capacity() const { return mCapacity; }

	/**
	 *	Push `data` onto the stack.
	 *	@param data  pointer to store (ownership stays with the caller)
	 *	@return true on success, false if the stack is full.
	 */
	bool Push(T* data)
	{
		// Phase 1: claim a node off the free list.
		Node* node;
		uint32 abaFree;
		for (;;)
		{
			abaFree = mAbaFree.load(std::memory_order_relaxed);
			uint32 nodeIndex = abaFree & mIndexMask;
			if (!nodeIndex)
				return false; // free list exhausted -> stack is full
			node = &mQueue[nodeIndex];
			const uint32 abaNextFree = node->abaNextFree.load(std::memory_order_relaxed);
			// Acquire pairs with the release CAS in Pop() that recycled
			// this node, so its prior contents are settled before reuse.
			if (mAbaFree.compare_exchange_weak(abaFree, abaNextFree + mAbaOffset,
			                                   std::memory_order_acquire,
			                                   std::memory_order_relaxed))
				break;
		}

		// Phase 2: publish the payload and link the node onto the pushed
		// list. The successful CAS must be a release so the node->data
		// store below is visible to the thread that pops this node; a
		// relaxed CAS here is a data race on weakly-ordered hardware.
		node->data = data;
		for (;;)
		{
			uint32 abaPushed = mAbaPushed.load(std::memory_order_relaxed);
			node->abaNextPushed.store(abaPushed, std::memory_order_relaxed);
			if (mAbaPushed.compare_exchange_weak(abaPushed, abaFree,
			                                     std::memory_order_release,
			                                     std::memory_order_relaxed))
				return true;
		}
	}

	/**
	 *	Pop the most recently pushed pointer.
	 *	@param result  receives the popped pointer on success (untouched on failure)
	 *	@return true on success, false if the stack is empty.
	 */
	bool Pop(T** result)
	{
		// Phase 1: unlink the head node from the pushed list.
		Node* node;
		uint32 abaPushed;
		for (;;)
		{
			abaPushed = mAbaPushed.load(std::memory_order_relaxed);
			uint32 nodeIndex = abaPushed & mIndexMask;
			if (!nodeIndex)
				return false; // pushed list empty
			node = &mQueue[nodeIndex];
			const uint32 abaNextPushed = node->abaNextPushed.load(std::memory_order_relaxed);
			// Acquire pairs with the release CAS in Push() that published
			// node->data, making the payload store visible here.
			if (mAbaPushed.compare_exchange_weak(abaPushed, abaNextPushed + mAbaOffset,
			                                     std::memory_order_acquire,
			                                     std::memory_order_relaxed))
				break;
		}

		*result = node->data;

		// Phase 2: return the node to the free list under a bumped ABA tag.
		// Release pairs with the acquire CAS in Push() that will reuse it.
		abaPushed += mAbaOffset;
		for (;;)
		{
			uint32 abaFree = mAbaFree.load(std::memory_order_relaxed);
			node->abaNextFree.store(abaFree, std::memory_order_relaxed);
			if (mAbaFree.compare_exchange_weak(abaFree, abaPushed,
			                                   std::memory_order_release,
			                                   std::memory_order_relaxed))
				return true;
		}
	}

private:
	struct Node
	{
		T* data;                           // payload; valid while the node is on the pushed list
		std::atomic<uint32> abaNextFree;   // tag|index of the next node on the free list
		std::atomic<uint32> abaNextPushed; // tag|index of the next node on the pushed list
	};

private:
	uint32 mIndexMask;   // mask extracting the index bits from a head value
	Node* mQueue;        // node array; slot 0 is the reserved "null" node
	uint32 mAbaOffset;   // tag increment (first bit above the index field)
	uint32 mCapacity;
	// Padding keeps the two contended heads on separate cache lines to
	// avoid false sharing between pushers and poppers.
	char mCacheLinePad1[64];
	std::atomic<uint32> mAbaFree;   // tag|index head of the free list
	char mCacheLinePad2[64];
	std::atomic<uint32> mAbaPushed; // tag|index head of the pushed (LIFO) list
	char mCacheLinePad3[64];
};

// NOTE(review): everything below is compiled out (#if 0). It is an older,
// array-index-based implementation kept for reference only. It contains
// several defects (flagged inline) and would not compile if re-enabled,
// both because it redefines LockFreeStack and because of the bug at
// `*pdata = (T*)pdata;` below. Prefer the active implementation above.
#if 0

/**
 *	LockFreeStack
 *	MultiThread Queue copy from boost lockfree queue
 **/
template <typename T, int32 AllocSize = 1000>
class LockFreeStack
{
public:
	// Allocates a zeroed array of AllocSize slots; mHeadIterator == -1
	// denotes an empty stack.
	LockFreeStack(int32 allocSize = AllocSize) : mHeadIterator(-1), mAllocSize(allocSize)
	{
		mStack = new T * [mAllocSize];
		memset(mStack, 0, sizeof(T*) * mAllocSize);
	}

	~LockFreeStack()
	{
		if (mStack)
			delete[] mStack;
	}


	// Attempts to push `data`; spins up to retryCount times when the top
	// slot appears occupied. Returns false for null data or on giving up.
	bool Enqueue(T* data, uint32 retryCount = 1000)
	{
		if (NULL == data)
		{
			// Null enqueues are not allowed  
			return false;
		}

		uint32 currCount = 0;
		while (currCount < retryCount)
		{
			// Release fence in order to prevent memory reordering   
			// of any read or write with following write  
			std::atomic_thread_fence(std::memory_order_release);

			int32 headIterator = mHeadIterator.load();
			if (headIterator + 1 >= mAllocSize)
			{
				assert(0);
				return false;
			}

			if (mStack[headIterator + 1] == nullptr)
			{
				int32 headIteratorOrig = headIterator;
				++headIterator;
				

				// Don't worry if this CAS fails.  It only means some thread else has  
				// already inserted an item and set it.  
				// BUG(review): the slot is written only AFTER the index CAS
				// succeeds, so a concurrent Dequeue can observe the claimed
				// index while mStack[headIterator] is still nullptr.
				if (std::atomic_compare_exchange_strong(&mHeadIterator, &headIteratorOrig, headIterator))
				{
					// void* are always atomic (you wont set a partial pointer).  
					mStack[headIterator] = data;
					return true;
				}
			}
			else
			{
				// The queue is full.  Spin a few times to check to see if an item is popped off.  
				++currCount;
			}
		}
		return false;
	}

	// Attempts to pop the top element into *pdata. Returns false when the
	// stack is empty or the output pointer is null.
	bool Dequeue(T** pdata)
	{
		if (!pdata)
		{
			// Null dequeues are not allowed!  
			return false;
		}

		bool bDone = false;

		while (!bDone)
		{
			// Acquire fence in order to prevent memory reordering   
			// of any read or write with following read  
			std::atomic_thread_fence(std::memory_order_acquire);
			//MemoryBarrier();  
			int32 headIterator = mHeadIterator.load();

			if (headIterator < 0)
			{
				return false;
			}

			// BUG(review): this local `T* pdata` shadows the `T** pdata`
			// parameter for the rest of the loop body, so the result is
			// never delivered to the caller.
			T* pdata = mStack[headIterator];
			//volatile T *pdata = mStack[tailIterator];              
			if (pdata != nullptr)
			{
				int32 headIteratorOrig = headIterator;
				--headIterator;

				if (std::atomic_compare_exchange_strong(&mHeadIterator, &headIteratorOrig, headIterator))
				{
					// Sets of sizeof(void*) are always atomic (you wont set a partial pointer).  
					mStack[headIteratorOrig] = nullptr;

					// Gets of sizeof(void*) are always atomic (you wont get a partial pointer).  
					// BUG(review): assigns a T* to a T through the shadowed
					// local; does not compile for most T. The intent was
					// presumably to store the popped pointer into the
					// caller's out-parameter.
					*pdata = (T*)pdata;

					return true;
				}
			}
			else
			{
				bDone = true;
			}
		}
		*pdata = nullptr;
		return false;
	}


	// NOTE(review): racy snapshot — the head can move between the load and
	// the caller acting on the returned size.
	int32 GetSize() const
	{
		int32 headIterator = mHeadIterator.load();
		return headIterator + 1;
	}

	int32 GetMaxSize() const
	{
		return mAllocSize;
	}

private:

private:
	std::atomic<int32> mHeadIterator;  // enqueue index  
	int32 mAllocSize;                     // size of the array  
	T** mStack;                  // array of pointers to the data  
};

#endif