#pragma once
#include "../Utils/PlatTypeUtil.h"
#include <atomic>
#include <cstddef>

template <typename T, uint32 capacity = 4096>
class LockFreeQueue
{
public:
	explicit LockFreeQueue()
	{
		mCapacityMask = capacity - 1;
		for (uint32 i = 1; i <= sizeof(void*) * 4; i <<= 1)
			mCapacityMask |= mCapacityMask >> i;
		mCapacity = mCapacityMask + 1;

		mQueue = (Node*)new char[sizeof(Node) * mCapacity];
		for (uint32 i = 0; i < mCapacity; ++i)
		{
			mQueue[i].data = nullptr;
			mQueue[i].tail.store(i, std::memory_order_relaxed);
			mQueue[i].head.store(-1, std::memory_order_relaxed);
		}

		mTail.store(0, std::memory_order_relaxed);
		mHead.store(0, std::memory_order_relaxed);
	}

	~LockFreeQueue()
	{
		delete[] (char*)mQueue;
	}

	uint32 Capacity() const { return mCapacity; }

	uint32 Size() const
	{
		uint32 head = mHead.load(std::memory_order_acquire);
		return mTail.load(std::memory_order_relaxed) - head;
	}

	bool Push(T* data)
	{
		Node* node;
		uint32 tail = mTail.load(std::memory_order_relaxed);
		for (;;)
		{
			node = &mQueue[tail & mCapacityMask];
			if (node->tail.load(std::memory_order_relaxed) != tail)
				return false;
			if ((mTail.compare_exchange_weak(tail, tail + 1, std::memory_order_relaxed)))
				break;
		}
		node->data = data;
		node->head.store(tail, std::memory_order_release);
		return true;
	}

	bool Pop(T** result)
	{
		Node* node;
		uint32 head = mHead.load(std::memory_order_relaxed);
		for (;;)
		{
			node = &mQueue[head & mCapacityMask];
			if (node->head.load(std::memory_order_relaxed) != head)
				return false;
			if (mHead.compare_exchange_weak(head, head + 1, std::memory_order_relaxed))
				break;
		}
		*result = node->data;
		node->tail.store(head + mCapacity, std::memory_order_release);
		return true;
	}

private:
	struct Node
	{
		T* data;
		std::atomic<uint32> tail;
		std::atomic<uint32> head;
	};

private:
	uint32 mCapacityMask;
	Node* mQueue;
	uint32 mCapacity;
	char mCacheLinePad1[64];
	std::atomic<uint32> mTail;
	char mCacheLinePad2[64];
	std::atomic<uint32> mHead;
	char mCacheLinePad3[64];
};


#if 0

/**
 *	LockFreeQueue (legacy)
 *
 *	Multi-threaded pointer queue adapted from the boost lockfree queue.
 *	Disabled under "#if 0" and kept for reference only; superseded by the
 *	sequence-numbered implementation above.
 **/
template <typename T, int32 AllocSize = 1000>
class LockFreeQueue
{
public:
	LockFreeQueue(int32 allocSize = AllocSize) : mHeadIterator(0), mTailIterator(0), mAllocSize(allocSize)
	{
		// A null slot means "empty", so the ring starts fully cleared.
		mQueue = new T * [mAllocSize];
		for (int32 i = 0; i < mAllocSize; ++i)
			mQueue[i] = nullptr;
	}

	~LockFreeQueue()
	{
		// Queued T objects are not owned and not deleted here.
		delete[] mQueue;
	}

	// Appends 'data'.  Spins up to 'retryCount' times while the queue is
	// full; returns false for a null item or when retries are exhausted.
	bool Enqueue(T* data, uint32 retryCount = 1000)
	{
		if (nullptr == data)
		{
			// Null enqueues are not allowed: null marks an empty slot.
			return false;
		}

		uint32 currCount = 0;
		while (currCount < retryCount)
		{
			// Release fence in order to prevent memory reordering
			// of any read or write with the following write.
			std::atomic_thread_fence(std::memory_order_release);

			int32 headIterator = mHeadIterator.load();

			if (mQueue[headIterator] == nullptr)
			{
				int32 headIteratorOrig = headIterator;

				++headIterator;
				if (headIterator >= mAllocSize)
					headIterator = 0;

				// Don't worry if this CAS fails: it only means another
				// thread has already claimed the slot and will set it.
				if (std::atomic_compare_exchange_strong(&mHeadIterator, &headIteratorOrig, headIterator))
				{
					// Pointer-sized stores are atomic (no partial pointer).
					mQueue[headIteratorOrig] = data;
					return true;
				}
			}
			else
			{
				// The queue is full.  Spin a few times to check whether an
				// item is popped off.
				++currCount;
			}
		}
		return false;
	}

	// Removes the oldest item.  Returns false (and sets *pdata to nullptr)
	// when the queue is empty; returns false for a null 'pdata'.
	bool Dequeue(T** pdata)
	{
		if (!pdata)
		{
			// Null dequeues are not allowed!
			return false;
		}

		bool bDone = false;

		while (!bDone)
		{
			// Acquire fence in order to prevent memory reordering
			// of any read or write with the following read.
			std::atomic_thread_fence(std::memory_order_acquire);

			int32 tailIterator = mTailIterator.load();
			// BUGFIX: the original declared a local "T* pdata" here that
			// shadowed the out-parameter, so "*pdata = (T*)pdata" assigned
			// the shadowed local to itself and the caller never received
			// the item.  The local is now named 'data'.
			T* data = mQueue[tailIterator];

			if (data != nullptr)
			{
				int32 tailIteratorOrig = tailIterator;

				++tailIterator;
				if (tailIterator >= mAllocSize)
					tailIterator = 0;

				if (std::atomic_compare_exchange_strong(&mTailIterator, &tailIteratorOrig, tailIterator))
				{
					// Pointer-sized stores are atomic (no partial pointer).
					mQueue[tailIteratorOrig] = nullptr;

					// Hand the dequeued pointer back to the caller.
					*pdata = data;

					return true;
				}
			}
			else
			{
				bDone = true;
			}
		}
		*pdata = nullptr;
		return false;
	}

	// Best-effort size.  TryCount() yields 0 for both "empty" and "full",
	// so a 0 result is disambiguated by probing the slot just before the
	// tail: non-null there means the queue is full.
	int32 GetSize() const
	{
		int32 count = TryCount();

		if (0 != count)
			return count;

		int32 lastInsert = mTailIterator.load() - 1;
		if (lastInsert < 0)
			lastInsert = mAllocSize - 1;

		if (mQueue[lastInsert] != nullptr)
			return mAllocSize;

		return 0;
	}

	int32 GetMaxSize() const
	{
		return mAllocSize;
	}

private:
	// Distance between the enqueue and dequeue cursors.  Returns 0 when the
	// queue is completely full as well as when it is empty (see GetSize()
	// for how callers disambiguate the two cases).
	int32 TryCount() const
	{
		int32 headIterator = mHeadIterator.load();
		int32 tailIterator = mTailIterator.load();

		if (tailIterator > headIterator)
			return mAllocSize - tailIterator + headIterator;

		return headIterator - tailIterator;
	}

private:
	std::atomic<int32> mHeadIterator;	// enqueue index
	std::atomic<int32> mTailIterator;	// dequeue index
	int32 mAllocSize;					// size of the ring array
	T** mQueue;							// array of pointers to the data
};

#endif