#ifndef H_C_MPMC_QUEUE_H
#define H_C_MPMC_QUEUE_H

#include "../typedefine.h"
#include <atomic>
#include <stdexcept>
#include <thread>

namespace TB
{
	template<typename T> class CMPMCQueueNodePool;
	template<typename T>
	QDLL_EXPORT QCLASS CMPMCQueueNode
	{
	public:
		// Intrusive singly-linked list node; instances are carved out of a
		// CMPMCQueueNodePool and returned to it via reference counting.
		CMPMCQueueNode()
			: m_poNext(NULL_PTR)
			, m_oData()
			, m_poPool(NULL_PTR)
		{
		}

		// Next node in the queue. (volatile deliberately not used -- it would
		// not provide any inter-thread synchronization anyway.)
		PTR(CMPMCQueueNode<T>) m_poNext;
		// Payload slot; assigned by the producer after allocation.
		T m_oData;
		// Owning pool, needed so the releaser can drop its reference.
		CMPMCQueueNodePool<T>* m_poPool;
	};
	template<typename T>
	class CMPMCQueueNodePool
	{
		NON_COPYABLE(CMPMCQueueNodePool)
	public:
		CMPMCQueueNodePool()
		{
		}

		// 非线程安全
		inline CMPMCQueueNode<T>* New()
		{
			CMPMCQueueNode<T>& roNode = m_aNodePool[m_btPoolPopIndex++];
			roNode.m_poPool = this;
			return &roNode;
		}

		// 非线程安全，只能调用一次，然后整个Pool就不可用了
		inline INT8 ReleaseAll()
		{
			INT8 btSub = 64 - m_btPoolPopIndex;
			return m_btRefCount.fetch_sub(btSub, std::memory_order_relaxed) - btSub;
		}

		// 线程安全
		inline INT8 DecreaseRefCount()
		{
			return m_btRefCount.fetch_sub(1, std::memory_order_relaxed) - 1;
		}

		// 线程安全
		inline bool IsEmpty() const
		{
			return m_btPoolPopIndex >= 64;
		}

	private:
		CMPMCQueueNode<T> m_aNodePool[64];
		INT8 m_btPoolPopIndex = 0;
		std::atomic<INT8> m_btRefCount = 64;
	};

	template<typename T>
	QDLL_EXPORT QCLASS CMPMCQueue
	{
		NON_COPYABLE(CMPMCQueue)
	private:
		class CMPMCQueueNodeFactory
		{
			NON_COPYABLE(CMPMCQueueNodeFactory)
		public:
			CMPMCQueueNodeFactory()
			{
			}
			~CMPMCQueueNodeFactory()
			{
				if (m_poPool != nullptr && 0 == m_poPool->ReleaseAll())
					SAFE_DELETE(m_poPool);
			}
			// 非线程安全
			CMPMCQueueNode<T>* New()
			{
				if (m_poPool == nullptr)
					m_poPool = new CMPMCQueueNodePool<T>();
				CMPMCQueueNode<T>* poNode = m_poPool->New();
				if (m_poPool->IsEmpty())
					m_poPool = nullptr;
				return poNode;
			}
		private:
			CMPMCQueueNodePool<T>* m_poPool = nullptr;
		};
	public:
		CMPMCQueue()
		{
			CMPMCQueueNode<T>* poHeader = m_oMPMCQueueNodeFactory.New();
			m_poHeader = poHeader;
			m_oTail.store(poHeader, std::memory_order_release);
		}

		~CMPMCQueue()
		{
			while (m_oSpinLock.test_and_set(std::memory_order_acquire))
			{
				std::this_thread::yield();
			}
			for (auto p = m_poHeader; p != NULL_PTR;)
			{
				auto tmp = p;
				p = p->m_poNext;
				_ReleaseNode(tmp);
			}
			m_oSpinLock.clear(std::memory_order_release);
		}

		inline const CMPMCQueueNode<T>* Front() const
		{
			return m_poHeader->m_poNext; // serialization-point wrt producers, acquire
		}

		UINT32 Pop(T& roData)
		{
			while (m_oSpinLock.test_and_set(std::memory_order_acquire))
			{
				std::this_thread::yield();
			}
			auto poHeader = m_poHeader;
			PTR(CMPMCQueueNode<T>) poNext = poHeader->m_poNext;
			if (poNext == nullptr)
			{
				m_oSpinLock.clear(std::memory_order_release);
				return 0;
			}
			m_poHeader = poNext;
			roData = poNext->m_oData;
			m_oSpinLock.clear(std::memory_order_release);
			_ReleaseNode(poHeader);
			return m_dwCount.fetch_sub(1, std::memory_order_relaxed);
		}

		inline UINT32 Push(CONST_REF(T) roData)
		{
			UINT32 dwCount = m_dwCount.fetch_add(1, std::memory_order_relaxed);
			PTR(CMPMCQueueNode<T>) poNode = m_oMPMCQueueNodeFactory.New();
			poNode->m_oData = roData;
			PTR(CMPMCQueueNode<T>) prev = m_oTail.exchange(poNode, std::memory_order_acq_rel); // serialization-point wrt producers, acquire-release
			prev->m_poNext = poNode; // serialization-point wrt consumer, release
			return dwCount;
		}

		inline UINT32 GetCount()
		{
			return m_dwCount.load(std::memory_order_acquire);
		}

	private:
		void _ReleaseNode(CMPMCQueueNode<T>* poNode)
		{
			if (poNode->m_poPool->DecreaseRefCount() == 0)
				SAFE_DELETE(poNode->m_poPool);
		}

	private:
		CMPMCQueueNode<T>* m_poHeader = NULL_PTR;
		std::atomic<CMPMCQueueNode<T>*> m_oTail = NULL_PTR;
		std::atomic_flag m_oSpinLock = ATOMIC_FLAG_INIT;
		std::atomic_uint32_t m_dwCount;
		thread_local static CMPMCQueueNodeFactory m_oMPMCQueueNodeFactory;
	};

	template<typename T> thread_local CMPMCQueue<T>::CMPMCQueueNodeFactory CMPMCQueue<T>::m_oMPMCQueueNodeFactory;
}

#endif
