//==============================================================================
// Copyright (c) 2008-2013 Niall Ryan. All Rights Reserved.
//==============================================================================

#ifndef HYDRA_QUEUES_LOCKFREEQUEUE_H
#define HYDRA_QUEUES_LOCKFREEQUEUE_H 1

#include "hydra/allocators/DelayedFreeAllocator.h"
#include "hydra/atomic/Atomic.h"

#include <new>

namespace Hydra
{
	/**
	 * This queue relies on garbage collection to avoid the ABA problem. The allocator's
	 * garbage collector must be run when no push or pop operations are running on the queue.
	 */
	template<typename T, typename Allocator=DelayedFreeAllocator>
	class LockFreeQueue
	{
		// Singly-linked list node. The queue always holds at least one node:
		// a sentinel whose m_item is unused. The head pointer addresses the
		// sentinel; the first real item lives in the sentinel's successor.
		struct Node
		{
			T m_item;
			Atomic<Node*> m_next;
		};

	public:
		// pop() may dereference a node that a racing pop has already freed,
		// so the allocator must defer memory reuse; a node recycled at the
		// same address would otherwise trigger the classic ABA failure.
		static_assert(!Allocator::kAbaVulnerable, "Allocator must not be vulnerable to the ABA problem");

		/// Constructs an empty queue containing only the sentinel node.
		LockFreeQueue()
		{
			Node* sentinel = allocNode();
			sentinel->m_next = nullptr;
			m_alloc.m_head = sentinel;
			m_tail = sentinel;
		}

		// Non-copyable: the implicitly-generated copy would shallow-copy the
		// node list and both destructors would then free the same nodes.
		LockFreeQueue(const LockFreeQueue&) = delete;
		LockFreeQueue& operator=(const LockFreeQueue&) = delete;

		/// Destroys the queue and frees every node (including the sentinel).
		/// Must not run concurrently with push() or pop().
		~LockFreeQueue()
		{
			//free all nodes
			Node* node = m_alloc.m_head;
			while( node )
			{
				Node* nextNode = node->m_next;
				freeNode(node);
				node = nextNode;
			}
		}

		/**
		 * Appends a copy of @a item at the tail of the queue.
		 * Lock-free; safe against concurrent push() and pop() calls.
		 */
		void push(const T& item)
		{
			Node* node = allocNode();
			node->m_item = item;
			node->m_next = nullptr;
			while( true )
			{
				Node* last = m_tail;
				Node* next = last->m_next;
				if( last == m_tail )	// snapshot of tail still consistent?
				{
					if( next == nullptr )
					{
						// Tail is the true last node: try to link our node
						// after it. (compareAndSwap(newValue, expected)
						// returns the previous value.)
						if( last->m_next.compareAndSwap(node, nullptr) == nullptr )
						{
							// Linked. Swing the tail forward; failure is fine
							// because another thread has already helped.
							m_tail.compareAndSwap(node, last);
							return;
						}
					}
					else
					{
						// Tail lags behind a half-finished push: help it along.
						m_tail.compareAndSwap(next, last);
					}
				}
			}
		}

		/**
		 * Removes the item at the head of the queue.
		 * Lock-free; safe against concurrent push() and pop() calls.
		 * @param item Receives a copy of the removed item on success.
		 * @return true if an item was removed, false if the queue was empty.
		 */
		bool pop(T& item)
		{
			while( true )
			{
				Node* first = m_alloc.m_head;
				Node* last = m_tail;
				Node* next = first->m_next;
				if( first == m_alloc.m_head )	// snapshot of head still consistent?
				{
					if( first == last )
					{
						// Either genuinely empty, or the tail lags behind a
						// half-finished push.
						if( next == nullptr )
							return false;
						// Help the in-flight push by advancing the tail.
						m_tail.compareAndSwap(next, last);
					}
					else
					{
						// Copy the value out BEFORE the CAS: once the head
						// moves past 'next', a later pop may free that node.
						item = next->m_item;
						if( m_alloc.m_head.compareAndSwap(next, first) == first )
						{
							// Won the race: the old sentinel 'first' is now
							// unreachable; hand it back to the delayed-free
							// allocator.
							freeNode(first);
							return true;
						}
					}
				}
			}
		}

	private:
		/// Allocates raw storage from the allocator and default-constructs a
		/// Node in it. NOTE(review): assumes m_alloc.alloc never returns
		/// null — confirm the allocator's contract.
		Node* allocNode()
		{
			Node* node = reinterpret_cast<Node*>(m_alloc.alloc(sizeof(Node)));
			new(node) Node;	// placement new (requires <new>)
			return node;
		}
		/// Destroys a Node and returns its storage to the allocator.
		void freeNode(Node* node)
		{
			node->~Node();
			m_alloc.free(node);
		}

		// Bundles the head pointer with the allocator state; presumably also
		// enables the empty-base optimization when Allocator is stateless —
		// confirm intent.
		struct AllocAndHead : public Allocator
		{
			Atomic<Node*> m_head;
		};
		AllocAndHead m_alloc;
		Atomic<Node*> m_tail;
	};
}

#endif
