#pragma once

// Used for std::advance in read_buffered::pop
#include <iterator>

// Use for optional return value
#include <boost/optional.hpp>

// Used for ptime / time_duration / second_clock in write_buffered
#include <boost/date_time/posix_time/posix_time_types.hpp>

// Used as the underlying storage for write_buffered and read_buffered
#include "../container/auto_list.hpp"




namespace boostext
{
	/**
	 * Buffering policy that enforces an unbuffered read.
	 *
	 * This is a pure tag/policy type: it holds no state and merely
	 * announces through the 'buffered' constant that elements are read
	 * straight from the queue, bypassing any local buffer.
	 */
	template <typename ValueT>
	class read_unbuffered
	{
	public:

		/**
		 * This policy disables buffering.
		 */
		enum { buffered = false };
	};
	///////////////////////////////////////////////////////////////////////////////////////////////




	/**
	 * This class implements a buffered read policy.
	 * Values are retrieved in chunks, not individually (although the user might
	 * call pop() all the time), which drastically reduces the need to lock the
	 * queue for all threads.
	 */
	template
	<
		typename _Ty
	>
	class read_buffered
	{
	public:

		typedef boost::optional<_Ty>           _Opt_Ty;
		typedef auto_list<_Ty>                 _List_Ty;

		typedef typename _List_Ty::size_type   size_type;

	private:

		_List_Ty    _buffer;      // Locally buffered elements, consumed front-first.
		size_type   _batchSize;   // Max. number of elements fetched per refill.

	public:

		/**
		 * Create a buffered policy.
		 * Will pop up to 100 elements from the queue, when the buffer is empty.
		 */
		read_buffered()
			: _batchSize(100)
		{}

		/**
		 * Create a buffered policy from another one.
		 * NOTE(review): takes a non-const reference, presumably because
		 * copying an auto_list transfers its elements (auto_ptr-like
		 * semantics) -- confirm against auto_list's copy behaviour.
		 */
		read_buffered(read_buffered<_Ty>& that)
			: _buffer(that._buffer)
			, _batchSize(that._batchSize)
		{}

		/**
		 * Create a buffered policy from an unbuffered policy.
		 * Will pop up to 100 elements from the queue, when the buffer is empty.
		 */
		read_buffered(read_unbuffered<_Ty> &)
			: _batchSize(100)
		{}



		/**
		 * Set the maximal number of elements that are popped from the
		 * queue in one batch when the buffer needs to be refilled.
		 */
		void setBatchSize(size_type _Size)
		{
			_batchSize = _Size;
		}

		/**
		 * Get the maximal number of elements that are popped from the
		 * queue in one batch when the buffer needs to be refilled.
		 */
		size_type batchSize() const
		{
			return _batchSize;
		}

		/**
		 * This policy enables buffering.
		 */
		enum { buffered = true };

	protected:

		/**
		 * Retrieve and remove the first element from the buffer.
		 * Returns an empty optional when the buffer holds no elements.
		 */
		_Opt_Ty pop()
		{
			if(!empty())
			{
				_Opt_Ty ret( _buffer.front() );
				_buffer.pop_front();
				return ret;
			}
			else
			{
				return _Opt_Ty();
			}
		}

		/**
		 * Retrieve and remove up to '_Count' elements from the buffer.
		 * Returns fewer (possibly zero) elements when the buffer does not
		 * hold '_Count' of them.
		 */
		_List_Ty pop(size_type _Count)
		{
			if(!empty())
			{
				_List_Ty ret;
				// 'typename' is required: iterator is a dependent type.
				typename _List_Ty::iterator _first = _buffer.begin();
				typename _List_Ty::iterator _last = _first;

				// Never advance past the end of the buffer.
				std::advance(_last, (_Count > _buffer.size()) ? _buffer.size() : _Count);

				// Splice moves the nodes, no element copies involved.
				ret.splice(ret.end(), _buffer, _first, _last);
				return ret;
			}
			else
			{
				return _List_Ty();
			}
		}

		/**
		 * Get the size of the buffer.
		 */
		size_type size() const
		{
			return _buffer.size();
		}

		/**
		 * Test if the buffer is empty.
		 */
		bool empty() const
		{
			return _buffer.size() == 0;
		}

		/**
		 * Test if the buffer must be filled.
		 */
		bool mustFill() const
		{
			// The buffer must only be filled when it's empty
			return empty();
		}

		/**
		 * Fill the buffer.
		 * Only ever called when the buffer is empty (see mustFill()), so the
		 * assignment cannot discard pending elements.
		 * NOTE(review): whether '_Vals' still holds its elements afterwards
		 * depends on auto_list's assignment semantics (transfer vs. copy) --
		 * confirm callers do not reuse '_Vals'.
		 */
		void fillBuffer( _List_Ty &_Vals )
		{
			//OutputDebugStringA( "Filled\n" );
			_buffer = _Vals;
		}
	};
	///////////////////////////////////////////////////////////////////////////////////////////////




	/**
	 * Buffering policy that enforces unbuffered writing.
	 *
	 * This is a pure tag/policy type: it holds no state and merely
	 * announces through the 'buffered' constant that elements are written
	 * straight to the queue, bypassing any local buffer.
	 */
	template <typename ValueT>
	class write_unbuffered
	{
	public:

		/**
		 * This policy disables buffering.
		 */
		enum { buffered = false };
	};
	///////////////////////////////////////////////////////////////////////////////////////////////




	/**
	 * This class implements a buffered write policy.
	 * Values are pushed in chunks onto the queue, not individually
	 * (although the user might only use push() all the time). This
	 * drastically reduces the need to lock the queue for all threads.
	 *
	 * Contrary to the read_buffered policy, this buffer can be flushed in
	 * two different ways. Either when the buffer is full, or when a certain
	 * amount of time has passed. This will, however only be detected when
	 * push() is called one more time.
	 * It is up to the user to call 'if( mustFlush() ) flush();' every once and then,
	 * to ensure the buffer is flushed constantly (if it is required).
	 */
	template
	<
		typename _Ty
	>
	class write_buffered
	{
	public:

		typedef auto_list<_Ty>                     _List_Ty;
		typedef typename _List_Ty::size_type       size_type;

		// NOTE: second_clock has one-second resolution, so sub-second
		// flush intervals cannot be honored accurately.
		typedef boost::posix_time::second_clock    clock;
		typedef boost::posix_time::ptime           ptime;
		typedef boost::posix_time::time_duration   time_duration;

	private:

		// Members are initialized in declaration order; keep the
		// constructor initializer lists consistent with this order.
		_List_Ty        _buffer;        // Elements waiting to be flushed onto the queue.

		ptime           _lastFlushed;   // Time of the most recent flush.
		time_duration   _interval;      // Max. time span between two flushes.
		size_type       _batchSize;     // Buffer size that triggers a flush.

	public:

		/**
		 * Create a write_buffered policy.
		 * Will flush the buffer every 100 pushes, or every 5 seconds,
		 * whatever is satisfied first. (Will NOT implicitly be flushed, when
		 * nothing is pushed to the queue, flush must be invoked
		 * manually when unused for some time).
		 */
		write_buffered()
			: _lastFlushed(clock::local_time())
			, _interval(0,0,5,0)
			, _batchSize(100)
		{}

		/**
		 * Create a buffered policy from another one.
		 * NOTE(review): deliberately starts with an empty buffer and the
		 * default settings instead of copying them from the source (the
		 * source's buffer cannot be taken over through a const reference).
		 * Confirm the asymmetry with read_buffered's copy constructor is
		 * intended.
		 */
		write_buffered(const write_buffered &)
			: _lastFlushed(clock::local_time())
			, _interval(0,0,5,0)
			, _batchSize(100)
		{}

		/**
		 * Create a buffered policy from an unbuffered policy
		 * (mirrors read_buffered's converting constructor).
		 * Uses the same defaults as the default constructor.
		 */
		write_buffered(const write_unbuffered<_Ty> &)
			: _lastFlushed(clock::local_time())
			, _interval(0,0,5,0)
			, _batchSize(100)
		{}


		/**
		 * Set the minimal size that causes the buffer to be
		 * flushed onto the queue.
		 */
		void setBatchSize(size_type _Size)
		{
			_batchSize = _Size;
		}

		/**
		 * Get the minimal size that causes the buffer to be
		 * flushed onto the queue.
		 */
		size_type batchSize() const
		{
			return _batchSize;
		}



		/**
		 * Get the most recent time at which the buffer has been flushed.
		 */
		const ptime &lastFlush() const
		{
			return _lastFlushed;
		}

		/**
		 * Set the interval at which the buffer will be flushed into the queue.
		 */
		void setInterval(const time_duration &interval)
		{
			_interval = interval;
		}

		/**
		 * Get the interval at which the buffer will be flushed into the queue.
		 */
		const time_duration &interval() const
		{
			return _interval;
		}

		/**
		 * Test if the buffer must be flushed or not.
		 */
		bool mustFlush() const
		{
			// When there's nothing to flush, then don't
			if(!_buffer.size())
				return false;

			// When the buffer size is bigger than the batch size, flush it
			if(_buffer.size() >= _batchSize)
				return true;

			// When the last flush happened more than 'interval' ago, flush it
			if((clock::local_time() - _lastFlushed) >= _interval)
				return true;

			// If neither applies, don't flush
			return false;
		}



		/**
		 * This policy enables buffering.
		 */
		enum { buffered = true };

	protected:

		/**
		 * Get the size of the buffer.
		 */
		size_type size() const
		{
			return _buffer.size();
		}

		/**
		 * Test if the buffer is empty.
		 */
		bool empty() const
		{
			return _buffer.size() == 0;
		}

		/**
		 * Push an element onto the buffer.
		 */
		void push(const _Ty& _Val)
		{
			_buffer.push_back(_Val);
		}

		/**
		 * Push multiple elements onto the buffer.
		 * The elements are moved out of '_Right' (node splice, no copies).
		 */
		void push(_List_Ty &_Right)
		{
			_buffer.splice(_buffer.end(), _Right);
		}

		/**
		 * Return the buffered elements and reset the buffer.
		 * The elements are spliced into the returned list, which guarantees
		 * the internal buffer is empty afterwards. (A plain 'return _buffer;'
		 * would leave the elements behind if auto_list had ordinary copy
		 * semantics, causing them to be flushed again and again.)
		 */
		_List_Ty flushBuffer()
		{
			_lastFlushed = clock::local_time();

			_List_Ty ret;
			ret.splice(ret.end(), _buffer);
			return ret;
		}
	};
	///////////////////////////////////////////////////////////////////////////////////////////////
}
///////////////////////////////////////////////////////////////////////////////////////////////////
