#pragma once 

#include "space\space.h"
#include "space\i_manager.h"

#include <boost\signal.hpp>
#include <boost\bind.hpp>
#include <boost\function.hpp>

#include <memory>
#include <mutex>
#include <concurrent_queue.h>
#include <concurrent_vector.h>

namespace space {

	// Marker/base type for the event system.
	// NOTE(review): currently empty — presumably event payload types are meant
	// to derive from this, but nothing in this file uses it; confirm against
	// call sites before extending.
	class i_event {

	};

	//
	// If the event manager is queued, events are stored by value, so the event
	// object type must be copyable (provide a copy constructor or a
	// copy-assignment operator).
	//
	//
	// Generic event dispatcher: maps integer event ids onto boost::signal
	// slot lists. Events either execute immediately, or — when constructed
	// with queued == true — are buffered in a thread-safe queue and drained
	// on the next on_update() tick.
	//
	// Queued mode requires EventObjType to be default- and copy-constructible,
	// since events are stored in the queue by value.
	//
	template<u_int EventCount, class EventObjType, class EventReturnType>
	class base_event_dispatcher : 
		public i_manager
	{
	protected:
		// A buffered event: the event id plus a copy of the payload.
		struct _t_event_wrapper {
			int type;
			EventObjType object;
			_t_event_wrapper() : type(-1), object() { }
			_t_event_wrapper(u_int t, EventObjType o) : type(t), object(o) { }
			// Takes const& (the original took a mutable reference, which
			// rejected const and temporary right-hand sides).
			_t_event_wrapper& operator=(const _t_event_wrapper &in) { this->type = in.type; this->object = in.object; return *this; }
		};

		bool _queued;	// true -> events are buffered until on_update()
		concurrency::concurrent_queue< _t_event_wrapper > _event_queue;
			
		typedef boost::signal<EventReturnType(const EventObjType &) > _t_event_signal;

		// One signal (slot list) per event id; the id is the vector index.
		concurrency::concurrent_vector< shared_ptr< _t_event_signal > > _signalStack;
		u_int _event_count;	// number of registered event ids
	public:
		bool is_queued(void) const { return _queued; }

		//
		// Connect a member function of obj as a handler for eventType.
		// The signal stack grows automatically if eventType is beyond the
		// current range.
		//
		template<class T>
		result add_event(u_int eventType, EventReturnType (T::* func)(const EventObjType &), T * obj) {
			// Grow until index eventType is valid. Size against the vector
			// itself, not _event_count: the original used '>' and stopped one
			// entry short, and indexed an empty vector when the default
			// constructor had been used.
			while(this->_signalStack.size() <= eventType) {
				this->_signalStack.push_back(std::make_shared<_t_event_signal>());
			}
			if(eventType >= _event_count)
				_event_count = eventType + 1;	// count, not last index

			this->_signalStack[eventType]->connect(boost::function<EventReturnType (const EventObjType &)>(boost::bind(func, obj, _1)));

			return RESULT_OK;
		}

		//
		// Raise an event. Queued dispatchers defer execution to on_update()
		// and return a default-constructed result; immediate dispatchers
		// return the handlers' combined result.
		//
		EventReturnType on_event(u_int eventType, const EventObjType & event) {
			if(this->_queued) {
				this->_event_queue.push(_t_event_wrapper(eventType, event));
				// No result exists yet. (The original returned 'false', which
				// only compiles when EventReturnType is convertible from bool.)
				return EventReturnType();
			}
			return this->ex_event(eventType, event);
		}

		//
		// Execute an event immediately against its connected slots.
		// Unknown or handler-less ids yield a default-constructed result.
		//
		EventReturnType ex_event(u_int eventType, const EventObjType &event) {
			// Bounds-check first: ids that were never registered must not
			// index past the end of the stack.
			if(eventType >= this->_signalStack.size() || this->_signalStack[eventType]->empty())
				return EventReturnType();

			return this->_signalStack[eventType]->operator()(event);
		}

		result on_pre_update(i_engine *engine, const float delta) { return RESULT_NOT_IMPL; }
		result on_post_update(i_engine *engine, const float delta) { return RESULT_NOT_IMPL; }

		//
		// Drain the event queue (queued mode only). Events pushed while we
		// drain are simply picked up on the next update.
		//
		result on_update(i_engine *engine, const float delta) { 
			if(this->_queued) {
				_t_event_wrapper wrap;
				while(_event_queue.try_pop(wrap)) {
					this->ex_event(wrap.type, wrap.object);
				}
			}
			return RESULT_OK;	// the original fell off the end: UB
		}

		//
		// Pre-size the signal stack to _event_count entries. Idempotent, so
		// it is safe when both a constructor and the engine call it.
		//
		result initialize()	{
			while(this->_signalStack.size() < _event_count) {
				this->_signalStack.push_back(std::make_shared<_t_event_signal>());
			}
			return RESULT_OK;
		}

		//
		// Drop all queued events and all signals. shared_ptr destruction
		// handles the cleanup; the original's manual new[]/copy/reset dance
		// freed the array with scalar delete instead of delete[] (UB).
		//
		result release() { 
			_event_queue.clear();
			this->_signalStack.clear();
			return RESULT_OK;
		}

		base_event_dispatcher() :
			_queued(false),
			_event_count(EventCount) { 
			// The original default ctor skipped initialize(), leaving the
			// stack empty while _event_count claimed EventCount entries.
			this->initialize();
		}
		base_event_dispatcher(const bool queued) :
			_queued(queued),
			_event_count(EventCount) { 
			this->initialize();
		}
		~base_event_dispatcher() {
			this->release();
		}
	};
};