/**
 * Copyright (c) 2010
 * Bert Young. UESTC. 
 */
 
#ifndef BERT_TCPCLIENTTASKPOOL_H
#define BERT_TCPCLIENTTASKPOOL_H

#include <errno.h>
#include <unistd.h>
#include <sys/epoll.h>

#include <list>
#include <vector>

#include "TCPClientTask.h"
#include "TaskQueue.h"
#include "RunnableEntry.h"

class TCPClientTaskPool;

class checkThread : public RunnableEntry, /*public*/ TaskQueue<TCPClientTask>
{
	friend class TCPClientTaskPool;
	/**
	 * The base class carries a queue<TCPClientTask>.
	 * This thread owns a list of client tasks and drives each one's
	 * state machine (idle -> verify -> okay -> recycle) once per tick.
	 */
	TCPClientTaskPool * pool;
	std::list<TCPClientTask *> tasks;
	int taskcount;

	// TaskQueue hook: hand a new task to this thread.
	// FIX: parameter was TCPTask*, which does not match TaskQueue<TCPClientTask>.
	virtual void _add(TCPClientTask * task );
public:
	static const int max_task_size = 4096;
	checkThread( )
	{
		this->pool = NULL;
		this->taskcount = 0;
	}
	// Tasks owned here plus those still pending in the base-class queue.
	int taskSize() const
	{
		return this->taskcount + getTaskCount();
	}
	int maxTaskSize() const
	{
		return max_task_size;
	}
	void initPool( void * pool)
	{
		// FIX: original cast named nonexistent type TCPTaskPool.
		this->pool = (TCPClientTaskPool *) pool;
	}

	/**
	 * Wait for verifyConn success, failure or timeout.
	 * Loop once per second over all tasks:
	 *  - idle:    (re)connect; on success hand over to a verify thread,
	 *             on failure destroy the task.
	 *  - verify:  nothing to do here; a verifyThread owns the socket.
	 *  - okay:    heartbeat check.
	 *  - recycle: after the grace period, advance state and destroy.
	 */
	void run( )
	{
		Time current;
		std::list<TCPClientTask *>::iterator it;
		while ( isAlive() )
		{
			Thread::sleep(1);
			current.now();
			for ( it = tasks.begin(); it != tasks.end(); )
			{
				TCPClientTask * task = *it;
				switch( task->state)
				{
					case idle:
						if (true/* TODO: check that the idle state timed out */)
						{
							if ( task->connect())
							{
								pool->addVerify(task);
							}
							else
							{
								// Connect failed: drop and destroy the task.
								tasks.erase(it++);
								task->resetstate();
								delete task;
								continue;
							}
						}
						break;
					case verify:
						break;
					case okay:
						task->checkhearbeat();
						break; // FIX: was the stray token "beat;", which also fell through into recycle
					case recycle:
						if (true/* TODO: check that the recycle state timed out (4s) */)
						{
							task->getNextState();
							tasks.erase(it++);
							delete task;
							continue;
						}
						break;
				}
				++ it;
			}

		}
	}
};
///////////////////////////////////////////////
class verifyThread : public RunnableEntry, TaskQueue<TCPClientTask>
{
	friend class TCPClientTaskPool;

	static const int max_task_size = 4096;
	TCPClientTaskPool * pool;
	std::list<TCPClientTask *> tasks;
	int taskcount; // FIX: was read/written throughout but never declared
	int epfd;      // epoll descriptor watching every pending-verify socket
	std::vector<struct epoll_event> events;

	// TaskQueue hook: start watching a newly connected task.
	// FIX: typo "TCPCLientTask" and a stray ';' before the body.
	virtual void _add(TCPClientTask * task )
	{
		// Watch readability only: we wait for the client's verify message.
		task->epollAdd(this->epfd, EPOLLIN | EPOLLPRI, (void *)task );
		this->tasks.push_back(task);
		this->taskcount = tasks.size();
		if ( (size_t)taskcount > events.size() )
			events.resize(taskcount + 16 );
	}
	// Remove via iterator; leaves the iterator on the next element.
	// FIX: element type was TCPTask and a stray ';' preceded the body.
	void remove(std::list<TCPClientTask *>::iterator & iter)
	{
		(*iter)->epollDel(this->epfd);
		iter = tasks.erase(iter);
		taskcount = tasks.size();
	}

	// Remove by pointer (used from the epoll result loop).
	// FIX: parameter type was TCPTask and a stray ';' preceded the body.
	void remove(TCPClientTask * task )
	{
		task->epollDel(this->epfd);
		tasks.remove(task);//std::list::remove
		taskcount = tasks.size();
	}
public:
	verifyThread( )
	{
		taskcount = 0;
		this->pool = NULL;
		epfd = epoll_create(256);
		events.resize(256);
	}
	~verifyThread( )
	{
		// FIX: epfd was leaked; close it as OkayThread already does.
		TEMP_FAILURE_RETRY(::close(this->epfd));
	}
	// Tasks owned here plus those still pending in the base-class queue.
	int taskSize() const
	{
		return this->taskcount + getTaskCount();
	}
	int maxTaskSize() const
	{
		return max_task_size;
	}
	void initPool( void * pool)
	{
		// FIX: original cast named nonexistent type TCPTaskPool.
		this->pool = (TCPClientTaskPool *) pool;
	}

	/**
	 * Wait for the verify reply from the server.
	 * On success the task is promoted to an OkayThread; on error or
	 * timeout the task's state is reset so checkThread can recycle it.
	 */
	void run( )
	{
		Time current;
		std::list<TCPClientTask *>::iterator it, next;
		while ( isAlive() )
		{
			Thread::sleep(1);
			check_queue(); // NOTE(review): OkayThread calls checkQueue(); confirm which spelling TaskQueue declares
			if ( !tasks.empty() )
			{
				int ret = epoll_wait(epfd, &events[0], taskcount, 0);
				for (int i=0; i<ret; ++i)
				{
					// FIX: data.ptr is void*, an explicit cast is required.
					TCPClientTask * task = (TCPClientTask *)events[i].data.ptr;
					if ( events[i].events & (EPOLLERR|EPOLLPRI))
					{
						remove(task);
						task->resetstate();
					}
					else if ( events[i].events & EPOLLIN )
					{
						switch(task->verifyConn() )
						{
							case 1:  // verified: hand over to the okay pool
								remove(task);
								if( !pool->addOkay(task))
									task->resetstate();
								break;
							case 0:  // verify message not complete yet; keep waiting
								break;
							case -1: // verification failed
								remove(task);
								task->resetstate();
								break;
						}
					}
				}
			}
			// Reap tasks stuck too long in the verify state.
			for(it = tasks.begin();it!=tasks.end();)
			{
				TCPClientTask *task = *it;
				// FIX: the condition was left empty (syntax error); placeholder
				// keeps every task until the real timeout check is written.
				if(false/* TODO: verify-state timeout check */)
				{
					remove(it);
					task->resetstate();
				}
				else
					++ it;
			}
			Thread::msleep(50);
		}

		// Shutting down: stop watching and reset everything we still hold.
		for ( it = tasks.begin(); it!=tasks.end(); )
		{
			TCPClientTask *task = *it;
			remove(it);

			task->resetstate();
		}
	}
};
//////////////////////////////////////////////////////////////////
class OkayThread : public RunnableEntry, TaskQueue<TCPClientTask>
{
	friend class TCPClientTaskPool; // FIX: friend named nonexistent TCPTaskPool

	static const int max_task_size = 512;
	TCPClientTaskPool * pool;
	std::list<TCPClientTask * > tasks;
	int taskcount;
	int epfd; // read+write epoll set containing every established task
	std::vector<struct epoll_event> events;

	// TaskQueue hook: start full read/write monitoring of a verified task.
	virtual void _add(TCPClientTask * task )
	{
		task->epollAdd(this->epfd, EPOLLIN |EPOLLOUT | EPOLLPRI, (void *)task );
		this->tasks.push_back(task);
		this->taskcount = tasks.size();
		if ( (size_t)taskcount > events.size() )
			events.resize(taskcount + 16 );
		task->recv(false);
	}
	// Remove via iterator; leaves the iterator on the next element.
	void remove(std::list<TCPClientTask *>::iterator & iter)
	{
		(*iter)->epollDel(this->epfd); // FIX: referenced undeclared 'it'
		iter = tasks.erase(iter);      // FIX: erase result was dropped, leaving the caller's iterator dangling
		taskcount = tasks.size();
	}
public:
	OkayThread( )
	{
		this->pool = NULL;
		this->taskcount = 0;
		this->epfd = epoll_create(max_task_size);
		this->events.resize(max_task_size);
	}
	~OkayThread( )
	{
		TEMP_FAILURE_RETRY(::close(this->epfd));
	}
	// Tasks owned here plus those still pending in the base-class queue.
	int taskSize() const
	{
		return this->taskcount + getTaskCount();
	}
	int maxTaskSize() const
	{
		return max_task_size;
	}
	void initPool( void * pool)
	{
		// FIX: original cast named nonexistent type TCPTaskPool.
		this->pool = (TCPClientTaskPool *) pool;
	}

	/**
	 * Main service loop for established connections: reads eagerly via a
	 * read-only epoll set, polls the read+write set on a coarser timer,
	 * and periodically sweeps out connections flagged for disconnect.
	 */
	void run( )
	{
		Time current, write_time;
		std::list<TCPClientTask * >::iterator it; // FIX: element type was TCPTask

		int epfd_read = epoll_create(256);
		std::vector<struct epoll_event> events_read;
		events_read.resize(256);// room to monitor 256 connections
		unsigned int count_read = 0;
		bool check = false;

		while( isAlive() )
		{
			current.now();

			if ( check )
			{
// Add new connections to the read-only epoll set; check heartbeats;
// sweep out dead connections.
				check = false;
				checkQueue();// accept the new connections main handed us
				for ( it = tasks.begin(); it != tasks.end(); )
				{
					TCPClientTask * task = *it; // FIX: was TCPTask
					//task->checkHearbeat(current);
					if ( task->shouldDisconnect())
					{
						if ( task->isReadMonitor)
						{
							task->epollDel(epfd_read);
							-- count_read;
							task->isReadMonitor = false;
						}
						this->remove(it);
						task->nextState();
						continue;
					}
					else
					{
						if ( !task->isReadMonitor)
						{// register with the read epoll set, bump the read count
							task->epollAdd(epfd_read,EPOLLIN|EPOLLPRI, (void *)task);
							task->isReadMonitor = true;
							++ count_read;
							if ( count_read > events_read.size() )
								events_read.resize(count_read+32);

						}
						++it;
					}
				}
			}//end if check

			Thread::msleep(2);

			// Poll for readable sockets.
			if ( count_read > 0 )
			{
				int ret = epoll_wait(epfd_read, &events_read[0], count_read, 0 );
				for (int i=0; i<ret; ++i)
				{
					TCPClientTask *task=(TCPClientTask*)events_read[i].data.ptr; // FIX: was TCPTask
					if ( events_read[i].events & (EPOLLERR|EPOLLPRI) )
					{
						// Connection must be closed; the next loop handles it (see continue).
						task->setDisconnect(TCPTask::disconnect_active);
						check = true;
					}
					else if ( events_read[i].events & EPOLLIN)
					{
						if ( !task->recv(true) )
						{
							task->setDisconnect(TCPTask::disconnect_active);
							check = true;
						}
					}
					events_read[i].events = 0;
				}
			}// end if count_read
			if ( check )// dead connections found, go back and clean up!
				continue;

			// Check write events only every usleep_time to avoid bouncing
			// sockets between the two epoll sets.
			if(current.msec()-write_time.msec()>=pool->usleep_time/1000)
			{
				write_time = current;
				if ( !tasks.empty())
				{
					int ret = epoll_wait(epfd, &events[0], taskcount, 0 );
					for (int i=0; i<ret; ++i)
					{
						TCPClientTask *task=(TCPClientTask*)events[i].data.ptr; // FIX: was TCPTask
						if ( events[i].events & (EPOLLERR|EPOLLPRI))
						{
							task->setDisconnect(TCPTask::disconnect_active);
						}
						else
						{
							if ( events[i].events & EPOLLIN)
							{
								if ( !task->recv(true) )
									task->setDisconnect(TCPTask::disconnect_active);
							}
							if ( events[i].events & EPOLLOUT)
							{
								if ( !task->send() )
									task->setDisconnect(TCPTask::disconnect_active);
							}
						}
						events[i].events = 0; // FIX: cleared events_read[i] by mistake
					}
				}
				// I/O just happened and may have failed, so force a sweep;
				// the timer fired anyway, so a check is due regardless.
				check = true;
			}//end if write events
		}//end while

		// Shutting down: release every connection we still own.
		for ( it = tasks.begin(); it!=tasks.end(); )
		{
			TCPClientTask * task = *it; // FIX: was TCPTask
			this->remove(it);
			task->nextState();
		}
		TEMP_FAILURE_RETRY(::close(epfd_read));
	}
};
//////////////////////////////////////////////
///////////////////////////////////////////////
class TCPClientTaskPool : private Noncopyable
{
	const int maxConns;
	RunnableManager<checkThread> checkThreads;
	RunnableManager<VerifyThread> verifyThreads;
	RunnableManager<OkayThread> okayThreads;
public:
	static long usleep_time;

	explicit TCPTaskPool(int maxConns = 8, long us = 50000) : maxConns(maxConns)
	{
		usleep_time = us;
	}
	~TCPTaskPool()
	{
		this->stop();
	}
	int getSize()
	{
		return okayThreads.taskCount();
	}
	bool addCheck(TCPTask * task)
	{
		checkThread * thread = checkThreads.getOne();
		if ( thread )
		{
			thread->_add(task);
			return true;
		}
		else
			return false;
	}
	bool addVerify(TCPTask * task)
	{
		verifyThread * thread = verifyThreads.getOne();
		if ( thread )
		{
			thread->_add(task);
			task->nextstate();
			return true;
		}
		else
			return false;
	}
	bool addOkay(TCPTask * task)
	{
		OkayThread * thread = okayThreads.getOne();
		if ( thread )
		{
			thread->_add(task);
			task->nextstate();
			return true;
		}
		else
			return false;
	}

	bool init()
	{
		if ( !checkThreads.init(1,1,"监视线程",this))
			return false;
		if ( !verifyThreads.init(1,1,"验证线程",this))
			return false;
		int maxThread = (maxConns + OkayThread::max_task_size -1 )/OkayThread::max_task_size;//4
		if ( !okayThreads.init(1,maxThread,"回收线程",this))
			return false;
		return true;
	}

	void stop()
	{
	//主服务器重写baseserver的recycle函数，调用此stop
		checkThreads.stop();
		verifyThreads.stop();
		okayThreads.stop();
	}
};

#endif
