#include <arpa/inet.h>
#include <netinet/ip.h>
#include <sys/socket.h>
#include "ikcp.h"
#include "GXContext.h"
#include "ARand.h"
#include "CAS.h"
#include "picohttpparser/picohttpparser.h"


extern ARand *g_rand;


// HELP FUNCTIONS

/* Fetch (and clear) the pending error on a socket via SO_ERROR.
 * Returns the pending error code (0 = none), or -1 if the query itself failed. */
int nc_check_socket_error(int fd)
{
	int pending = 0;
	socklen_t optlen = sizeof(pending);
	int rc = getsockopt(fd, SOL_SOCKET, SO_ERROR, (char *)&pending, &optlen);
	return (0 == rc) ? pending : -1;
}

/* Give server-side sockets large kernel buffers (MY_SO_RCVBUF_MAX_LEN bytes
 * in each direction).  Returns 0 on success, -1 as soon as either
 * setsockopt call fails. */
int nc_setsockopt_server(int fd)
{
	const int buf_bytes = MY_SO_RCVBUF_MAX_LEN;
	if (-1 == setsockopt(fd, SOL_SOCKET, SO_RCVBUF, (const char *)&buf_bytes, sizeof(buf_bytes)))
		return -1;
	if (-1 == setsockopt(fd, SOL_SOCKET, SO_SNDBUF, (const char *)&buf_bytes, sizeof(buf_bytes)))
		return -1;
	return 0;
}

/* Shrink the kernel buffers for client-facing sockets: 4 KiB receive,
 * 2 KiB send.  Returns 0 on success, -1 on the first failing setsockopt. */
int nc_setsockopt_client(int fd)
{
	const int rcv_bytes = 1024*4;
	const int snd_bytes = 1024*2;
	if (-1 == setsockopt(fd, SOL_SOCKET, SO_RCVBUF, (const char *)&rcv_bytes, sizeof(rcv_bytes)))
		return -1;
	if (-1 == setsockopt(fd, SOL_SOCKET, SO_SNDBUF, (const char *)&snd_bytes, sizeof(snd_bytes)))
		return -1;
	return 0;
}


/* Connect `sock` to ip:port (IPv4 dotted-quad string).
 * Returns connect(2)'s result (0 on success, -1 on error), or -1 when the
 * address string is invalid.
 * Fix: the old code fed inet_addr()'s result straight into connect(), so a
 * malformed ip string silently became INADDR_NONE (255.255.255.255);
 * inet_pton() lets us reject bad input explicitly.  Note inet_pton is also
 * stricter than inet_addr (it rejects shorthand forms like "1.2.3"). */
int nc_connect(int sock,const char *ip,int port)
{
	struct sockaddr_in xsin;
	memset(&xsin,0,sizeof(xsin));
	if(NULL==ip || 1 != ::inet_pton(AF_INET, ip, &xsin.sin_addr)){
		return -1;	// not a valid IPv4 address string
	}
	xsin.sin_family = AF_INET;
	xsin.sin_port = htons((short)port);

	return ::connect(sock,(struct sockaddr *)&xsin, sizeof(xsin));
}

/* Bind `sock` to the given port on every local interface (INADDR_ANY).
 * Returns bind(2)'s result: 0 on success, -1 on error. */
int nc_bind(int sock,int port)
{
	struct sockaddr_in local;
	memset(&local, 0, sizeof(local));
	local.sin_family = AF_INET;
	local.sin_addr.s_addr = htonl(INADDR_ANY);
	local.sin_port = htons((short)port);

	return ::bind(sock, (struct sockaddr *)&local, sizeof(local));
}

/* Disable Nagle's algorithm (TCP_NODELAY) so small writes go out
 * immediately.  Returns setsockopt's result: 0 on success, -1 on error. */
int nc_set_no_delay(int sock)
{
	int enable = 1;
	return ::setsockopt(sock, IPPROTO_TCP, TCP_NODELAY, (const char*)&enable, sizeof(enable));
}

/* Enable SO_REUSEADDR so a restarted server can rebind its port while old
 * connections linger in TIME_WAIT.  Returns 0 on success, -1 on error. */
int nc_set_reuse_addr(int sock)
{
	int enable = 1;
	return ::setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, (const char*)&enable, sizeof(enable));
}

/* Put the socket into non-blocking mode.
 * Returns 0 on success, -1 on failure.
 * Fix: the old code ignored fcntl/ioctlsocket failures and unconditionally
 * returned 0; failures are now reported as -1 (success still returns 0, so
 * existing callers that ignore the result are unaffected). */
int nc_set_nonblock(int sock)
{
#ifdef WIN32
	unsigned long   w = 1 ;
	return (0 == ::ioctlsocket(sock ,FIONBIO,&w)) ? 0 : -1;
#else
	int val = fcntl(sock, F_GETFL, 0);
	if(-1 == val) return -1;
	if(-1 == fcntl(sock, F_SETFL, val | O_NONBLOCK)) return -1;
	return 0;
#endif
}

/* Put the socket back into blocking mode.
 * Returns 0 on success, -1 on failure.
 * Fix: like the old nc_set_nonblock, failures from fcntl/ioctlsocket were
 * silently discarded; they are now surfaced as -1. */
int nc_set_block(int sock)
{
#ifdef WIN32
	unsigned long   w = 0 ;
	return (0 == ::ioctlsocket(sock ,FIONBIO,&w)) ? 0 : -1;
#else
	int val = fcntl(sock, F_GETFL, 0);
	if(-1 == val) return -1;
	if(-1 == fcntl(sock, F_SETFL, val & (~O_NONBLOCK))) return -1;
	return 0;
#endif
}

// below copied from nginx ngx_inet.c ========================
/* Format a socket address as text (copied from nginx ngx_inet.c).
 * Writes into `text` and returns the number of characters written; a
 * non-zero `port` appends ":port".  Only the AF_INET branch is active
 * unless NGX_HAVE_INET6 / NGX_HAVE_UNIX_DOMAIN are defined at build time.
 * NOTE(review): the AF_INET branch uses sprintf and ignores `len`, so the
 * caller must guarantee room for the worst case "255.255.255.255:65535". */
size_t
ngx_sock_ntop(struct sockaddr *sa, socklen_t socklen, char *text, size_t len,
	uint port)
{
	u_char               *p;
	struct sockaddr_in   *sin;
#if (NGX_HAVE_INET6)
	size_t                n;
	struct sockaddr_in6  *sin6;
#endif
#if (NGX_HAVE_UNIX_DOMAIN)
	struct sockaddr_un   *saun;
#endif
	int sprintf_r;

	switch (sa->sa_family) {

		case AF_INET:

		sin = (struct sockaddr_in *) sa;
		p = (u_char *) &sin->sin_addr;	// the four raw address bytes

		if (port) {
			sprintf_r = sprintf(text, "%u.%u.%u.%u:%d",
				p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
		} else {
			sprintf_r = sprintf(text, "%u.%u.%u.%u",
				p[0], p[1], p[2], p[3]);
		}

    //return (p - text);
		return sprintf_r;

#if (NGX_HAVE_INET6)

		case AF_INET6:

		sin6 = (struct sockaddr_in6 *) sa;

		n = 0;

		if (port) {
			text[n++] = '[';	// RFC 3986 bracket form: "[addr]:port"
		}

		n = ngx_inet6_ntop(sin6->sin6_addr.s6_addr, &text[n], len);

		if (port) {
			n = ngx_sprintf(&text[1 + n], "]:%d",
				ntohs(sin6->sin6_port)) - text;
		}

		return n;
#endif

#if (NGX_HAVE_UNIX_DOMAIN)

		case AF_UNIX:
		saun = (struct sockaddr_un *) sa;

    /* on Linux sockaddr might not include sun_path at all */

		if (socklen <= (socklen_t) offsetof(struct sockaddr_un, sun_path)) {
			p = ngx_snprintf(text, len, "unix:%Z");

		} else {
			p = ngx_snprintf(text, len, "unix:%s%Z", saun->sun_path);
		}

    /* we do not include trailing zero in address length */

		return (p - text - 1);

#endif

		default:
		return 0;	// unknown address family: nothing written
	}
}
// ========================

#define IP_TEXT_LEN 60	// minimum output capacity we require from the caller

/* Write the peer address of `sock` as "a.b.c.d:port" into out_ip.
 * Returns the number of characters written, 0 when nothing was formatted,
 * or -1 on bad arguments / getpeername() failure.
 * Fix: the old code ignored getpeername()'s return value and happily
 * formatted whatever garbage was in the uninitialized sockaddr_in. */
int nc_get_ip(int sock,char *out_ip,int max_len)
{
	if(0==out_ip || max_len<IP_TEXT_LEN){
		return -1;
	}

	sockaddr_in sa;
#ifdef WIN32
	int len = sizeof(sa);
#else
	socklen_t len = sizeof(sa);
#endif
	if(0 != getpeername(sock,(struct sockaddr *)&sa,&len)){
		return -1;	// not connected / bad fd: do not format junk
	}

	size_t r = ngx_sock_ntop((struct sockaddr *)&sa, len, (char*)out_ip, max_len, 1);
	// r is unsigned, so this only guards the r==0 "nothing written" case
	if(r<=0){
		return 0;
	}

	return r;
}

/* Non-blocking read helper.
 * Returns 1 while the connection is still usable (real_read holds the byte
 * count, possibly 0 when the read would block) and 0 when the peer closed
 * the connection or a hard error occurred. */
int nc_read(intptr_t fd,char *buf,int buf_len,int &real_read)
{
	real_read = 0;
#ifdef WIN32
	int got = recv(fd,buf,buf_len,0);
#else
	int got = read(fd,buf,buf_len);
#endif

	if(got == 0){
		return 0;	// orderly shutdown by the peer
	}
	if(got > 0){
		real_read = got;
		return 1;
	}

	// got < 0: distinguish "would block" from a real error
#ifdef WIN32
	return (WSAEWOULDBLOCK == WSAGetLastError()) ? 1 : 0;
#else
	return (EAGAIN == errno || EWOULDBLOCK == errno) ? 1 : 0;
#endif
}

/* Non-blocking write helper, mirror of nc_read.
 * Returns 1 while the connection is still usable (real_write holds the byte
 * count actually sent, possibly 0 when the write would block; an empty
 * buffer is a no-op success) and 0 when the peer is gone or a hard error
 * occurred. */
int nc_write(intptr_t fd,char *buf,int buf_len,int &real_write)
{
	real_write = 0;
	if(buf==0 || buf_len<=0) return 1;	// nothing to send counts as success

#ifdef WIN32
	int sent = send(fd,buf,buf_len,0);
#else
	int sent = write(fd,buf,buf_len);
#endif

	if(sent == 0){
		return 0;
	}
	if(sent > 0){
		real_write = sent;
		return 1;
	}

	// sent < 0: distinguish "would block" from a real error
#ifdef WIN32
	return (WSAEWOULDBLOCK == WSAGetLastError()) ? 1 : 0;
#else
	return (EAGAIN == errno || EWOULDBLOCK == errno) ? 1 : 0;
#endif
}


// =================================================================

#define min(a,b) (a)<(b)?(a):(b)


/* Drain the kfifo contents onto the network as far as the socket allows;
not everything is necessarily written.  This reaches rather deep into
kfifo internals, which is not very modular, but we cannot __kfifo_get the
data out and then write it: the problem is not the extra memcpy, it is
that a partial network write would leave us holding bytes that cannot be
pushed back into the kfifo (no such interface exists, and one cannot
reasonably be written).
Internal use only; nothing outside this file should call it.
Returns the number of bytes actually consumed from the fifo.
*/
int __kfifo_2_net(struct kfifo *fifo,intptr_t fd,u32 len)
{
	u32 l = 0;
	int ok = 0;
	int real_write = 0;
	int real_write2 = 0;

	// clamp to the number of bytes actually buffered (in - out)
	len = min(len, fifo->in - fifo->out);

/* first get the data from fifo->out until the end of the buffer */
	l = min(len, fifo->size - (fifo->out & (fifo->size - 1)));
// for test
//if(l < len){
//	printf("wrap-around of [%d] bytes\n",len-l);
//}
	ok = nc_write(fd,(char*)(fifo->buffer + (fifo->out & (fifo->size - 1))),l,real_write);
	if(ok!=1 || real_write<l){
		// short write (or dead link): only consume what actually went out
		fifo->out += real_write;
		return real_write;
	}

/* then get the rest (if any) from the beginning of the buffer */
	ok = nc_write(fd,(char*)fifo->buffer,len - l,real_write2);
	if(ok!=1 || real_write2<(len - l)){
		fifo->out += (l+real_write2);
		return (l+real_write2);
	}

	fifo->out += len;

	return len;
}

// =================================================================


// Small status snapshot pairing a link state with its pool slot.
// NOTE(review): not referenced anywhere in this chunk — presumably used by
// reporting/printing code elsewhere in the project; confirm before removing.
struct PortalPrint{
	int stat_;        // link status value
	int pool_index_;  // index of the link inside link_pool_
};


/* One-time setup of the context: copies configuration, creates the
 * epoll/IOCP handle, allocates the Link / TransactionContext / KCPPop pools
 * and a private lua VM.  Exits the process on any allocation failure, so a
 * return value of true is the only possible return.
 * NOTE(review): operator new[] throws on failure rather than returning
 * NULL, so the NULL checks on the pools below are effectively dead code. */
bool GXContext::init(int type,const char* ID,int pool_size,int read_buf_len,int write_buf_len,int req_read_buf_len,int req_write_buf_len)
{
	type_ = type;

	// NOTE(review): strncpy does not NUL-terminate when ID fills the buffer
	strncpy(gx_id_,ID,GXCONTEXT_ID_LEN);

	link_pool_size_conf_ = pool_size;
	read_buf_len_ = read_buf_len;
	write_fifo_len_ = write_buf_len;
	req_read_buf_len_ = req_read_buf_len;
	req_write_fifo_len_ = req_write_buf_len;

#ifdef WIN32
	// Winsock 2.2 + a single IOCP shared by all sockets
	WORD wVersionRequested;
	WSADATA wsaData;
	wVersionRequested = MAKEWORD( 2, 2 );
	int err = WSAStartup( wVersionRequested, &wsaData ); 
	iocp_handle_ = CreateIoCompletionPort(INVALID_HANDLE_VALUE,NULL,0,1);
	if(NULL == iocp_handle_){
		printf("Init IOCP failed!! exit...\n");
		exit(-1);
	}

#else
	epoll_fd_ = epoll_create(100);		//  Since Linux 2.6.8, the size argument is unused
	if (-1 == epoll_fd_) {
		printf("epoll_create failed!\n");
		exit(-1);
	}


#endif

	link_pool_ = new Link[pool_size];
	if(0 == link_pool_){
		printf("init lnk_pool failed! exit...\n");
		exit(-1);
	}
	link_pool_size_ = pool_size;

	// up to 8 concurrent transactions per link
	int t2 = link_pool_size_*8;
	trans_pool_ = new TransactionContext[t2];
	if(0 == trans_pool_){
		printf("init trans_pool_ failed! exit...\n");
		exit(-1);
	}
	trans_pool_size_ = t2;

	FOR(i,t2){
		trans_pool_[i].reset();
		trans_pool_[i].pool_idx_ = i;
	}

	// KCP (UDP) pseudo-connection pool, sized at twice the link pool
	int t3 = link_pool_size_*2;
	kcp_pop_pool_ = new KCPPop[t3];
	if(NULL == kcp_pop_pool_){
		exit(-1);
	}
	kcp_pop_pool_size_ = t3;

	FOR(i, t3){
		kcp_pop_pool_[i].reset();
		kcp_pop_pool_[i].pool_idx_ = i;
	}

	stat_ = 1;	// mark the context as live (frame_poll bails out when 0)


	// init luaVM only for self
	{
		// private VM with only the base and table libraries opened
		lua_State *L = luaL_newstate();
		luaopen_base(L);
		luaopen_table(L);
		
		lua_vm_ = new LuaInterface(L);
	}

	return true;
}


/*
bool GXContext::initWrap(int type,const char* ID)
{
lua_vm2_ = new LuaInterface();
lua_vm2_->SetGlobal("g_tag",0);
lua_vm2_->SetGlobal(LUA_GX_ID,ID);
lua_vm2_->SetGlobal("g_node_id",ID);

lua_vm2_->doFile("./lua/init.lua");


// 初始化GX上下文
int config_maxconn = lua_vm2_->callGlobalFunc<int>("getMaxConn");
int config_readbuflen = lua_vm2_->callGlobalFunc<int>("getReadBufLen");
int config_writebuflen = lua_vm2_->callGlobalFunc<int>("getWriteBufLen");

std::string my_port = lua_vm2_->callGlobalFunc<std::string>("getMyPort");

bool r = init(type,ID,config_maxconn,config_readbuflen,config_writebuflen,REQ_READ_BUF_LEN, REQ_WRITE_BUF_LEN);
strncpy(this->udp_ip_and_port_,my_port.c_str(),127);


lua_vm2_->SetGlobal("g_tag",1);
lua_vm2_->doFile("./lua/init.lua");

return r;
}
*/

// Tear-down counterpart of init(); currently a stub that releases nothing.
void GXContext::free()
{
// @TODO release link_pool_/trans_pool_/kcp_pop_pool_, the epoll/IOCP handle and lua_vm_
}

static int __udp_output(const char *buf, int len, ikcpcb *kcp, void *user)
{
	KCPPop *c = (KCPPop*)user;

	return sendto(c->udp_sock_, buf, len, 0, (const sockaddr*)&c->remote_, sizeof(struct sockaddr_in));
}

/* Allocate a TransactionContext bound to a KCP pseudo-connection and give
 * it a fresh lua coroutine on VM L.  Returns NULL when kc is NULL or the
 * transaction pool is exhausted. */
TransactionContext* GXContext::createTransactionForKCP(KCPPop *kc, lua_State *L){
	if(NULL==kc){
		return NULL;
	}

	TransactionContext *tr = newTrans();
	if(!tr) return NULL;

	tr->io_type_ = 0;	// 0 = created for UDP/KCP traffic
	tr->my_creator_ = kc->pool_idx_;


	tr->serial_no_ = next_trans_serial_no_;
	++ next_trans_serial_no_;

	tr->main_vm = L;
	tr->co = lua_newthread(L);

	// anchor the coroutine in a lua table (keyed by pool index) so the GC
	// cannot collect it while the transaction is alive
	lua_rawseti(L, lua_stack_indicator_[0], tr->pool_idx_);

	return tr;
}

/* Allocate a TransactionContext for a request arriving on TCP link `src`
 * and give it a fresh lua coroutine on VM L.  Returns NULL when src is
 * NULL, the link has no free transaction slot, or the pool is exhausted. */
TransactionContext* GXContext::createTransactionForComingRequest(Link *src, lua_State *L){
	if(!src) return NULL;

	// check the src link could bind more transaction
	int more = src->findEmptyTransactionSlot();
	if(more < 0) return NULL;

	TransactionContext *tr = newTrans();
	if(!tr) return NULL;

	tr->io_type_ = 1;	// 1 = created for a TCP link
	// bind link and transaction (both directions)
	src->a_my_trans[more] = tr->pool_idx_;
	tr->my_creator_ = src->pool_index_;

	tr->serial_no_ = next_trans_serial_no_;
	++ next_trans_serial_no_;

	tr->main_vm = L;
	tr->co = lua_newthread(L);

	// anchor the coroutine in a lua table (keyed by pool index) so the GC
	// cannot collect it while the transaction is alive
	lua_rawseti(L, lua_stack_indicator_[0], tr->pool_idx_);

	return tr;

}

/* Drive the transaction's coroutine: resume it repeatedly while __helper1
 * asks for an immediate re-resume (it returns false in that case), up to a
 * safety cap of 100 iterations.  `num_param` arguments must already be
 * pushed on the coroutine for the first resume; later resumes pass none.
 * Returns the status of the last lua_resume (-1 if never set). */
int GXContext::resumeTransactionAndCheckIfEnd(TransactionContext *tr, int num_param)
{
	int r1 = -1;
	int num = num_param;

	if(0 == tr->io_type_){
		// KCP-born transaction: refresh the owning pseudo-connection's idle timer
		KCPPop *kc = getKCPPop(tr->my_creator_);
		if(kc){
			kc->last_io_time_ = cur_frame_gametime_;
		}
	}

	FOR(i, 100){	// 100 is a safe limiter
		bool b = __helper1(tr,num,r1);
		num = 0;	// only the first resume carries the pushed arguments
		if(b){
			return r1;
		}
	}
	

	return r1;
}

bool __redis_async_command(lua_State* L, void *privdata);

// return: true-no error  false-error(resume again to inform lua)
bool GXContext::__helper1(TransactionContext *tr, int num_param, int &r_yield)
{
	if(NULL==tr || NULL==tr->main_vm || NULL==tr->co) return false;

	r_yield = lua_resume(tr->co, num_param);
	++ tr->times_resume_;

	if(LUA_YIELD != r_yield){	// Transaction ended
		if(0 != r_yield){
			// has error
			fprintf(stderr,"%s\n", lua_tostring(tr->co,-1));
		}

		if(1 == tr->io_type_){
			Link *l = getLink(tr->my_creator_);
			if(l){
				l->unbindTransaction(tr->pool_idx_);
			}
		}
		
		
		// release it
		releaseTrans(tr);

	}
	else{	// else the co yield
		int op = lua_tonumber(tr->co, 1);
		switch(op){
			case UC_currentTransaction:{
				lua_pushlightuserdata(tr->co, tr);
				// LUA side may get nil?  but this op is NOT supported
				return false;	// we need resume again here
			}
			break;

			case UC_connectAsync:{
				const char *ip = lua_tostring (tr->co,2);
				int port = lua_tointeger(tr->co,3);

				if(NULL==ip || 0==port){
					return false;
				}

				// TODO: maybe make a keeplive pool?

				int r1 = connect2_async(ip, port);
				Link *cc = getLink(r1);
				if(!cc){
					return false;
				}

				// bind them
				cc->setToClient();
				cc->a_my_trans[0] = tr->pool_idx_;

				lua_pop(tr->co, 3);
			}
			break;

			case UC_writeReqAndWait:{
				int c_idx = lua_tointeger(tr->co,2);
				size_t len = 0;
				const char *to_write = lua_tolstring(tr->co,3,&len);
				if(NULL == to_write){
					return false;
				}

				Link *l = getLink(c_idx);
				if(!l) return false;

				int r_write = __kfifo_put(&l->write_fifo_, (unsigned char*)to_write,len);

				lua_pop(tr->co, 3);
			}
			break;

			case UC_redis:{
				if(!__redis_async_command(tr->co, tr)){
					return false;
				}

				lua_pop(tr->co, 2);
			}
			break;

			default:{
				return false;
			}
			break;
		}
	}

	
	// clear the co on stack
	//if(lua_isthread(tr->main_vm, -1)){
	//	lua_pop(tr->main_vm, 1);
	//}
	//printf("lua stack left %d elem\n", lua_gettop(tr->main_vm));

	return true;
}


/* Acquire a free KCPPop slot, scanning the pool circularly starting just
 * past the slot handed out last time.  Returns NULL when the pool is full. */
KCPPop* GXContext::newKCPPop(){
	int start = (prev_kcp_pop_pool_indicator_+1) % kcp_pop_pool_size_;

	for(int step=0; step<kcp_pop_pool_size_; ++step){
		int i = (start + step) % kcp_pop_pool_size_;
		KCPPop *slot = kcp_pop_pool_ + i;
		if(0 != slot->pool_stat_) continue;

		slot->pool_stat_ = 1;
		slot->reset();
		slot->pool_idx_ = i;
		prev_kcp_pop_pool_indicator_ = i;
		return slot;
	}

	return NULL;
}

/* Return a KCPPop slot to the pool by clearing its in-use flag. */
void GXContext::releaseKCPPop(KCPPop *k)
{
// verify the pointer really lies inside the pool — guard against stray pointers
	if(kcp_pop_pool_ <= k && k < kcp_pop_pool_ + kcp_pop_pool_size_){
		k->pool_stat_ = 0;
	}
	else{
		printf("releaseKCPPop error\n");
	}
}

/* Look up an in-use KCPPop by pool index; NULL when the index is out of
 * range or the slot is currently free. */
KCPPop* GXContext::getKCPPop(int pool_index)
{
	if(pool_index>=0 && pool_index<kcp_pop_pool_size_){
		KCPPop *slot = kcp_pop_pool_ + pool_index;
		if(1 == slot->pool_stat_){
			return slot;
		}
	}
	return NULL;
}


/* Acquire a free TransactionContext slot, scanning the pool circularly
 * starting just past the slot handed out last time.  Returns NULL when the
 * pool is full. */
TransactionContext* GXContext::newTrans(){
	int start = (prev_trans_pool_indicator_+1) % trans_pool_size_;

	for(int step=0; step<trans_pool_size_; ++step){
		int i = (start + step) % trans_pool_size_;
		TransactionContext *slot = trans_pool_ + i;
		if(0 != slot->pool_stat_) continue;

		slot->pool_stat_ = 1;
		slot->reset();
		slot->pool_idx_ = i;
		prev_trans_pool_indicator_ = i;
		return slot;
	}

	return NULL;
}

/* Return a TransactionContext slot to the pool by clearing its in-use flag. */
void GXContext::releaseTrans(TransactionContext *ll)
{
	//printf("GXContext::releaseTrans()  %d\n", ll->pool_idx_);
// verify the pointer really lies inside the pool — guard against stray pointers
	if(trans_pool_ <= ll && ll < trans_pool_ + trans_pool_size_){
		ll->pool_stat_ = 0;
	}
	else{
		printf("releaseTrans error\n");
	}
}

/* Look up an in-use TransactionContext by pool index; NULL when the index
 * is out of range or the slot is currently free. */
TransactionContext* GXContext::getTrans(int pool_index)
{
	if(pool_index>=0 && pool_index<trans_pool_size_){
		TransactionContext *slot = trans_pool_ + pool_index;
		if(1 == slot->pool_stat_){
			return slot;
		}
	}
	return NULL;
}

/* Throw away the whole Link pool and allocate a fresh one of `pool_size`.
 * WARNING: any outstanding Link* into the old pool dangles after this call.
 * NOTE(review): operator new[] throws on failure, so the NULL check below
 * is effectively dead code. */
bool GXContext::resetLinkPool(int pool_size)
{
	if(link_pool_){
		delete []link_pool_;
		link_pool_ = NULL;
	}

	link_pool_ = new Link[pool_size];
	if(0 == link_pool_){
		printf("init lnk_pool failed! exit...\n");
		exit(-1);
	}

	link_pool_size_ = pool_size;
	return true;
}


/* Acquire a free Link slot, scanning the pool circularly starting just
 * past the slot handed out last time.  Returns NULL when the pool is full. */
Link* GXContext::newLink()
{
	int start = (prev_link_pool_indicator_+1) % link_pool_size_;

	for(int step=0; step<link_pool_size_; ++step){
		int i = (start + step) % link_pool_size_;
		Link *slot = link_pool_ + i;
		if(0 != slot->pool_stat_) continue;

		// Single-threaded by design, so a plain store (no CAS) claims the slot.
		slot->pool_stat_ = 1;
		slot->cleanup();
		slot->pool_index_ = i;
		prev_link_pool_indicator_ = i;
		return slot;
	}

	return NULL;
}

/* Return a Link slot to the pool by clearing its in-use flag. */
void GXContext::releaseLink(Link *ll)
{
// verify the pointer really lies inside the pool — guard against stray pointers
	if(link_pool_ <= ll && ll < link_pool_+link_pool_size_){
		ll->pool_stat_ = 0;
	}
	else{
		printf("releaseLink error\n");
	}
}

/* Look up an in-use Link by pool index; NULL when the index is out of
 * range or the slot is currently free. */
Link* GXContext::getLink(int pool_index)
{
	if(pool_index>=0 && pool_index<link_pool_size_){
		Link *slot = link_pool_+pool_index;
		if(1 == slot->pool_stat_){
			return slot;
		}
	}
	return NULL;
}


/* Hard-close a link: release its OS handle, then return it to the pool. */
void GXContext::forceCutLink(Link* ll)
{
// verify the pointer really lies inside the pool — a stray pointer is ignored
	if(link_pool_ <= ll && ll < link_pool_+link_pool_size_){
		ll->releaseSystemHandle(this);
		releaseLink(ll);
	}
}

#ifdef __USING_WINDOWS_IOCP
/* Dedicated accept thread for the IOCP build: accept() is not itself an
 * IOCP operation, so a blocking accept loop runs here, attaches every new
 * socket to the completion port (keyed by its Link*), and posts the first
 * recv.  Never returns under normal operation. */
DWORD WINAPI __AcceptThreadProc(void* lpParam)
{
	GXContext *nc = (GXContext*)lpParam;
	int listen_sock = nc->listening_socket_;
	printf("running IOCP. accept thread ready to run.\n");


while(true){	// this should be endless
	struct sockaddr_storage ss;
	int socklen = sizeof(ss);

	int new_fd = accept(listen_sock,(struct sockaddr *)&ss,&socklen);
	if(new_fd==-1){
		continue;	// transient accept failure: just retry
	}
	nc_set_no_delay(new_fd);
	nc_set_nonblock(new_fd);
	nc_setsockopt_client(new_fd);


	Link *new_ioable = nc->newLink();
	if(0 == new_ioable){
		// no free slot: reject the connection outright
		printf("IOAble pool is full\n");
		closesocket(new_fd);
		continue;
	}

	new_ioable->sock_ = new_fd;

	// associate the socket with the shared IOCP; the Link* is the completion key
	HANDLE new_hd = CreateIoCompletionPort((HANDLE)new_fd, nc->iocp_handle_, (DWORD)new_ioable , 1);
	if(NULL == new_hd){
		fprintf(stderr,"CreateIoCompletionPort return NULL after accept(). fd[%d]\n",new_fd);
    	// free resource
		nc->releaseLink(new_ioable);
		closesocket(new_fd);
		continue;
	}

    //printf("accepted new con. pool index[%d]\n",new_ioable->pool_index_);
	new_ioable->enable_encrypt_ = nc->enable_encrypt_;
	new_ioable->becomeOnline(nc->read_buf_len_,nc->write_fifo_len_);

    //Post initial Recv
    //This is a right place to post a initial Recv
    //Posting a initial Recv in WorkerThread will create scalability issues.
	if(new_ioable->post_recv() != 0){
		fprintf(stderr,"first recv after accept failed.\n");
	}

}

printf("accept thread ended.  This should NOT happen\n");
fprintf(stderr,"accept thread ended.  This should NOT happen\n");

return -1;
}
#endif


bool GXContext::start_listening_udp()
{
	char *delimiter = strstr(udp_ip_and_port_,":");
	if(0==delimiter) return false;

	int port = atoi(delimiter+1);

	int gate_sock = ::socket(AF_INET,SOCK_DGRAM,0);
	nc_set_reuse_addr(gate_sock);

	if(nc_bind(gate_sock,port)!=0){
		fprintf(stderr,"listening socket initialize error. port:[%u]\n",port);
		return false;
	}

	nc_setsockopt_server(gate_sock);

	udp_listening_socket_ = gate_sock;


	return true;
}

/* Create, bind and listen on the TCP gate socket, then hook it into the
 * event machinery (an accept thread under IOCP; epoll otherwise).
 * tcp_ip_and_port_ must look like "ip:port"; only the port part is used.
 * Returns true on success.
 * Fixes: the old code never checked ::socket()'s return value, and on the
 * epoll path dereferenced newLink() without a NULL check. */
bool GXContext::start_listening_tcp()
{
	char *delimiter = strstr(tcp_ip_and_port_,":");
	if(0==delimiter) return false;

	int port = atoi(delimiter+1);

	int gate_sock = ::socket(PF_INET,SOCK_STREAM,0);
	if(gate_sock < 0){
		fprintf(stderr,"socket() failed for TCP gate. port:[%d]\n",port);
		return false;
	}
	nc_set_reuse_addr(gate_sock);
//nc_set_nonblock(gate_sock);
	
	if(nc_bind(gate_sock,port)!=0){
		fprintf(stderr,"listening socket initialize error. port:[%u]\n",port);
		return false;
	}
	if(::listen(gate_sock,10)!=0){
		fprintf(stderr,"listening socket initialize error. port:[%u]\n",port);
		return false;
	}

	tcp_listening_socket_ = gate_sock;

#ifdef __USING_WINDOWS_IOCP
// accept() is not an IOCP operation, so a dedicated thread does the accepting
	DWORD threadID = 0;
	HANDLE thread_hd = ::CreateThread(0, 0, __AcceptThreadProc, (void*)this , 0, &threadID);
	if(NULL == thread_hd){
		fprintf(stderr,"CreateThread failed\n");
		return false;
	}
#else
	nc_set_nonblock(gate_sock);

	Link *listen_link = newLink();
	if(0 == listen_link){
		fprintf(stderr,"link pool is full, cannot register listening socket.\n");
		return false;
	}
	listen_link->sock_ = gate_sock;
	int r = listen_link->register_read_event(this);
	if(-1 == r){
		printf("listening socket epoll failed.\n");
		return false;
	}

	//bindLinkWithGlobalID("listen0",listen_link);

#endif
	return true;
}


// @TODO: WIN32
int GXContext::connect2_async(const char *ip, int port)
{
	if(0 == ip || 0==port) return -1;

	Link *ll = newLink();
	if(0 == ll || ll->isOnline()){
		return -1;
	}


	int sock = ::socket(PF_INET,SOCK_STREAM,0);
	nc_setsockopt_client(sock);
	nc_set_no_delay(sock);
	nc_set_nonblock(sock);

	bool need_sche_write = true;	// however schedule one
	if(nc_connect(sock,ip,port)!=0){
		if (EINPROGRESS == errno) {
			/* This is ok. */
			need_sche_write = true;
		}
		else{
			closesocket(sock);
			releaseLink(ll);
			printf("connect2_async failed.\n");
			return -1;
		}
	}


	ll->sock_ = sock;


	#ifdef __USING_WINDOWS_IOCP
	HANDLE new_hd = CreateIoCompletionPort((HANDLE)sock, iocp_handle_, (DWORD)ll , 1);
	if(NULL == new_hd){
		fprintf(stderr,"CreateIoCompletionPort return NULL after accept(). fd[%d]\n",sock);
	    // free resource
		releaseLink(ll);
		closesocket(sock);
		return -1;
	}

	ll->becomeOnline(req_read_buf_len_,req_write_fifo_len_);

	ll->post_recv();
	#else
	if(need_sche_write){
		ll->register_write_event(this);
	}
	else{
		ll->register_read_event(this);
	}


	ll->becomeOnline(req_read_buf_len_, req_write_fifo_len_);
	#endif

	return ll->pool_index_;
}

/* Register the lua function `fname` on VM `l` as the handler for `event`
 * (an index into a_vmhandle).  Returns the event id, or -1 when the event
 * is out of range.
 * NOTE(review): re-registering an event leaks the previously allocated
 * LuaInterface. */
int GXContext::regVMHandle(int event, lua_State* l, const char* fname)
{
	if(event<0 || event>=vmhandleNum) return -1;

	a_vmhandle[event].vm_ = new LuaInterface(l);
	a_vmhandle[event].fname_ = fname;


	return event;
}

#define CLIENT_MSG_MAX_LEN (1024*7)	// upper bound for a single client message
#define ERR_NEED_KICK -4	// parser verdict: protocol violation, drop the link


#define PHR_MAX_HEADER 8	// max HTTP headers retained per request
// Bundles picohttpparser's outputs together with the raw input span so the
// whole parse result can be handed to lua as one lightuserdata.
struct _phr_parse_data{
	const char *method, *path;
	int minor_version;
	struct phr_header headers[PHR_MAX_HEADER];
	size_t method_len, path_len, num_headers;
	const char* src;	// start of the raw request bytes
	size_t src_len;		// bytes available at src
};

/* Try to parse one HTTP request starting at offset `begin` of the link's
 * read buffer and dispatch it to the registered lua handler.
 * On success advances `begin` past the request and returns 1; returns 0
 * when more bytes are needed, ERR_NEED_KICK on a malformed request, and
 * -1 on a NULL link.
 * NOTE(review): a_vmhandle[vmhandleHTTPResponse].vm_ is dereferenced
 * without a NULL check — the handler must have been registered via
 * regVMHandle before HTTP traffic arrives; confirm with callers. */
int GXContext::try_deal_one_http(Link *ioable,int &begin){
	int begin22 = begin;
	if(0==ioable) return -1;
	// need more than 4 buffered bytes before attempting a parse
	if(begin22+4 >= ioable->read_buf_offset_) return 0;


//printf("HTTP:  %s\n", ioable->read_buf_+begin22);

	struct _phr_parse_data aa;
	memset(&aa, 0, sizeof(aa));
	aa.num_headers = PHR_MAX_HEADER;	// in: capacity; out: headers found
	aa.src = ioable->read_buf_+begin22;
	aa.src_len = ioable->read_buf_offset_ - begin22;

	// last_len=0: parse from scratch every time (no incremental reparse)
	int pret = phr_parse_request(aa.src, aa.src_len,
		&aa.method, &aa.method_len,&aa.path, &aa.path_len,
		&aa.minor_version, aa.headers, &aa.num_headers, 0);

	if(pret > 0){
						// parse success
		begin += pret;	// consume exactly the parsed request bytes

		if(true){
			// hand the parse result to lua as a lightuserdata
			LuaInterface *vm = this->a_vmhandle[vmhandleHTTPResponse].vm_;
			lua_getglobal(vm->L(), this->a_vmhandle[vmhandleHTTPResponse].fname_.c_str());
			lua_pushlightuserdata(vm->L(), (void*)&aa);

			int r = vm->_Call<int>(1);
		}

		return 1;
	}
	else if(-1 == pret){
						// parse Error
		return ERR_NEED_KICK;
	}
	else{
						// incomplete (pret == -2), do nothing
		return 0;
	}

	return -1;
}

// only GET and POST supported
// only GET and POST supported
inline bool _is_http_request(const char* buf){
	if(0==strncmp("GET",buf,3)) return true;
	return 0==strncmp("POST",buf,4);
}

// True when buf begins like an HTTP status line ("HTTP...").
inline bool _is_http_response(const char* buf){
	return strncmp(buf,"HTTP",4)==0;
}

// True when buf begins with the internal "LEM" protocol magic.
inline bool _is_lemuria_header(const char* buf){
	return strncmp(buf,"LEM",3)==0;
}


/* Collapse an IPv4 (address, port) pair into a single 64-bit map key for
 * map_kcp_by_hash_.
 * Fix: the old code read sin_port through a uint32_t*, pulling in two
 * bytes beyond the 16-bit port field (out-of-field read, aliasing UB).
 * The key is only used in-process — both the insert in __helper_udp_recv
 * and the erase in __helper_update_all_kcp go through this function — so
 * changing the exact value is safe; (ip << 32) | port is deterministic
 * and collision-free per (address, port). */
uint64_t GXContext::make_hash_by_sockaddr_value(struct sockaddr *addr)
{
	struct sockaddr_in *sin = (struct sockaddr_in *) addr;
	uint64_t ip = (uint64_t)sin->sin_addr.s_addr;	// network byte order; fine for a key
	uint64_t port = (uint64_t)sin->sin_port;

	return (ip << 32) | port;
}

static char __buf_for_addr[64];	// scratch for address formatting (currently unused)

/* Handle one raw UDP datagram: locate (or lazily create) the KCPPop for
 * the sender, feed the bytes through KCP reassembly, and spawn one lua
 * transaction per complete application message.  Returns 0 on success,
 * -1 when the datagram is too short or no KCPPop could be obtained.
 * NOTE(review): the printf assumes `buf` is NUL-terminated — the caller
 * memsets the batch buffer beforehand, but a datagram that fills its slot
 * would not be terminated.
 * NOTE(review): map_kcp_by_hash_[hash] default-inserts a NULL entry for an
 * unknown sender; if newKCPPop() then fails, that NULL entry stays behind. */
int GXContext::__helper_udp_recv(const char *buf, int len, struct sockaddr *addr, uint32_t now)
{
	//size_t r2 = ngx_sock_ntop(addr, sizeof(struct sockaddr), __buf_for_addr, 60, 1);
	uint64_t hash = make_hash_by_sockaddr_value(addr);
	printf("%d %s  %ld\n", len, buf, hash);


	if(len<8) return -1;	// too short to carry a KCP header

	// KCP conversation id: the first 4 bytes of every KCP segment
	uint32_t *conv = (uint32_t*)buf;

	KCPPop *kc = map_kcp_by_hash_[hash];
	if(NULL == kc || 0==kc->pool_stat_){
		// make a new one
		kc = newKCPPop();
		if(NULL==kc){
			return -1;
		}

		kc->udp_sock_ = udp_listening_socket_;
		memcpy(&kc->remote_, (struct sockaddr_in*)addr, sizeof(struct sockaddr_in));

		kc->kcp_ = ikcp_create(*conv, kc);
		if(NULL == kc->kcp_){
			releaseKCPPop(kc);
			return -1;
		}
		ikcp_setoutput(kc->kcp_, __udp_output);	// segments go out via sendto
		ikcp_nodelay(kc->kcp_,1, KCP_DELAY_INTERVAL, 1, 1);	// fast/low-latency mode

		map_kcp_by_hash_[hash] = kc;
	}
	else if(NULL==kc->kcp_){
		return -1;
	}


	kc->last_io_time_ = now;	// refresh the idle-timeout clock

#define BUF_KCP_BODY_MAX (1024*8)
	// lazily-allocated reassembly buffer, shared across calls (single-threaded)
	static char *buf_kcp_body = NULL;
	if(NULL == buf_kcp_body){
		buf_kcp_body = (char*)malloc(BUF_KCP_BODY_MAX);
		if(NULL == buf_kcp_body){
			::exit(-1);
		}
	}

	ikcp_input(kc->kcp_, (char*)buf, len);
	FOR(limiter, 100){	// drain at most 100 complete messages per datagram
		int kn = ikcp_recv(kc->kcp_, buf_kcp_body, BUF_KCP_BODY_MAX);
		if(kn>0){
			// one complete application message: run it in a fresh transaction
			TransactionContext *tr = createTransactionForKCP(kc, a_vmhandle[vmhandleUDP].vm_->L());
			if(NULL==tr) break;

			lua_getglobal(tr->co, this->a_vmhandle[vmhandleUDP].fname_.c_str());
			lua_pushlstring(tr->co, buf_kcp_body, kn);
			//lua_pushinteger(tr->co, TRANSACTION_VIRTUAL_INDEX_OFFSET + tr->pool_idx_);
			lua_pushinteger(tr->co, tr->pool_idx_);

			// CALL it
			resumeTransactionAndCheckIfEnd(tr, 2);
		}
		else{
			break;	// no more complete messages buffered
		}
	}

	return 0;
}

#define KCP_TIMEOUT_SAY (1000*10)	// idle KCP sessions expire after 10 seconds

/* Per-frame KCP maintenance: tick every live session's state machine and
 * reap sessions idle for longer than KCP_TIMEOUT_SAY.
 * NOTE(review): on timeout the slot is released before ikcp_release() and
 * kc->kcp_ is left dangling; the pool_stat_==1 guard keeps this loop from
 * touching it again, and reset() on reuse presumably clears it — confirm
 * KCPPop::reset() does so. */
void GXContext::__helper_update_all_kcp(uint32_t now)
{
	FOR(i, kcp_pop_pool_size_){
		KCPPop *kc = kcp_pop_pool_ + i;
		if(1==kc->pool_stat_ && NULL != kc->kcp_){
			ikcp_update(kc->kcp_, now);	// drive retransmission/ack timers

			if( 0!=kc->last_io_time_ && now > KCP_TIMEOUT_SAY + kc->last_io_time_ ){
				// release all res: map entry, pool slot, then the kcp object
				uint64_t hash = make_hash_by_sockaddr_value((struct sockaddr *)&kc->remote_);
				map_kcp_by_hash_.erase(hash);

				releaseKCPPop(kc);

				ikcp_release(kc->kcp_);
			}
		}
	}
}

/*
使用IOCP需要注意的一些问题

1- 不要为每个小数据包发送一个IOCP请求,这样很容易耗尽IOCP的内部队列.....从而产生10055错误.

2- 不要试图在发送出IOCP请求之后,收到完成通知之前修改请求中使用的数据缓冲的内容,因为在这段时间,系统可能会来读取这些缓冲.

3- 为了避免内存拷贝,可以尝试关闭SOCKET的发送和接收缓冲区,不过代价是,你需要更多的接收请求POST到一个数据流量比较大的SOCKET,从而保证系统一直可以找到BUFFER来收取到来的数据.

4- 在发出多个接收请求的时候,如果你的WORKTHREAD不止一个,一定要使用一些手段来保证接收完成的数据按照发送接收请求的顺序处理,否则,你会遇到数据包用混乱的顺序排列在你的处理队列里.....

5- 说起工作线程, 最好要根据MS的建议, 开 CPU个数*2+2 个, 如果你不了解IOCP的工作原理的话.

6- IOCP的工作线程是系统优化和调度的, 自己就不需要进行额外的工作了.如果您自信您的智慧和经验超过MS的工程师, 那你还需要IOCP么....

7-发出一个Send请求之后，就不需要再去检测是否发送完整，因为iocp会帮你做这件事情，有些人说iocp没有做这件事情，这和iocp的高效能是相悖的，并且我做过的无数次测试表明，Iocp要么断开连接，要么就帮你把每个发送请求都发送完整。

8- 出现数据错乱的时候，不要慌，要从多线程的角度检查你的解析和发送数据包的代码，看看是不是有顺序上的问题。

9- 当遇到奇怪的内存问题时，逐渐的减少工作线程的数量，可以帮你更快的锁定问题发生的潜在位置。

10-同样是遇到内存问题时，请先去检查你的客户端在服务器端内部映射对象的释放是否有问题。而且要小心的编写iocp完成失败的处理代码，防止引用一个错误的内部映射对象的地址。

11- overlapped对象一定要保存在持久的位置，并且不到操作完成（不管成功还是失败）不要释放，否则可能会引发各种奇怪的问题。

12- IOCP的所有工作都是在获取完成状态的那个函数内部进行调度和完成的，所以除了注意工作线程的数量之外，还要注意，尽量保持足够多的工作线程处在获取完成状态的那个等待里面，这样做就需要减少工作线程的负担，确保工作线程内部要处理费时的工作。（我的建议是工作线程和逻辑线程彻底区分开）

14- 尽量保持send和recv的缓冲的大小是系统页面大小的倍数，因为系统发送或者接收数据的时候，会锁用户内存的，比页面小的缓冲会浪费掉整个一个页面。（作为第一条的补充，建议把小包合并成大包发送）
*/


#define __TIMEOUT_ERROR 10000


void GXContext::frame_poll(timetype now,int block_time)
{
	if(0 == stat_) return;

#ifdef __USING_WINDOWS_IOCP
	
FOR(counter_iocp,link_pool_size_){	// 为了安全，做一个最大限制  
	DWORD bytesTransfered = 0;
	OVERLAPPED* overlapped = NULL;
	ULONG_PTR completionKey = NULL;

	int ret = GetQueuedCompletionStatus(iocp_handle_, &bytesTransfered, &completionKey, &overlapped, block_time);
	int err = 0;
	if(0 == ret){
		if(0==overlapped){
			// it's just time out
			err = __TIMEOUT_ERROR;
		}
		else{
			// 非graceful的断开
			printf("非graceful的断开\n"); 
			err = 2;
		}
	}
	else if(0 == bytesTransfered){
		printf("graceful的断开\n"); 
		err = 1;
	}
	else if(completionKey == NULL || overlapped == NULL) {
		printf("非graceful的断开2\n"); 
		err = 2;
	}
	
	/*
	 关于连接断开时接收到的通知。如果iocp检测到Socket连接已经断开，程序马上会收得到通知，而且有时候会收到不至一次通知，
	 这取决于你在该socket上投递WSASend与WSARecv的次数。例如你在一个socket上投递了一次WSASend与一次WSARecv，
	 在这两次投递还没有被完成时，如果socket断开了连接，那么GetQueuedCompletionStatus()将会收到两次通知
	*/
	
	// TODO: 1、要做分配释放资源的压测  2、要做主动断开  3、刚刚连上时不知道是client还是internal 
	
	if(0 == err){
		Link *ioable = (Link*)completionKey;
		int real_read = bytesTransfered;
		ioable->read_buf_offset_ += real_read;
		// test
		//printf("get [%d]  [%s]\n",real_read,ioable->read_buf_);

		if(true){
			// 是服务端内部包，绝大多数情况下应该是 InternalHeader 包头 
			int byte_begin = 0;
			FOR(limiter,9999){
				int suc = try_deal_one_msg_s(ioable,byte_begin);
				if(1 != suc){
					if(ERR_NEED_KICK==suc){
						err = -1;
						printf("-4==suc\n");
					}
					break;
				}
			}
			if(byte_begin == ioable->read_buf_offset_){
				ioable->read_buf_offset_ = 0;
			}
			else if(ioable->read_buf_offset_ > byte_begin){
				memmove(ioable->read_buf_,ioable->read_buf_+byte_begin,ioable->read_buf_offset_-byte_begin);
				ioable->read_buf_offset_ = ioable->read_buf_offset_-byte_begin;
			}
			else{
				// unlegal
				ioable->read_buf_offset_ = 0;
			}
		}
		
		// 继续投递 
		if(0 == err){
			int rr = ioable->post_recv();
			if(rr != 0){
				err = 2;
			}	
		}
	}
	
	// If it's timeout, we dont need post_recv again
	
	
	if(err!=0 && err!=__TIMEOUT_ERROR){
		// 不再继续投递 
		

		// 做断开处理 
		if(completionKey){
			Link *ioable = (Link*)completionKey;
			printf("做断开处理   [%d]\n",ioable->pool_index_);
			if(link_cut_callback_){
				link_cut_callback_(this,ioable,1,type_);
			}
			ioable->link_stat_ = 0;
			ioable->releaseSystemHandle(this);
			releaseLink(ioable);

		}
	}
	
	if(__TIMEOUT_ERROR==err){
		// IOCP超时返回，应该没有更多数据要读了 
		return;
	}
}
#else

#define likely(x) __builtin_expect((x),1)
#define unlikely(x) __builtin_expect((x),0)
{
	int epoll_err = -1;
	static int s_prev_event_buffer_byte_len = 0;
	static void *s_event_buffer = NULL;
	
	int event_buffer_byte_len = sizeof(struct epoll_event)*(link_pool_size_+4);
	if(unlikely(event_buffer_byte_len != s_prev_event_buffer_byte_len)){
		s_event_buffer = realloc(s_event_buffer,event_buffer_byte_len);
		s_prev_event_buffer_byte_len = event_buffer_byte_len;
		
		if(0==s_event_buffer){
			exit(-1);
		}
	}

	// ================ UDP below
#define UDP_BATCH_VLEN 1024
#define UDP_BUFSIZE (1024*2)

	static char *udp_buf = NULL;
	static struct mmsghdr *udp_msgs = NULL;
	static struct iovec *iovecs = NULL;
	static struct sockaddr_in *udp_addrs = NULL;

	if(udp_listening_socket_ > 0){
		if(NULL == udp_buf){
			udp_buf = (char*)malloc(UDP_BUFSIZE * UDP_BATCH_VLEN);
			udp_msgs = new struct mmsghdr[UDP_BATCH_VLEN];
			iovecs = new struct iovec[UDP_BATCH_VLEN];
			udp_addrs = new struct sockaddr_in[UDP_BATCH_VLEN];

			if(NULL==udp_buf || NULL==udp_msgs || NULL==iovecs || NULL==udp_addrs){
				exit(-1);
			}
		}

	    struct sockaddr_in sa;
	    struct timespec timeout;	

		memset(udp_msgs, 0, sizeof(struct mmsghdr)*UDP_BATCH_VLEN);
		memset(udp_buf, 0, UDP_BUFSIZE * UDP_BATCH_VLEN);

	    FOR(i, UDP_BATCH_VLEN){
	        iovecs[i].iov_base         = udp_buf + UDP_BUFSIZE*i;
	        iovecs[i].iov_len          = UDP_BUFSIZE;
	        udp_msgs[i].msg_hdr.msg_iov    = iovecs+i;
	        udp_msgs[i].msg_hdr.msg_iovlen = 1;
	        udp_msgs[i].msg_hdr.msg_name = udp_addrs + i;
	        udp_msgs[i].msg_hdr.msg_namelen = sizeof(struct sockaddr_in);
	    }

		timeout.tv_sec = 0;
		timeout.tv_nsec = block_time*1000 / 2;

		int mmsg_r = recvmmsg(udp_listening_socket_, udp_msgs, UDP_BATCH_VLEN, MSG_DONTWAIT  , &timeout);
	    if (mmsg_r == -1) {
	        if(EAGAIN==errno || EWOULDBLOCK==errno){
	        	// just ignore
	        }
	        else{
	        	perror("recvmmsg()");
	        }
	    }
	    else{
	    	printf("%d UDP messages received\n", mmsg_r);
	    	char buf2[64];
		    FOR(i, mmsg_r) {
		        __helper_udp_recv(udp_buf + UDP_BUFSIZE*i, udp_msgs[i].msg_len, (struct sockaddr *)(udp_addrs+i), now);
		    }
	    }

	    // update all kcp here
	    __helper_update_all_kcp(now);
	}

	//printf("UDP polled\n");

	// ================ EPOLL below
	void *event_buffer = s_event_buffer;

	memset(event_buffer,0,event_buffer_byte_len);
	int n = epoll_wait(this->epoll_fd_ , (struct epoll_event*)event_buffer, link_pool_size_, block_time/2);
	if(unlikely(-1 == n)){
		epoll_err = 1;
		printf("EPOLL error [%d]\n",errno);
	}
	else if(0==n){
		epoll_err = __TIMEOUT_ERROR;
	}
	else if(n>0){
		epoll_err = 0;
	}
	
	if(likely(0 == epoll_err)){
		FOR(ev_index,n){
			struct epoll_event* ev = ((struct epoll_event*)event_buffer)+ev_index;
			Link *ioable = (Link*)ev->data.ptr;
			
			if(unlikely(ioable->sock_ == tcp_listening_socket_)){
				// 是监听socket
				struct sockaddr_storage ss;
				int socklen = sizeof(ss);
				FOR(i,10){
					int new_fd = ::accept(ioable->sock_,(struct sockaddr *)&ss,(socklen_t*)&socklen);
					if(-1 == new_fd){
						break;
					}
					
					nc_set_no_delay(new_fd);
					nc_set_nonblock(new_fd);
					nc_setsockopt_server(new_fd);
					
					Link *aa = newLink();
					if(aa){
						aa->sock_ = new_fd;
						int r = aa->register_read_event(this);
						if(-1 == r){
							printf("register_read_event failed.\n");
							aa->releaseSystemHandle(this);
							releaseLink(aa);
						}
						
						aa->enable_encrypt_ = this->enable_encrypt_;
						aa->becomeOnline(read_buf_len_,write_fifo_len_);
					}
					else{
						closesocket(new_fd);
						printf("pool is full.\n");
					}
				}
				
				
				continue; 
			}
			else{
				int err = 1;
				int real_read = 0;
				int luar = 0;
				int passed = -1;
				bool need_kick = false;

				if(unlikely(0!=(EPOLLOUT & ev->events))){
					// write event
					int er = nc_check_socket_error(ioable->sock_);
					if(EINPROGRESS == er){
						// nothing...
					}
					else{
						ioable->unregister_event(this);
						ioable->register_read_event(this);

						// is it an succesfully connect() call?
						if(unlikely(ioable->isService())){
							// should NOT be here
						}
						else{
							TransactionContext *tr = getTrans(ioable->a_my_trans[0]);
							if(tr && tr->co){
								lua_pushinteger(tr->co, ioable->pool_index_);
								resumeTransactionAndCheckIfEnd(tr, 1);
							}
						}
						

						err = luar;
						if(ERR_NEED_KICK == luar){
							need_kick = true;
						}
					}
				}
				else if(likely(0!=(EPOLLIN & ev->events))){

					int ok = nc_read(ioable->sock_,ioable->read_buf_+ioable->read_buf_offset_,ioable->read_buf_len_-ioable->read_buf_offset_,real_read);
					if(unlikely(ok != 1)){
						err = 1;
					}
					else{
						err = 0;
						ioable->read_buf_offset_ += real_read;
					}

				#define _MIN_MESSAGE_LEN 4

					while(likely(0==err && false==need_kick && ioable->read_buf_offset_>=_MIN_MESSAGE_LEN)){
						luar = 0;
						passed = -1;

						if(unlikely(_is_http_response(ioable->read_buf_))){
							int minor_version, status;
							const char* response_msg;
							size_t msg_len, num_headers;
							num_headers = PHR_MAX_HEADER;
							struct phr_header response_header[PHR_MAX_HEADER];

							int pret = phr_parse_response(ioable->read_buf_, ioable->read_buf_offset_, &minor_version, &status, 
							&response_msg, &msg_len, response_header, &num_headers, 0);

							passed = 0;
							if(pret > 0){
								passed = pret;

								LuaInterface *vm = this->a_vmhandle[vmhandleHTTPResponse].vm_;

								if(!ioable->isService()){
									TransactionContext *tr = getTrans(ioable->a_my_trans[0]);
									if(tr && tr->co){
										// fill params for CALL
										//lua_getglobal(tr->co, this->a_vmhandle[vmhandleHTTPResponse].fname_.c_str());
										lua_pushinteger(tr->co, status);
										lua_pushlstring(tr->co, response_msg, msg_len);

										// CALL it
										resumeTransactionAndCheckIfEnd(tr, 2);
									}
									// OR we kick the link, cause one link should NOT have too much Transaction running.
									else{
										luar = ERR_NEED_KICK;
									}
									

									luar = 0;
								}
								else{	// Service type connection should NEVER got a http response
									luar = ERR_NEED_KICK;
								}
							}
							else if(-1 == pret){
								// parse Error
								luar = ERR_NEED_KICK;
							}
							else{
								// incomplete, do nothing
								luar = 0;
								break;
							}
							
							//printf("HttpResponseParse OVER %d  %d\n",luar, passed);
						}
						else if(_is_http_request(ioable->read_buf_)){
							//_HELPER_CALL_1(vmhandleHTTPRequest)

							struct _phr_parse_data aa;
							memset(&aa, 0, sizeof(aa));
							aa.num_headers = PHR_MAX_HEADER;
							aa.src = ioable->read_buf_;
							aa.src_len = ioable->read_buf_offset_;

							int pret = phr_parse_request(aa.src, aa.src_len,
								&aa.method, &aa.method_len,&aa.path, &aa.path_len,
								&aa.minor_version, aa.headers, &aa.num_headers, 0);

							passed = 0;

							// TODO: HTTP POST NOT supported yet

							if(pret > 0){
								// parse success
								passed = pret;

								LuaInterface *vm = this->a_vmhandle[vmhandleHTTPRequest].vm_;

								if(ioable->isService()){
									TransactionContext *tr = createTransactionForComingRequest(ioable, vm->L());
									if(tr){
										// fill params for CALL
										lua_getglobal(tr->co, this->a_vmhandle[vmhandleHTTPRequest].fname_.c_str());
										lua_pushlstring(tr->co, aa.method, aa.method_len);
										lua_pushlstring(tr->co, aa.path, aa.path_len);
										lua_pushinteger(tr->co, tr->pool_idx_);

										// CALL it
										resumeTransactionAndCheckIfEnd(tr, 3);

										//printf("HttpReqParse"" OVER %d  %d\n",luar, passed);
									}
									// OR we kick the link, cause one link should NOT have too much Transaction running.
									else{
										luar = ERR_NEED_KICK;
									}
									

									luar = 0;
								}
								else{
									TransactionContext *tr = getTrans(ioable->a_my_trans[0]);
									if(tr && tr->co){
										// fill params for CALL
										lua_pushlstring(tr->co, aa.method, aa.method_len);
										lua_pushlstring(tr->co, aa.path, aa.path_len);

										resumeTransactionAndCheckIfEnd(tr, 2);

										luar = 0;
									}
								}

								
							}
							else if(-1 == pret){
								// parse Error
								luar = ERR_NEED_KICK;
							}
							else{
								// incomplete, do nothing
								luar = 0;
								break;
							}
							
						}
						else if(_is_lemuria_header(ioable->read_buf_)){
							// just for test
							luar = 0;
							passed = ioable->read_buf_offset_;
							static const char *s_ack = "LEM hello 4\r\nABCD";
							__kfifo_put(&ioable->write_fifo_, (unsigned char*)s_ack,strlen(s_ack));
						}
						else{
							// NO other protocol valid
							need_kick = true;
						}

						if(!need_kick){
							if(likely(passed == ioable->read_buf_offset_)){
								ioable->read_buf_offset_ = 0;
							}
							else if(0==passed){
								// nothing
							}
							else if(-1!=passed && ioable->read_buf_offset_ > passed){
								memmove(ioable->read_buf_,ioable->read_buf_+passed,ioable->read_buf_offset_-passed);
								ioable->read_buf_offset_ -= passed;
							}
							else{
								// unlegal
								ioable->read_buf_offset_ = 0;
							}
						}
					}
					if(ERR_NEED_KICK == luar){
						need_kick = true;
					}

				}


				if(need_kick || 1==err){
				// 做断开处理 
					//printf("做断开处理   [%d]\n",ioable->pool_index_);
					
					LuaInterface *vm = this->a_vmhandle[vmhandleOnCut].vm_;

					if(ioable->isService()){
						/*
						TransactionContext *tr = createTransactionForComingRequest(ioable, vm->L());
						if(tr){
							lua_getglobal(tr->co, this->a_vmhandle[vmhandleOnCut].fname_.c_str());
							lua_pushinteger(tr->co, ioable->pool_index_);
						}
						*/

						// STOP all it's transactions
						FOR(jj ,MAX_TRANSACTION_PER_LINK){
							TransactionContext *tr = getTrans(ioable->a_my_trans[jj]);
							if(tr){
								lua_pushnil(tr->co);
								// CALL it
								resumeTransactionAndCheckIfEnd(tr, 1);
							}
						}
					}
					
					ioable->link_stat_ = 0;
					ioable->releaseSystemHandle(this);
					releaseLink(ioable);
				}
			}
			
		}
	}
}
#endif
}


//对客户端连接，应该限制每帧写出去的字节
//但是这个值应该大于系统缓存比较好，查了在我们的ubuntu系统上，系统的socket读、写缓存的上限都是163840 byte
//It's 212992 at aliyun
//命令：cat /proc/sys/net/core/rmem_max
// cat /proc/sys/net/core/wmem_max
// 故把这个值定为212992（取两个环境中较大的上限，覆盖 aliyun）
#define MAX_BYTES_PER_FRAME 212992

// Flush pending outbound bytes for every online link in the pool.
//
// For each online Link whose write FIFO is non-empty, pushes buffered
// bytes to its socket via __kfifo_2_net. Client-facing contexts
// (header_type_ == 1) are throttled to MAX_BYTES_PER_FRAME per frame
// (see comment above); other contexts get a 1 MiB cap.
//
// @param now  current frame timestamp (currently unused; kept for
//             interface compatibility with the frame-driven callers)
// @return number of links that had data flushed this frame,
//         or -1 if the context is not started (stat_ == 0)
int GXContext::frame_flush(timetype now)
{
	(void)now;	// not used yet; reserved for time-based throttling
	if(0 == stat_) return -1;

	int counter = 0;
	FOR(i,link_pool_size_){
		Link *aa = link_pool_+i;
		if(aa->isOnline()){
			struct kfifo *ff = &aa->write_fifo_;
			// FIFO is non-empty when the in/out cursors differ.
			if(ff->in != ff->out){
				// Per-frame byte budget depends on the header type
				// (1 == client-facing, see MAX_BYTES_PER_FRAME note).
				const int cap = (1 == header_type_) ? MAX_BYTES_PER_FRAME : 1024*1024;
				__kfifo_2_net(ff,aa->sock_,cap);
				//printf("flushed index[%d]\n",i);
				++counter;
			}
		}
	}
	return counter;
}




