#include "I_UnixNetVConnect.h"
#include "I_UnixNetProcessor.h"

#include <sys/socket.h>

#include <zlib/lib/Ink_time.h>
#include <zlib/lib/Ink_sock.h>

#define NET_RETRY_DELAY (HRTIME_MSECOND * 10)

//Default-construct an idle VIO: no continuation, no buffers, no
//associated vconnection, and no operation pending.
VIO::VIO()
{
	ndone = 0;
	_cont = NULL;
	_nbytes = 0;
	_reader = NULL;
	_writer = NULL;
	//vc was previously left uninitialized; calling reenable() on a
	//fresh VIO would have dereferenced garbage. Start it at NULL so
	//the failure mode is at least deterministic.
	vc = NULL;
	op = NONE;
}

//Arm this VIO as a read operation: the network code will fill
//`writer` with up to `nbytes` bytes and notify `cont` of progress.
void
VIO::init_reader(Continuation *cont, int64_t nbytes, MIOBuffer *writer)
{
	op = READER;
	ndone = 0;
	_writer = writer;
	_nbytes = nbytes;
	_cont = cont;
}

//Arm this VIO as a write operation: the network code will drain up
//to `nbytes` bytes from `reader` and notify `cont` of progress.
void
VIO::init_writer(Continuation *cont, int64_t nbytes, IOBufferReader *reader)
{
	op = WRITER;
	ndone = 0;
	_reader = reader;
	_nbytes = nbytes;
	_cont = cont;
}

//Advance the completion counter by `len` bytes of finished work.
//An "unbounded" vio (_nbytes == INT64_MAX) never advances ndone.
void
VIO::update(int64_t len)
{
	assert(len >= 0);
	if(_nbytes != INT64_MAX)
		ndone += len;
}

//Redirect progress notifications to `cont` and adopt its mutex.
//Precondition: cont must be non-NULL (its mutex is read
//unconditionally); previously a NULL cont crashed here with no
//diagnostic, so make the contract explicit.
void
VIO::set_continuation(Continuation *cont)
{
	assert(cont != NULL);
	_cont = cont;
	mutex = cont->mutex;
}

//Replace the buffer reader backing this VIO.
//A reader is only meaningful on a WRITER vio: init_writer() stores
//_reader and write_to_net() drains it, while READER vios use
//_writer instead. The original assert(op == READER) was inverted
//and would fire on every legitimate call.
void
VIO::set_reader(IOBufferReader *reader)
{
	assert(op == WRITER);
	_reader = reader;
}

//Replace the buffer writer backing this VIO.
//A writer is only meaningful on a READER vio: init_reader() stores
//_writer and read_from_net() fills it, while WRITER vios use
//_reader instead. The original assert(op == WRITER) was inverted
//and would fire on every legitimate call.
void
VIO::set_writer(MIOBuffer *writer)
{
	assert(op == READER);
	_writer = writer;
}

// Re-arm I/O for this vio by delegating to the owning vconnection.
// Precondition: vc has been set (do_io_read/do_io_write assign it).
void
VIO::reenable()
{
	vc->reenable(this);
}

//Bytes of work remaining. An unbounded vio (_nbytes == INT64_MAX)
//always reports unbounded remaining work, matching update() which
//never advances ndone in that case.
int64_t
VIO::ntodo()
{
	return (_nbytes == INT64_MAX) ? INT64_MAX : (_nbytes - ndone);
}

//A NetState starts fully quiescent: no poll event seen, I/O not
//armed, and not queued on any handler enable list.
NetState::NetState()
{
	enabled = false;        //I/O stays off until do_io_*() arms it
	triggered = false;      //no edge event observed yet
	in_enable_list = false; //not on the NetHandler's enable list
}

//Construct a vconnection with no socket, no handler, and no error:
//everything is wired up later by acceptEvent()/connectEvent().
UnixNetVConnect::UnixNetVConnect()
{
	this->fd = -1;                       //no socket yet
	this->nh = NULL;                     //no NetHandler assigned yet
	this->lerr = 0;                      //no error recorded
	this->closed = false;
	this->reentry_counter = 0;           //no signal handler on the stack
	this->f_shutdown = NET_SHUTDOWN_NONE;
}

//Start (or update) a read operation: fill `writer` with up to
//`nbytes` bytes and notify `c` of progress. The vio adopts c's
//mutex when c is given, otherwise this vc's own mutex. A NULL
//writer leaves the read side disabled. Returns the read vio.
VIO *
UnixNetVConnect::do_io_read(Continuation *c, MIOBuffer *writer, int64_t nbytes)
{
	VIO *vio = &(read.vio);

	vio->init_reader(c, nbytes, writer);
	vio->mutex = c ? c->mutex : this->mutex;
	assert(vio->mutex.m_ptr);
	vio->vc = this;

	if(!writer)
	{
		//no destination buffer: nothing can be read yet
		read.enabled = false;
	}else{
		vio->reenable();
	}

	return vio;
}

//Start (or update) a write operation: drain up to `nbytes` bytes
//from `reader` and notify `c` of progress. The vio adopts c's
//mutex when c is given, otherwise this vc's own mutex. A NULL
//reader leaves the write side disabled. Returns the write vio.
VIO *
UnixNetVConnect::do_io_write(Continuation *c, IOBufferReader *reader, int64_t nbytes)
{
	VIO *vio = &(write.vio);

	vio->init_writer(c, nbytes, reader);
	vio->mutex = c ? c->mutex : this->mutex;
	assert(vio->mutex.m_ptr);
	vio->vc = this;

	if(!reader)
	{
		//no source buffer: nothing can be written yet
		write.enabled = false;
	}else{
		vio->reenable();
	}

	return vio;
}

// Request that the connection be closed. The errno argument is
// currently ignored.
void
UnixNetVConnect::do_io_close(int alerrno)
{
	(void)alerrno;
	closed = true;
	// Tear down immediately only when no signal callback is on the
	// stack (reentry_counter == 0) and we are running on the net
	// handler's own thread. Otherwise the owning thread / the
	// outermost *_signal_update() sees `closed` and finishes the
	// close later.
	if(!reentry_counter && this_ethread() == nh->mutex->thread_holding)
		close_UnixNetVConnect(this);
}

//Half- or full-close the socket via shutdown(2) and record which
//direction(s) were shut down in f_shutdown. Uses the POSIX
//SHUT_RD/SHUT_WR/SHUT_RDWR constants (values 0/1/2) instead of the
//previous magic numbers; behavior is unchanged.
void
UnixNetVConnect::do_io_shutdown(ShutDownHowTo_t how)
{
	switch(how)
	{
	case IO_SHUTDOWN_READ:
		shutdown(conn.fd, SHUT_RD);
		f_shutdown = NET_SHUTDOWN_READ;
		break;
	case IO_SHUTDOWN_WRITE:
		shutdown(conn.fd, SHUT_WR);
		f_shutdown = NET_SHUTDOWN_WRITE;
		break;
	case IO_SHUTDOWN_READWRITE:
		shutdown(conn.fd, SHUT_RDWR);
		f_shutdown = NET_SHUTDOWN_READWRITE;
		break;
	default:
		assert(!"not this how");
		break;
	}
}

// Re-arm I/O for `vio`, which must be this vc's read.vio or
// write.vio. If the NetHandler lock can be taken we update the
// epoll interest and ready-list membership directly; otherwise we
// defer the work by queueing on the handler's enable list for its
// own thread to process.
void
UnixNetVConnect::reenable(VIO *vio)
{
	if(vio == &(read.vio))
	{
		read.enabled = true;
		MUTEX_TRY_LOCK(lock, nh->mutex, this_ethread());
		if(lock.is_locked())
		{
			ep.modify(EVENTIO_READ, true);
			// An edge event was already observed: make the vc
			// read-ready now, since no new edge will fire for it.
			if(read.triggered)
				nh->read_ready_list.enqueue(this);
			else
				nh->read_ready_list.remove(this);
		}else{
			// NOTE(review): in_enable_list is neither checked nor
			// set here — confirm the enable list tolerates a
			// double enqueue of the same vc.
			nh->read_enable_list.enqueue(this);
		}
	}else if(vio == &(write.vio))
	{
		write.enabled = true;
		MUTEX_TRY_LOCK(lock, nh->mutex, this_ethread());
		if(lock.is_locked())
		{
			ep.modify(EVENTIO_WRITE, true);
			// Same edge-trigger compensation as the read side.
			if(write.triggered)
				nh->write_ready_list.enqueue(this);
			else
				nh->write_ready_list.remove(this);
		}else{
			nh->write_enable_list.enqueue(this);
		}
	}else{
		assert(!"this vio is not the current object");
	}
}

// First handler run on the net thread after an accepted connection
// is assigned to it: registers the fd with this thread's
// NetHandler and signals NET_EVENT_ACCEPT to the acceptor's
// continuation. Returns EVENT_CONT when the handler lock could not
// be taken (the event is rescheduled), EVENT_DONE otherwise.
int
UnixNetVConnect::acceptEvent(int event, void *data)
{
	Event *e = (Event *)data;
	nh = get_NetHandler(this_ethread());
	MUTEX_TRY_LOCK(l, nh->mutex, this_ethread());

	if(!l.is_locked())
	{
		// Couldn't take the handler lock; retry after a short delay.
		e->schedule_at(NET_RETRY_DELAY);
		return EVENT_CONT;
	}

	if(_action.cancelled)
	{
		close_UnixNetVConnect(this);
		return EVENT_DONE;
	}

	nh->startIO(this);

	// Imitate a read-event trigger. Because we use the
	// edge-triggered model there is a race in acceptEvent: if the
	// vc already had readable data before startIO() registered it,
	// no edge will fire afterwards. So pretend a read event was
	// seen and queue the vc as read-ready.
	read.triggered = true;
	nh->read_ready_list.enqueue(this);
	// NOTE(review): the action's lock is not taken before this
	// callback — confirm the caller guarantees synchronization.
	this->_action.continuation->handleEvent(NET_EVENT_ACCEPT, this);

	return EVENT_DONE;
}

// First handler run for an outbound connection: opens the socket,
// registers it with this thread's NetHandler, starts the
// (non-blocking) connect, and signals NET_EVENT_CONNECT to the
// initiating continuation. Returns EVENT_CONT when the handler
// lock could not be taken (rescheduled), EVENT_DONE otherwise.
int
UnixNetVConnect::connectEvent(int event, void *data)
{
	Event *e = (Event *)data;
	nh = get_NetHandler(this_ethread());
	MUTEX_TRY_LOCK(l, nh->mutex, this_ethread());
	if(!l.is_locked())
	{
		// Couldn't take the handler lock; retry after a short delay.
		e->schedule_at(NET_RETRY_DELAY);
		return EVENT_CONT;
	}

	if(_action.cancelled)
	{
		close_UnixNetVConnect(this);
		return EVENT_DONE;
	}

	fd = conn.open(opt);
	// NOTE(review): the return values of conn.open() and
	// conn.connect() are not checked here — presumably failures
	// surface later as I/O errors; confirm.
	//
	// We must call startIO() before connect(): this is a race
	// condition. With a non-blocking connect, success is signalled
	// by the first write event; if connect() ran first, that event
	// could fire before startIO() registered the fd and be lost.

	nh->startIO(this);
	conn.connect(NULL, opt);
	this->fd = conn.fd;

	this->_action.continuation->handleEvent(NET_EVENT_CONNECT, this);

	return EVENT_DONE;
}

// Pull data from the socket into the read vio's buffer. Called by
// the net handler when the fd is read-ready. Builds an iovec over
// the buffer's writable blocks, reads (possibly repeatedly), then
// signals the state machine with READ_READY / READ_COMPLATE / EOS
// / ERROR as appropriate.
void
UnixNetVConnect::read_from_net()
{
	assert(nh != NULL);

	// The vio mutex serializes us against the owning state
	// machine; if we can't get it, try again on a later pass.
	MUTEX_TRY_LOCK(lock, read.vio.mutex, this_ethread());
	if(!lock.is_locked())
	{
		read_reschedule();
		return;
	}

	// Record the vio mutex so we can detect a do_io_read() issued
	// by the continuation we signal below (it swaps read.vio.mutex).
	ProxyMutex *vio_tmp_mutex = read.vio.mutex.m_ptr;

	if(closed)
	{
		close_UnixNetVConnect(this);
		return;
	}

	if(!read.enabled || read.vio.op != VIO::READER)
	{
		read_disable();
		return;
	}

	int64_t ntodo = read.vio.ntodo();
	if(ntodo <= 0)
	{
		read_disable();
		return;
	}

	// Cap the read at the free space of the destination buffer.
	int64_t toread = read.vio._writer->write_alive();
	if(toread > ntodo)
		toread = ntodo;

	struct iovec tiovec[NET_MAX_IOV];
	unsigned int niov = 0;
	int64_t r = 0;
	//toread: required workload.
	//total_read: attempted workload of completed iterations.
	//rattempted: expected workload of the current iteration.
	int64_t total_read = 0, rattempted = 0;
	if(toread > 0)
	{
		IOBufferBlock *b = read.vio._writer->first_write_block();

		do{
			// Build an iovec over the writable space of up to
			// NET_MAX_IOV buffer blocks.
			niov = 0;
			rattempted = 0;
			while(b && niov < NET_MAX_IOV)
			{
				int64_t a = b->write_alive();
				if(a > 0)
				{
					tiovec[niov].iov_base = b->_end;
					int64_t togo = toread - total_read - rattempted;
					if(a > togo)
						a = togo;
					tiovec[niov].iov_len = a;
					rattempted += a;
					niov++;
					// note: ==
					// if a == togo, this iovec already covers
					// everything left to read.
					if(a >= togo)
						break;
				}
				b = b->next;
			}
			assert(niov > 0);
			if(niov == 1)
				r = ink_read(fd, tiovec[0].iov_base, tiovec[0].iov_len);
			else
				r = ink_readv(fd, &tiovec[0], niov);

			total_read += rattempted;

		// Keep reading while the kernel returned everything asked for.
		}while(rattempted && r == rattempted && total_read < toread);

		// If earlier iterations already moved bytes successfully,
		// summarize the overall progress in r (total of full
		// iterations plus the result of the last, partial one).
		if(total_read != rattempted)
		{
			if(r <= 0)
				r = total_read - rattempted;
			else
				r = total_read - rattempted + r;
		}

		if(r <= 0)
		{
			// EAGAIN/ENOTCONN: nothing readable right now; wait
			// for the next edge-triggered event.
			if(r == -EAGAIN || r == -ENOTCONN)
			{
				read.triggered = false;
				return;
			}

			// 0 (peer closed) or ECONNRESET: end of stream.
			if(!r || r == -ECONNRESET)
			{
				read.triggered = false;
				read_signal_done(VC_EVENT_EOS);
				return;
			}

			// Any other negative errno is a hard error.
			read.triggered = false;
			read_signal_error((int)-r);
			return;
		}

		// Commit the bytes into the buffer and advance vio ndone.
		read.vio._writer->fill(r);
		read.vio.update(r);
	}else{
		r = 0;
	}

	if(r)
	{
		if(read.vio.ntodo() <= 0)
		{
			read_signal_done(VC_EVENT_READ_COMPLATE);
			return;
		}else{
			if(read_signal_update(VC_EVENT_READ_READY) != EVENT_CONT)
			{
				return;
			}
		}

		// read_signal_update() notified the upper state machine,
		// which may have called do_io_read() and replaced read.vio;
		// in that case start over on a later pass.
		if(vio_tmp_mutex != read.vio.mutex.m_ptr)
		{
			read_reschedule();
			return;
		}
	}

	if(read.vio.ntodo() <= 0 || !read.enabled || !read.vio._writer->write_alive())
	{
		read_disable();
		return;
	}

	read_reschedule();
}

void
UnixNetVConnect::write_to_net()
{
	assert(nh != NULL);
	MUTEX_TRY_LOCK(lock, write.vio.mutex, this_ethread());

	if(!lock.is_locked())
	{
		write_reschedule();
		return;	
	}

	if(closed)
	{
		close_UnixNetVConnect(this);
		return;
	}

	//record write mutex
	ProxyMutex *write_tmp_mutex = write.vio.mutex.m_ptr;
	if(!write.enabled || write.vio.op != VIO::WRITER)
	{
		write_disable();
		return;
	}

	int64_t ntodo = write.vio.ntodo();
	if(ntodo <= 0)
	{
		write_disable();
		return;
	}

	IOBufferReader *reader = write.vio._reader;
	IOBufferReader *tmp_reader = reader->clone();
	int64_t towrite = reader->read_alive();

	if(towrite > ntodo)
		towrite = ntodo;

	int r = 0;
	if(towrite > 0)
	{
		struct iovec tiovec[NET_MAX_IOV];
		int niov = 0;
		int64_t total_write = 0, wattempted = 0;
		do{
			wattempted = 0;
			niov = 0;
			while(niov < NET_MAX_IOV)
			{
				int64_t cur_len = tmp_reader->block_read_alive();
				if(cur_len > 0){
					int64_t togo = towrite - total_write - wattempted;
					if(cur_len > togo)
						cur_len = togo;
					wattempted += cur_len;
					tiovec[niov].iov_base = tmp_reader->start();
					tiovec[niov].iov_len = cur_len;
					niov++;
					//if curl_len equal togo, indicating that task has benn completed.
					if(cur_len >= togo)
						break;
				}else
					break;
			}

			assert(niov > 0);
			if(niov == 1)
				r = ink_write(fd, tiovec[0].iov_base, tiovec[0].iov_len);
			else
				r = ink_writev(fd, tiovec, niov);

			total_write += wattempted;
		}while(wattempted && r == wattempted && total_write < towrite);
		tmp_reader->dealloc();

		if(total_write != wattempted)
		{
			if(r <= 0)
				r = total_write - wattempted;
			else
				r = total_write - wattempted + r;
		}

		if(r <= 0)
		{
			if(r == -EAGAIN || r == -ENOTCONN)
			{
				write.triggered = false;
				return;
			}

			if(!r || r == -ECONNRESET || r == -EPIPE)
			{
				write.triggered = false;
				write_signal_done(VC_EVENT_EOS);
				return;
			}

			write.triggered = false;
			write_signal_error((int)-r);
			return;
		}

		write.vio._reader->consum(r);
		write.vio.update(r);
	}else
		r = 0;

	if(r)
	{
		if(write.vio.ntodo() <= 0)
		{
			write_signal_done(VC_EVENT_WRITE_COMPLATE);
			return;
		}else{
			if(read_signal_update(VC_EVENT_WRITE_READY) != EVENT_CONT)
			{
				return;
			}
		}
		if(write_tmp_mutex != write.vio.mutex.m_ptr)
		{
			write_reschedule();
			return;
		}
	}

	if(write.vio.ntodo() <= 0 || !write.enabled || !write.vio._reader->read_alive())
	{
		write_disable();
		return;
	}

	write_reschedule();
}

void
UnixNetVConnect::read_reschedule()
{
	if(read.enabled && read.triggered)
	{
		nh->read_ready_list.enqueue(this);
	}else{
		nh->read_ready_list.remove(this);
	}
}

//Keep the handler's write-ready-list membership in sync with this
//vc's state: queued only while writes are both enabled and an edge
//event is pending.
void
UnixNetVConnect::write_reschedule()
{
	bool ready = write.enabled && write.triggered;
	if(!ready)
		nh->write_ready_list.remove(this);
	else
		nh->write_ready_list.enqueue(this);
}

int
UnixNetVConnect::read_signal_update(int event)
{
	reentry_counter++;
	if(read.vio._cont)
	{
		read.vio._cont->handleEvent(event, &read.vio);
	}else{
		switch(event)
		{
		case VC_EVENT_EOS:
		case VC_EVENT_ERROR:
			closed = true;
			break;
		}
	}

	reentry_counter--;
	if(!reentry_counter && closed)
	{
		close_UnixNetVConnect(this);
		return EVENT_DONE;
	}

	return EVENT_CONT;
}

int
UnixNetVConnect::write_signal_update(int event)
{
	reentry_counter++;
	if(write.vio._cont)
	{
		write.vio._cont->handleEvent(event, &write.vio);
	}else{
		switch(event)
		{
		case VC_EVENT_EOS:
		case VC_EVENT_ERROR:
			closed = true;
			break;
		}
	}

	reentry_counter--;
	if(!reentry_counter && closed)
	{
		close_UnixNetVConnect(this);
		return EVENT_DONE;
	}

	return EVENT_CONT;
}

//Deliver a terminal read event: disable further reads first, then
//signal. If the vc survived the signal, fix up the ready list.
//Returns EVENT_DONE if the vc was torn down, EVENT_CONT otherwise.
int
UnixNetVConnect::read_signal_done(int event)
{
	read.enabled = false;

	int rc = read_signal_update(event);
	if(rc != EVENT_DONE)
	{
		read_reschedule();
		return EVENT_CONT;
	}
	return EVENT_DONE;
}

//Deliver a terminal write event: disable further writes first,
//then signal. If the vc survived the signal, fix up the ready
//list. Returns EVENT_DONE if torn down, EVENT_CONT otherwise.
int
UnixNetVConnect::write_signal_done(int event)
{
	write.enabled = false;

	int rc = write_signal_update(event);
	if(rc != EVENT_DONE)
	{
		write_reschedule();
		return EVENT_CONT;
	}
	return EVENT_DONE;
}

//Record the error code, then deliver a terminal ERROR event on the
//read side. Returns what read_signal_done() returns.
int
UnixNetVConnect::read_signal_error(int aerrno)
{
	lerr = aerrno;
	return read_signal_done(VC_EVENT_ERROR);
}

//Record the error code, then deliver a terminal ERROR event on the
//write side. Returns what write_signal_done() returns.
int
UnixNetVConnect::write_signal_error(int aerrno)
{
	lerr = aerrno;
	return write_signal_done(VC_EVENT_ERROR);
}

// Stop read-side processing: clear the enable flag, drop read
// interest in the event loop, and leave the ready list.
void
UnixNetVConnect::read_disable()
{
	read.enabled = false;
	ep.modify(EVENTIO_READ, false);
	nh->read_ready_list.remove(this);
}

// Stop write-side processing: clear the enable flag, drop write
// interest in the event loop, and leave the ready list.
void
UnixNetVConnect::write_disable()
{
	write.enabled = false;
	ep.modify(EVENTIO_WRITE, false);
	nh->write_ready_list.remove(this);
}
