#include "tk_net_tcp_client.h"

#include "tk_thread.h"

#include <QDebug>

#include <cstdlib>
#include <cstring>
#include <functional>
#include <iostream>

#include <boost/asio/buffer.hpp>
#include <boost/asio/io_context.hpp>
#include <boost/asio/ip/tcp.hpp>
#include <boost/asio/write.hpp>
#include <boost/date_time/posix_time/posix_time_types.hpp>

using boost::asio::ip::tcp;
using std::placeholders::_1;
using std::placeholders::_2;

using boost::asio::io_service;
using boost::asio::deadline_timer;

namespace tk{
namespace io{

	//----------------------------------------------------------------------

	//
	// This class manages socket timeouts by running the io_context using the timed
	// io_context::run_for() member function. Each asynchronous operation is given
	// a timeout within which it must complete. The socket operations themselves
	// use std::bind to specify the completion handler:
	//
	//   +---------------+
	//   |               |
	//   |    receive    |
	//   |               |
	//   +---------------+
	//           |
	//  async_-  |    +----------------+
	// receive() |    |                |
	//           +--->| handle_receive |
	//                |                |
	//                +----------------+
	//
	// For a given socket operation, the client object runs the io_context to block
	// thread execution until the operation completes or the timeout is reached. If
	// the io_context::run_for() function times out, the socket is closed and the
	// outstanding asynchronous operation is cancelled.
	//
	class client
	{
	public:
		client(boost::asio::io_context * p_io_context, tcp::socket *p_socket)
			:p_io_context_(p_io_context),
			p_socket_(p_socket)
		{
		}

		// Blocking receive with a deadline: starts an async_receive and pumps
		// the io_context until the operation finishes or `timeout` elapses.
		// On return `error` holds the completion status and the number of
		// bytes read is returned (0 when nothing completed).
		std::size_t receive(const boost::asio::mutable_buffer& buffer,
			std::chrono::steady_clock::duration timeout,
			boost::system::error_code& error)
		{
			std::size_t bytes_read = 0;
			// The completion handler records the outcome into the caller's
			// `error` and our local byte counter.
			p_socket_->async_receive(boost::asio::buffer(buffer),
				[&error, &bytes_read](const boost::system::error_code& ec,
					std::size_t n)
				{
					error = ec;
					bytes_read = n;
				});

			// Drive the io_context until completion or deadline.
			run(timeout);
			return bytes_read;
		}

	private:
		// Runs the io_context until the pending operation completes or the
		// deadline passes; on deadline, cancels the operation and drains the
		// resulting abort handler so no callback is left queued.
		void run(std::chrono::steady_clock::duration timeout)
		{
			// A previous operation may have left the context "stopped";
			// restart it so run_for() actually does work.
			p_io_context_->restart();

			// Block for at most `timeout`. If the pending operation is a
			// composed one, the deadline applies to the whole operation.
			p_io_context_->run_for(timeout);

			// A successful completion leaves the io_context stopped (out of
			// work). If it is NOT stopped, run_for() must have timed out.
			if (!p_io_context_->stopped())
			{
				// Abort the outstanding receive...
				p_socket_->cancel();

				// ...and run until its operation_aborted handler has fired.
				p_io_context_->run();
			}
		}

	private:
		boost::asio::io_context * p_io_context_;
		tcp::socket * p_socket_;
	};

	// Performs a blocking connect with a millisecond timeout by racing an
	// async_connect against a deadline_timer on the caller-supplied
	// io_service. On timeout the socket is closed, which aborts the
	// pending connect.
	class AsyncConnectHandler
	{
	public:
		AsyncConnectHandler(io_service& ios, tcp::socket &s)
			:io_service_(ios)
			, timer_(ios)
			, socket_(s) {}

		// Connect to `ep`, waiting at most `million_seconds` milliseconds.
		// Returns true when the connection was established in time.
		// NOTE: the method name keeps the historical "aysnc" spelling so
		// existing callers still compile.
		bool aysnc_connect(const tcp::endpoint &ep, int million_seconds) {
			bool connect_success = false;
			bool timeout = false;

			socket_.async_connect(ep, std::bind(&AsyncConnectHandler::connect_handle, this, _1, std::ref(connect_success)));
			timer_.expires_from_now(boost::posix_time::milliseconds(million_seconds));
			timer_.async_wait(std::bind(&AsyncConnectHandler::timer_handle, this, _1, std::ref(timeout)));

			// The io_service may be left in the "stopped" state by an earlier
			// run; without a reset, run_one() would return 0 immediately and
			// the loop below would spin forever.
			io_service_.reset();
			do
			{
				// Wait until one of the two asynchronous operations completes.
				if (io_service_.run_one() == 0)
				{
					break; // out of work - nothing left to wait for
				}
			} while (!timeout && !connect_success);

			// Cancel whichever operation is still outstanding and drain its
			// abort handler NOW: both handlers hold std::ref to the local
			// flags above and must never fire after this frame is gone.
			timer_.cancel();
			io_service_.run();
			return connect_success;
		}
	protected:
	private:
		// Completion handler for async_connect: records success; any error
		// (including the abort caused by the timeout close) is ignored.
		void connect_handle(boost::system::error_code ec, bool &connect_success) {
			if (!ec)
			{
				// Connected successfully.
				connect_success = true;
			}
		}

		// Deadline handler: a non-aborted expiry means the connect timed
		// out, so close the socket to cancel the pending async_connect.
		void timer_handle(boost::system::error_code ec, bool &timeout) {
			if (!ec)
			{
				// Timer expired before the connect finished.
				socket_.close();
				timeout = true;
			}
		}

		io_service &io_service_;
		deadline_timer timer_;
		tcp::socket &socket_;
	};


TkNetTcpClient::TkNetTcpClient()
{
	// Nothing is allocated until Init(); start with every owned pointer null
	// so CloseAll() is safe to call at any time.
	m_p_client = TK_NULL_PTR;
	m_p_socket = TK_NULL_PTR;
	m_p_io_context = TK_NULL_PTR;
}

TkNetTcpClient::~TkNetTcpClient()
{
    // Release the client helper, socket and io_context (CloseAll is
    // idempotent, so a prior Exit()/CloseAll() is harmless).
    CloseAll();
}

// Establishes a TCP connection to remote_ip:remote_port.
// @param remote_ip     dotted-decimal address of the peer
// @param remote_port   peer port
// @param local_port    stored in m_local_port but currently unused (no bind)
// @param over_time_ms  intended connect timeout; currently UNUSED because the
//                      synchronous connect below has no deadline (the
//                      AsyncConnectHandler path is commented out)
// @return TK_TRUE on success, TK_FALSE on any failure.
tk_bool TkNetTcpClient::Init(QString remote_ip, tk_uint16 remote_port,tk_uint16 local_port,tk_int32 over_time_ms)
{
    // NOTE(review): CloseAll() below re-locks both mutexes while they are
    // already held here, exactly as the pre-existing catch path did - this
    // assumes TkMutex is recursive; confirm.
    TkMutexLocker locker(m_mutex_recv);
	TkMutexLocker locker_(m_mutex_send);
	try
    {
        // Drop any previous connection before building a new one.
        CloseAll();

        m_local_port = local_port;
        m_remote_ip = remote_ip;
        m_remote_port = remote_port;

        //m_listen_endpoint = tcp::endpoint(/*tcp::v4(), m_local_port*/);
        m_remote_endpoint = tcp::endpoint(boost::asio::ip::address::from_string(m_remote_ip.toStdString()),
                                          m_remote_port);

		m_p_io_context = new boost::asio::io_context;
        m_p_socket = new tcp::socket(*m_p_io_context);

 		//AsyncConnectHandler acHandler(*m_p_io_context,*m_p_socket);
 		//if (acHandler.aysnc_connect(m_remote_endpoint, over_time_ms))
		boost::system::error_code ec;
		if (m_p_socket->connect(m_remote_endpoint, ec))
 		{
			// Log the real reason (the old "OverTime" text was misleading:
			// this synchronous connect has no timeout) and release the
			// half-built socket/io_context instead of leaving them dangling.
			qWarning() << "TkNetTcpClient Connect Failed : " << m_remote_ip
					   << " " << ec.message().c_str();
			CloseAll();
			return TK_FALSE;
		}

        m_p_client = new client(m_p_io_context, m_p_socket);
		//tcp::socket::receive_buffer_size _rbs(10*1024*1024);
       // m_p_socket->set_option(_rbs);

    }
    catch (std::exception& e)
    {
        qDebug()<<"TkNetTcpClient exception : "<< m_remote_ip <<" "<<e.what();
		CloseAll();
        return TK_FALSE;
    }
    return TK_TRUE;
}

// Shuts the client down; equivalent to destruction as far as the
// connection is concerned. Safe to call more than once.
void TkNetTcpClient::Exit()
{
    CloseAll();
}

void TkNetTcpClient::CloseAll()
{
	TkMutexLocker locker(m_mutex_recv);
	TkMutexLocker locker_(m_mutex_send);

	if (m_p_client)
	{
		delete m_p_client;
		m_p_client = TK_NULL_PTR;
	}
    if(m_p_socket)
    {
        if(m_p_socket->is_open())
        {
            m_p_socket->close();
        }
    }
	if (m_p_socket)
	{
		delete m_p_socket;
		m_p_socket = TK_NULL_PTR;
	}
	if (m_p_io_context)
	{
		delete m_p_io_context;
		m_p_io_context = TK_NULL_PTR;
	}
}

// Sends the entire buffer to the peer.
// @param data  bytes to transmit
// @return TK_TRUE only when every byte was written without error.
tk_bool TkNetTcpClient::Send(QByteArray &data)
{
    TkMutexLocker locker(m_mutex_send);

    if(!m_p_socket)
    {
        return TK_FALSE;
    }
    try
    {
        boost::system::error_code ec;
        // boost::asio::write loops until the whole buffer is transmitted;
        // the previous write_some() could legally stop after a partial
        // write with no error, making large sends fail spuriously.
        tk_size_t len = boost::asio::write(*m_p_socket,
                    boost::asio::buffer(data.data(),static_cast<tk_size_t>(data.size())),
                    ec);

		if (ec || len != static_cast<tk_size_t>(data.size()))
		{
			return TK_FALSE;
		}
    }
    catch (std::exception& e)
    {
        qDebug()<<"TkNetTcpClient Send exception : "<<m_remote_port<<" "<<e.what();
        return TK_FALSE;
    }
    return TK_TRUE;
}

// Receives up to sizeof(m_recv_buf) bytes with a timeout.
// @param data        filled (resized) with the received bytes on success
// @param timeout_ms  maximum time to wait for data
// @return TK_TRUE when data arrived, or when the wait merely timed out
//         (timeout is treated as a benign empty result, preserving the
//         original semantics); TK_FALSE on real errors or zero-length reads.
tk_bool TkNetTcpClient::Recv(QByteArray &data,tk_int32 timeout_ms)
{
	TkMutexLocker locker(m_mutex_recv);
	if (!m_p_socket || !m_p_client)
    {
        return TK_FALSE;
    }
    try
    {
        boost::system::error_code ec;
        tk_size_t len = m_p_client->receive(boost::asio::buffer(m_recv_buf),
											std::chrono::milliseconds(timeout_ms),
                                            ec);
		if (!ec)
		{
			if (len)
			{
				data.resize(static_cast<tk_int32>(len));
				memcpy(data.data(), m_recv_buf.data(), sizeof(tk_uint8)*len);
				return TK_TRUE;
			}
			// Completed without error but delivered no bytes.
			return TK_FALSE;
		}
		// operation_aborted means client::run() cancelled the receive on
		// timeout - not a connection failure. Compare error_codes directly:
		// the old "ec.value() == ..." ignored the error category and is not
		// portable (the raw value differs across platforms).
		if (ec == boost::asio::error::operation_aborted)
		{
			return TK_TRUE;
		}
		return TK_FALSE;
    }
    catch (std::exception& e)
    {
        qDebug()<<"TkNetTcpClient Recv exception : "<<m_remote_port<<" "<<e.what();
        return TK_FALSE;
    }
}

}//namespace io
}//namespace tk
