#include "async_api.h"
#include "../log.h"

// Wrap an already-created socket; no send or receive is pending initially.
Session::Session(std::shared_ptr<boost::asio::ip::tcp::socket> socket)
    : m_socket(socket)
    , m_send_pending(false)
    , m_recv_pending(false)
{
}

// Synchronously connect the wrapped socket to the remote endpoint.
// NOTE(review): this uses the throwing overload of tcp::socket::connect --
// it raises boost::system::system_error on failure, so callers must be
// prepared to catch it (or this should switch to the error_code overload).
void Session::Connct(const boost::asio::ip::tcp::endpoint& ep)
{
    m_socket->connect(ep);
    //...
}

// Completion handler for WriteToSocketErr. If the node was only partially
// transmitted (happens when the kernel TCP send buffer is full), re-arm
// async_write_some for the unsent tail of the same node.
//
// Ordering-bug demonstration (the "Err" in the name): each WriteToSocketErr
// call starts its own independent send chain with no queueing, so two
// concurrent sends can interleave. E.g. thread 1 sends "hello world" but only
// "hello" fits; before its continuation runs, thread 2 sends a full
// "hello world"; thread 1 then sends " world" -- the peer receives
// "hellohello world world". The TCP stack does not serialize application
// writes. The queue-based WriteToSocket/WriteCallBack pair fixes this.
void Session::WriteCallBackErr(const boost::system::error_code& ec, size_t bytes, std::shared_ptr<MsgNode> node)
{
    // Fix: the original ignored the error code entirely and would keep
    // re-arming writes on a failed socket.
    if (ec)
    {
        log_error("write error, error code:{}, error msg:{}", ec.value(), ec.message());
        return;
    }

    if (bytes + node->m_cur_len < node->m_total_len)
    {
        node->m_cur_len += bytes;
        m_socket->async_write_some(
            boost::asio::buffer(node->m_msg + node->m_cur_len, node->m_total_len - node->m_cur_len),
            [this, send_node = node](const boost::system::error_code& ec, size_t bytes) {
                this->WriteCallBackErr(ec, bytes, send_node);
            });
    }
}
// Start an unqueued send chain for `buf` (the deliberately "wrong" demo path:
// concurrent calls can interleave their bytes on the wire -- see
// WriteCallBackErr). A copy of the payload is kept alive in m_send_node so the
// buffer outlives the async operation.
void Session::WriteToSocketErr(const std::string buf)
{
    m_send_node = std::make_shared<MsgNode>(buf.c_str(), buf.length());
    auto node = m_send_node;
    m_socket->async_write_some(
        boost::asio::buffer(node->m_msg, node->m_total_len),
        [this, node](const boost::system::error_code& ec, size_t bytes) {
            this->WriteCallBackErr(ec, bytes, node);
        });
}


// Completion handler for the queued, ordered send path (WriteToSocket).
// Handles short writes by re-arming for the unsent tail of the front node;
// once a node completes, pops it and starts sending the next queued node.
void Session::WriteCallBack(const boost::system::error_code& ec, size_t bytes)
{
    if (ec)
    {
        log_error("write error, error code:{}, error msg:{}", ec.value(), ec.message());
        // Fix: the original returned with m_send_pending stuck at true,
        // permanently blocking every later WriteToSocket call. Reset the flag
        // and drop queued nodes (they cannot be sent on a failed socket),
        // preserving the invariant: m_send_pending == false <=> queue empty.
        m_send_pending = false;
        while (!m_send_queue.empty())
        {
            m_send_queue.pop();
        }
        return;
    }

    // The front of the queue is the node currently in flight.
    auto& send_data = m_send_queue.front();
    send_data->m_cur_len += bytes;

    // Short write: re-arm async_write_some for the remainder of this node.
    // (Fix: the original used async_send here, inconsistent with the
    // partial-write loop this handler implements; async_write_some keeps the
    // whole chain in one idiom and the loop already handles short writes.)
    if (send_data->m_cur_len < send_data->m_total_len)
    {
        m_socket->async_write_some(
            boost::asio::buffer(send_data->m_msg + send_data->m_cur_len,
                                send_data->m_total_len - send_data->m_cur_len),
            [this](const boost::system::error_code& ec, size_t bytes) {
                this->WriteCallBack(ec, bytes);
            });
        return;
    }

    // Node fully sent: retire it.
    m_send_queue.pop();
    if (m_send_queue.empty())
    {
        m_send_pending = false;
        return;
    }

    // Start the next queued node. (The original re-tested !empty() right
    // after the empty() early-return -- dead branch, removed.)
    auto& next = m_send_queue.front();
    m_socket->async_write_some(
        boost::asio::buffer(next->m_msg, next->m_total_len),
        [this](const boost::system::error_code& ec, size_t bytes) {
            this->WriteCallBack(ec, bytes);
        });
}
// Queue-based ordered send. Each call enqueues a private copy of `buf`; if a
// send chain is already in flight the node simply waits its turn, which
// guarantees byte ordering across concurrent WriteToSocket calls.
void Session::WriteToSocket(const std::string & buf)
{
    m_send_queue.emplace(new MsgNode(buf.c_str(), buf.length()));

    // A chain is already running; WriteCallBack will pick this node up.
    if (m_send_pending)
    {
        return;
    }

    // Fix: the original passed boost::asio::buffer(buf), which references the
    // caller's string -- that memory may be destroyed before the async write
    // completes (dangling buffer). Send from the queued copy instead, whose
    // lifetime is owned by m_send_queue. (Queue was empty before the emplace
    // above, since m_send_pending was false, so front() is our node.)
    auto& node = m_send_queue.front();
    m_socket->async_write_some(
        boost::asio::buffer(node->m_msg, node->m_total_len),
        [this](const boost::system::error_code& ec, size_t bytes) {
            this->WriteCallBack(ec, bytes);
        });
    m_send_pending = true; // a send chain is now in flight
}


// Completion handler for the async_send based path (WriteTAlloSocket).
// async_send only completes once the entire front node has been transmitted,
// so there is no partial-write handling here: pop the node and, if more data
// is queued, start sending the next one.
void Session::WriteAllCallBack(const boost::system::error_code& ec, size_t bytes)
{
    if (ec)
    {
        log_error("write error, error code:{}, error msg:{}", ec.value(), ec.message());
        // Fix: the original left m_send_pending stuck at true on error,
        // blocking all later sends. Reset it and drop the now-unsendable
        // queue so m_send_pending == false <=> queue empty still holds.
        m_send_pending = false;
        while (!m_send_queue.empty())
        {
            m_send_queue.pop();
        }
        return;
    }

    // Whole front node delivered -- retire it.
    m_send_queue.pop();

    if (m_send_queue.empty())
    {
        m_send_pending = false;
        return;
    }

    // Kick off the next queued node. (Removed the dead !empty() re-check the
    // original had right after its empty() early-return.)
    auto& next = m_send_queue.front();
    m_socket->async_send(
        boost::asio::buffer(next->m_msg, next->m_total_len),
        [this](const boost::system::error_code& ec, size_t bytes) {
            this->WriteAllCallBack(ec, bytes);
        });
}
// Queue-based ordered send using async_send (which internally loops on
// async_write_some until the whole buffer is gone). NOTE(review): the name is
// misspelled ("WriteTAlloSocket"), but it is kept -- it is the declared
// external interface.
void Session::WriteTAlloSocket(const std::string& buf)
{
    m_send_queue.emplace(new MsgNode(buf.c_str(), buf.length()));

    // A send is already in flight; WriteAllCallBack will drain the queue.
    if (m_send_pending)
    {
        return;
    }

    // Fix: the original passed boost::asio::buffer(buf), referencing the
    // caller's string which may be destroyed before the async operation
    // completes (dangling buffer). Use the queued copy, owned by
    // m_send_queue, instead. (Queue was empty before the emplace above since
    // m_send_pending was false, so front() is our node.)
    auto& node = m_send_queue.front();
    m_socket->async_send(
        boost::asio::buffer(node->m_msg, node->m_total_len),
        [this](const boost::system::error_code& ec, size_t bytes) {
            this->WriteAllCallBack(ec, bytes);
        });

    m_send_pending = true;
}


// Completion handler for ReadFromSocket. On a short read, re-arms
// async_read_some for the unfilled remainder of the buffer; once the node is
// full, releases it and clears the pending flag.
void Session::ReadCallBack(const boost::system::error_code& ec, size_t bytes)
{
    if (ec)
    {
        log_error("read error, error code:{}, error msg:{}", ec.value(), ec.message());
        // Fix: the original returned with m_recv_pending stuck at true (and
        // m_recv_node still held), permanently blocking ReadFromSocket.
        m_recv_pending = false;
        m_recv_node = nullptr;
        return;
    }

    m_recv_node->m_cur_len += bytes;

    // Short read: keep reading into the rest of the buffer.
    if (m_recv_node->m_cur_len < m_recv_node->m_total_len)
    {
        m_socket->async_read_some(
            boost::asio::buffer(m_recv_node->m_msg + m_recv_node->m_cur_len,
                                m_recv_node->m_total_len - m_recv_node->m_cur_len),
            [this](const boost::system::error_code& ec, size_t bytes) {
                this->ReadCallBack(ec, bytes);
            });
        return;
    }

    // Buffer full: this receive is complete.
    m_recv_pending = false;
    m_recv_node = nullptr;
}

// Begin an async_read_some chain that fills a fresh MAX_SIZE buffer.
// A no-op while a previous receive is still in progress.
void Session::ReadFromSocket()
{
    if (m_recv_pending)
    {
        return; // previous read still outstanding
    }

    m_recv_node = std::make_shared<MsgNode>(MAX_SIZE);
    auto handler = [this](const boost::system::error_code& ec, size_t bytes) {
        this->ReadCallBack(ec, bytes);
    };
    m_socket->async_read_some(boost::asio::buffer(m_recv_node->m_msg, MAX_SIZE), handler);
    m_recv_pending = true; // a receive is now in flight
}


// Completion handler for ReadAllFromSocket. async_receive delivers whatever
// arrived in a single completion, so unlike ReadCallBack there is no
// re-arming: record the byte count and finish.
void Session::ReadAllCallBack(const boost::system::error_code& ec, size_t bytes)
{
    if (ec)
    {
        log_error("read error, error code:{}, error msg:{}", ec.value(), ec.message());
        // Fix: the original left m_recv_pending stuck at true (and the node
        // allocated) on error, permanently blocking ReadAllFromSocket.
        m_recv_pending = false;
        m_recv_node = nullptr;
        return;
    }

    m_recv_node->m_cur_len += bytes;
    m_recv_node = nullptr;
    m_recv_pending = false;
}

// Begin a single async_receive into a fresh MAX_SIZE buffer.
// A no-op while a previous receive is still in progress.
void Session::ReadAllFromSocket()
{
    if (m_recv_pending)
    {
        return; // previous receive still outstanding
    }

    m_recv_node = std::make_shared<MsgNode>(MAX_SIZE);
    auto handler = [this](const boost::system::error_code& ec, size_t bytes) {
        this->ReadAllCallBack(ec, bytes);
    };
    m_socket->async_receive(boost::asio::buffer(m_recv_node->m_msg, MAX_SIZE), handler);
    m_recv_pending = true; // a receive is now in flight
}