#include <slothttpd/fcgi/multiplexer.hpp>
#include <slothttpd/fcgi/application.hpp>
#include <slothttpd/logger.hpp>
#include <slothttpd/server.hpp>

#include <stdexcept>
#include <limits>

#include <boost/bind.hpp>

namespace slothttpd {
namespace fcgi {

namespace asio = boost::asio;
namespace placeholder = boost::asio::placeholders;

// Take one request id from this connection's free pool.
//
// Returns nullptr when every id on this connection is in use. When the
// last free id is handed out, the connection re-files itself from the
// application's "not full" list into its "full" list so that new requests
// are routed elsewhere.
multiplexer::request_id *multiplexer::acquire_request()
{
    if (free_ids_.empty())
        return nullptr;
    
    request_id &picked = free_ids_.front();
    picked.unlink();
    
    if (free_ids_.empty()) {
        // that was the last free id: this connection is now saturated
        unlink();
        app_->full_connections.push_front(*this);
    }
    
    return &picked;
}

// Return a request id to the free pool and clear its per-request state.
//
// Also re-files this connection into the application's "not full" list,
// since at least one id is free again after this call.
void multiplexer::reset_request(multiplexer::request_id &r)
{
    // drop all per-request state (callbacks and buffer sinks)
    r.stdout_buffers = nullptr;
    r.stderr_buffers = nullptr;
    r.handler        = request_id::handler_type();
    r.write_buffer   = request_id::get_write_buffer_type();
    
    // detach from whatever pending list it sits on, then recycle the id
    r.unlink();
    free_ids_.push_front(r);
    
    // the connection is no longer full
    unlink();
    app_->not_full_connections.push_back(*this);
}

// Fail every outstanding request on 'list' with error 'err'.
//
// Each handler is invoked with a null record pointer to signal failure,
// then the id is recycled. The loop terminates because reset_request()
// unlinks 'r' from 'list' on every iteration.
void multiplexer::notify_error(multiplexer::request_id_list &list, const boost::system::error_code &err)
{
    while (!list.empty()) {
        auto &r = list.front();
        
        r.handler(nullptr, err);
        reset_request(r); // will erase 'r' from list and move to not used ids
    }
}

// Queue a request for transmission and start the write state machine
// if no write is currently in flight.
void multiplexer::enqueue_write(multiplexer::request_id &r)
{
    write_pending_ids_.push_back(r);
    
    if (write_state_ != write_not_active)
        return; // a write chain is already running; it will pick this up
    
    post_write();
}

void multiplexer::close()
{
    try {
        socket_.shutdown(socket_.shutdown_both);
    } catch (...) {
        
    }
    //socket_.close();
}

// Look up an in-flight request by its FCGI request id.
//
// Searches the read-pending list first, then the write-pending list;
// returns nullptr when the id is not known to this connection.
multiplexer::request_id *multiplexer::find_request_id(std::uint16_t id)
{
    // scan one pending list for a matching id
    auto scan = [id](request_id_list &list) -> request_id * {
        for (auto &candidate : list) {
            if (candidate.id == id)
                return &candidate;
        }
        return nullptr;
    };
    
    if (request_id *found = scan(read_pending_ids_))
        return found;
    
    return scan(write_pending_ids_);
}

// Build a new connection to the FCGI application and start an async connect.
//
// The connection registers itself in app's "not full" list before the
// connect is issued, and pre-populates the free-id pool with
// requests_per_connection ids starting at fcgi::min_id_value.
// The multiplexer_ptr passed to async_connect keeps this object alive
// (via the intrusive ref count) until the handler runs.
multiplexer::multiplexer(application *app) :
    socket_(server::instance->get_service()),
    write_state_(write_not_active),
    app_(app),
    id_container_(app->requests_per_connection),
    ref_count_(0)
{
    app->not_full_connections.push_front(*this);
    
    // TODO: do all initializations after queried values from fcgi application
    std::int32_t i = fcgi::min_id_value;
    
    for (auto &r : id_container_) {
        r.id = i;       // NOTE(review): narrows int32 -> request id; fine while ids stay small
        r.m = this;
        i++;
        free_ids_.push_back(r);
    }
    
    socket_.async_connect(app->endpoint,
                          boost::bind(&multiplexer::handle_connect, 
                                      multiplexer_ptr(this),
                                      placeholder::error));
}

// True when every request id on this connection is in use
// (no new request can be started here).
bool multiplexer::full() const
{
    return free_ids_.empty();
}

// Start a new FCGI request on this connection.
//
// out_buffers / err_buffers receive FCGI_STDOUT / FCGI_STDERR content,
// get_buffers supplies the outgoing params/stdin data chunk by chunk,
// and handler is invoked on record arrival or error.
// Returns a controller bound to the acquired id, or one holding nullptr
// when the connection has no free ids.
//
// Note: the first two parameters were renamed from 'stdout'/'stderr' —
// those names are standard C macros (<cstdio>), so using them as
// identifiers breaks compilation whenever stdio is transitively included.
// Parameter names are not part of the call interface, so callers are
// unaffected.
multiplexer::request_controller multiplexer::begin_request(
        receive_buffer_list *out_buffers,
        receive_buffer_list *err_buffers, 
        request_id::get_write_buffer_type get_buffers,
        request_id::handler_type handler)
{
    request_id *id = acquire_request();
    
    if (id != nullptr) {
        try {
            // copying the callbacks may throw; roll back the id on failure
            id->handler          = handler;
            id->write_buffer     = get_buffers;
            id->stderr_buffers   = err_buffers;
            id->stdout_buffers   = out_buffers;
        } catch (...) {
            reset_request(*id);
            throw;
        }
        
        enqueue_write(*id);
        
        return request_controller(id);
    }
    
    return request_controller(nullptr);
}

// Completion handler for the async connect started in the constructor.
// On success, begins reading record headers; on failure, fails every
// request already queued for writing.
void multiplexer::handle_connect(multiplexer_ptr m, const boost::system::error_code &err)
{
    if (err) {
        logger() << "connect error: " << err.message() << '\n';
        m->notify_error(m->write_pending_ids_, err);
        return;
    }
    
    // TODO: query fcgi application for values
    m->post_read_header();
}

// Send one FCGI record: an 8-byte header (built in write_header_buffer_)
// followed by 'len' bytes of content taken from 'b', as a single
// gather-write. Throws std::logic_error when the content exceeds the
// protocol's per-record limit.
void multiplexer::post_write_record(std::uint16_t id, std::uint8_t t, char *b, std::size_t len)
{
    if (len > fcgi::max_content_length) {
        logger() << "buffer provided exceed limit: " << len << '\n';
        
        // TODO: try implement something more graceful
        BOOST_THROW_EXCEPTION(std::logic_error("buffer provided exceed limit"));
    }
    
    fcgi::header_view header(write_header_buffer_.data());
    header.init(t, id, len);
    
    // header and payload go out together in one scatter/gather write
    two_buffers_[0] = asio::buffer(write_header_buffer_.data(), fcgi::header_len);
    two_buffers_[1] = asio::buffer(b, len);
    
    asio::async_write(socket_,
                      two_buffers_,
                      asio::transfer_all(),
                      boost::bind(&multiplexer::handle_write,
                                  multiplexer_ptr(this),
                                  placeholder::error));
}

// Advance the outgoing-record state machine by one step.
//
// One request is serialized at a time (the front of write_pending_ids_):
//   begin_request record -> params records (until an empty chunk) ->
//   stdin records (until an empty chunk) -> move id to read_pending_ids_.
// Each async_write completion calls back into this function via
// handle_write. The *_end states intentionally fall through so the
// terminating empty record is sent in the same step.
// write_abort is entered from handle_read_control_record when the app
// ends a request whose write is still in flight.
void multiplexer::post_write()
{
    if (!socket_.is_open())
        return;
    
    // view over the shared header buffer; only used by write_not_active
    fcgi::begin_request_view v(write_header_buffer_.data());
    
    switch (write_state_) {
    case write_not_active:
        if (!write_pending_ids_.empty()) {
            request_id  &f = write_pending_ids_.front();
            
            v.init(f.id, fcgi::role::responder, fcgi::keep_conn);
            
            asio::async_write(socket_, 
                              asio::buffer(write_header_buffer_, fcgi::control_record_len), 
                              asio::transfer_all(),
                              boost::bind(&multiplexer::handle_write,
                                          multiplexer_ptr(this),
                                          placeholder::error));
            write_state_ = write_sending_begin_request;
        }
        break;
        
    case write_sending_begin_request:
        write_state_ = write_sending_params;
        // falling through!
        
    case write_sending_params: {
            request_id  &f      = write_pending_ids_.front();
            char_range_t buffer = f.write_buffer();
            
            // an empty chunk doubles as the params terminator record
            post_write_record(f.id, fcgi::type::params, buffer.begin(), buffer.size());
            
            if (buffer.size() == 0) {
                write_state_ = write_sending_params_end;
            }
        }
        break;
        
    case write_sending_params_end:
        write_state_ = write_sending_stdin;
        // falling through
        
    case write_sending_stdin:
        if (true) {  // block only introduces a scope for the references below
            request_id  &f      = write_pending_ids_.front();
            auto buffer = f.write_buffer();
            
            post_write_record(f.id, fcgi::type::stdin, buffer.begin(), buffer.size());
            
            if (buffer.size() == 0) {
                // all input sent; release the data source callback
                f.write_buffer = request_id::get_write_buffer_type();
                write_state_ = write_sending_stdin_end;
            }
        }
        break;
        
    case write_sending_stdin_end: {
            request_id  &f = write_pending_ids_.front();
            
            // request fully sent: now wait for the application's response
            f.unlink();
            read_pending_ids_.push_front(f);
            
            write_state_ = write_not_active;
        }
        break;
    
    case write_abort:
        // request was ended by the peer mid-write; free its id now
        reset_request(write_pending_ids_.front());
        write_state_ = write_not_active;
        break;
        
    default:
        logger() << "invalid write state" << std::endl;
    }
    
    // idle again with more queued requests: start the next one immediately
    if (write_state_ == write_not_active && !write_pending_ids_.empty()) {
        post_write();
    }
}

// Completion handler for every async_write issued by the write state
// machine; drives post_write() forward or reports the failure.
void multiplexer::handle_write(multiplexer_ptr m, const boost::system::error_code &err)
{
    if (err) {
        m->on_write_error(err, __LINE__);
        return;
    }
    
    m->post_write();
}

void multiplexer::post_read_header()
{
    // TODO: close after timeout ?
    if (write_pending_ids_.empty() && read_pending_ids_.empty())
        return;
    
    boost::asio::async_read(socket_,
                            boost::asio::buffer(read_header_buffer_, fcgi::header_len),
                            boost::asio::transfer_all(),
                            boost::bind(&multiplexer::handle_read_header, multiplexer_ptr(this), placeholder::error));
}

// Completion handler for the record-header read: dispatch on record type.
//
// Management records (request id 0) are skipped wholesale for now.
// end_request triggers a control-record read; stdout/stderr records are
// streamed into the request's receive buffers. An unknown request id or
// record type is treated as a protocol violation and closes the socket.
void multiplexer::handle_read_header(multiplexer_ptr m, const boost::system::error_code &err)
{
    if (!err) {
        fcgi::header_view   v(m->read_header_buffer_.data());
        request_id         *req_id = nullptr;
        std::uint16_t       id = v.request_id();
        
        // TODO: handle managment records
        if (id == fcgi::managment_record_id) {
            // skip content and padding, then resume the header loop
            m->post_skip_data(std::size_t(v.content_length()) + v.padding_length(),
                              boost::bind(&multiplexer::post_read_header, m.get()));
            return;
        }
        
        req_id = m->find_request_id(id);
        
        if (req_id == nullptr) {
            logger() << "fcgi app: unexpected request id: " << id << '\n';
            logger() << v;
            
            m->close();
            return;
        }
        
        switch (v.type()) {
        case fcgi::type::end_request:
            m->post_read_control_record();
            break;
            
        case fcgi::type::stdout:
        case fcgi::type::stderr:
            m->post_read_content(req_id, v.content_length(), v.type());
            break;
            
        default:
            logger() << "unknown fcgi record type: " << int(v.type()) << '\n';
            m->close();
            return;
        }
    } else {
        m->on_read_error(err, __LINE__);
    }
}

// Read a control record's body into the tail of read_header_buffer_,
// right after the header that is already there.
//
// NOTE(review): the size guard uses v.content().size() while the read
// uses v.content_length() — presumably these agree for a freshly parsed
// header, but verify against header_view's definition.
void multiplexer::post_read_control_record()
{
    fcgi::header_view v(read_header_buffer_.data());
    
    char *content = v.content().begin();
    
    if (std::size_t(v.content().size()) <= (read_header_buffer_.size() - fcgi::header_len)) {
        asio::async_read(socket_,
                         asio::buffer(content, v.content_length()),
                         asio::transfer_all(),
                         boost::bind(&multiplexer::handle_read_control_record,
                                     multiplexer_ptr(this),
                                     placeholder::error));
    } else {
        // record body would overflow the fixed header buffer
        logger() << "control record too large: " << v.content_length() << '\n';
        close();
    }
}

// Completion handler for a control-record body read.
//
// For end_request: notify the request's handler, then either free the id
// or — when that id's write is still in flight — flag the write state
// machine so the id is freed once the write completes.
//
// Fix: find_request_id() can return nullptr (e.g. the id was recycled
// between the header read and this callback); the original dereferenced
// it unconditionally. An unknown id is now treated as a protocol error,
// matching handle_read_header's behavior.
void multiplexer::handle_read_control_record(multiplexer_ptr m, const boost::system::error_code &err)
{
    if (!err) {
        fcgi::header_view v(m->read_header_buffer_.data());
        
        switch (v.type()) {
        case fcgi::type::end_request: {
                request_id *req_id = m->find_request_id(v.request_id());
                
                if (req_id == nullptr) {
                    logger() << "fcgi app: end_request for unknown id: " << v.request_id() << '\n';
                    m->close();
                    return;
                }
                
                // TODO: use protocol_status
                //fcgi::end_request_view(m->read_header_buffer_.data()).protocol_status()
                
                req_id->handler(m->read_header_buffer_.data(), err);
                
                // if aborting request for which write is pending, just mark that it was aborted.
                // request id should be freed when write will be completed
                if (m->write_state_ != write_not_active && &m->write_pending_ids_.front() == req_id) {
                    m->write_state_ = write_abort;
                } else {
                    m->reset_request(*req_id);
                }
            }
            break;
        
        default:
            break;
        }
        
        m->post_skip_padding();
    } else {
        logger() << err.message() << '\n';
        m->close();
    }
}

// Begin streaming 'bytes' bytes of stdout/stderr record content for
// 'req_id' into the matching receive buffer list.
void multiplexer::post_read_content(request_id *req_id, std::uint16_t bytes, std::uint8_t type)
{
    read_current_ = req_id;
    read_left_    = bytes;
    
    if (type == fcgi::type::stdout) {
        read_dst_ = req_id->stdout_buffers;
    } else {
        read_dst_ = req_id->stderr_buffers;
    }
    
    post_read_content();
}

// Read the next chunk of the current record's content into free space
// obtained from read_dst_.
//
// When the destination cannot supply space (allocation failure or
// exhaustion), the remaining content is skipped on the wire and the
// destination is disabled so the connection itself stays usable.
// Fix: removed an unused local 'fcgi::header_view v' that the original
// constructed and never read.
void multiplexer::post_read_content()
{
    auto range = char_range_t(nullptr, nullptr);
    
    try {
        range = read_dst_->get_free_space();
    } catch (std::bad_alloc &ex) {
        // fall through with an empty range -> skip path below
        logger() << "reading response from fcgi app: " << ex.what() << '\n';
    }
    
    if (range.size() > 0) {
        // never read more than the record has left or the buffer can hold
        const std::size_t read_size = std::min(read_left_, std::size_t(range.size()));
        
        read_left_ -= read_size;
        
        asio::async_read(socket_,
                         asio::buffer(range.begin(), read_size),
                         asio::transfer_all(),
                         boost::bind(&multiplexer::handle_read_content,
                                     multiplexer_ptr(this),
                                     placeholder::bytes_transferred,
                                     placeholder::error));
    } else {
        post_skip_data(read_left_, boost::bind(&multiplexer::post_skip_padding, this));
        read_dst_->disable();
        // TODO: abort request
    }
}

// Completion handler for a content-chunk read: commit the received bytes
// to the destination, notify the request's handler, then either continue
// with the rest of the record or skip its padding.
//
// An empty handler is treated like an error (the request was presumably
// reset while the read was in flight).
// Fix: removed an unused local 'fcgi::header_view v' that the original
// constructed and never read.
void multiplexer::handle_read_content(multiplexer_ptr m, std::size_t bytes, const boost::system::error_code &err)
{
    // TODO: more gracefull
    if (!err && !m->read_current_->handler.empty()) {
        m->read_dst_->provide(bytes);
        m->read_current_->handler(m->read_header_buffer_.data(), err);
        
        if (m->read_left_ > 0) {
            m->post_read_content();
        } else {
            m->post_skip_padding();
        }
    } else {
        m->on_read_error(err, __LINE__);
    }
}

void multiplexer::post_skip_padding()
{
    fcgi::header_view   v(read_header_buffer_.data());
    
    if (v.padding_length() > 0) {
        post_skip_data(v.padding_length(), boost::bind(&multiplexer::post_read_header, this));
    } else {
        post_read_header();
    }
}

// Discard up to 64 KiB of the remaining skip_left_ bytes from the socket;
// handle_skip_data re-invokes this until skip_left_ reaches zero.
//
// NOTE(review): 'dump' is a single function-local static shared by all
// multiplexers — harmless as a write-only sink while all connections run
// on one io_service thread; revisit if that assumption changes.
void multiplexer::post_skip_data()
{
    static boost::array<char, 64 * 1024> dump;
    
    const std::size_t skip_size = std::min(skip_left_, dump.size());
    
    skip_left_ -= skip_size;
    
    asio::async_read(socket_,
                     asio::buffer(dump.data(), skip_size),
                     asio::transfer_all(),
                     boost::bind(&multiplexer::handle_skip_data,
                                 multiplexer_ptr(this),
                                 placeholder::error));
}

// Completion handler for one skip chunk: keep discarding until nothing
// is left, then run the parked continuation (skip_complete_).
void multiplexer::handle_skip_data(multiplexer_ptr m, const boost::system::error_code &err)
{
    if (err) {
        m->on_read_error(err, __LINE__);
        return;
    }
    
    if (m->skip_left_ > 0) {
        m->post_skip_data();     // more bytes to discard
    } else {
        m->skip_complete_();     // done — resume where the skip was requested
    }
}

// Log a read failure (with the source line it was detected at) and fail
// every request currently waiting for a response.
void multiplexer::on_read_error(const boost::system::error_code &err, long line)
{
    logger() <<"read error (line: " << line << "): " << err.message() << '\n';
    notify_error(read_pending_ids_, err);
}

// Log a write failure, fail every request queued for writing, and reset
// the write state machine so a later enqueue can restart it.
void multiplexer::on_write_error(const boost::system::error_code &err, long line)
{
    logger() << "write error (line: " << line << "): " << err.message() << '\n';
    notify_error(write_pending_ids_, err);
    
    write_state_ = write_not_active;
}

// boost::intrusive_ptr hook: one more owner of 'm'.
void intrusive_ptr_add_ref(multiplexer *m) noexcept
{
    ++m->ref_count_;
}

// boost::intrusive_ptr hook: drop an owner; the last one destroys 'm'.
void intrusive_ptr_release(multiplexer *m) noexcept
{
    if (--m->ref_count_ == 0)
        delete m;
}

// Default-construct an unbound request id: no owning multiplexer, id 0,
// and no receive buffer sinks. The callbacks (handler, write_buffer) are
// default-constructed empty by their own constructors.
multiplexer::request_id::request_id() noexcept :
    m(nullptr),
    id(0),
    stdout_buffers(nullptr),
    stderr_buffers(nullptr)
{
}

}
}
