// -*-mode:c++; coding:utf-8-*-

#ifndef _XBASE_SERVICE_HPP_
#define _XBASE_SERVICE_HPP_

#include <queue>
#include <stdint.h>

#include <boost/thread.hpp>
#include <boost/thread/condition_variable.hpp>
#include <boost/asio.hpp>
#include <boost/noncopyable.hpp>
#include <boost/date_time/posix_time/posix_time.hpp>
#include <boost/bind.hpp>
#include <boost/intrusive_ptr.hpp>
#include <boost/pool/pool_alloc.hpp>

#include <xbase_utility.hpp>
#include <constants.hpp>
#include <wrapper.hpp>
#include <xbase_utility.hpp>
#include <logger.hpp>
#include <ref_count.hpp>
#include <protocol.hpp>
#include <config.hpp>

namespace xbase 
{
	// Wire header of the xbase protocol. On the wire the header is exactly
	// SIZE (13) bytes: magic(2) + version(1) + type(2) + sequence_number(4)
	// + data_length(4), all in network byte order.
	struct header 
	{
		enum { SIZE = 13 };
	
		uint16_t m_magic;	// magic number
		uint8_t m_version;	// protocol version
		uint16_t m_type;	// message type
		uint32_t m_sequence_number;	// request/response pairing id
		uint32_t m_data_length;	// body length in bytes (header excluded)

		// Overlay used by load()/save() to (de)serialize the raw buffer;
		// the integer_wrapper types convert to/from network byte order.
		// NOTE(review): the reinterpret_cast below assumes wrapper is
		// packed to exactly SIZE bytes — presumably integer_wrapper stores
		// raw bytes; confirm (e.g. with a static_assert on sizeof(wrapper)
		// where integer_wrapper's definition is visible).
		struct wrapper
		{
			typedef integer_wrapper<uint8_t, kb::network_byteorder> uint8_type;
			typedef integer_wrapper<uint16_t, kb::network_byteorder> uint16_type;
			typedef integer_wrapper<uint32_t, kb::network_byteorder> uint32_type;
			typedef integer_wrapper<uint64_t, kb::network_byteorder> uint64_type;
			typedef string_wrapper<uint32_t, char, kb::network_byteorder> string_type;

			uint16_type m_magic;
			uint8_type m_version;
			uint16_type m_type;
			uint32_type m_sequence_number;
			uint32_type m_data_length;
		};

		// Only the magic is checked; version/type validation is left to
		// the caller.
		bool valid() const {
			return m_magic == XBASE_PROTOCOL_MAGIC;
		}

		std::size_t bytes() const {
			return SIZE;
		}

		// Decode the fixed-size header from `buffer` (must hold >= SIZE
		// bytes). Returns the number of bytes consumed (always SIZE).
		std::size_t load(const boost::asio::const_buffer &buffer) {
			assert(boost::asio::buffer_size(buffer) >= SIZE);

			const wrapper *const wr = reinterpret_cast<const wrapper*>(boost::asio::buffer_cast<const char*>(buffer));
			m_magic = wr->m_magic.get();
			m_version = wr->m_version.get();
			m_type = wr->m_type.get();
			m_sequence_number = wr->m_sequence_number.get();
			m_data_length = wr->m_data_length.get();

			return SIZE;
		}

		// Encode the header into `buffer` (must hold >= SIZE bytes).
		// Returns the number of bytes written (always SIZE).
		std::size_t save(const boost::asio::mutable_buffer &buffer) const {
			assert(boost::asio::buffer_size(buffer) >= SIZE);

			wrapper *const wr = reinterpret_cast<wrapper*>(boost::asio::buffer_cast<char*>(buffer));
			wr->m_magic.set(m_magic);
			wr->m_version.set(m_version);
			wr->m_type.set(m_type);
			wr->m_sequence_number.set(m_sequence_number);
			wr->m_data_length.set(m_data_length);

			return SIZE;
		}
		
		// Human-readable dump for logging: magic and type in hex, the
		// remaining fields in decimal.
		std::string to_string () const {
			std::stringstream out;

			out << "{magic:" << "0x"
			    << std::hex
			    << std::uppercase
			    << m_magic;
			// BUGFIX: std::hex is sticky on the stream — without the
			// std::dec resets below, version, sequence_number and
			// data_length were printed in hex while looking decimal.
			out << std::dec;
			out << "; version:" << (uint32_t)m_version;
			out << "; type:" << "0x"
			    << std::hex
			    << std::uppercase
			    << m_type;
			out << std::dec;
			out << "; sequence_number:" << m_sequence_number
			    << "; data_length:" << m_data_length
			    << "}";

			return out.str();
		}
	};

// Fill every field of a header in one statement: magic and version come
// from the protocol constants, the rest from the macro arguments. The
// do/while(0) wrapper makes the macro usable as a single statement.
#define SET_HEADER(header, type, seq, len) do {			\
		(header).m_magic = XBASE_PROTOCOL_MAGIC;	\
		(header).m_version = XBASE_PROTOCOL_VERSION;	\
		(header).m_type = (type);			\
		(header).m_sequence_number = (seq);		\
		(header).m_data_length = (len);			\
	} while(0)

	// A single inbound message: a fixed-size header buffer plus a body
	// buffer that is allocated once the header has been read and validated.
	// Lifetime is managed via intrusive_ptr (see INTRUSIVE_PTR_HOOKS).
	class request
	{
	public:
		typedef boost::fast_pool_allocator<request> self_allocator_type;
		typedef basic_factory<request> factory_type;
		typedef boost::intrusive_ptr<request> pointer;

	public:
		request()
			: m_body_buffer(0)
			, m_body_capacity(0) {}

		~request() {
			release_body_buffer();
		}

		boost::asio::mutable_buffers_1 get_header_buffer() {
			return boost::asio::buffer(m_header_buffer);
		}

		boost::asio::const_buffers_1 get_header_buffer() const {
			return boost::asio::buffer(m_header_buffer);
		}

		// Body window sized by the (already decoded) header's data_length;
		// only meaningful after a successful check_header().
		boost::asio::mutable_buffers_1 get_body_buffer() {
			return boost::asio::buffer(m_body_buffer, m_header.m_data_length);
		}

		boost::asio::const_buffers_1 get_body_buffer() const {
			return boost::asio::buffer(const_cast<const char*>(m_body_buffer), m_header.m_data_length);
		}

		header &get_header() {
			return m_header;
		}

		// Header has been read: decode it, check the magic, and on success
		// allocate the body buffer for the announced data_length.
		bool check_header() {
			m_header.load(get_header_buffer());
			const bool valid = m_header.valid();
			if(valid)
			{
				allocate_body_buffer();
			}
			return valid;
		}

	protected:
		void release_body_buffer() {
			if(m_body_buffer != 0)
			{
				// BUGFIX: deallocate with the size that was actually
				// allocated. The old code passed m_header.m_data_length,
				// which is wrong once a new header (with a different
				// length) has been loaded into a reused request.
				m_allocator.deallocate(m_body_buffer, m_body_capacity);
				m_body_buffer = 0;
				m_body_capacity = 0;
			}
		}

		void allocate_body_buffer() {
			release_body_buffer();
			m_body_capacity = m_header.m_data_length;
			m_body_buffer = m_allocator.allocate(m_body_capacity);
		}

	private:
		char m_header_buffer[header::SIZE];
		header m_header;
		char *m_body_buffer;
		std::size_t m_body_capacity; // size passed to allocate(); needed for deallocate()

		typedef default_char_allocator buffer_allocator_type;
		buffer_allocator_type m_allocator;

		reference_counter m_reference_counter;
		INTRUSIVE_PTR_HOOKS(request, m_reference_counter, factory_type);
	};

	// An outbound message assembled as a scatter/gather buffer sequence.
	// Element 0 always points at the in-object m_header_buffer; every
	// later element is heap memory owned by this response (allocated via
	// m_allocator) and freed in release_buffers().
	class response
	{	
	public:
		typedef boost::fast_pool_allocator<response> self_allocator_type;
		typedef basic_factory<response> factory_type;
		typedef boost::intrusive_ptr<response> pointer;
		typedef std::vector<boost::asio::const_buffer> buffer_sequence_type;
		typedef default_char_allocator buffer_allocator_type;
	
	public:
		response() {
			m_data_buffers.push_back(boost::asio::buffer(m_header_buffer));
		}

		~response() {
			release_buffers();
		}

		// Free every dynamically allocated buffer and reset the sequence
		// to just the static header buffer.
		void release_buffers() {
			if(m_data_buffers.size() > 1)
			{
				buffer_sequence_type::const_iterator pos = m_data_buffers.begin();
				const buffer_sequence_type::const_iterator end = m_data_buffers.end();
				++pos; // skip first, it's header with static array
				for(; pos != end; ++pos)
				{
					m_allocator.deallocate(const_cast<char*>(boost::asio::buffer_cast<const char*>(*pos)),
							       boost::asio::buffer_size(*pos));
				}
				m_data_buffers.clear();
				m_data_buffers.push_back(boost::asio::buffer(m_header_buffer));
			}
		}

		// Full scatter/gather sequence (header first) for async_write.
		// NOTE(review): call save_header() first so element 0 holds the
		// encoded header — the callers visible here do not do it for you.
		const buffer_sequence_type &buffer() {
			return m_data_buffers;
		}

		header &get_header() {
			return m_header;
		}

		boost::asio::mutable_buffers_1 get_header_buffer() {
			return boost::asio::buffer(m_header_buffer);
		}

		// Splice `other` onto this response. Make sure all of the other
		// packet's data is already in place before calling. The other
		// response's header bytes are copied into freshly allocated memory
		// (its static header buffer cannot be shared safely), while
		// ownership of its dynamic body buffers transfers to this
		// response, which will free them in release_buffers().
		// Returns the number of bytes appended.
		std::size_t append_response(response &other) {
			buffer_sequence_type::const_iterator pos = other.m_data_buffers.begin();
			const buffer_sequence_type::const_iterator end = other.m_data_buffers.end();

			const boost::asio::const_buffer other_header = *pos;
			char *p = m_allocator.allocate(header::SIZE);
			std::memcpy(p,
				    boost::asio::buffer_cast<const char*>(other_header),
				    header::SIZE);
			m_data_buffers.push_back(boost::asio::const_buffer(p, header::SIZE));

			std::size_t bytes = header::SIZE;
			++pos;	// skip static header
			for(; pos != end; ++pos)
			{
				m_data_buffers.push_back(*pos);
				bytes += boost::asio::buffer_size(*pos);
			}
			other.m_data_buffers.clear();
			other.m_data_buffers.push_back(other_header); // restore other's static header buffer

			return bytes;
		}

		// Serialize `datagram` into a new buffer appended to the sequence.
		// get_bytes/writable_bb/BinOArchive come from the protocol headers;
		// presumably get_bytes() returns the exact serialized size — the
		// allocation below depends on it. Returns the bytes added.
		template<typename DataGram>
		std::size_t add_body(/*const*/ DataGram &datagram) {
			const std::size_t bytes = get_bytes(datagram);
			char *p = m_allocator.allocate(bytes);
			const boost::asio::mutable_buffer buffer(p, bytes);
			ByteBuffer out_bb(writable_bb(buffer));
			BinOArchive out(out_bb); out & datagram;
			m_data_buffers.push_back(buffer);
			return bytes;
		}

		// Append an uninitialized buffer of `bytes` bytes (owned by this
		// response) and return it for the caller to fill.
		boost::asio::mutable_buffer add_empty_buffer(std::size_t bytes) {
			char *p = m_allocator.allocate(bytes);
			const boost::asio::mutable_buffer buffer(p, bytes);
			m_data_buffers.push_back(buffer);
			return buffer;
		}

//protected:
		// Encode m_header into the static header buffer (element 0).
		void save_header() {
			m_header.save(boost::asio::buffer(m_header_buffer));
		}

	private:
		header m_header;
		char m_header_buffer[header::SIZE];
		buffer_sequence_type m_data_buffers;
	
		buffer_allocator_type m_allocator;

		reference_counter m_reference_counter;
		INTRUSIVE_PTR_HOOKS(response, m_reference_counter, factory_type);
	};

	class task_runner;

	// One TCP connection. Reads length-prefixed requests (header first,
	// then body), hands them to the task_runner (see handle_request,
	// defined elsewhere), and writes responses back. Lifetime is managed
	// by intrusive_ptr: every pending async operation binds pointer(this),
	// so the session is destroyed only after its last handler completes.
	class session
	{
	public:
		typedef boost::function<void(/*const boost::system::error_code &*/)> on_error_handler_type;
		typedef boost::fast_pool_allocator<session> self_allocator_type;
		
		// Pool-backed construct/destroy used by the intrusive_ptr hooks.
		struct session_factory 
		{
			typedef session::self_allocator_type allocator_type;

			static inline
			session *construct(boost::asio::io_service &ios,
					   task_runner &runner) {
				allocator_type m_allocator;
				session *p = m_allocator.allocate(1);
				new(p) session(ios, runner);
				return p;
			}
		
			static inline
			session *construct(on_error_handler_type on_complete_handler,
					   boost::asio::io_service &ios,
					   task_runner &runner) {
				allocator_type m_allocator;
				session *p = m_allocator.allocate(1);
				new(p) session(on_complete_handler, ios, runner);
				return p;
			}
		
			static inline
			void destroy(session *p) {
				allocator_type m_allocator;
				m_allocator.destroy(p);
				m_allocator.deallocate(p);
			}
		};
		
		typedef session_factory factory_type;
		typedef boost::intrusive_ptr<session> pointer;
	
	public:
		// Server-side session: uses the default (no-op) error handler.
		session(boost::asio::io_service &ios,
			task_runner &runner)
			: m_task_runner(runner)
			, m_socket(ios)
			, m_on_error_handler(boost::bind(&session::handle_error,
							   this)) {}

		// Client-side session: `on_error_handler` runs when the session is
		// destroyed (used e.g. to trigger a reconnect).
		session(on_error_handler_type on_error_handler,
			boost::asio::io_service &ios,
			task_runner &runner)
			: m_task_runner(runner)
			, m_socket(ios)
			, m_on_error_handler(on_error_handler) {}

		~session() {
			LOG(INFO, "session::~session connection closed");

			// NOTE(review): invoked from a destructor — the handler must
			// not throw; confirm for non-default handlers.
			m_on_error_handler(/*m_error*/);
		}

		// Begin the read loop: header, body, dispatch, repeat.
		void start() {
			async_read_request();
		}

		// After stop(), pending async operations fail; their handlers drop
		// the last reference and the destructor runs on_error_handler.
		void stop() {
			boost::system::error_code ignored_ec;
			m_socket.shutdown(boost::asio::ip::tcp::socket::shutdown_both, ignored_ec);
			// BUGFIX: use the error_code overload — close() without one
			// throws boost::system::system_error on failure, which a
			// best-effort stop() should not do (shutdown above already
			// uses the non-throwing form).
			m_socket.close(ignored_ec);
		}

		bool is_open() {
			return m_socket.is_open();
		}

		boost::asio::ip::tcp::socket &socket() {
			return m_socket;
		}

		// Read the fixed-size header; completion goes to
		// handle_request_body, which validates it and reads the body.
		void async_read_request() {
			request::pointer new_req(request::factory_type::construct());
			boost::asio::async_read(m_socket,
						new_req->get_header_buffer(),
						boost::bind(&session::handle_request_body,
							    pointer(this),
							    new_req,
							    boost::asio::placeholders::error,
							    boost::asio::placeholders::bytes_transferred));
		}

		// Write a response; the bound resp pointer keeps it alive until
		// handle_response runs.
		void async_write_response(response::pointer resp) {
			boost::asio::async_write(m_socket,
						 resp->buffer(),
						 boost::bind(&session::handle_response,
							     pointer(this),
							     resp,
							     boost::asio::placeholders::error,
							     boost::asio::placeholders::bytes_transferred));
		}

		// NOTE: the handler must hold the resp pointer itself, so that
		// resp is released only after the write has completed!
		template<typename Handler>
		void async_write_response(response::pointer resp, Handler h) {
			boost::asio::async_write(m_socket,
						 resp->buffer(),
						 h);
		}

		// Synchronous, mutex-serialized write of a whole response.
		// Returns bytes written; failure is reported through `ec`.
		std::size_t write(response::pointer resp,
				  boost::system::error_code &ec) {
			boost::mutex::scoped_lock lock(m_mutex);
			return write_no_lock(resp, ec);
		}

	protected:
		std::size_t write_no_lock(response::pointer resp,
					  boost::system::error_code &ec) {
			return boost::asio::write(m_socket,
						  resp->buffer(),
						  boost::asio::transfer_all(),
						  ec);
		}

		// Header read completed: validate it, then read the body.
		// On any failure the chain simply ends — no new read is posted,
		// so the session winds down once outstanding handlers finish.
		void handle_request_body(request::pointer req,
					 const boost::system::error_code& error,
					 std::size_t bytes_transferred) {
			if(error)	// has error
			{
				LOG(DEBUG, "receive request header failed, ec = "
				    << error << " - " << error.message());
				return;
			}
			//assert(bytes_transferred == header::SIZE);
			if(! req->check_header())
			{
				LOG(ERROR, "session::handle_request_body check header failed: "
				    << req->get_header().to_string() << " - "
				    << dump_string_hex(req->get_header_buffer()));
				//error_code::make_error(error_code::invalid_header);
				return;
			}
		
			// if header checking passed, body buffer will be
			// allocated automatically
			boost::asio::async_read(m_socket,
						req->get_body_buffer(),
						boost::bind(&session::handle_request,
							    pointer(this),
							    req,
							    boost::asio::placeholders::error,
							    boost::asio::placeholders::bytes_transferred));
		}

		// Body read completed — dispatch to the task_runner.
		// Defined outside this header.
		void handle_request(request::pointer req,
				    const boost::system::error_code& error,
				    std::size_t bytes_transferred);

		void handle_response(response::pointer resp,
				     const boost::system::error_code& error,
				     std::size_t bytes_transferred) {
			if(error)
			{
				LOG(ERROR, "session::handle_response " << error.message());
				return;
			}
			// send ok, do nothing
			// resp will be released
		}

		// default on_error_handler
		void handle_error(/*const boost::system::error_code &error*/) {
			//LOG(ERROR, "session::handle_error default error handler");
			// << error.message());
		}

	private:
		task_runner &m_task_runner;
		boost::asio::ip::tcp::socket m_socket;
		
		boost::mutex m_mutex; // serializes the synchronous write() path

		//boost::system::error_code m_error; // error code at failure + its callback (disabled)
		on_error_handler_type m_on_error_handler;

		reference_counter m_reference_counter;
		INTRUSIVE_PTR_HOOKS(session, m_reference_counter, factory_type);
	};

	/// A pool of io_service objects.
	class io_service_pool
		: private boost::noncopyable
	{
	private:
		typedef boost::shared_ptr<boost::asio::io_service> io_service_ptr;
		typedef boost::shared_ptr<boost::asio::io_service::work> work_ptr;

	public:
		/// Construct the io_service pool.
		io_service_pool(const std::size_t pool_size)
			: m_next_io_service(0) {
			if (pool_size == 0)
			{
				throw std::runtime_error("io_service_pool size is 0");
			}

			for(std::size_t i = 0; i < pool_size; ++i)
			{
				io_service_ptr io_service(new boost::asio::io_service);
				work_ptr work(new boost::asio::io_service::work(*io_service));
				m_io_services.push_back(io_service);
				m_work.push_back(work);
			}
		}

		/// Run all io_service objects in the pool.
		void start() {
			for(std::size_t i = 0; i < m_io_services.size(); ++i)
			{
				m_threads.create_thread(boost::bind(&boost::asio::io_service::run, m_io_services[i]));
			}
		}

		/// Stop all io_service objects in the pool.
		void stop() {
			for(std::size_t i = 0; i < m_io_services.size(); ++i)
			{
				m_io_services[i]->stop();
			}
		}

		void join_all() {
			m_threads.join_all();
		}

		/// Get an io_service to use.
		boost::asio::io_service& get_io_service() {
			// Use a round-robin scheme to choose the next io_service to use.
			boost::asio::io_service& io_service = *m_io_services[m_next_io_service];
			++ m_next_io_service;
			if(m_next_io_service == m_io_services.size())
				m_next_io_service = 0;
			return io_service;
		}

	private:
		/// The pool of io_services.
		std::vector<io_service_ptr> m_io_services;

		/// The work that keeps the io_services running.
		std::vector<work_ptr> m_work;

		/// The next io_service to use for a connection.
		std::size_t m_next_io_service;

		boost::thread_group m_threads;
	};

	// Unit of work handed to the task_runner: one request arriving on one
	// session, plus the response built while processing it.
	class task
	{
	public:
		// Default-constructed tasks are empty placeholders (e.g. the
		// target of a queue pop).
		task() {}

		task(session::pointer s,
		     request::pointer req)
			: m_session(s)
			, m_request(req) {}
	
		// Process m_request; implementation lives outside this header.
		void run();

	protected:
		// void do_scan_open();
		// ...

	private:
		session::pointer m_session;
		request::pointer m_request;
		response::pointer m_response; // presumably filled by run() — confirm in its definition
	};

	// Fixed-size worker-thread pool draining a queue of tasks.
	// runner() and add_task() are defined outside this header; presumably
	// runner() waits on m_cond for work or for m_stop — confirm there.
	struct task_runner
		: private boost::noncopyable
	{
		std::queue<task> m_queue;	// pending tasks, guarded by m_mutex
		bool m_stop;			// set by stop(); read by the workers
		std::size_t m_counter;		// NOTE(review): usage not visible here — see runner()/add_task()
		boost::mutex m_mutex;
		boost::condition_variable m_cond;

		const std::size_t m_thread_number;
		boost::thread_group m_threads;

		task_runner(const std::size_t n_thread)
			: m_stop(false)
			, m_counter(0)
			, m_thread_number(n_thread) {}

		// Spawn the worker threads, each running runner().
		void start() {
			for(std::size_t i = 0; i < m_thread_number; ++i)
			{
				m_threads.create_thread(boost::bind(&task_runner::runner, this));
			}
		}

		// Request shutdown and wake every worker.
		void stop() {
			{
				boost::mutex::scoped_lock lock(m_mutex);
				m_stop = true;
			}
			// notify only after the lock has been released
			m_cond.notify_all();
		}

		void add_task(task t);

		void join_all() {
			m_threads.join_all();
		}

		// Worker loop; defined outside this header.
		void runner();
	};

	// Uniform accessor: extract a usable io_service from whatever the
	// provider is. Specialized below for io_service_pool (round-robin
	// pick) and for a bare io_service (passed through unchanged).
	template<typename io_service_provider>
	inline
	boost::asio::io_service &get_io_service(io_service_provider &provider);

	// Pool provider: delegate to its round-robin selector.
	template<>
	inline
	boost::asio::io_service &get_io_service(io_service_pool &provider) {
		return provider.get_io_service();
	}

	// A bare io_service is its own provider.
	template<>
	inline
	boost::asio::io_service &get_io_service(boost::asio::io_service &provider) {
		return provider;
	}

	template<typename io_service_provider>
	class service
	{
	public:
		service(const std::string &ip,
			const std::string &port,
			io_service_provider &provider,
			task_runner &runner)
			: m_task_runner(runner)
			, m_io_service_provider(provider)
			, m_acceptor(get_io_service(m_io_service_provider))
			, m_ip(ip)
			, m_port(port) {}

		void start() {
			boost::asio::ip::tcp::resolver resolver(m_acceptor.get_io_service());
			boost::asio::ip::tcp::resolver::query query(m_ip, m_port);
			boost::asio::ip::tcp::endpoint endpoint = *resolver.resolve(query);
			m_acceptor.open(endpoint.protocol());
			m_acceptor.set_option(boost::asio::ip::tcp::acceptor::reuse_address(true));
			m_acceptor.bind(endpoint);
			m_acceptor.listen();

			async_accept();
		}

		void stop() {
			m_acceptor.close();
		}

	protected:
		void async_accept() {
			session::pointer new_session(session::factory_type::construct(get_io_service(m_io_service_provider),
										      m_task_runner));
			m_acceptor.async_accept(new_session->socket(),
						boost::bind(&service::handle_accept,
							    this,
							    new_session,
							    boost::asio::placeholders::error));
		}
	
		void handle_accept(session::pointer new_session,
				   const boost::system::error_code& error) {
			if(error)
			{
				LOG(ERROR, "service::handle_accept " << error.message());
				return;
			}

			LOG_ENV(boost::system::error_code ec_ignore);
			LOG(INFO, "service::handle_accept new session accepted "
			    << new_session->socket().remote_endpoint(ec_ignore) << " --- "
			    << new_session->socket().local_endpoint(ec_ignore));
			
			new_session->start();
			async_accept();
		}

	private:
		task_runner &m_task_runner;
		io_service_provider &m_io_service_provider;
		boost::asio::ip::tcp::acceptor m_acceptor;
		const std::string m_ip;
		const std::string m_port;
	};

	// Holds the (single) session connected to the master and serializes
	// writes to it. The session pointer is guarded by m_mutex and is
	// cleared whenever the connection is found closed or a write fails,
	// so a fresh session can be registered on reconnect.
	class master_session
	{
	public:
		// Adopt a freshly connected session and start its read loop.
		void register_session(session::pointer s) {
			LOG(TRACE, "master_session::register_session connected to master");

			boost::mutex::scoped_lock lock(m_mutex);
			m_session = s;
			// // register_heartbeat(s);
			// // the timer must share the session's io_service
			// m_heartbeat_timer.reset(new boost::asio::deadline_timer(s->socket().get_io_service(),
			// 							boost::posix_time::seconds(3))); // delay 3s so the heartbeat does not go out before the report
			// m_heartbeat_timer->async_wait(boost::bind(&master_session::heartbeat,
			// 					 this,
			// 					 s,
			// 					 boost::asio::placeholders::error));
			s->start();
		}

		// void async_heartbeat(response::pointer msg) {
		// 	s->async_write_response(msg,
		// 				boost::bind(&master_session::handle_heartbeat,
		// 					    this,
		// 					    s,
		// 					    msg, // must keep this pointer alive
		// 					    boost::asio::placeholders::error));
		// }

		// Send a heartbeat message; same semantics as write().
		int heartbeat(response::pointer msg) {
			return write(msg);
		}

		// Synchronously write msg to the registered session.
		// Returns 0 on success, -1 when no usable session exists or the
		// write fails (the session is cleared in the failure cases).
		// NOTE(review): m_mutex is held across the blocking network
		// write — confirm that is acceptable for the callers.
		int write(response::pointer msg) {
			boost::mutex::scoped_lock lock(m_mutex);
			if(! m_session)
			{
				LOG(WARN, "session not exists");
				return -1;
			}
			else if(! m_session->is_open())
			{
				LOG(WARN, "session is closed, reset it");
				clear_session();
				return -1;
			}
			else
			{
				boost::system::error_code ec;
				m_session->write(msg, ec);
				if(ec)
				{
					LOG(ERROR, "write message failed, msg = "
					    << msg->get_header().to_string() << ", ec = "
					    << ec.message());
					clear_session();
					return -1;
				}
			}
			return 0;
		}

		// // keep polling whether the session is open; once it is closed,
		// // call clear_session so a fresh session can reconnect
		// void clear_session_if_closed() {
		// 	boost::mutex::scoped_lock lock(m_mutex);
		// 	if(! m_session) // already gone
		// 	{
		// 		LOG(WARN, "master_session::clear_session_if_closed session not exists");
		// 		return;
		// 	}
		// 	if(! m_session->is_open())
		// 	{
		// 		LOG(WARN, "master_session::clear_session_if_closed session is closed, reset it");
		// 		clear_session();
		// 	}
		// }

		// void async_write(response::pointer resp) {
		// 	boost::mutex::scoped_lock lock(m_mutex);
		// 	if(! m_session)
		// 	{
		// 		LOG(FATAL, "master_session::async_write no active session");
		// 		return;
		// 	}
		// 	m_session->async_write_response(resp,
		// 					boost::bind(&master_session::handle_write,
		// 						    this,
		// 						    resp, // must keep this pointer alive
		// 						    boost::asio::placeholders::error));
		// }

		// // NOTE: the handler must hold the resp pointer, so resp is
		// // released only after the write completes!
		// template<typename Handler>
		// void async_write(response::pointer resp, Handler h) {
		// 	boost::mutex::scoped_lock lock(m_mutex);
		// 	if(! m_session)
		// 	{
		// 		LOG(FATAL, "master_session::async_write no active session");
		// 		return;
		// 	}
		// 	m_session->async_write_response(resp, h);
		// }

	protected:
		// // invoked as a callback from elsewhere, not from inside this
		// // class, so locking is required
		// void heartbeat(session::pointer s,
		// 	       const boost::system::error_code &error) {
		// 	if(!error)
		// 	{
		// 		response::pointer msg(get_heartbeat_message());
		// 		s->async_write_response(msg,
		// 					boost::bind(&master_session::handle_heartbeat,
		// 						    this,
		// 						    s,
		// 						    msg, // must keep this pointer alive
		// 						    boost::asio::placeholders::error));
		// 	}
		// 	else
		// 	{
		// 		LOG(ERROR, "master_session::heartbeat " << error.message());
		// 		clear_session(); // TODO: is closing the connection on this error appropriate?
		// 	}
		// }

		// // invoked as a callback from elsewhere, not from inside this
		// // class, so locking is required
		// void handle_heartbeat(session::pointer s,
		// 		      response::pointer msg,
		// 		      const boost::system::error_code &error);

		// void handle_write(response::pointer resp, // keeps the response alive until the write finishes
		// 		  const boost::system::error_code &error) {
		// 	(void)resp;
		// 	if(error)
		// 	{
		// 		LOG(WARN, "master_session::handle_write " << error.message());
		// 	return;
		// 	}
		// }

		// response::pointer get_heartbeat_message();

		// Drop our reference; callers hold m_mutex already.
		void clear_session() {
			m_session.reset(0);
			//m_heartbeat_timer.reset();
		}

	private:
		session::pointer m_session;
		//boost::shared_ptr<boost::asio::deadline_timer> m_heartbeat_timer;
		boost::mutex m_mutex;
	};

	// Glue functions defined outside this header; used by client below.
	// send_report_helper presumably sets *report_flag once the report has
	// been delivered — confirm in its definition.
	void send_report_helper(session::pointer new_session,
				bool *report_flag);
	void register_session_helper(session::pointer new_session);

	// Outbound TCP connector with automatic reconnect. Each session is
	// built with an on-error handler that re-arms the connect timer when
	// the session dies. retry_wait_seconds == 0 disables retrying; the
	// retry wait shrinks by one second per attempt down to
	// MIN_RETRY_WAIT_SECONDS and is restored after a successful connect.
	template<typename io_service_provider>
	class client
	{
		enum { MIN_RETRY_WAIT_SECONDS = 3 };
	public:
		client(const std::string &ip,
		       const std::string &port,
		       io_service_provider &provider,
		       task_runner &runner,
		       std::size_t retry_wait_seconds = 0)
			: m_task_runner(runner)
			, m_io_service_provider(provider)
			, m_ip(ip)
			, m_port(port)
			, m_timer(get_io_service(provider))
			, m_stop(false)
			, m_retry_wait_seconds(retry_wait_seconds)
			, m_new_retry_wait_seconds((std::max<std::size_t>)(retry_wait_seconds,
									 MIN_RETRY_WAIT_SECONDS))
			, m_report_is_sent(false) {}

		void start() {
			async_connect();
		}

		// Cancel the retry timer; any in-flight connect still completes.
		void stop() {
			m_stop = true;
			m_timer.cancel();
		}

	protected:
		void async_connect() {
			// arrange for the reconnect callback to run when this session is torn down
			session::pointer new_session(session::factory_type::construct(boost::bind(&client::async_wait_and_connect, this),
										      get_io_service(m_io_service_provider),
										      m_task_runner));
			boost::asio::ip::tcp::socket &new_socket = new_session->socket();

			boost::asio::ip::tcp::resolver resolver(new_socket.get_io_service());
			boost::asio::ip::tcp::resolver::query query(m_ip, m_port);
			boost::asio::ip::tcp::endpoint endpoint = *resolver.resolve(query);

			new_socket.async_connect(endpoint,
						 boost::bind(&client<io_service_provider>::handle_connect,
							     this,
							     new_session,
							     boost::asio::placeholders::error));
		}

		// Reconnect callback: schedule the next connect attempt unless
		// stopped or retrying is disabled (m_retry_wait_seconds == 0).
		void async_wait_and_connect() {
			LOG(INFO, "client::async_wait_and_connect");
			if(! m_stop)
			{
				if(m_retry_wait_seconds != 0)
				{
					m_timer.expires_from_now(boost::posix_time::seconds(m_new_retry_wait_seconds));
					// successive retries wait one second less, floored at MIN_RETRY_WAIT_SECONDS
					if(m_new_retry_wait_seconds > MIN_RETRY_WAIT_SECONDS)
						--m_new_retry_wait_seconds;
					m_timer.async_wait(boost::bind(&client<io_service_provider>::async_connect,
								       this));
				}
			}
		}
		
		void handle_connect(session::pointer new_session,
				    const boost::system::error_code& error) {
			if(error)
			{
				LOG(ERROR, "client::handle_connect " << error.message() << ", retrying connect");
				// new_session's destructor invokes the reconnect callback
				return;
			}

			// the session must be registered first!
			register_session_helper(new_session);

			m_new_retry_wait_seconds = (std::max<std::size_t>)(m_retry_wait_seconds,
									   MIN_RETRY_WAIT_SECONDS); // success, restore

			if(! m_report_is_sent)
			{
				send_report_helper(new_session, &m_report_is_sent);
			}
		}

		// void handle_report(const boost::system::error_code &error) {
		// 	if(! error)
		// 	{
		// 		m_report_is_sent = true;
		// 	}
		// 	else
		// 	{
		// 		LOG(ERROR, "client::handle_report " << error.message());
		// 	}
		// }

	private:
		task_runner &m_task_runner;
		io_service_provider &m_io_service_provider;
		const std::string m_ip;
		const std::string m_port;
		boost::asio::deadline_timer m_timer;
		bool m_stop;
		const std::size_t m_retry_wait_seconds; // == 0, no retry
		std::size_t m_new_retry_wait_seconds;	// wait before the next retry (see async_wait_and_connect)
		bool m_report_is_sent;	// NOTE(review): written via pointer from send_report_helper — confirm thread-safety there
	};

	// Top-level wiring: one task_runner (worker pool), one io_service
	// pool, a listening service and an outbound client, all sharing the
	// same pools.
	class application
	{
	public:
		application(std::size_t thread_number,
			    std::size_t io_service_number,
			    const std::string &server_ip,
			    const std::string &server_port,
			    const std::string &client_ip,
			    const std::string &client_port,
			    std::size_t retry_wait_seconds = 0)
			: m_task_runner(thread_number)
			, m_io_service_pool(io_service_number)
			, m_server(server_ip,
				   server_port,
				   m_io_service_pool,
				   m_task_runner)
			, m_client(client_ip,
				   client_port,
				   m_io_service_pool,
				   m_task_runner,
				   retry_wait_seconds) {}

		// ~application() {
		// 	stop();
		// 	join_all();
		// }

		// Bring everything up: workers first, then the io threads, then
		// the acceptor and the outbound client.
		void start() {
			m_task_runner.start();
			m_io_service_pool.start();
			m_server.start();
			m_client.start();
		}

		// NOTE(review): the io_service_pool is stopped before the server
		// acceptor and client are closed — confirm this ordering is
		// intentional (closing the acceptor first would avoid accepting
		// during shutdown).
		void stop() {
			m_task_runner.stop();
			m_io_service_pool.stop();
			m_server.stop();
			m_client.stop();
		}

		// Block until all worker and io threads have exited.
		void join_all() {
			m_task_runner.join_all();
			m_io_service_pool.join_all();
		}

	private:
		task_runner m_task_runner;
		io_service_pool m_io_service_pool;
		service<io_service_pool> m_server;
		client<io_service_pool> m_client;
	};
}

#endif	// _XBASE_SERVICE_HPP_
