/*      
 *      file:   prepare.cpp 
 *      author: liujianping@snda.com
 *      description:
 *      usage:
 *              prepare -b <bind.ip.for.delivery> -O </path/of/configure> [-d]
 *              -b: bind ip for delivery
 *              -O: configure file path
 *              -d: daemon mode
 * 
 *      configure format:
 *      ----------------------------------------------------------------------
 *              dispatcher=host:port
 *              memcached=host:port
 *              currency=10
 *      
       protocol format (dispatcher as server):
 *      ----------------------------------------------------------------------        

        tcp header struct definition:

 *          magic:   edm_uint8_t;  //! 幻数
            command: edm_uint8_t;  //! 命令编号
            len:     edm_uint32_t; //! json len
            reserve: edm_uint64_t; //! 主要用于任务编号的存储

        ========= global ======================================

        1. [s -> c] request status 
           command: 1
           len: 0
           reserve: 0 

        2. [c -> s] response status 
           command: 2
           len: body len
           reserve: 0 
           prepare body: {"prepare_id": "bind.ip.address.", "status":"ok"}
 *         prepare body: {"prepare_id": "bind.ip.address.", "status":"full"}
           msp body:     {"msp":"qq.com", "block":true, "schedule":12}    
 * 
        3. [c -> s] block msp notification  
           command: 3
           len: body len
           reserve: 0 
           body: {"block":["qq.com"]}

        4. [c -> s] unblock msp notification  
           command: 4
           len: body len
           reserve: 0 
           body: {"unblock":["qq.com"]}

        ========= task ======================================

        11. [s -> c] task delivery request
           command: 11
           len: body len
           reserve: taskid 
           body: {"from":{"name":"", "addr":""}, "to":"", "top":"", "bottom":"", "replace":["",""]}

        12. [c -> s] msp delivery statistics
           command: 12
           len: body len
           reserve: taskid 
           body: {"msp":"163.com", "delivery":[{"aa@sina.com":0}, {"bb@sina.com":1}]}

        13. [c -> s] task template missed at memcached notification
           command: 13
           len: body len
           reserve: taskid 

        14. [s -> c] task delivery pause
           command: 14
           len: body len
           reserve: taskid 

        15. [s -> c] task delivery start
           command: 15
           len: body len
           reserve: taskid 

        16. [s -> c] task delivery delete
           command: 16
           len: body len
           reserve: taskid 
  
 *              
 *      ----------------------------------------------------------------------          
 *      
 *      protocol format (memcache): memcache protocol
 */
#include "prepare.h"
#include "heart_beat_service.h"
#include "daemon.h"
#include "signal_helper.h"
#include "conf_parser.h"
#include "prepare_service.h"
#include "delivery_mimer.h"
#include "delivery_invoker.h"
#include "delivery_cleaner.h"
#include "log.h"
#include <string>
#include <vector>
#include <iostream>

#include <signal.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <signal.h>

//! Process-wide state, filled in by main()'s option parsing.
const char* bind_host = 0;  //! -b: ip address the delivery side binds to
const char* option = 0;     //! -O: path of the configure file
int deamonize = 0;          //! -d: non-zero -> detach and run as a daemon
//! Set to 1 by sig_terminal() (SIGINT/SIGTERM) to break main()'s wait loop.
//! NOTE(review): a flag written from a signal handler should be
//! `volatile sig_atomic_t` — confirm no header declares this `extern int`
//! before changing the type.
int stop_flag = 0;

//! printf-style helper: format the arguments into a bounded buffer and
//! echo the result (plus a newline) to stdout.
//! NOTE: the static buffer makes this non-reentrant and not thread-safe;
//! fine for the single-threaded startup/shutdown paths that use it.
static void msg_info(const char* fmt, ...)
{
    static char msg[1024];
    va_list va;
    va_start(va, fmt);
    //! vsnprintf() always NUL-terminates within the size it is given, so
    //! pass the full buffer; the old `1023` + manual msg[1023]='\0' wasted
    //! the last byte for nothing.
    vsnprintf(msg, sizeof(msg), fmt, va);
    va_end(va);
    cout << msg <<endl;
}

//! printf-style fatal helper: format the arguments, echo to stdout, then
//! terminate the process with `code` via exit() (atexit handlers run,
//! stack objects are NOT unwound).
//! NOTE: shares msg_info's limitation — static buffer, not thread-safe.
static void msg_exit(int code, const char* fmt, ...)
{
    static char msg[1024];
    va_list va;
    va_start(va, fmt);
    //! vsnprintf() NUL-terminates within the given size; use the whole
    //! buffer instead of sizeof-1 plus a redundant manual terminator.
    vsnprintf(msg, sizeof(msg), fmt, va);
    va_end(va);
    cout << msg <<endl;
    exit(code);
}

//! SIGCHLD handler: reap exited delivery children and hand pid/status to
//! the delivery cleaner.
//! Loop until waitpid() reports no more reapable children: POSIX does not
//! queue standard signals, so several children exiting close together can
//! collapse into ONE SIGCHLD — the old single waitpid() call left the
//! extra zombies for main()'s 60s fallback loop to collect.
//! NOTE(review): delivery_cleaner_t::clear() runs in signal context here;
//! confirm it is async-signal-safe (no locks/allocation).
void sig_child(int sig)
{
    int status;
    pid_t pid;
    while((pid = waitpid(-1, &status, WNOHANG)) > 0)
    {
        edm::common::singleton<edm::prepare::delivery_cleaner_t>::Instance().clear(pid, status);
    }
    (void)sig;  //! signal number is not needed; silence unused warning
}

//! SIGINT/SIGTERM handler: request a clean shutdown.
//! Raises the stop flag polled by main()'s wait loop, and logs which
//! signal triggered the shutdown.
void sig_terminal(int sig)
{
    stop_flag = 1;
    msg_info("signal catch >>> sig_terminal callback: %d", sig);
}
 
//! Usage: prepare -b <bind.ip.for.delivery> -O </path/of/configure> [-d]
//! Usage: prepare -b <bind.ip.for.delivery> -O </path/of/configure> [-d]
//!
//! Entry point: parses options, optionally daemonizes, reads the
//! configure file, installs signal handlers, wires up and starts the
//! prepare/cleaner/invoker/mimer services, then waits for SIGINT/SIGTERM
//! while reaping delivery children. Exits non-zero (via msg_exit) on any
//! startup failure.
int main(int argc, char** argv)
{
    if(argc < 5) msg_exit(-1, "usage: prepare -b <bind.ip.for.delivery> -O </path/of/configure> [-d]\n");

    //! options analyse
    //! getopt() returns int and signals exhaustion with -1; the old
    //! `char ch ... != EOF` loop is broken on platforms where char is
    //! unsigned (the -1 never compares equal, so the loop never ends).
    int ch;
    while ((ch = getopt(argc, argv, "b:O:d")) != -1) {
        switch (ch) {
            case 'b':   //! ip address the delivery side binds to
                bind_host = optarg;
                break;
            case 'O':   //! path of the configure file
                option = optarg;
                break;
            case 'd':   //! run as a daemon
                deamonize = 1;
                break;
            case '?':
                msg_exit(-1, "prepare unknown options\n\n"
                             "usage: prepare -b <bind.ip.for.delivery> -O </path/of/configure> [-d]\n");
        }
    }

    //! daemonize (screen logging is disabled once detached)
    singleton<Log>::Instance().enable_print_screen(true);
    singleton<Log>::Instance().enable_print_file(true);
    if(deamonize)
    {
        edm::common::daemon_t daemon;
        daemon.daemonize();
        singleton<Log>::Instance().enable_print_screen(false);
    }

    //! parse configure
    edm::common::conf_parser_t conf_parser('=');
    if(conf_parser.open(option))
        msg_exit(-1, "prepare open conf <%s> failed\n", option);

    int                         int_value;
    std::string                 str_value;
    std::vector<std::string>    vec_value;

    //! log setting
    if(conf_parser.get_int_value("LOG_ENABLE", int_value, 0))
        msg_exit(-1, "Prepare Confiure LOG_ENABLE read failed\n");

    //! The old code skipped this section with `goto prepare_settings;`,
    //! jumping over the initialization of the std::string locals below —
    //! ill-formed C++ (only compiles under -fpermissive). A plain if
    //! block expresses the same thing legally. The stray set_path("")
    //! call that preceded the LOG_PATH read has also been dropped.
    if(int_value)
    {
        if(conf_parser.get_string_value("LOG_PATH", str_value, str_value))
            msg_exit(-1, "Prepare Confiure LOG_PATH read failed\n");
        singleton<Log>::Instance().set_path(str_value.c_str());

        if(conf_parser.get_string_value("LOG_FILE", str_value, str_value))
            msg_exit(-1, "Prepare Confiure LOG_FILE read failed\n");
        singleton<Log>::Instance().set_filename(str_value.c_str());

        //! LOG_LEVEL is cumulative: level N enables severities 1..N
        if(conf_parser.get_int_value("LOG_LEVEL", int_value, 4))
            msg_exit(-1, "Prepare Confiure LOG_LEVEL read failed\n");
        if( int_value >=1 ) singleton<Log>::Instance().enable_log_level(LOG_FLAG(LF_FATAL), true);
        if( int_value >=2 ) singleton<Log>::Instance().enable_log_level(LOG_FLAG(LF_ERROR), true);
        if( int_value >=3 ) singleton<Log>::Instance().enable_log_level(LOG_FLAG(LF_WARN), true);
        if( int_value >=4 ) singleton<Log>::Instance().enable_log_level(LOG_FLAG(LF_INFO), true);
        if( int_value >=5 ) singleton<Log>::Instance().enable_log_level(LOG_FLAG(LF_TRACE), true);
        if( int_value >=6 ) singleton<Log>::Instance().enable_log_level(LOG_FLAG(LF_DEBUG), true);

        if(conf_parser.get_vector_value("LOG_MODULE", vec_value))
            msg_exit(-1, "Prepare Confiure LOG_MODULE read failed\n");
        for(size_t i = 0; i < vec_value.size(); i++)
        {
            singleton<Log>::Instance().enable_log_module(vec_value[i].c_str(), true);
        }
        singleton<Log>::Instance().open();
    }

    //! prepare settings
    std::string udp_host;
    std::string dispatcher_host, dispatcher_port;
    std::string memcached_host, memcached_port;
    std::string maildrop_path, maildrop_conf;
    std::string msp_control;
    int msp_concurrency, maildrop_concurrency;

    //! udp log
    if(conf_parser.get_string_value("UDP_HOST", udp_host, udp_host))
        msg_exit(-1, "Prepare Confiure UDP_HOST read failed\n");

    //! dispatch settings
    if(conf_parser.get_string_value("DISPATCHER_HOST", dispatcher_host, dispatcher_host))
        msg_exit(-1, "Prepare Confiure DISPATCHER_HOST read failed\n");

    if(conf_parser.get_string_value("DISPATCHER_PORT", dispatcher_port, dispatcher_port))
        msg_exit(-1, "Prepare Confiure DISPATCHER_PORT read failed\n");

    //! msp settings
    if(conf_parser.get_int_value("MSP_CONCURRENCY", msp_concurrency, MSP_DEFAULT_CURRENCY))
        msg_exit(-1, "Prepare Confiure MSP_CONCURRENCY read failed\n");

    //! maildrop settings
    if(conf_parser.get_int_value("MAILDROP_CONCURRENCY", maildrop_concurrency, MAILDROP_DEFAULT_CURRENCY))
        msg_exit(-1, "Prepare Confiure MAILDROP_CONCURRENCY read failed\n");
    if(conf_parser.get_string_value("MAILDROP_PATH", maildrop_path, maildrop_path))
        msg_exit(-1, "Prepare Confiure MAILDROP_PATH read failed\n");
    if(conf_parser.get_string_value("MAILDROP_CONF", maildrop_conf, maildrop_conf))
        msg_exit(-1, "Prepare Confiure MAILDROP_CONF read failed\n");

    //! memcache settings
    if(conf_parser.get_string_value("MEMCACHE_HOST", memcached_host, memcached_host))
        msg_exit(-1, "Prepare Confiure MEMCACHE_HOST read failed\n");

    if(conf_parser.get_string_value("MEMCACHE_PORT", memcached_port, memcached_port))
        msg_exit(-1, "Prepare Confiure MEMCACHE_PORT read failed\n");

    if(conf_parser.get_string_value("MSP_CONTROL", msp_control, msp_control))
        msg_exit(-1, "Prepare Confiure MSP_CONTROL read failed\n");

    std::vector<std::string> blocks;
    if(conf_parser.get_vector_value("MSP_BLOCK", blocks))
        msg_exit(-1, "Prepare Confiure MSP_BLOCK read failed\n");

    edm::common::conf_parser_t msp_control_conf(':');
    if(msp_control_conf.open(msp_control.c_str()))
        msg_exit(-1, "msp_control_conf open conf <%s> failed\n", msp_control.c_str());

    //! signal: ignore SIGPIPE (broken delivery sockets), reap children on
    //! SIGCHLD, and request shutdown on SIGINT/SIGTERM.
    edm::common::signal_helper_t::sig_ignore(SIGPIPE);
    edm::common::signal_helper_t::sig_catch(SIGCHLD, sig_child);
    edm::common::signal_helper_t::sig_catch(SIGINT,  sig_terminal);
    edm::common::signal_helper_t::sig_catch(SIGTERM, sig_terminal);

    try
    {
        //! prepare service: talks to the dispatcher over the protocol
        //! described in the file header
        edm::prepare::prepare_service_t prepare_service(bind_host, dispatcher_host, dispatcher_port, msp_control);
        prepare_service.block(blocks);

        //! delivery cleaner: collects finished delivery children
        if(edm::common::singleton<edm::prepare::delivery_cleaner_t>::Instance().start(
                boost::bind(&edm::prepare::prepare_service_t::callback_delivery, &prepare_service, _1),
                boost::bind(&edm::prepare::prepare_service_t::callback_delivery, &prepare_service, _1)))
            msg_exit(-1, "delivery cleaner started failed\n");

        //! delivery invoker: forks/executes maildrop delivery processes
        edm::prepare::delivery_invoker_t delviery_invoker(bind_host, udp_host.c_str(), maildrop_concurrency);
        if(delviery_invoker.start(maildrop_path,
                                  maildrop_conf,
                                  boost::bind(&edm::prepare::delivery_cleaner_t::delivery_clean,
                                              &edm::common::singleton<edm::prepare::delivery_cleaner_t>::Instance(),
                                               _1, _2),
                                  boost::bind(&edm::prepare::prepare_service_t::callback_delivery, &prepare_service, _1)))
            msg_exit(-1, "delivery invoker started failed\n");

        //! delivery mimer: builds mails from templates stored in memcached
        edm::prepare::delivery_mimer_t delivery_mimer(memcached_host, memcached_port);
        if(delivery_mimer.start(bind_host,
                                boost::bind(&edm::prepare::delivery_invoker_t::maildrop, 
                                            &delviery_invoker, _1),
                                boost::bind(&edm::prepare::prepare_service_t::prepare_missed_temp,
                                            &prepare_service, _1),
                                boost::bind(&edm::prepare::prepare_service_t::callback_delivery, &prepare_service, _1)))
            msg_exit(-1, "delivery mimer started failed\n");

        //! prepare service: feed accepted tasks into the mimer
        if(prepare_service.start(boost::bind(&edm::prepare::delivery_mimer_t::cook_delivery,
                                          &delivery_mimer, _1)))
            msg_exit(-1, "prepare service started failed\n");

        /*
        if(edm::common::singleton<edm::prepare::task_timeout_service_t>::Instance().set_callback_function(
                                boost::bind(&edm::prepare::prepare_service_t::task_timeout, 
                                            &prepare_service, _1)))
             msg_exit(-1, "prepare task_timeout_service_t set callback failed\n");
        
        if(edm::common::singleton<edm::prepare::task_timeout_service_t>::Instance().set_timeout(true, 900))
             msg_exit(-1, "prepare task_timeout_service_t set timeout failed\n");
        
        if(edm::common::singleton<edm::prepare::task_timeout_service_t>::Instance().set_max_limit(true, 100))
             msg_exit(-1, "prepare task_timeout_service_t set max limit failed\n");

        if(edm::common::singleton<edm::prepare::task_timeout_service_t>::Instance().start())
             msg_exit(-1, "prepare task_timeout_service_t start failed\n");
        */

        //! wait for shutdown; the periodic waitpid() sweep is a safety
        //! net for children whose SIGCHLD was missed by the handler
        while(!stop_flag)
        {
            sleep(60);
            int status;
            pid_t pid;
            while((pid = waitpid(-1, &status, WNOHANG)) > 0)
            {
                edm::common::singleton<edm::prepare::delivery_cleaner_t>::Instance().clear(pid, status);
            }
        }

        //! orderly shutdown, reverse of startup order
        delivery_mimer.stop();
        delviery_invoker.stop();
        edm::common::singleton<edm::prepare::delivery_cleaner_t>::Instance().stop();
        prepare_service.stop();
    }
    catch(const std::exception& e)   //! catch by const ref, not by value
    {
        msg_exit(-1, "prepare <%s> excption exit:<%s>", bind_host, e.what());
    }

    return 0;
}
