#!/usr/bin/env python
# -*- coding: utf-8 -*-

import os
import sys
import time
import signal
from tornado import stack_context, ioloop
from tornado.process import PipeIOStream

from tornado import gen
import tornado.process
import subprocess

import functools
import multiprocessing
import datetime

from pysigset import suspended_signals
from pysigset import SIG_SETMASK, sigsuspend, sigdelset, sigprocmask, SIGSET
from ConfigParser import ConfigParser

from daemonize import Daemon

from dm_py_util import INIT_LOG, ERROR_LOG, DEBUG_LOG, INFO_LOG, WARN_LOG


class Worker(object):
    """Base class for one forked child process managed by Master.

    The object is created in the master process; _start() forks, and the
    child runs __process(): it listens for control commands on the pipe,
    optionally attaches the shared listening sockets to a server, calls
    the subclass hooks and then blocks inside the tornado IOLoop.

    Subclass hooks: init(), start(), uninit().
    """

    def __init__(self, pip, config, sockets, server, block_threshold=None):
        # pip: the child end of a multiprocessing.Pipe; the master sends
        # single-character commands over it ('q' = quit, 's' = stop).
        self.pip = pip
        # Seconds the child IOLoop may block before logging a stack trace
        # (passed to set_blocking_log_threshold below).
        self.block_threshold = block_threshold or 1
        set_nonblocking(self.pip)

        self.config = config

        self.proc = multiprocessing.Process(target=self.__process)
        self.sockets = sockets
        self.server = server
        # An IOLoop created before fork() would be shared between master
        # and children, so refuse to continue if one already exists.
        if ioloop.IOLoop.initialized():
            DEBUG_LOG('_process ioloop', ioloop.IOLoop.current())
            raise RuntimeError("Cannot run in multiple processes: IOLoop instance "
                               "has already been initialized. You cannot call "
                               "IOLoop.instance() before calling start_processes()")

    def __communicate_process(self):
        """Child side: watch the control pipe for master commands."""
        def pipe_recv(fd, event):
            # fd is the Connection registered below; recv() yields the
            # command character sent by the master.
            q = fd.recv()
            DEBUG_LOG('pipe recv ', q, fd, fd.fileno(), self.pid())
            if q == 'q':
                # quit: tear down and leave the child process
                self._uninit()
                sys.exit(0)
            elif q == 's':
                # stop serving, but keep the process alive
                self.uninit()

        # NOTE(review): self.pip is a multiprocessing.Connection, not a raw
        # fd -- assumes this tornado version accepts file-like objects in
        # add_handler; confirm against the deployed tornado version.
        ioloop.IOLoop.current().add_handler(self.pip, pipe_recv, ioloop.IOLoop.READ)

    def init_server(self, config):
        # Attach the pre-bound listening sockets (inherited from the
        # master through fork) to the server, if both were provided.
        if self.sockets and self.server:
            self.server.add_sockets(self.sockets)

    def __process(self):
        """Entry point of the child process (multiprocessing target)."""
        self.__communicate_process()
        self.init_server(self.config)

        self.init(self.config)
        self.start()
        ioloop.IOLoop.current().set_blocking_log_threshold(self.block_threshold)
        ioloop.IOLoop.current().start()  # blocks until _uninit() stops it

    def _uninit(self):
        """Tear down: subclass cleanup, stop serving, stop the IOLoop."""
        self.uninit()
        if self.server:
            self.server.stop()
        ioloop.IOLoop.current().stop()
        DEBUG_LOG('uninit........')

    def _start(self):
        # Fork the child; __process() runs on the other side.
        self.proc.start()

    def init(self, config):
        # Hook: one-time child initialization, before the loop starts.
        pass

    def start(self):
        # Hook: schedule the child's periodic/delayed work.
        pass

    def uninit(self):
        # Hook: child cleanup on stop/quit.
        pass

    def is_running(self):
        # Master-side query: True while the child process is alive.
        return self.proc.is_alive()

    def join(self):
        # Master-side: reap the child process.
        self.proc.join()

    def pid(self):
        # Pid of the forked child (None before _start()).
        return self.proc.pid


class Master(object):
    """Pre-forking process supervisor.

    Forks ``worker_num`` instances of the configured Worker class,
    supervises them through POSIX signals and a per-worker
    ``multiprocessing.Pipe``, and restarts children that die.

    Signal protocol (delivered to the master process):
      SIGCHLD/SIGCLD -- a child exited: reap and restart it
      SIGQUIT        -- graceful shutdown (ask workers to quit, then wait)
      SIGTERM        -- shutdown, force-killing workers still alive after
                        ``force_kill_timeout`` seconds
      SIGHUP         -- ask all workers to quit; SIGCHLD then restarts
                        them (a reload)
      SIGUSR2        -- ask all workers to stop serving
    """

    def __init__(self, worker=Worker, main_config=''):
        """Parse *main_config* (INI format) and install signal handlers.

        :param worker: Worker subclass instantiated for every child.
        :param main_config: path of the INI configuration file; missing
            options fall back to the defaults set in the ``finally``
            clause below.
        """
        # Flags flipped by the signal handlers, consumed by serve_forever().
        self._has_child_down = False
        self._go_to_quit = False
        self._go_to_term = False
        self._go_to_reload_workers = False
        self._go_to_stop_workers = False
        self._send_all_quit = False
        self._worker = worker
        #master config file
        # SIGCLD is the Linux-only alias of SIGCHLD.
        signal.signal(signal.SIGCLD, self._sig_child_handler)
        signal.signal(signal.SIGQUIT, self._sig_quit_handler)
        signal.signal(signal.SIGTERM, self._sig_term_handler)
        signal.signal(signal.SIGHUP, self._sig_hup_handler)
        signal.signal(signal.SIGUSR1, self._sig_usr1_handler)
        signal.signal(signal.SIGUSR2, self._sig_usr2_handler)

        self._master_config = ''
        self._worker_config = ''
        self._worker_num = 0
        self._force_kill_timeout = 0
        self._pid_file = ''
        self._log_config = ''
        self._error_log = ''
        self._listen_port = 0
        self._sockets = None
        self._server = None
        self.config = main_config

        self.access_log_file = ''
        self.app_log_file = ''
        self.gen_log_file = ''

        self._block_threshold = None

        config_parser = ConfigParser()
        config_parser.read(main_config)
        try:
            self.access_log_file = config_parser.get('Tornado', 'access_log')
            self.app_log_file = config_parser.get('Tornado', 'app_log')
            self.gen_log_file = config_parser.get('Tornado', 'gen_log')
        except Exception as e:
            pass  # the [Tornado] section is optional
        # Each ``has_option(...) and get(...)`` expression evaluates to
        # False when the option is absent; the finally block then
        # substitutes the hard defaults.
        try:
            self._worker_num = config_parser.has_option('Server', 'worker_num') and \
                config_parser.getint('Server', 'worker_num')
            self._force_kill_timeout = config_parser.has_option('Server', 'force_kill_timeout') and \
                config_parser.getint('Server', 'force_kill_timeout')
            self._pid_file = config_parser.has_option('Server', 'pid_file') and \
                config_parser.get('Server', 'pid_file')
            self._error_log =  config_parser.has_option('Server', 'error_log') and \
                config_parser.get('Server', 'error_log')
            self._log_config = config_parser.has_option('Server', 'log_config') and \
                config_parser.get('Server', 'log_config')
            self._master_config = config_parser.has_option('Server', 'master_config') and \
                config_parser.get('Server', 'master_config')
            self._worker_config = config_parser.has_option('Server', 'worker_config') and \
                config_parser.get('Server', 'worker_config')
            self._listen_port = config_parser.has_option('Server', 'listen_port') and \
                config_parser.get('Server', 'listen_port')
            self._block_threshold = config_parser.has_option('Server', 'block_threshold') and \
                int(config_parser.get('Server', 'block_threshold'))
        except Exception:
            # bad values fall back to defaults below; a bare except here
            # would also have swallowed KeyboardInterrupt
            pass
        finally:
            self._worker_num = self._worker_num or 1
            self._force_kill_timeout = self._force_kill_timeout or 3
            self._log_config = self._log_config or ''
            self._master_config = self._master_config or ''
            self._worker_config = self._worker_config or ''

        #on quit, force to quit timeout
        self._workers = []  # live Worker objects, index-aligned with ...
        self._pips = []     # ... the master-side pipe ends
        self._daemon = Daemon(stderr=self._error_log, pidfile=self._pid_file)

    def _sig_child_handler(self, signum, frame):
        # SIGCHLD: a worker died; serve_forever() will restart it.
        self._has_child_down = True

    def _sig_quit_handler(self, signum, frame):
        # SIGQUIT: graceful quit.
        self._go_to_quit = True

    def _sig_term_handler(self, signum, frame):
        # SIGTERM: quit, force-killing stragglers.
        self._go_to_term = True

    def _sig_hup_handler(self, signum, frame):
        # SIGHUP: reload (quit + restart) all workers.
        self._go_to_reload_workers = True

    def _sig_usr1_handler(self, signum, frame):
        # SIGUSR1: reserved, intentionally ignored.
        pass

    def _sig_usr2_handler(self, signum, frame):
        # SIGUSR2: tell workers to stop serving.
        self._go_to_stop_workers = True

    def bind_sockets(self):
        """Bind the listening sockets in the master so that every forked
        worker inherits and shares them."""
        if self._listen_port:
            from tornado import netutil
            self._sockets = netutil.bind_sockets(self._listen_port)
            self._server = self.init_server()

    def init_server(self):
        # Hook: subclasses return the (not-yet-started) server object.
        pass

    def log_init(self):
        """Attach file handlers to tornado's app/gen/access loggers and
        initialize the project logger from ``log_config``."""
        import logging
        import tornado.log
        DATEFMT = '%Y/%m/%d %H:%M:%S'

        if self.app_log_file:
            app_log_handler = logging.FileHandler(self.app_log_file)
            app_log_handler.setFormatter(
                tornado.log.LogFormatter(datefmt=DATEFMT))
            tornado.log.app_log.addHandler(app_log_handler)
            tornado.log.enable_pretty_logging(logger=tornado.log.app_log)
        if self.gen_log_file:
            gen_log_handler = logging.FileHandler(self.gen_log_file)
            gen_log_handler.setFormatter(
                tornado.log.LogFormatter(datefmt=DATEFMT))
            tornado.log.gen_log.addHandler(gen_log_handler)
            tornado.log.enable_pretty_logging(logger=tornado.log.gen_log)
        if self.access_log_file:
            access_log_handler = logging.FileHandler(self.access_log_file)
            access_log_handler.setFormatter(
                tornado.log.LogFormatter(datefmt=DATEFMT))
            tornado.log.access_log.addHandler(access_log_handler)
            tornado.log.enable_pretty_logging(logger=tornado.log.access_log)

        try:
            INIT_LOG(self._log_config)
        except Exception as e:
            print >>sys.stderr, 'init log error %s\n' % self._log_config, e
            raise

    def init_master(self, config):
        # Hook: subclasses run one-time master setup before forking.
        pass

    def serve_forever(self):
        """Daemonize, fork the workers, then loop forever dispatching on
        the flags set by the signal handlers.  Returns only on quit/term.
        """
        self.log_init()
        try:
            if self._listen_port:
                self.bind_sockets()
        except:
            print >>sys.stderr, 'master init error %s\n' % self._master_config
            raise

        self._daemon.run()
        self.init_master(self.config)
        old_sig_set = SIGSET()
        sigprocmask(SIG_SETMASK, 0, old_sig_set)
        sigdelset(old_sig_set, signal.SIGCLD)
        sigdelset(old_sig_set, signal.SIGQUIT)
        # Block SIGCLD/SIGQUIT while we work; the mask is restored when
        # the with-block exits.
        with suspended_signals(signal.SIGCLD, signal.SIGQUIT):
            self._create_workers()
            while 1:
                try:
                    # Atomically unblock and wait for a signal; the old
                    # mask is reinstated when sigsuspend returns.
                    sigsuspend(old_sig_set)
                except:
                    # sigsuspend always "fails" with EINTR; just loop and
                    # let the handler-set flags drive the dispatch below.
                    continue
                else:
                    if (self._go_to_term or self._go_to_quit) and not self._send_all_quit:
                        self._order_all_quit()
                        self._send_all_quit = True
                        if self._go_to_quit:
                            continue
                        else:
                            time.sleep(1)

                    if self._go_to_quit or self._go_to_term:
                        self._process_quit_signal(self._force_kill_timeout)
                        if self._go_to_term:
                            break
                        else:
                            # quit mode: keep looping until every worker
                            # has exited on its own
                            print >>sys.stderr, 'self.workers num %d' % len(
                                self._workers)
                            if not len(self._workers):
                                break
                    elif self._go_to_reload_workers:
                        # ask workers to quit; SIGCHLD triggers restart
                        self._order_all_quit()
                        self._go_to_reload_workers = False
                    elif self._go_to_stop_workers:
                        self._order_all_stop()
                    elif self._has_child_down:
                        self._restart_works()
                    else:
                        continue
        self._daemon.exit()

    def _order_all_quit(self):
        """Send the quit command ('q') to every worker's control pipe."""
        for pip in self._pips:
            DEBUG_LOG('order all quit send q to quit')
            pip.send('q')

    def _order_all_stop(self):
        """Send the stop command ('s') to every worker's control pipe."""
        for pip in self._pips:
            DEBUG_LOG('order all stop send s to quit')
            pip.send('s')

    def _create_workers(self):
        """Fork ``worker_num`` workers, each with its own control pipe."""
        for i in xrange(self._worker_num):
            pip0, pip1 = multiprocessing.Pipe()
            # master keeps pip0; the child listens on pip1
            worker = self._worker(pip1, self._worker_config,
                                  self._sockets, self._server, self._block_threshold)
            self._workers.append(worker)
            self._pips.append(pip0)
            worker._start()

    def _process_quit_signal(self, timeout):
        """Reap exited workers, waiting up to the configured number of
        seconds, then force-kill stragglers in SIGTERM mode.

        NOTE: the ``timeout`` parameter is effectively unused; the wait
        time is read from self._force_kill_timeout (the only caller
        passes exactly that value).
        """
        wait_to_quit_time = (
            1 if
            self._go_to_quit else
            self._force_kill_timeout)
        for i in xrange(wait_to_quit_time):
            # Iterate over a copy: removing from the list being iterated
            # would silently skip the element following each removal.
            for worker in self._workers[:]:
                if not worker.is_running():
                    worker.join()
                    self._workers.remove(worker)
            if len(self._workers) and self._go_to_term:
                time.sleep(1)
            else:
                break
        #force kill
        if self._go_to_term:
            if len(self._workers):
                for worker in self._workers:
                    if worker.is_running():
                        os.kill(worker.pid(), 9)  # SIGKILL stragglers
                time.sleep(1)
                for worker in self._workers:
                    worker.join()

    def _restart_works(self):
        """Replace every dead worker (in place, keeping the pipe lists
        index-aligned) with a freshly forked one."""
        for i in xrange(len(self._workers)):
            if not self._workers[i].is_running():
                self._workers[i].join()
                pip0, pip1 = multiprocessing.Pipe()
                worker = self._worker(pip1, self._worker_config,
                                      self._sockets, self._server, self._block_threshold)
                self._workers[i] = worker
                self._pips[i] = pip0
                worker._start()
        self._has_child_down = False


def set_timeout(callback, deadline, *args, **kwargs):
    """Run *callback* once, *deadline* milliseconds from now, on the
    current IOLoop.  Extra positional/keyword arguments are forwarded to
    the callback.  Returns the timeout handle (for remove_timeout).
    """
    delay = datetime.timedelta(milliseconds=deadline)
    wrapped = stack_context.wrap(callback)
    return ioloop.IOLoop.current().add_timeout(delay, wrapped, *args, **kwargs)


def set_interval(callback, interval, *args, **kwargs):
    """Run *callback* every *interval* milliseconds on the current
    IOLoop.  Extra arguments are bound to the callback via
    functools.partial.  Returns the started PeriodicCallback; call its
    .stop() to cancel.
    """
    bound = functools.partial(stack_context.wrap(callback), *args, **kwargs)
    timer = ioloop.PeriodicCallback(bound, interval)
    timer.start()
    return timer


def set_nonblocking(fd):
    """Switch *fd* (a file descriptor, or any object with a fileno())
    into non-blocking mode by OR-ing O_NONBLOCK into its status flags.
    """
    import fcntl
    fcntl.fcntl(fd, fcntl.F_SETFL,
                fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK)


@gen.coroutine
def call_subprocess(*args, **kwargs):
    """
    Wrapper around subprocess call using Tornado's Subprocess class.

    Extra keyword arguments (popped before forwarding the rest to
    tornado.process.Subprocess):
      stdin_data  -- bytes written to the child's stdin, if truthy
      stdin_async -- write stdin through a tornado STREAM pipe instead
                     of a blocking subprocess.PIPE

    Returns (via gen.Return) the tuple (stdout_data, stderr_data).
    """
    stdin_data = kwargs.pop('stdin_data', None)
    stdin_async = kwargs.pop('stdin_async', False)

    stdin = tornado.process.Subprocess.STREAM if stdin_async else subprocess.PIPE
    kwargs['stdin'] = stdin
    kwargs['stdout'] = tornado.process.Subprocess.STREAM
    kwargs['stderr'] = tornado.process.Subprocess.STREAM

    sub_process = tornado.process.Subprocess(*args, **kwargs)

    if stdin_data:
        if stdin_async:
            yield gen.Task(sub_process.stdin.write, stdin_data)
        else:
            sub_process.stdin.write(stdin_data)

    if stdin_async or stdin_data:
        sub_process.stdin.close()

    # Drain stdout and stderr concurrently (yielding a list runs the
    # futures in parallel).  Reading them sequentially can deadlock when
    # the child fills the pipe buffer of the stream not being read.
    result, error = yield [sub_process.stdout.read_until_close(),
                           sub_process.stderr.read_until_close()]

    raise gen.Return((result, error))


class SubProcessor():
    """Runs one external command through tornado's Subprocess, with
    support for cancelling (killing) the child before it finishes.
    """

    def __init__(self):
        self.sub_process = None  # the tornado.process.Subprocess wrapper
        self._cancel = False     # True once cancel() has killed the child
        self._done = False       # True once run() finished collecting output
        self._pid = None         # child pid, set when the process spawns

    @gen.coroutine
    def run(self, *args, **kwargs):
        """Spawn the command, feed optional stdin, and collect output.

        Extra keyword arguments (popped before forwarding the rest to
        tornado.process.Subprocess): ``stdin_data`` (bytes written to
        the child's stdin, if truthy) and ``stdin_async`` (write through
        a tornado STREAM pipe instead of a blocking subprocess.PIPE).

        Returns (stdout_data, stderr_data) via gen.Return.
        """
        stdin_data = kwargs.pop('stdin_data', None)
        stdin_async = kwargs.pop('stdin_async', False)

        stdin = tornado.process.Subprocess.STREAM if stdin_async else subprocess.PIPE
        kwargs['stdin'] = stdin
        kwargs['stdout'] = tornado.process.Subprocess.STREAM
        kwargs['stderr'] = tornado.process.Subprocess.STREAM

        self.sub_process = sub_process = tornado.process.Subprocess(
            *args, **kwargs)
        self._pid = sub_process.proc.pid

        if stdin_data:
            if stdin_async:
                yield gen.Task(sub_process.stdin.write, stdin_data)
            else:
                sub_process.stdin.write(stdin_data)

        if stdin_async or stdin_data:
            sub_process.stdin.close()

        # NOTE(review): stdout is drained completely before stderr is
        # touched; a child that fills the stderr pipe buffer first could
        # deadlock here -- consider reading both streams concurrently.
        result = yield sub_process.stdout.read_until_close()
        error = yield sub_process.stderr.read_until_close()
        try:
            # Non-blocking reap so the child does not linger as a zombie.
            p, s = os.waitpid(self._pid, os.WNOHANG)
            DEBUG_LOG('waitpid pid, status', p, s)
        except Exception as e:
            ERROR_LOG('waitpid error', e)

        self._done = True
        raise gen.Return((result, error))

    def cancel(self):
        """Kill the child if it is still running and not yet cancelled;
        idempotent once the kill has happened."""
        if not self._done and self._pid and not self._cancel:
            self.sub_process.proc.kill()
            try:
                # Best-effort reap; WNOHANG because the child may not
                # have exited yet when this runs.
                p, s = os.waitpid(self._pid, os.WNOHANG)
                DEBUG_LOG('waitpid pid, status', p, s)
            except Exception as e:
                ERROR_LOG('waitpid error', e)
            self._cancel = True

    def pid(self):
        # Child pid (None until run() has spawned the process).
        return self._pid


if __name__ == '__main__':
    # Manual smoke test: daemonize a TestMaster that forks TestWorker
    # processes according to ./conf/master.config.
    import time
    from multiprocessing import Manager
    mm = None    # Manager proxy server, created in the master
    task = None  # shared list, inherited by workers through fork

    class TestMaster(Master):

        def init_master(self, config):
            """Runs once in the master before the workers are forked."""
            global task
            global mm
            mm = Manager()
            task = mm.list([1, 2, 3])
            DEBUG_LOG('init master', config)

    class TestWorker(Worker):

        def init(self, config):
            # The shared Manager list is visible here via fork.
            DEBUG_LOG('TestWorker init.......', task)

        def start(self):
            DEBUG_LOG('TestWorker start......')
            # Fire interval_event every 1s and timeout_event once at 10s,
            # with sample extra args forwarded to the callbacks.
            set_interval(self.interval_event, 1000, 2, 3, 4, a=2, b=3)
            set_timeout(self.timeout_event, 10000, 2, 3, 4, a=2, b=3)

        def interval_event(self, *args, **kw):
            # Deliberately over-sleeps its 1s interval, exercising the
            # IOLoop blocking-log threshold set in Worker.
            time.sleep(10)
            DEBUG_LOG('interval event', args, kw)

        def timeout_event(self, *args, **kw):
            DEBUG_LOG('timeout event', args, kw)
    master = TestMaster(main_config='./conf/master.config', worker=TestWorker)
    master.serve_forever()
