# encoding=utf-8

from typing import Callable, Dict, Optional, Tuple, Union

import asyncio
import time
import os
import signal
import logging
from uuid import uuid4
from threading import (
    Thread,
)
from threading import RLock
from multiprocessing import (
    Process,
)
import faulthandler
import tempfile

from .asynclib import (
    get_loop,
    maybe_await,
    cancel_other_tasks,
    wait_other_tasks,
)


# Module-level logger for all lifecycle/cleanup messages.
logger = logging.getLogger(__name__)

# Registry of cleanup callables keyed by task name; guarded by
# _add_cleanup_mutex on registration, consumed by async_cleanup().
_cleanup_tasks = {}  # type: Dict[str, Callable]
_add_cleanup_mutex = RLock()

# Module-wide "still running" flag; read via running(), flipped off by
# set_running(False) during cleanup / SIGTERM handling.
_is_running = True


def running():
    """Return the module-wide running flag (True until cleanup begins)."""
    return _is_running


def set_running(status):
    """Set the module-wide running flag, logging only on actual change."""
    global _is_running
    if _is_running == status:
        # No transition; stay quiet.
        return
    logger.debug('set running status to {}'.format(status))
    _is_running = status


def add_cleanup_task(task, task_name=None):
    """Register *task* to be executed during async_cleanup().

    :param task: zero-argument callable (sync or async) to run at cleanup.
    :param task_name: optional registry key; a random hex name is generated
        when omitted.  Re-using a name replaces the previously registered task.
    :returns: the name the task was registered under.
    :raises TypeError: if *task* is not callable.
    """
    # Explicit check instead of `assert`, which is stripped under `python -O`
    # and would let a non-callable slip into the registry.
    if not callable(task):
        raise TypeError('cleanup task must be callable, got {!r}'.format(task))
    with _add_cleanup_mutex:
        if not task_name:
            task_name = uuid4().hex
        _cleanup_tasks[task_name] = task
        return task_name


def pop_cleanup_task(task_name):
    """Deregister and return the cleanup task named *task_name*, or None."""
    try:
        return _cleanup_tasks.pop(task_name)
    except KeyError:
        return None


async def async_cleanup(loop=None, is_cancel_all=True, is_stop=False, **kwargs):
    """Run registered cleanup tasks and wind down *loop*.

    Marks the module as no longer running, shuts down async generators, runs
    every callable registered in ``_cleanup_tasks`` (5 second budget each),
    then optionally cancels/awaits all other tasks on the loop and stops it.
    Extra ``**kwargs`` are accepted and ignored so one kwargs dict can be
    shared with ``sync_cleanup``.

    :param loop: event loop to clean up; defaults to ``get_loop()``.
    :param is_cancel_all: cancel and await all other tasks on the loop.
    :param is_stop: call ``loop.stop()`` at the end.
    """
    loop = loop or get_loop()
    set_running(False)
    logger.debug('do async cleanup use loop {}'.format(loop))
    try:
        await asyncio.wait_for(loop.shutdown_asyncgens(), 5)
    except Exception as err:
        logger.error('{!r} when shutdown_asyncgens'.format(err))
    # Snapshot items() so tasks may (de)register entries while we iterate.
    for task_name, task in list(_cleanup_tasks.items()):
        # assert callable(task)
        try:
            logger.debug('executing cleanup task {}: {}'.format(task_name, task))
            await asyncio.wait_for(maybe_await(task()), 5)
            # Only drop the task once it ran; a failed task stays registered.
            _cleanup_tasks.pop(task_name, None)
        except asyncio.CancelledError:
            pass
        except Exception as err:
            logger.exception('{!r}; failed to execute cleanup task {}: {}'.format(err, task_name, task))
    if is_cancel_all:
        logger.debug('cancel and wait all other tasks')
        count = cancel_other_tasks(loop=loop)
        logger.debug('{} tasks cancelled'.format(count))
        count = await wait_other_tasks(loop=loop)
        logger.debug('{} tasks waited'.format(count))
        # cancel twice to ensure
        count = cancel_other_tasks(loop=loop)
        logger.debug('{} tasks cancelled'.format(count))
    try:
        # Second shutdown_asyncgens pass -- presumably because cleanup tasks
        # above may have spawned new async generators; confirm if changing.
        await asyncio.wait_for(loop.shutdown_asyncgens(), 3)
    except Exception as err:
        logger.error('{!r}; when shutdown_asyncgens'.format(err))
    if is_stop:
        loop.stop()
    logger.debug('completed async cleanup use loop {}'.format(loop))


def sync_cleanup(loop=None, is_close_loop=False, is_cleanup_kill=None, **kwargs):
    """Synchronously drive async_cleanup() on *loop*, best-effort.

    Handles both a running loop (schedules the coroutine thread-safely and
    waits briefly for it) and an idle one (runs it to completion inline).
    Optionally closes the loop afterwards and/or escalates to killing the
    whole process group.  All failures are logged, never raised.
    """
    loop = loop if loop else get_loop()
    kill_timeout = 1
    logger.debug('sync cleanup use loop {} with timeout {} kill: {}'.format(
        loop, kill_timeout, is_cleanup_kill,
    ))
    try:
        if not loop.is_running():
            # Idle loop: drive the cleanup coroutine to completion ourselves.
            assert not loop.is_running() and not loop.is_closed()
            bounded = asyncio.wait_for(
                async_cleanup(loop=loop, is_stop=True),
                kill_timeout,
            )
            loop.run_until_complete(bounded)
        else:
            # Running loop (likely another thread): hand the coroutine over
            # and give it a short window to finish.
            future = asyncio.run_coroutine_threadsafe(
                async_cleanup(loop=loop, is_stop=True),
                loop=loop,
            )
            try:
                future.result(kill_timeout)
            except Exception:
                # Best effort: the loop is stopping anyway.
                pass
        if is_close_loop:
            loop.close()
    except Exception as err:
        logger.exception(repr(err))
    if is_cleanup_kill:
        kill_group()


def start_loop_in_thread(
        loop: asyncio.AbstractEventLoop = None,
        is_daemon=True,
        is_start=True,
) -> Tuple[asyncio.AbstractEventLoop, Thread]:
    """Run *loop* (or a freshly created event loop) forever in a thread.

    :param loop: event loop to run; a new one is created when None.
    :param is_daemon: whether the thread is a daemon thread.
    :param is_start: start the thread immediately when True.
    :returns: ``(loop, thread)`` -- the thread is unstarted if *is_start*
        is falsy.
    """
    if loop is None:
        loop = asyncio.new_event_loop()

    def _run_forever(target_loop):
        # Make the loop current for this thread, then block in run_forever().
        asyncio.set_event_loop(target_loop)
        logger.info('start event loop')
        target_loop.run_forever()
        logger.info('event loop ended')

    thread = Thread(target=_run_forever, args=(loop,))
    thread.daemon = bool(is_daemon)
    if is_start:
        thread.start()
    return loop, thread


def register_sigterm_handler(**kwargs):
    """Install SIGTERM (graceful cleanup) and SIGUSR1 (stack dump) handlers.

    All keyword arguments are forwarded to sync_cleanup() when SIGTERM fires;
    ``is_close_loop`` defaults to False and ``is_cleanup_kill`` to True.
    Cleanup runs on a daemon thread so the signal handler returns promptly.
    """
    kwargs.setdefault('is_close_loop', False)
    kwargs.setdefault('is_cleanup_kill', True)

    def handle_sigterm(signum, frame):
        logger.debug('SIGTERM handler triggered with {}'.format(kwargs))
        set_running(False)
        cleanup_thread = None
        if kwargs.get('loop'):
            # A loop was supplied: run the full sync cleanup against it.
            cleanup_thread = Thread(
                target=sync_cleanup,
                name='skernel_loop_cleanup',
                kwargs=kwargs,
            )
        elif kwargs['is_cleanup_kill']:
            # No loop to clean; go straight to killing the process group.
            cleanup_thread = Thread(
                target=kill_group,
                name='skernel_cleanup',
            )
        if cleanup_thread is not None:
            cleanup_thread.daemon = True
            cleanup_thread.start()

    def handle_sigusr1(signum, frame):
        logger.debug('SIGUSR1 handler triggered with {}'.format(kwargs))
        # Dump every thread's stack into a temp file, then log the contents.
        with tempfile.TemporaryFile('w+') as fp:
            faulthandler.dump_traceback(fp, all_threads=True)
            fp.seek(0)
            data = fp.read()
        logger.critical(data)

    signal.signal(signal.SIGTERM, handle_sigterm)
    signal.signal(signal.SIGUSR1, handle_sigusr1)
    logger.debug('SIGTERM handler registered with {}'.format(kwargs))


_is_sigkill = False


def kill_group():
    """Terminate every process in our process group (including ourselves).

    Sends SIGTERM to the group, then SIGKILL.  Guarded by the module-level
    _is_sigkill flag so repeated calls are no-ops.
    """
    global _is_sigkill
    if not _is_sigkill:
        _is_sigkill = True
        sig = signal.SIGTERM
        logger.info('killing process group using signal {}'.format(sig))
        os.killpg(0, sig)  # kill all processes in my group
        # NOTE(review): kill_timeout is 0, so SIGKILL follows SIGTERM with no
        # grace period for processes to shut down -- confirm this is intended.
        kill_timeout = 0
        time.sleep(kill_timeout)
        sig = signal.SIGKILL
        logger.info('killing process group using signal {}'.format(sig))
        os.killpg(0, sig)  # kill all processes in my group


def start_workers(
        name: str,
        target: Callable,
        mode: str = 'process',
        kwargs: Optional[Dict] = None,
        concurrency: int = 3,
        kill_timeout: float = 10,
        is_block: bool = False,
        is_pass_name: bool = False,
        is_kill_group=False,
):
    """Start *concurrency* copies of *target* as threads or processes.

    :param name: base name for workers; each gets ``'{name} {n}'``.
    :param target: callable run by every worker.
    :param mode: ``'process'``/``'processes'`` or ``'thread'``/``'threads'``.
    :param kwargs: keyword arguments passed to *target* (copied per call).
    :param concurrency: number of workers to start.
    :param kill_timeout: shared join budget (seconds) used after a
        KeyboardInterrupt while blocking; None means wait forever.
    :param is_block: when True, join all workers until they finish.
    :param is_pass_name: pass the worker's name as the first positional arg.
    :param is_kill_group: after blocking ends, SIGTERM the process group.
    :returns: the list of started worker objects.
    :raises ValueError: for an unknown *mode*.
    """
    mode = mode or 'process'
    concurrency = int(concurrency)
    logger.info('start {} {} workers in {} mode with kill_timeout {}'.format(
        concurrency, name, mode, kill_timeout,
    ))
    kwargs = dict(kwargs or {})

    # Pick the worker class once instead of duplicating the spawn loop
    # per mode (the two branches were identical apart from the class).
    if mode in ('thread', 'threads'):
        worker_cls = Thread  # type: Union[type, ...]
    elif mode in ('process', 'processes'):
        worker_cls = Process
    else:
        # Fixed typo in the original message ('woker_mode').
        raise ValueError('worker mode should be `process` or `thread`')

    workers = []
    for n in range(concurrency):
        _name = '{} {}'.format(name, n)
        args = ()  # type: Tuple
        if is_pass_name:
            args = (_name,)
        worker = worker_cls(target=target, name=_name, args=args, kwargs=kwargs)  # type: Union[Process, Thread]
        workers.append(worker)
        worker.daemon = False
        worker.start()

    if is_block:
        try:
            for worker in workers:
                worker.join()
        except KeyboardInterrupt:
            logger.info('{} got exit signal; exiting'.format(name))
            # Join all the workers against one shared deadline so the total
            # wait is bounded by kill_timeout, not kill_timeout per worker.
            deadline = 0.0  # type: float
            if kill_timeout is not None:
                deadline = time.time() + kill_timeout
            for worker in workers:
                if kill_timeout is None:
                    _timeout = None
                else:
                    _timeout = max(0.0, deadline - time.time())
                worker.join(timeout=_timeout)
        finally:
            if is_kill_group:
                os.killpg(0, 15)
    return workers
