#!/usr/bin/env python
# -*- coding: utf-8; tab-width: 4; indent-tabs: nil; -*-
# by Al Nikolov <roottoorfieuorg@gmail.com>
from __future__ import absolute_import, print_function

__metaclass__ = type

import glob
import sys

if sys.version_info >= (3,):
    try:
        from past.builtins import basestring
    except ImportError:
        basestring = str
    from queue import Queue, Empty
    import io
else:
    import StringIO
    from Queue import Queue, Empty  # noqa

if sys.version_info >= (3, 4):
    import importlib.util
else:
    import imp

try:
    import json
except ImportError:
    import simplejson as json
import logging
import os
import pickle
import signal
import subprocess
import threading
import time
import types

try:
    from urllib.parse import urlparse, urlencode
    from urllib.request import urlopen, Request
    from urllib.error import HTTPError
except ImportError:
    from urlparse import urlparse  # noqa
    from urllib import urlencode  # noqa
    from urllib2 import urlopen, Request, HTTPError  # noqa

from agent520.version import version_string
from agent520.cmd import info, test_plugin
from agent520.utils.load_plugin import get_plugins_path, get_plugin_name
from agent520.utils.ini_parser import config
from agent520.utils.log import logging_init


class Agent:
    """
    Plugin-driven monitoring agent.

    Worker threads take plugin tasks from ``execute`` and push results to
    ``metrics``; the main loop (``run``) schedules tasks and moves results
    into ``data``, where a dedicated thread aggregates them and
    periodically flushes (actual sending/cleaning is to be developed
    later).  ``cemetery`` holds finished worker threads awaiting
    ``join()`` — see :meth:`_rip`.
    """

    # Class-level queues shared by every worker thread.
    execute = Queue()   # plugin tasks to run (module object or file path)
    metrics = Queue()   # raw plugin results
    data = Queue()      # accepted results, ready for aggregation
    cemetery = Queue()  # dead worker threads waiting to be joined
    shutdown = False    # flag asking all worker loops to terminate

    def __init__(self):
        """
        Initialize internal structures: configuration, logging, the
        plugin schedule, the data worker thread, and a config dump.
        """
        self.config = config
        # Cache for plugins so they can store values related to previous checks
        self.plugins_cache = {}
        logging_init(config=self.config)
        self._plugins_init()
        self._data_worker_init()
        self._dump_config()

    def _plugins_init(self):
        """
        Discover plugin files and build the initial schedule (everything
        due immediately, i.e. at tick 0).

        Subprocess plugins are scheduled by file name; in-process plugins
        by their imported module object.
        """
        logging.info("_plugins_init")
        plugins_path = get_plugins_path(self.config)
        filenames = glob.glob(os.path.join(plugins_path, "*.py"))
        if plugins_path not in sys.path:
            sys.path.insert(0, plugins_path)
        self.schedule = {}
        for filename in filenames:
            name = get_plugin_name(filename)
            if name == "plugins":  # skip the package marker module itself
                continue
            self.config._config_section_create(name)
            if not self.config.getboolean(name, "enabled"):
                continue
            if self.config.getboolean(name, "subprocess"):
                # Executed in a child interpreter; schedule by file path.
                self.schedule[filename] = 0
            else:
                module = self._import_plugin(name)
                if module:
                    self.schedule[module] = 0
                else:
                    logging.error("import_plugin:%s", name)

    def _import_plugin(self, name):
        """
        Import plugin module *name*; return the module or None on failure.

        Uses importlib on Python >= 3.4 and the legacy ``imp`` module
        otherwise.  Any failure — including lookup (previously the
        ``find_module``/``find_spec`` step was outside the try and could
        crash agent startup) — is logged and swallowed so one broken
        plugin cannot prevent the agent from starting.
        """
        fp = None
        try:
            if sys.version_info >= (3, 4):
                spec = importlib.util.find_spec(name)
                module = importlib.util.module_from_spec(spec)
                spec.loader.exec_module(module)
            else:
                fp, pathname, description = imp.find_module(name)
                module = imp.load_module(name, fp, pathname, description)
        except Exception:
            module = None
            logging.error("import_plugin_exception:%s", str(sys.exc_info()[0]))
        finally:
            # imp.find_module returns an open file object; close it
            # explicitly since we may leave via an exception.
            if fp:
                fp.close()
        return module

    def _subprocess_execution(self, task):
        """
        Execute plugin file *task* in a subprocess.

        Polls the child once per execution interval; after the plugin's
        ``ttl`` expires the child is sent SIGTERM.  Returns the unpickled
        stdout payload, or None when the plugin produced no output.
        """
        process = subprocess.Popen(
            (sys.executable, task),
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            universal_newlines=True,
        )
        logging.debug("%s:process:%i", threading.currentThread(), process.pid)
        interval = self.config.getint("execution", "interval")
        name = get_plugin_name(task)
        ttl = self.config.getint(name, "ttl")
        # Number of poll cycles before the plugin is considered hung.
        # Floor division keeps the original Python 2 integer semantics on
        # Python 3 (plain "/" would yield a float tick count).
        ticks = int(ttl // interval) or 1
        process.poll()
        while process.returncode is None and ticks > 0:
            logging.debug("%s:tick:%i", threading.currentThread(), ticks)
            time.sleep(interval)
            ticks -= 1
            process.poll()
        if process.returncode is None:
            logging.error("%s:kill:%i", threading.currentThread(), process.pid)
            os.kill(process.pid, signal.SIGTERM)
        stdout, stderr = process.communicate()
        if process.returncode != 0 or stderr:
            logging.error(
                "%s:%s:%s:%s",
                threading.currentThread(),
                task,
                process.returncode,
                stderr,
            )
        if stdout:
            # universal_newlines=True yields text on Python 3, but
            # pickle.loads requires bytes — encode before loading.
            # NOTE(review): assumes plugins emit ASCII pickle (protocol
            # 0); confirm against plugin writers.  pickle.loads here is
            # only acceptable because plugins are local, trusted code.
            if not isinstance(stdout, bytes):
                stdout = stdout.encode()
            ret = pickle.loads(stdout)
        else:
            ret = None
        return ret

    def _execution(self):
        """
        Worker loop: drain queued execution requests, run each plugin
        and put the results on ``metrics``.

        Exits when the queue is empty or shutdown is requested, then
        registers itself in ``cemetery`` and releases a slot on the
        ``hire`` semaphore (created in :meth:`run`).
        """
        while True:
            if self.shutdown:
                logging.info("%s:shutdown", threading.currentThread())
                break
            logging.debug(
                "%s:exec_queue:%i", threading.currentThread(), self.execute.qsize()
            )
            try:
                task = self.execute.get_nowait()
            except Empty:
                break
            logging.debug("%s:task:%s", threading.currentThread(), task)
            name = get_plugin_name(task)
            try:
                interval = self.config.get(name, "interval")
            except Exception:
                # Was a bare "except:", which also swallowed SystemExit
                # and KeyboardInterrupt.  Fall back to a sane default.
                interval = 60
            ts = time.time()
            if isinstance(task, basestring):
                # A file path means a subprocess plugin.
                payload = self._subprocess_execution(task)
            else:
                try:
                    # Per-plugin cache persists between runs of the same
                    # plugin so it can compare against previous checks.
                    self.plugins_cache.setdefault(name, [])
                    plugin = task.Plugin(agent_cache=self.plugins_cache[name])
                    payload = plugin.run(self.config)
                except Exception as e:
                    logging.exception("plugin_exception %s", e)
                    payload = {"exception": str(sys.exc_info()[0])}
            self.metrics.put(
                {
                    "ts": ts,
                    "task": task,
                    "name": name,
                    "interval": interval,
                    "payload": payload,
                }
            )
        self.cemetery.put(threading.currentThread())
        self.hire.release()

    def _data(self):
        """
        Data worker loop: collect metric entries from the ``data`` queue
        and periodically flush them (currently only logged; sending and
        cleaning are to be developed later).
        """
        logging.info("%s", threading.currentThread())
        max_age = self.config.getint("DEFAULT", "max_data_age")
        interval = self.config.getint("DEFAULT", "interval")
        collection = []
        # The very first flush uses a short span so data shows up quickly.
        initial_data = True
        while True:
            if initial_data:
                max_span = 10
            else:
                # NOTE(review): reads section "agent" here while max_age
                # above comes from "DEFAULT" — confirm which section is
                # authoritative for max_data_span.
                max_span = self.config.getint("agent", "max_data_span")
            loop_ts = time.time()
            if self.shutdown:
                logging.info("%s:shutdown", threading.currentThread())
                break
            logging.debug(
                "%s:data_queue:%i:collection:%i",
                threading.currentThread(),
                self.data.qsize(),
                len(collection),
            )
            while self.data.qsize():
                try:
                    collection.append(self.data.get_nowait())
                except Exception as e:
                    logging.error("Data queue error: %s" % e)
            if collection:
                first_ts = min(e["ts"] for e in collection)
                last_ts = max(e["ts"] for e in collection)
                now = time.time()
                if last_ts - first_ts >= max_span:
                    logging.debug("Max data span")
                elif now - first_ts >= max_age:
                    logging.warning("Max data age")
                logging.info(
                    "collection: %s",
                    json.dumps(collection, indent=2, sort_keys=True),
                )
                collection = []
                # Fix: this flag was never cleared, so the shortened
                # "initial" span used to apply forever.
                initial_data = False
            sleep_interval = interval - (time.time() - loop_ts)
            if sleep_interval > 0:
                time.sleep(sleep_interval)

    def _data_worker_init(self):
        """
        Start the background thread running :meth:`_data`.
        """
        logging.info("_data_worker_init")
        threading.Thread(target=self._data).start()

    def _dump_config(self):
        """
        Write the effective configuration to the log for diagnostics.
        """
        if sys.version_info >= (3,):
            buf = io.StringIO()
        else:
            buf = StringIO.StringIO()

        self.config.write(buf)
        logging.info("Config: %s", buf.getvalue())

    def _rip(self):
        """
        Join with dead workers
        Workaround for https://bugs.python.org/issue37788
        """
        logging.debug("cemetery:%i", self.cemetery.qsize())
        while True:
            try:
                thread = self.cemetery.get_nowait()
            except Empty:
                break
            logging.debug("joining:%s", thread)
            thread.join()

    def run(self):
        """
        Agent main loop: reap dead workers, reschedule finished plugins,
        dispatch due plugins onto worker threads (capped by the
        "execution"/"threads" semaphore), and on Ctrl-C perform an
        orderly shutdown by joining all non-daemon threads.
        """
        logging.info("Agent main loop")
        interval = self.config.getfloat("agent", "interval")
        # Counting semaphore capping concurrent execution workers;
        # workers release their slot in _execution on exit.
        self.hire = threading.Semaphore(self.config.getint("execution", "threads"))
        try:
            while True:
                self._rip()
                now = time.time()
                logging.debug("%i threads", threading.activeCount())
                # Move finished results to the data queue and reschedule
                # each plugin at now + its own interval.
                while self.metrics.qsize():
                    metrics = self.metrics.get_nowait()
                    name = metrics["name"]
                    logging.debug("metrics:%s", name)
                    plugin = metrics.get("task")
                    if plugin:
                        self.schedule[plugin] = int(now) + self.config.getint(
                            name, "interval"
                        )
                        if isinstance(plugin, types.ModuleType):
                            # Modules are not JSON-serializable; report
                            # the source file path instead.
                            metrics["task"] = plugin.__file__
                    self.data.put(metrics)
                execute = [what for what, when in self.schedule.items() if when <= now]
                for name in execute:
                    logging.debug("scheduling:%s", name)
                    del self.schedule[name]
                    self.execute.put(name)
                    # Non-blocking acquire: spawn a worker only if we are
                    # below the thread cap; otherwise an existing worker
                    # will pick the task up.
                    if self.hire.acquire(False):
                        try:
                            thread = threading.Thread(target=self._execution)
                            thread.start()
                            logging.debug("new_execution_worker_thread:%s", thread)
                        except Exception as e:
                            logging.warning("Can not start new thread: %s", e)
                    else:
                        logging.warning("threads_capped")
                        self.metrics.put(
                            {
                                "ts": now,
                                "name": "agent_internal",
                                "payload": {
                                    "threads_capping": self.config.getint(
                                        "execution", "threads"
                                    )
                                },
                            }
                        )
                sleep_interval = 0.5 - (time.time() - now)
                if sleep_interval > 0:
                    time.sleep(sleep_interval)
                else:
                    logging.warning("not enough time to start worker threads")
                    time.sleep(0.1)

        except KeyboardInterrupt:
            logging.warning(sys.exc_info()[0])
            logging.info("Shutting down")
            self._rip()
            wait_for = True
            while wait_for:
                all_threads = threading.enumerate()
                logging.info("Remaining threads: %s", all_threads)
                wait_for = [
                    thread
                    for thread in all_threads
                    if not thread.isDaemon()
                       and not isinstance(thread, threading._MainThread)
                ]
                if not wait_for:
                    logging.info("Bye!")
                    sys.exit(0)
                self.shutdown = True
                logging.info("Waiting for %i threads to exit", len(wait_for))
                for thread in wait_for:
                    logging.info("Joining with %s/%f", thread, interval)
                    thread.join(interval)
        except Exception as e:
            logging.error("Worker error: %s" % e)


def usage(code=1):
    """
    Print command-line help to stdout and terminate the process.

    :param code: process exit status.  Defaults to 1 for backward
        compatibility with existing callers; pass 0 when help was
        explicitly requested by the user.
    :raises SystemExit: always.
    """
    print("Usage: agent520 [options]")
    print("Options:")
    print("  --help     - this help")
    print("  --info     - print agent info")
    print("  --version  - print agent version")
    print("  --test     - run tests")
    sys.exit(code)


def main():
    """
    Command-line entry point.

    With no arguments, runs the agent main loop; otherwise dispatches
    the single recognized option and exits.  Removed the unreachable
    ``sys.exit()`` calls that followed ``usage()`` — ``usage()`` itself
    terminates the process.
    """
    if len(sys.argv) > 1:
        # Accept both "--option" and bare "option" forms.
        if sys.argv[1].startswith("--"):
            sys.argv[1] = sys.argv[1][2:]
        if sys.argv[1] == "help":
            usage()  # exits the process
        elif sys.argv[1] == "info":
            print(info.info())
            sys.exit()
        elif sys.argv[1] == "version":
            print(version_string())
            sys.exit()
        elif sys.argv[1] == "test":
            sys.exit(test_plugin.test_plugins(sys.argv[2:]))
        else:
            print("Invalid option:", sys.argv[1], file=sys.stderr)
            usage()  # exits the process with status 1
    else:
        Agent().run()


# Script entry point: dispatch CLI options or start the agent loop.
if __name__ == "__main__":
    main()
