#!/usr/bin/env python
#########################################################################
#
#  Copyright (2017, ) Institute of Software, Chinese Academy of Sciences
#  Copyright (2017, ) Beyondcent Co., Lmt
#
#  Author: songyk@otcaix.iscas.ac.cn
#  Date  : 2017/6/11
#
##########################################################################

import docker
from pyzabbix import ZabbixMetric, ZabbixSender
from optparse import OptionParser
from multiprocessing import Pool
import os
import logging
from logging.handlers import RotatingFileHandler
import time
import sys

# For python 2 and 3 compatibility
try:
    from StringIO import StringIO
    import ConfigParser as configparser
except ImportError:
    from io import StringIO
    import configparser

# global switch
USE_CONFIG = True             # passed as use_config to ZabbixSender and to make_docker_hostname
STATS_USING_STREAM = False    # if True, collect stats from a stream instead of a one-shot sample
DEFAULT_LOG_LEVEL = logging.INFO

# NOTE: both clients are created at import time and talk to the local docker
# daemon over its unix socket, so importing this module requires a reachable
# docker daemon.
client = docker.APIClient(base_url='unix://var/run/docker.sock', version='auto')
client_env = docker.from_env(version='auto')


def create_default_logger():
    """Create and return the shared "dobbix" logger.

    The logger filters at DEFAULT_LOG_LEVEL; the attached rotating file
    handler (/var/log/zabbix/dobbix.log, 20 MB per file, 5 backups)
    accepts everything down to DEBUG.
    """
    logger = logging.getLogger("dobbix")
    logger.setLevel(DEFAULT_LOG_LEVEL)

    fmt = logging.Formatter(
        '%(asctime)s.%(msecs)d [%(levelname)s] (%(filename)s:%(lineno)d) %(message)s',
        '%a, %d %b %Y %H:%M:%S')

    logfile = os.path.join('/var/log/zabbix', 'dobbix.log')
    handler = RotatingFileHandler(logfile, maxBytes=20 * 1024 * 1024, backupCount=5)
    handler.setFormatter(fmt)
    handler.setLevel(logging.DEBUG)
    logger.addHandler(handler)

    return logger


# module-level logger shared by every function in this script
log = create_default_logger()


def load_hostname_from_config(config_file):
    """Load the node hostname from a zabbix agent configuration file.

    If no Hostname entry is found in the file, the value falls back to
    socket.gethostname() via the parser defaults.

    :type config_file: str or bool
    :param config_file: Path to the zabbix_agentd.conf file to load the
        hostname from.  If the value is `True` the default config path
        /etc/zabbix/zabbix_agentd.conf is used.
    :rtype: str
    :return: hostname read from the config (or the system hostname).
    """

    if config_file and isinstance(config_file, bool):
        config_file = '/etc/zabbix/zabbix_agentd.conf'

    # Workaround: zabbix_agentd.conf has no section headers, but
    # ConfigParser requires at least one section, so prepend a fake one.
    with open(config_file, 'r') as f:
        config_file_data = "[root]\n" + f.read()

    import socket
    default_params = {
        'Hostname': socket.gethostname()
    }

    config_file_fp = StringIO(config_file_data)
    config = configparser.RawConfigParser(default_params)
    # BUG FIX: readfp() is deprecated since Python 3.2 and removed in
    # Python 3.12; prefer read_file() and fall back for Python 2.
    try:
        config.read_file(config_file_fp)
    except AttributeError:
        # Python 2's ConfigParser only provides readfp().
        config.readfp(config_file_fp)
    hostname = config.get('root', 'Hostname')

    return hostname


# Separator for the "<container>_ON_<agent hostname>" naming scheme used by
# make_docker_hostname (currently disabled there by an early return).
ON = '_ON_'


def make_docker_hostname(name, use_config=None):
    """Build the zabbix host name for a container.

    NOTE(review): the combined "<container>_ON_<agent hostname>" scheme was
    already disabled in the original -- everything after the first return
    was dead code (flagged by the author's own "# kidding me?" comment).
    The dead code is removed here; behavior is unchanged: the container
    name alone is used as the zabbix host name.

    :param name: container name (converted with str()).
    :param use_config: unused while the combined scheme stays disabled;
        kept for backward compatibility with existing callers.
    :rtype: str
    """
    return str(name)


def calculate_cpu_percent(d):
    """Compute a container's CPU usage percentage from one stats sample.

    Taken from Docker Go Client:
    https://github.com/docker/docker/blob/28a7577a029780e4533faf3d057ec9f6c7a10948/api/client/stats.go#L309

    :param d: one decoded stats sample; must contain "cpu_stats" and
        "precpu_stats" entries.
    :return: CPU usage in percent (can exceed 100 on multi-core hosts),
        or 0.0 when the sample lacks the required keys.
    """
    try:
        cpu_count = len(d["cpu_stats"]["cpu_usage"]["percpu_usage"])
        cpu_percent = 0.0
        cpu_delta = float(d["cpu_stats"]["cpu_usage"]["total_usage"]) - float(d["precpu_stats"]["cpu_usage"]["total_usage"])
        system_delta = float(d["cpu_stats"]["system_cpu_usage"]) - float(d["precpu_stats"]["system_cpu_usage"])
        if system_delta > 0.0 and cpu_delta > 0.0:
            cpu_percent = cpu_delta / system_delta * 100.0 * cpu_count
        return cpu_percent
    # BUG FIX: "except KeyError, ex" is Python-2-only syntax; "as" works on
    # both Python 2.6+ and Python 3 (the file advertises 2/3 compatibility).
    except KeyError as ex:
        log.error("some key error occurs (%s), just returns 0.0!!!!" % repr(ex))
        return 0.0


def humanize_bytes(bytesize, precision=2):
    """Render a byte count as a human readable string (e.g. '1.46 kB').

    :param bytesize: number of bytes, or None (rendered as '0 byte').
    :param precision: decimal places for kB and larger units; plain byte
        counts are always printed without decimals.
    :rtype: str
    """
    if bytesize is None:
        return '0 byte'
    if bytesize == 1:
        return '1 byte'

    units = (
        (1 << 50, 'PB'),
        (1 << 40, 'TB'),
        (1 << 30, 'GB'),
        (1 << 20, 'MB'),
        (1 << 10, 'kB'),
        (1, 'bytes'),
    )
    # Default to the smallest unit; values below 1 (e.g. 0) fall through
    # the scan without matching, just like the original loop did.
    factor, suffix = units[-1]
    for threshold, unit in units:
        if bytesize >= threshold:
            factor, suffix = threshold, unit
            break

    digits = 0 if factor == 1 else precision
    return '%.*f %s' % (digits, bytesize / float(factor), suffix)


def docker_stats_once_and_send(name):
    """Collect one statistics sample for container *name* and send it to zabbix.

    Runs as a Pool worker (one process per container).  Any failure is
    logged and swallowed so a single bad container cannot break the other
    workers; the next scheduled run simply retries.

    :param name: container name; also used to derive the zabbix host name.
    """
    try:
        log.debug("begin collectiong container[%s]" % name)

        # The container may have stopped between listing and collection.
        if client_env.containers.get(name).status != 'running':
            # BUG FIX: the message had a %s placeholder but no argument.
            log.warning("container[%s] is not running" % name)
            exit(1)

        if STATS_USING_STREAM:
            # BUG FIX: this branch iterated a one-shot (stream=False) result;
            # a stream is required here.  Unreachable while
            # STATS_USING_STREAM is False - confirm before enabling.
            for metrics in client.stats(name, decode=True, stream=True):
                try:
                    # The first sample of a stream carries no precpu data,
                    # so probe for it and skip samples until it appears.
                    metrics['precpu_stats']['system_cpu_usage']
                    s = metrics
                    break
                # BUG FIX: Python-3-compatible except syntax (was ", ex").
                except KeyError:
                    log.error('metrics[precpu_stats][system_cpu_usage] does not eists!!!')
                    continue
        else:
            # just stats once, no stream is connected
            s = client.stats(name, decode=True, stream=False)

        # cpu & mem (the humanized usage/limit strings of the original were
        # never used, so they are no longer computed)
        cpu_percent = calculate_cpu_percent(s)
        mem_percent = (float(s['memory_stats']['usage']) / float(s['memory_stats']['limit'])) * 100

        # net i/o - only present when the container has an eth0 interface
        has_networks = False
        if 'networks' in s and 'eth0' in s['networks']:
            eth0_metrics = s['networks']['eth0']
            # the net metrics are the accumulated value
            rx_bytes = eth0_metrics['rx_bytes']
            rx_packets = eth0_metrics['rx_packets']
            rx_errors = eth0_metrics['rx_errors']
            rx_dropped = eth0_metrics['rx_dropped']
            tx_bytes = eth0_metrics['tx_bytes']
            tx_packets = eth0_metrics['tx_packets']
            tx_errors = eth0_metrics['tx_errors']
            tx_dropped = eth0_metrics['tx_dropped']
            has_networks = True

        # blkio - accumulate values over all devices
        blkio_read_bytes = 0
        blkio_write_bytes = 0
        io_service_bytes = s['blkio_stats']['io_service_bytes_recursive']
        for item in io_service_bytes:
            if item['op'] == 'Read':
                blkio_read_bytes += item['value']
            # BUG FIX: was 'Wrtie', so write bytes were never accumulated.
            elif item['op'] == 'Write':
                blkio_write_bytes += item['value']

        # zabbix trapper keys for collecting docker statistics.
        # NOTE(review): the "erros" spellings below are kept on purpose -
        # these keys must match the item keys configured on the zabbix server.
        key_cpu_used = 'user.docker[cpu_used_percent]'
        key_mem_used = 'user.docker[memory_used_percent]'
        key_bytes_rec = 'user.docker[bytes_received]'
        key_bytes_sent = 'user.docker[bytes_sent]'
        key_pkt_rec = 'user.docker[packets_received]'
        key_pkt_sent = 'user.docker[packets_sent]'
        key_pkt_rec_drop = 'user.docker[packets_received_dropped]'
        key_pkt_sent_drop = 'user.docker[packets_sent_dropped]'
        key_pkt_rec_err = 'user.docker[packets_received_erros]'
        key_pkt_sent_err = 'user.docker[packets_sent_erros]'
        key_blk_write_bytes = 'user.docker[blk_write_bytes]'
        key_blk_read_bytes = 'user.docker[blk_read_bytes]'

        name_for_zabbix = make_docker_hostname(name, USE_CONFIG)
        packet = [
            ZabbixMetric(name_for_zabbix, key_cpu_used, cpu_percent),
            ZabbixMetric(name_for_zabbix, key_mem_used, mem_percent),
            ZabbixMetric(name_for_zabbix, key_blk_write_bytes, blkio_write_bytes),
            ZabbixMetric(name_for_zabbix, key_blk_read_bytes, blkio_read_bytes),
        ]

        if has_networks:
            net_metrics = [
                ZabbixMetric(name_for_zabbix, key_bytes_rec, rx_bytes),
                ZabbixMetric(name_for_zabbix, key_bytes_sent, tx_bytes),
                ZabbixMetric(name_for_zabbix, key_pkt_rec, rx_packets),
                ZabbixMetric(name_for_zabbix, key_pkt_sent, tx_packets),
                ZabbixMetric(name_for_zabbix, key_pkt_rec_drop, rx_dropped),
                ZabbixMetric(name_for_zabbix, key_pkt_sent_drop, tx_dropped),
                ZabbixMetric(name_for_zabbix, key_pkt_rec_err, rx_errors),
                ZabbixMetric(name_for_zabbix, key_pkt_sent_err, tx_errors)
            ]
            packet.extend(net_metrics)

        log.debug("metrics:")
        log.debug("\tCPU/MEM:cpu_percent:%s, memory_percent:%s" % (str(cpu_percent), str(mem_percent)))
        log.debug("\tBLKIO:  blk_read_bytes:%s, blk_write_bytes:%s " % (str(blkio_read_bytes), str(blkio_write_bytes)))
        if has_networks:
            log.debug("\tNETIO:  rx_bytes:%s tx_bytes:%s, rx_packets:%s, tx_packets:%s, "
                      "rx_dropped:%s, tx_dropped:%s, rx_errors:%s, tx_errors:%s"
                      % (str(rx_bytes), str(tx_bytes), str(rx_packets), str(tx_packets), str(rx_dropped),
                         str(tx_dropped), str(rx_errors), str(tx_errors)))
        log.debug("")
        # The sender reads server address etc. from zabbix_agentd.conf.
        result = ZabbixSender(use_config=USE_CONFIG).send(packet)
        log.debug("result for container[%s] via pid[%d]: %s" % (name_for_zabbix, os.getpid(), result))

    # BUG FIX: Python-3-compatible except syntax (was "except Exception, ex").
    except Exception as ex:
        log.error("Error occurs: %s, stats and sending process failed( waiting next try.)" % repr(ex))


def main():
    """Entry point.

    With -l, print the zabbix low-level-discovery JSON describing all
    containers; otherwise fan out one stats-collection worker process per
    running container.
    """
    # parse args
    parser = OptionParser()
    parser.add_option('-l', action="store_true", dest="list", default=False)
    (opts, args) = parser.parse_args()

    # Docker access
    containers = client_env.containers.list()

    if opts.list:
        # zabbix LLD for docker host auto discovery
        import json
        con_list = [{'{#NAME}': make_docker_hostname(c.name, USE_CONFIG)}
                    for c in containers]
        print(json.dumps({'data': con_list}))
        return

    if not containers:
        log.warning("No running container found, exiting docker_stats process...")
        exit(1)

    # get only names of running containers
    names = [c.name for c in containers if c.status == 'running']
    if not names:
        # BUG FIX: Pool(processes=0) below would raise ValueError.
        log.warning("No running container found, exiting docker_stats process...")
        return

    start = time.time()
    try:
        pool = Pool(processes=len(names))
        try:
            pool.map_async(docker_stats_once_and_send, names).wait()
        finally:
            # BUG FIX: the pool was never closed/joined, leaking worker
            # processes until interpreter exit.
            pool.close()
            pool.join()
    # BUG FIX: Python-3-compatible except syntax (was "except Exception, ex").
    except Exception as ex:
        log.error("Error occurs: %s" % repr(ex))
    log.debug("docker stats took time: %s" % str(time.time() - start))


if __name__ == '__main__':
    try:
        main()
    # BUG FIX: Python-3-compatible except syntax (was "except Exception, e");
    # also fixed the "Excution" typo in the logged message.
    except Exception as e:
        log.error("Execution failed: %s" % repr(e))

