#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time    : 2015/11/11 11:50
# @Author  : fy
# @File    : service.py
# @Software: PyCharm
import logging
import time
from datetime import datetime
from concurrent.futures import as_completed
from concurrent.futures.thread import ThreadPoolExecutor
from prometheus_client.exposition import generate_latest
from prometheus_client import CollectorRegistry
from util.base import my_assert
from test.myexecutor import MyDriver

from prometheus_client import (
    Gauge, Summary, Histogram, Info, Enum
)

# Map the config-file `usage` string to a prometheus_client collector class.
# NOTE(review): 'COUNTER' is backed by Gauge here — QueryInstance.update()
# calls set() on it rather than inc() — confirm this is intentional.
PROMETHEUS_TYPES = {
    'COUNTER': Gauge, 'GAUGE': Gauge, 'SUMMARY': Summary,
    'HISTOGRAM': Histogram, 'INFO': Info, 'ENUM': Enum
}

# Private registry so the exporter exposes only its own collectors.
_registry = CollectorRegistry()
# All QueryInstance objects created by register_metrics().
query_instances = list()

# Special `usage` values: LABEL marks a field as a label, DISCARD drops it.
PROMETHEUS_LABEL = 'LABEL'
PROMETHEUS_DISCARD = 'DISCARD'
FROM_INSTANCE_KEY = 'from_instance'

# Labels attached to every metric family; the `from_instance` value is
# filled in by config_collecting_params().
global_labels = {FROM_INSTANCE_KEY: ''}


def is_valid_version(version):
    """Report whether *version* is acceptable.

    NOTE(review): currently a stub that accepts every value; implement
    real validation here if version gating is ever required.
    """
    return True

def cast_to_numeric(v):
    """Coerce a fetched field value into a number Prometheus can store.

    None becomes NaN (a missing sample, not zero), a datetime becomes a
    Unix timestamp in milliseconds, and anything else goes through float().
    """
    if isinstance(v, datetime):
        return int(v.timestamp() * 1000)
    return float('nan') if v is None else float(v)

class Metric:
    """One parsed metric definition from the config.

    The raw dict is parsed eagerly, but the concrete Prometheus collector
    object is only created lazily when activate() is called.
    """

    def __init__(self, item):
        self.name = item['name']
        self.desc = item.get('description', '')
        self.usage = item['usage'].upper()
        self.value = None   # Prometheus collector; set by activate().
        self.prefix = ''    # Query-instance name, assigned by the owner.
        self.is_label = False
        self.is_valid = False

        if self.usage in PROMETHEUS_TYPES:
            # A directly supported Prometheus metric type.
            self.is_valid = True
            return
        if self.usage == PROMETHEUS_LABEL:
            # This field is a label on sibling metrics, not an exported value.
            self.is_label = True
            self.is_valid = True
            return
        if self.usage != PROMETHEUS_DISCARD:
            raise ValueError('Not support usage %s.' % self.usage)
        # DISCARD: drop the field entirely (is_valid stays False).

    def activate(self, labels=()):
        """Create, store and return the concrete Prometheus metric object."""
        my_assert(not self.is_label and self.prefix)

        # Prefix the query instance name onto the metric name.
        full_name = '%s_%s' % (self.prefix, self.name)
        self.value = PROMETHEUS_TYPES[self.usage](full_name, self.desc, labels)
        return self.value

class Query:
    """A single SQL statement/command plus TTL-based result caching."""

    def __init__(self, item, driver, url_dict):
        self.name = item.get('name')
        self.sql = item['cmd']
        self.timeout = item.get('timeout')
        self.ttl = item.get('ttl', 0)  # cache_seconds for PG exporter
        self.status = item.get('status', 'enable') == 'enable'  # enable or disable
        self.driver = MyDriver(driver, url_dict)

        self._cache = None
        # Pretend the previous scrape happened 15 seconds ago by default.
        self._last_scrape_timestamp = int(time.time() * 1000) - 15000

    def fetch(self, alternative_timeout, force_connection_db=None):
        """Return the query's rows, serving from cache while inside the TTL.

        NOTE(review): `force_connection_db` is accepted but unused here —
        confirm whether database forcing should be wired into the driver.
        """
        now_ms = int(time.time() * 1000)
        elapsed_ms = now_ms - self._last_scrape_timestamp
        placeholders = {
            'last_scrape_timestamp': self._last_scrape_timestamp,
            'scrape_interval': elapsed_ms,
            'scrape_interval_seconds': int(elapsed_ms / 1000),
        }

        # Serve the cached rows while the TTL has not expired.
        if self._cache and elapsed_ms < (self.ttl * 1000):
            return self._cache

        # Refresh the cache. A query-level timeout wins over the
        # caller-supplied `alternative_timeout`.
        rendered = self.sql.format_map(placeholders)  # Render SQL placeholders, if any.
        logging.debug('Query the SQL statement: %s.', rendered)
        self._cache = self.driver.query(rendered, self.timeout or alternative_timeout)
        self._last_scrape_timestamp = now_ms
        return self._cache


class QueryInstance(object):
    """A named group of queries plus the metrics and labels they populate.

    Built from one top-level entry of the exporter's parsed YAML config.
    """

    def __init__(self, d, url_dict):
        self.name = d['name']
        self.desc = d.get('desc', '')
        self.queries = list()
        self.metrics = list()
        self.labels = list()
        self.status = d.get('status', 'enable') == 'enable'
        self.ttl = d.get('ttl', 0)
        self.timeout = d.get('timeout', 0)
        self.public = d.get('public', True)
        self.driver = d.get('driver', None)
        logging.debug("{} {}".format(self.name, self.driver))

        # A bare string `exector` is shorthand for a single query entry.
        if isinstance(d['exector'], str):
            d['exector'] = [
                # Fix: Query.__init__ reads the statement from the 'cmd'
                # key, so the shorthand entry must use 'cmd' (the previous
                # 'sql' key made Query raise KeyError).
                {'name': self.name, 'cmd': d['cmd'], 'ttl': self.ttl, 'timeout': self.timeout}
            ]

        my_assert(isinstance(d['exector'], list))
        for q in d['exector']:
            query = Query(q, self.driver, url_dict)
            if query.status:
                self.queries.append(query)
            else:
                logging.info('Skip the exector %s (status: %s).' % (
                    query.name, query.status))

        for m in d['metrics']:
            # Compatible with PG-exporter: a single-key dict such as
            # {'metric_name': {'usage': '?', 'description': '?'}} is
            # converted to the openGauss-exporter format
            # {'name': 'metric_name', 'usage': '?', 'description': '?'}.
            if len(m) == 1:
                name, value = next(iter(m.items()))
                m = {'name': name}
                m.update(value)

            # Parse the dict structure into a Metric object so its fields
            # can be used directly below.
            metric = Metric(m)
            if not metric.is_valid:
                continue  # usage == DISCARD
            if metric.is_label:
                self.labels.append(metric.name)
            else:
                metric.prefix = self.name
                self.metrics.append(metric)

        # `global_labels` is required and must be added anytime.
        self.labels.extend(global_labels.keys())

    def register(self, registry):
        """Instantiate every metric with this instance's labels and register it."""
        for metric in self.metrics:
            registry.register(
                metric.activate(self.labels)
            )

    def update(self):
        """Run all queries and refresh every registered metric's samples."""
        # Clear old metric values and their labels before repopulating.
        for metric in self.metrics:
            metric.value.clear()

        for query in self.queries:
            try:
                rows = query.fetch(self.timeout)
            except Exception as e:
                logging.exception(e)
                logging.info("Error SQL statement is '%s'.", query.sql)
                continue

            # Robustness: treat a None result the same as an empty result
            # set instead of crashing on len(None).
            if not rows:
                logging.warning("Fetched nothing for metric '%s'." % query.name)
                continue

            # Update all metrics of this query instance from each row.
            for row in rows:
                # `global_labels` supplies the essential labels for each
                # metric family.
                # NOTE(review): a label missing from both the row and
                # global_labels becomes the literal string 'None' — confirm.
                labels = {}
                for field_name in self.labels:
                    field_value = str(row.get(field_name, global_labels.get(field_name)))
                    labels[field_name] = field_value

                for metric in self.metrics:
                    metric_family = metric.value.labels(**labels)
                    value = row.get(metric.name)
                    # None is equivalent to NaN instead of zero.
                    if value is None:
                        logging.warning(
                            'Not found field %s in the %s.', metric.name, self.name
                        )

                    value = cast_to_numeric(value)
                    # COUNTER is backed by a Gauge (see PROMETHEUS_TYPES),
                    # so both use set(); SUMMARY/HISTOGRAM use observe().
                    if metric.usage in ('COUNTER', 'GAUGE'):
                        metric_family.set(value)
                    elif metric.usage in ('SUMMARY', 'HISTOGRAM'):
                        metric_family.observe(value)
                    else:
                        logging.error(
                            'Not supported metric %s due to usage %s.' % (metric.name, metric.usage)
                        )

def config_collecting_params(url_dict, parallel, disable_cache):
    """Apply runtime collection settings.

    Sets up the shared thread pool, records the scraped instance's URL as
    the global `from_instance` label, and stores the cache toggle.

    NOTE(review): `_use_cache` is assigned here but not consulted by
    Query.fetch — confirm the cache toggle is actually wired up.
    """
    global _use_cache, _thread_pool_executor

    instance_url = url_dict.get('url', '')
    logging.debug("param instance_url: {}, parallel:{} disable_cache:{}".format(
        instance_url, parallel, disable_cache))

    _use_cache = not disable_cache
    global_labels[FROM_INSTANCE_KEY] = instance_url

    # One worker per concurrently-updated query instance.
    _thread_pool_executor = ThreadPoolExecutor(max_workers=parallel)

    logging.info(
        'service %s, use cache: %s, extra labels: %s.',
        global_labels[FROM_INSTANCE_KEY], _use_cache, global_labels
    )


def register_metrics(parsed_yml, url_dict=None):
    """Build a QueryInstance per YAML entry and register it with the registry."""
    my_assert(isinstance(parsed_yml, dict))

    for name, cfg in parsed_yml.items():
        my_assert(isinstance(cfg, dict))
        logging.debug("{} {}".format(name, cfg))

        # The YAML key doubles as the instance name unless one is given.
        cfg.setdefault('name', name)

        instance = QueryInstance(cfg, url_dict)
        instance.register(_registry)
        query_instances.append(instance)


def query_all_metrics():
    """Update every query instance in parallel and return the exposition text.

    Returns:
        bytes: the latest samples in Prometheus text exposition format.
    """
    # Fix: the previous call, "query_instances: ".format(query_instances),
    # had no {} placeholder, so the instances were silently dropped from
    # the log line. Use lazy %-style logging args instead.
    logging.debug("query_instances: %s", query_instances)

    futures = [
        _thread_pool_executor.submit(instance.update)
        for instance in query_instances
    ]

    # Wait for every update; one failing instance must not stop the others.
    for future in as_completed(futures):
        try:
            future.result()
        except Exception as e:
            logging.exception(e)

    data = generate_latest(_registry)
    logging.debug(data)
    return data