from prometheus_api_client import PrometheusConnect
from datetime import datetime, timedelta
from typing import Union
import pandas as pd
from tqdm import tqdm
import os, time
import json


# Scrape/export settings for the Prometheus metric collector below.
# NOTE(review): "username"/"password" are never read by the code in this
# file (PrometheusConnect is created with the URL only) — confirm whether
# the endpoint actually requires basic auth.
METRIC_SETTINGS = {
    "url": "http://1.92.152.201:9090",
    "username": "elastic",
    "password": "elastic",
    # Pod name prefixes to query; each is matched as pod=~'<name>.+' in PromQL.
    "pods": [
        "cartservice",
        "checkoutservice",
        "currencyservice",
        "emailservice",
        "frontend",
        "paymentservice",
        "productcatalogservice",
        "recommendationservice",
        "shippingservice",
    ],
    "namespace": "default",
    # Metrics deliberately excluded from collection.
    "invalid_metrics": [
        "container_scrape_error",
        "container_ulimits_soft",
        # "container_network_receive_packets_dropped_total",
        "container_fs_io_time_seconds_total",
        "container_fs_io_time_weighted_seconds_total",
        "container_fs_limit_bytes",
        "container_fs_read_seconds_total",
        "container_fs_reads_merged_total",
        "container_blkio_device_usage_total",
    ],
    "valid_metrics": [
        # author psy7604
        # The metric set to collect, built from the screened metrics above.
        # cpu
        "container_cpu_usage_seconds_total",
        "container_cpu_user_seconds_total",
        "container_cpu_system_seconds_total",
        "container_cpu_cfs_throttled_seconds_total",
        "container_cpu_cfs_throttled_periods_total",
        "container_cpu_cfs_periods_total",
        "container_cpu_load_average_10s",
        # memory
        "container_memory_cache",
        "container_memory_usage_bytes",
        "container_memory_working_set_bytes",
        "container_memory_mapped_file",
        "container_memory_rss",
        # spec
        "container_spec_cpu_period",
        "container_spec_cpu_quota",
        "container_spec_memory_limit_bytes",
        "container_spec_cpu_shares",
        # threads
        "container_threads",
        "container_threads_max",
    ],
    # Network counters get irate() instead of rate() in query_range.
    "network_metrics": [
        # network
        "container_network_receive_errors_total",
        "container_network_receive_packets_dropped_total",
        "container_network_receive_packets_total",
        "container_network_receive_bytes_total",
        "container_network_transmit_bytes_total",
        "container_network_transmit_errors_total",
        "container_network_transmit_packets_dropped_total",
        "container_network_transmit_packets_total",
    ],
}


def parse_time(start_time, end_time):
    """Normalize a (start, end) pair to ``datetime`` objects.

    Args:
        start_time (Union[int, float, str, datetime]): start of the range;
            either a datetime or a Unix timestamp (numeric, or a numeric
            string such as "1700000000" / "1700000000.5").
        end_time (Union[int, float, str, datetime]): end of the range, same
            accepted forms.

    Returns:
        tuple[datetime, datetime]: (start_time, end_time) as naive
        local-time datetimes (``datetime.fromtimestamp`` uses the local
        timezone).
    """

    def _to_datetime(value):
        # Pass datetimes through unchanged; everything else is treated as a
        # Unix timestamp. float() (instead of int()) generalizes the
        # original to fractional timestamps while accepting every
        # previously-valid input.
        if isinstance(value, datetime):
            return value
        return datetime.fromtimestamp(float(value))

    return _to_datetime(start_time), _to_datetime(end_time)


class MetricConnection:
    """Collects per-pod container metrics from a Prometheus endpoint."""

    def __init__(self, config):
        """
        Args:
            config (dict): settings dict (see METRIC_SETTINGS) providing at
                least "url", "pods", "valid_metrics" and "network_metrics".
        """
        self.config = config
        # disable_ssl=True skips SSL certificate verification for HTTP
        # requests made to the Prometheus host.
        self.client = PrometheusConnect(config["url"], disable_ssl=True)

    def all_metrics(self):
        """Return the server's metric names restricted to config["valid_metrics"]."""
        valid = set(self.config["valid_metrics"])  # O(1) membership per metric
        return [m for m in self.client.all_metrics() if m in valid]

    def _build_query(self, metric_name: str, pod: str) -> str:
        """Build the PromQL query string for one metric/pod pair.

        Cumulative counters (``*_total`` plus a few known cumulative
        series) are wrapped in rate()/irate() over a 5m window; everything
        else is queried as a plain instant selector.
        """
        pod_selector = f"pod=~'{pod}.+'"
        if metric_name.endswith("_total") or metric_name in (
            "container_last_seen",
            "container_memory_cache",
            "container_memory_max_usage_bytes",
        ):
            if metric_name in self.config["network_metrics"]:
                # BUG FIX: the original hard-coded pod='' here, ignoring the
                # requested pod entirely; network counters now use the same
                # pod selector as every other metric.
                return f"irate({metric_name}{{{pod_selector}}}[5m])"
            return f"rate({metric_name}{{{pod_selector}}}[5m])"
        return f"{metric_name}{{{pod_selector}}}"

    def query_range(
        self,
        metric_name: str,
        pod: str,
        start_time: Union[int, datetime, str],
        end_time: Union[int, datetime, str],
        step: int = 1,
    ):
        """Query one metric for one pod over [start_time, end_time].

        Args:
            metric_name: Prometheus metric name.
            pod: pod name prefix (matched as pod=~'<pod>.+').
            start_time / end_time: range bounds, any form parse_time accepts.
            step: query resolution in seconds.

        Returns:
            list[dict]: samples of the form {"time": datetime, "value": float}
            (values rounded to 3 decimals), or a dict with an "error" key
            when the query matched no series.
        """
        start_time, end_time = parse_time(start_time, end_time)
        query = self._build_query(metric_name, pod)

        data_raw = self.client.custom_query_range(
            query, start_time, end_time, step=step
        )

        if not data_raw:
            return {"error": f"No data found for metric {metric_name} and pod {pod}"}
        # Only the first matched series is used; assumes the pod regex
        # selects a single series — TODO confirm against the cluster.
        return [
            {
                "time": datetime.fromtimestamp(int(ts)),
                "value": round(float(value), 3),
            }
            for ts, value in data_raw[0]["values"]
        ]

    def get_query_data(self, start_time, end_time):
        """
        Sequentially query every valid metric for every configured pod.

        Args:
            start_time (Union[int, str, datetime]): range start
            end_time (Union[int, str, datetime]): range end
        Returns:
            list[pd.DataFrame]: one DataFrame per metric with columns
            pod / metric / timestamp / value.
        """
        all_metrics_name = self.all_metrics()
        all_dataframes_list = []
        length = len(all_metrics_name)
        for nums, metric in enumerate(tqdm(all_metrics_name, desc="总进度: ")):
            print("=" * 63, f"已完成 {nums}/{length}", "=" * 63)
            pod_list = []
            timestamp_list = []
            value_list = []
            for pod in tqdm(self.config["pods"], desc=f"次进度 {metric}: "):
                tqdm.write(f"cur_pods: {pod}.")
                data = self.query_range(
                    metric, pod, start_time=start_time, end_time=end_time
                )

                # query_range returns a dict only on "no data"; skip it.
                if isinstance(data, dict) and "error" in data:
                    continue
                for d in data:
                    # BUG FIX: record the pod per row — the original stamped
                    # every row with the LAST pod of the loop even though the
                    # lists accumulate samples from all pods.
                    pod_list.append(pod)
                    timestamp_list.append(int(d["time"].timestamp()))
                    value_list.append(d["value"])

            dt = pd.DataFrame(
                {
                    "pod": pod_list,
                    "metric": metric,
                    "timestamp": timestamp_list,
                    "value": value_list,
                }
            )

            all_dataframes_list.append(dt)
        return all_dataframes_list

    # Used for bulk metric download.
    def metric_extract(self, start_time, end_time):
        """Return all queried data concatenated into a single DataFrame."""
        all_data_list = self.get_query_data(start_time, end_time)
        return pd.concat(all_data_list, ignore_index=True)

    # Used by algorithms to fetch metric data as one CSV per metric.
    def export_all_metrics(self, start_time, end_time, save_path):
        """Write each metric's (timestamp, value) series to
        save_path/kpi_<metric>.csv."""
        os.makedirs(save_path, exist_ok=True)

        for dt in self.get_query_data(start_time, end_time):
            if dt.empty:
                # No pod returned data for this metric; .iloc[0] below
                # would raise, so skip it.
                continue
            metric_name = dt["metric"].iloc[0]
            dt[["timestamp", "value"]].to_csv(
                os.path.join(save_path, f"kpi_{metric_name}.csv")
            )
