import json
import pandas as pd
import numpy as np

from sqlalchemy.orm import Session
from core.response import resp_500
from core.logger import logger
from datetime import datetime, timedelta

from common.welab_k8s import init_k8s
from common.welab_k8s.k8s_node import K8sNode
from app.service_quality.models.qos_server_info import QosServiceInfo
from app.service_quality.models.qos_server_date import QosServiceDate
from app.service_quality.models.qos_host_date import QosHostDate

from app.bill.handlers.bill_instance_handler import BillInstanceHandler

from common.welab_k8s.k8s_prometheus import K8sPrometheus



from setting import config


class QosPaintDiagramHandler():
    """Builds QoS "paint diagram" (chart) data.

    Provides oversell statistics, per-day resource overviews and
    prometheus time-series payloads for services and hosts.
    """
    def __init__(self):
        # NOTE(review): init_k8s() presumably prepares the shared k8s client
        # configuration used by the clients below — confirm before reordering.
        init_k8s()
        self.k8s_prometheus = K8sPrometheus()  # used for prometheus range queries
        self.k8s_node = K8sNode()  # used to list k8s node allocatable resources


    def get_container_server_resources_oversell(self, db: Session):
        """Compute CPU/memory oversell statistics for container services.

        Sums the allocatable CPU/memory of the k8s business nodes
        (label ``zone=webapp``) and the limit*replica resources of all
        container services, then derives the oversell ratios
        (service limits / node allocatable, in percent).

        Args:
            db: SQLAlchemy session used by the service-info query.

        Returns:
            dict with cpu/mem alloc, server totals and over-ratios,
            or False when node/service data is unavailable.
        """
        oversell_info = {
            "cpu_alloc": 0,
            "mem_alloc": 0,
            "cpu_server": 0,
            "mem_server": 0
        }

        # Sum allocatable resources over the k8s business nodes
        k8s_node_infos = self.k8s_node.get_k8s_node_info()
        if not k8s_node_infos:
            logger.error("get k8s node info failed")
            return False

        for k8s_node_info in k8s_node_infos:
            # only nodes labelled zone=webapp carry business workloads
            if k8s_node_info["labels"].get("zone") != "webapp":
                continue
            oversell_info["cpu_alloc"] += k8s_node_info["cpu_alloc"]
            oversell_info["mem_alloc"] += k8s_node_info["mem_alloc"]

        # Sum the limit resources granted to business services
        result = QosServiceInfo.filter_by_field_search(db, 1, 2000, {"all": None}, {"name": "asc"})
        if not result:
            logger.error("get server info, result is null")
            return False

        for server_info in result["items"]:
            if server_info["classes"] not in ("k8s_deployment", "k8s_statefulset"):
                # skip non-container services
                continue
            oversell_info["cpu_server"] += server_info["cpu_limit"] * server_info["number"]
            oversell_info["mem_server"] += server_info["mem_limit"] * server_info["number"]

        for key in ("cpu_server", "cpu_alloc", "mem_server", "mem_alloc"):
            oversell_info[key] = round(oversell_info[key], 2)

        # Bug fix: without this guard a cluster with no webapp nodes raised
        # ZeroDivisionError when computing the ratios below.
        if not oversell_info["cpu_alloc"] or not oversell_info["mem_alloc"]:
            logger.error("no allocatable cpu/mem found on webapp nodes")
            return False

        oversell_info["cpu_over_ratio"] = round(oversell_info["cpu_server"] / oversell_info["cpu_alloc"] * 100, 2)
        oversell_info["mem_over_ratio"] = round(oversell_info["mem_server"] / oversell_info["mem_alloc"] * 100, 2)

        return oversell_info


    def set_server_cpu_aggregate_default(self, quality_date, datas: list):
        """Pad the CPU-level aggregation with zero-count buckets.

        Any of the three fixed CPU-usage levels missing from ``datas`` is
        appended with ``server_count`` 0 so charts always show every level.
        Extends ``datas`` in place and returns it.
        """
        all_levels = ("高(cpu平均使用率>=30%)", "中(cpu平均使用率>=5%)", "低(cpu平均使用率<5%)")
        present = {row["cpu_level"] for row in datas}
        datas.extend(
            {"quality_date": quality_date, "cpu_level": level, "server_count": 0}
            for level in all_levels
            if level not in present
        )
        return datas



    def set_server_mem_aggregate_default(self, quality_date, data: list):
        """Pad the memory-level aggregation with zero-count buckets.

        Any of the three fixed memory-usage levels missing from ``data`` is
        appended with ``server_count`` 0 so charts always show every level.
        Extends ``data`` in place and returns it.
        """
        all_levels = ("高(mem平均使用率>=50%)", "中(mem平均使用率>=10%)", "低(mem平均使用率<10%)")
        present = {row["mem_level"] for row in data}
        data.extend(
            {"quality_date": quality_date, "mem_level": level, "server_count": 0}
            for level in all_levels
            if level not in present
        )
        return data


    def set_server_livability_aggregate(self, quality_date, data: list):
        """Split the livability aggregation into normal vs abnormal.

        Rows with livability == 100 are relabelled "正常" (mutated in
        place) and kept; all other rows are collapsed into one trailing
        "异常" row whose server_count is their sum.
        """
        abnormal_total = sum(row["server_count"] for row in data if row["livability"] != 100)
        normal_rows = []
        for row in data:
            if row["livability"] == 100:
                row["livability"] = "正常"
                normal_rows.append(row)
        normal_rows.append({
            "quality_date": quality_date,
            "livability": "异常",
            "server_count": abnormal_total,
        })
        return normal_rows




    def get_server_resources_overview(self, db: Session, quality_date: str, classes: str):
        """Build the per-day resource overview for services.

        Collects CPU / memory usage-level aggregations (padded with
        zero-count default buckets), the lowest-usage top-10 lists and the
        livability (online-rate) aggregation plus its top-10 list.

        Returns the overview dict, or False when any underlying query fails.
        """
        overview = {}
        # service count aggregated by CPU usage level
        result, data = QosServiceDate.get_server_date_aggregate(db, quality_date, classes, "cpu_level")
        if not result:
            logger.error(f"get server date aggregate:cpu_level failed")
            return False
        overview["cpu_level_agg"] = self.set_server_cpu_aggregate_default(quality_date, data)

        # top-10 services with the lowest CPU usage
        result, data = QosServiceDate.get_server_date_sort(db, quality_date, classes, "cpu_usage_avg")
        if not result:
            logger.error(f"get server date sort:cpu_usage_avg failed")
            return False
        overview["cpu_usage_sort"] = data

        # service count aggregated by memory usage level
        result, data = QosServiceDate.get_server_date_aggregate(db, quality_date, classes, "mem_level")
        if not result:
            logger.error(f"get server date aggregate:mem_level failed")
            return False
        overview["mem_level_agg"] = self.set_server_mem_aggregate_default(quality_date, data)

        # top-10 services with the lowest memory usage
        result, data = QosServiceDate.get_server_date_sort(db, quality_date, classes, "mem_usage_avg")
        if not result:
            logger.error(f"get server date sort:mem_usage_avg failed")
            return False
        overview["mem_usage_sort"] = data

        # service count aggregated by livability (online rate)
        result, data = QosServiceDate.get_server_date_aggregate(db, quality_date, classes, "livability")
        if not result:
            logger.error(f"get server date aggregate:livability failed")
            return False
        overview["livability_agg"] = self.set_server_livability_aggregate(quality_date,data)

        # top-10 services with the lowest livability
        result, data = QosServiceDate.get_server_date_sort_livability(db, quality_date, classes)
        if not result:
            logger.error(f"get server date sort:livability failed")
            return False
        overview["livability_sort"] = data

        logger.info(f"get server resources overview success")
        return overview

    # --- host / big-data cluster monitoring helpers below ---







    def set_host_cpu_aggregate_default(self, quality_date, datas: list):
        """Pad the host CPU-level aggregation with zero-count buckets.

        Any of the three fixed CPU-usage levels missing from ``datas`` is
        appended with ``host_count`` 0 so charts always show every level.
        Extends ``datas`` in place and returns it.
        """
        all_levels = ("高(cpu平均使用率>=30%)", "中(cpu平均使用率>=5%)", "低(cpu平均使用率<5%)")
        present = {row["cpu_level"] for row in datas}
        datas.extend(
            {"quality_date": quality_date, "cpu_level": level, "host_count": 0}
            for level in all_levels
            if level not in present
        )
        return datas



    def set_host_mem_aggregate_default(self, quality_date, datas: list):
        """Pad the host memory-level aggregation with zero-count buckets.

        Any of the three fixed memory-usage levels missing from ``datas`` is
        appended with ``host_count`` 0 so charts always show every level.
        Extends ``datas`` in place and returns it.
        """
        all_levels = ("高(mem平均使用率>=50%)", "中(mem平均使用率>=10%)", "低(mem平均使用率<10%)")
        present = {row["mem_level"] for row in datas}
        datas.extend(
            {"quality_date": quality_date, "mem_level": level, "host_count": 0}
            for level in all_levels
            if level not in present
        )
        return datas


    def set_host_livability_aggregate(self, quality_date, data: list):
        """Split the host livability aggregation into normal vs abnormal.

        Rows with livability == 100 are relabelled "正常" (mutated in
        place) and kept; all other rows are collapsed into one trailing
        "异常" row whose host_count is their sum.
        """
        abnormal_total = sum(row["host_count"] for row in data if row["livability"] != 100)
        normal_rows = []
        for row in data:
            if row["livability"] == 100:
                row["livability"] = "正常"
                normal_rows.append(row)
        normal_rows.append({
            "quality_date": quality_date,
            "livability": "异常",
            "host_count": abnormal_total,
        })
        return normal_rows




    def get_host_resources_overview(self, db: Session, quality_date: str, classes: str):
        """Build the per-day resource overview for hosts.

        Collects CPU / memory usage-level aggregations (padded with
        zero-count default buckets), the lowest-usage top-10 lists and the
        livability (online-rate) aggregation plus its top-10 list.

        Returns the overview dict, or False when any underlying query fails.
        """
        overview = {}
        # host count aggregated by CPU usage level
        result, data = QosHostDate.get_host_date_aggregate(db, quality_date, classes, "cpu_level")
        if not result:
            logger.error(f"get host date aggregate:cpu_level failed")
            return False
        overview["cpu_level_agg"] = self.set_host_cpu_aggregate_default(quality_date, data)

        # top-10 hosts with the lowest CPU usage
        result, data = QosHostDate.get_host_date_sort(db, quality_date, classes, "cpu_usage_avg")
        if not result:
            logger.error(f"get host date sort:cpu_usage_avg failed")
            return False
        overview["cpu_usage_sort"] = data

        # host count aggregated by memory usage level
        result, data = QosHostDate.get_host_date_aggregate(db, quality_date, classes, "mem_level")
        if not result:
            logger.error(f"get host date aggregate:mem_level failed")
            return False
        overview["mem_level_agg"] = self.set_host_mem_aggregate_default(quality_date, data)

        # top-10 hosts with the lowest memory usage
        result, data = QosHostDate.get_host_date_sort(db, quality_date, classes, "mem_usage_avg")
        if not result:
            logger.error(f"get host date sort:mem_usage_avg failed")
            return False
        overview["mem_usage_sort"] = data

        # host count aggregated by livability (online rate)
        result, data = QosHostDate.get_host_date_aggregate(db, quality_date, classes, "livability")
        if not result:
            logger.error(f"get host date aggregate:livability failed")
            return False
        overview["livability_agg"] = self.set_host_livability_aggregate(quality_date, data)

        # top-10 hosts with the lowest livability
        result, data = QosHostDate.get_host_date_sort_livability(db, quality_date, classes)
        if not result:
            logger.error(f"get host date sort:livability failed")
            return False
        overview["livability_sort"] = data

        logger.info(f"get host resources overview success")
        return overview


    def get_timestamp_range(self, time_range):
        """Map a named time range to (start_ts, end_ts, prometheus step).

        Fix: the original sampled ``datetime.now()`` twice (once for the
        end, once for the start), so the window width drifted by however
        long the elif chain took; here the clock is read once.

        Args:
            time_range: one of seven_day/three_day/one_day/twelve_hours/
                six_hours/four_hours/two_hours; anything else falls back
                to the last hour.

        Returns:
            (start_timestamp, end_timestamp, step) with float unix
            timestamps and the query-resolution step string.
        """
        # (window width, query resolution) per supported range name
        windows = {
            "seven_day": (timedelta(days=7), '168m'),
            "three_day": (timedelta(days=3), '72m'),
            "one_day": (timedelta(days=1), '24m'),
            "twelve_hours": (timedelta(hours=12), '12m'),
            "six_hours": (timedelta(hours=6), '6m'),
            "four_hours": (timedelta(hours=4), '4m'),
            "two_hours": (timedelta(hours=2), '2m'),
        }
        # default: one hour
        delta, step = windows.get(time_range, (timedelta(hours=1), '1m'))

        now = datetime.now()
        return (now - delta).timestamp(), now.timestamp(), step



    def handle_prom_data_float(self, data):
        """
        转换prometheus的数据格式，value值换为float类型
        """
        data["time"] = datetime.strftime(datetime.fromtimestamp(data["time"]), "%Y-%m-%d %H:%M")
        data["value"] = round(float(data["value"]), 2)
        return data


    def handle_prom_data_byte(self, data):
        """
        转换prometheus的数据格式，value值是byte换为M
        """
        data["time"] = datetime.strftime(datetime.fromtimestamp(data["time"]), "%Y-%m-%d %H:%M")
        data["value"] = round(float(data["value"])/1024/1024, 2)
        return data



    def get_host_prom_for_cpu(self, instance_ip: str, start_timestamp: int, end_timestamp: int, step: str):
        """Fetch the CPU-usage time series of one host from prometheus.

        Returns a list of {"metric": {...}, "values": [[time_str, pct], ...]}
        entries (one per returned series), or False when the query fails.
        """
        expr = "PrometheusRecordECSHostCpuUsage{" + f"instance='{instance_ip}:9100'" + "}"
        ok, series_list = self.k8s_prometheus.query_range_prometheus_data(expr, start_timestamp, end_timestamp, step)
        if not ok:
            logger.error(f"cpu_usage_expr:{expr}, query range prometheus data failed")
            return False

        formatted = []
        for series in series_list:
            # normalize every [timestamp, raw_value] sample via the shared helper
            points = []
            for ts, raw in series["values"]:
                sample = self.handle_prom_data_float({"time": ts, "value": raw})
                points.append([sample["time"], sample["value"]])
            formatted.append({
                "metric": {
                    "metric_name": "cpu",
                    "host_name": series["metric"]["i_host_name"],
                    "host_ip": series["metric"]["instance"].replace(":9100", ""),
                    "step": step
                },
                "values": points
            })

        return formatted



    def get_host_prom_for_mem(self, instance_ip: str, start_timestamp: int, end_timestamp: int, step: str):
        """Fetch the memory-usage time series of one host from prometheus.

        Returns a list of {"metric": {...}, "values": [[time_str, pct], ...]}
        entries (one per returned series), or False when the query fails.
        """
        expr = "PrometheusRecordECSHostMemUsage{" + f"instance='{instance_ip}:9100'" + "}"
        ok, series_list = self.k8s_prometheus.query_range_prometheus_data(expr, start_timestamp, end_timestamp, step)
        if not ok:
            logger.error(f"mem_usage_expr:{expr}, query range prometheus data failed")
            return False

        formatted = []
        for series in series_list:
            # normalize every [timestamp, raw_value] sample via the shared helper
            points = []
            for ts, raw in series["values"]:
                sample = self.handle_prom_data_float({"time": ts, "value": raw})
                points.append([sample["time"], sample["value"]])
            formatted.append({
                "metric": {
                    "metric_name": "mem",
                    "host_name": series["metric"]["i_host_name"],
                    "host_ip": series["metric"]["instance"].replace(":9100", ""),
                    "step": step
                },
                "values": points
            })

        return formatted



    def get_host_prom_for_diskio(self, instance_ip: str, start_timestamp: int, end_timestamp: int, step: str):
        """Fetch the disk-IO time series of one host from prometheus.

        Returns a list of {"metric": {...}, "values": [[time_str, float], ...]}
        entries (one per device series), or False when the query fails.
        """
        expr = "PrometheusRecordECSHostDiskIO{" + f"instance='{instance_ip}:9100'" + "}"
        ok, series_list = self.k8s_prometheus.query_range_prometheus_data(expr, start_timestamp, end_timestamp, step)
        if not ok:
            logger.error(f"diskio_expr:{expr}, query range prometheus data failed")
            return False

        formatted = []
        for series in series_list:
            # normalize every [timestamp, raw_value] sample via the shared helper
            points = []
            for ts, raw in series["values"]:
                sample = self.handle_prom_data_float({"time": ts, "value": raw})
                points.append([sample["time"], sample["value"]])
            formatted.append({
                "metric": {
                    "metric_name": "diskio",
                    "host_name": series["metric"]["i_host_name"],
                    "host_ip": series["metric"]["instance"].replace(":9100", ""),
                    "device": series["metric"]["device"],
                    "step": step
                },
                "values": points
            })

        return formatted




    def get_host_prom_for_diskread(self, instance_ip: str, start_timestamp: int, end_timestamp: int, step: str):
        """Fetch the disk read-bytes time series of one host from prometheus.

        Values are converted from bytes to MiB by the shared byte helper.
        Returns a list of {"metric": {...}, "values": [[time_str, mib], ...]}
        entries (one per device series), or False when the query fails.
        """
        expr = "PrometheusRecordECSHostDiskReadBytes{" + f"instance='{instance_ip}:9100'" + "}"
        ok, series_list = self.k8s_prometheus.query_range_prometheus_data(expr, start_timestamp, end_timestamp, step)
        if not ok:
            logger.error(f"diskread_expr:{expr}, query range prometheus data failed")
            return False

        formatted = []
        for series in series_list:
            # normalize every [timestamp, raw_value] sample via the shared helper
            points = []
            for ts, raw in series["values"]:
                sample = self.handle_prom_data_byte({"time": ts, "value": raw})
                points.append([sample["time"], sample["value"]])
            formatted.append({
                "metric": {
                    "metric_name": "diskread",
                    "host_name": series["metric"]["i_host_name"],
                    "host_ip": series["metric"]["instance"].replace(":9100", ""),
                    "device": series["metric"]["device"],
                    "step": step
                },
                "values": points
            })

        return formatted




    def get_host_prom_for_diskwritten(self, instance_ip: str, start_timestamp: int, end_timestamp: int, step: str):
        """Fetch the disk written-bytes time series of one host from prometheus.

        Values are converted from bytes to MiB by the shared byte helper.
        Returns a list of {"metric": {...}, "values": [[time_str, mib], ...]}
        entries (one per device series), or False when the query fails.
        """
        expr = "PrometheusRecordECSHostDiskWrittenBytes{" + f"instance='{instance_ip}:9100'" + "}"
        ok, series_list = self.k8s_prometheus.query_range_prometheus_data(expr, start_timestamp, end_timestamp, step)
        if not ok:
            logger.error(f"diskwritten_expr:{expr}, query range prometheus data failed")
            return False

        formatted = []
        for series in series_list:
            # normalize every [timestamp, raw_value] sample via the shared helper
            points = []
            for ts, raw in series["values"]:
                sample = self.handle_prom_data_byte({"time": ts, "value": raw})
                points.append([sample["time"], sample["value"]])
            formatted.append({
                "metric": {
                    "metric_name": "diskwritten",
                    "host_name": series["metric"]["i_host_name"],
                    "host_ip": series["metric"]["instance"].replace(":9100", ""),
                    "device": series["metric"]["device"],
                    "step": step
                },
                "values": points
            })

        return formatted




    def get_host_prom_by_cluster(self, db: Session, cluster: str, time_range: str, prom_metrics: str):
        """Collect one prometheus metric for every host of a cluster.

        ``prom_metrics`` selects a ``get_host_prom_for_<metric>`` method
        (cpu / mem / diskio / diskread / diskwritten). Only the two
        big-data clusters resolve instances; any other cluster name yields
        an empty result list.

        Returns:
            (True, [per-instance series list]) on success,
            (False, None) on an unknown metric or a failed query.
        """
        instances_info = []
        instances_prom = []

        if cluster in ["大数据离线集群","大数据实时集群"]:
            bill_instance_handler = BillInstanceHandler()
            instances_info = bill_instance_handler.get_instance_ip_by_bigdata_cluster(db, cluster)

        start_timestamp, end_timestamp, step = self.get_timestamp_range(time_range)

        # Robustness fix: an unknown metric name previously raised
        # AttributeError; report it through the method's normal error path.
        function_exec = getattr(self, f"get_host_prom_for_{prom_metrics}", None)
        if function_exec is None:
            logger.error(f"unsupported prom_metrics:{prom_metrics}")
            return False, None

        for instance_info in instances_info:
            prom_data = function_exec(instance_info["intranet_ip"], start_timestamp, end_timestamp, step)
            if not prom_data:
                logger.error(f"get_host_prom_for_{prom_metrics} failed")
                return False, None

            instances_prom.append(prom_data)

        return True, instances_prom
