import json
import pandas as pd
import numpy as np

from sqlalchemy.orm import Session
from core.response import resp_500
from core.logger import logger
from datetime import date, datetime, timedelta


from app.service_quality.models.qos_server_date import QosServiceDate
from app.service_quality.models.qos_server_info import QosServiceInfo
# from app.monitor_metrics.models.prom_host_info import PromHostInfo
from app.celery_task.models.celery_task import CeleryTask
from common.welab_k8s.k8s_prometheus import K8sPrometheus
from common.utils import CommonTimeUtils

from setting import config


class QosServerDateHandler():
    """Handler for per-date application service quality (QoS) monitoring data.

    Queries Prometheus for CPU / memory / livability metrics of services in
    the configured k8s namespace, aggregates daily statistics, and persists
    them through the QosServiceDate model.  Also exposes the read APIs used
    by the web layer and the Celery sync-task entry points.
    """

    def __init__(self):
        # Prometheus query client plus the namespace the QoS services run in.
        self.k8s_prometheus = K8sPrometheus()
        self.k8s_qos_namespace = config.K8S_QOS_NAMESPACE

    def get_server_date_search(self, db: Session, quality_date: str, page: int, per_page: int, field: str, value: str):
        """Fuzzy-search one day's service monitoring records by a table field.

        Returns (True, result) on success and (False, None) when nothing
        matches.
        NOTE(review): the except branch returns resp_500(message) -- a single
        response object, not a (bool, data) tuple.  Callers that always unpack
        two values would raise on that path; confirm the intended contract.
        """
        try:
            result = QosServiceDate.get_server_date_search(db, quality_date, page, per_page, field, value)
            if not result:
                logger.error(f"get server date search failed, quality_date:{quality_date} {field}:{value}")
                return False, None
        except Exception as e:
            message = f"get server date search error, quality_date:{quality_date} {field}:{value}"
            logger.exception(f"{message} => {e}")
            return resp_500(message)

        return True, result

    def get_server_date_match(self, db: Session, quality_date: str, page: int, per_page: int, field: str, value: str):
        """Exact-match one day's service monitoring records by a table field.

        Returns (True, result) on success and (False, None) when nothing
        matches; the except branch returns resp_500(message) -- same mixed
        return contract as get_server_date_search (NOTE there).
        """
        try:
            field_info = {"quality_date": quality_date, field: value}
            result = QosServiceDate.filter_by_field_match(db, page, per_page, field_info)
            if not result:
                logger.error(f"get server date match, result is null => {quality_date}:{field}:{value}")
                return False, None
        except Exception as e:
            message = f"get server date error, {quality_date}:{field}:{value}"
            logger.exception(f"{message} => {e}")
            return resp_500(message)

        return True, result

    def get_server_date_week(self, db: Session, server_name: str):
        """Fetch the most recent 7 days of monitoring records for one service."""
        try:
            # The previous seven calendar dates, yesterday first.
            quality_dates = [
                (date.today() - timedelta(days=day)).strftime("%Y-%m-%d")
                for day in range(1, 8)
            ]

            result, data = QosServiceDate.get_server_date_week(db, server_name, quality_dates)
            if not result:
                logger.error(f"get server date week, result is null => server:{server_name}")
                return False, None
        except Exception as e:
            message = f"get server date week error, server:{server_name}"
            logger.exception(f"{message} => {e}")
            return resp_500(message)

        return True, data

    # def get_prometheus_data_ecs(self, db: Session, server_date: dict):
    #     """
    #     获取物理机部署的应用服务prometheus监控数据库
    #     """
    #     start_timestamp, end_timestamp = CommonTimeUtils.get_timestamp_oneday(server_date["quality_date"])
    #     prometheus_data = {}

    #     # 获取物理机部署的服务cpu使用率
    #     ecs_cpu_usage_expr = "PrometheusRecordECSHostCpuUsage{" + f"i_host_name=~'{'|'.join(server_date['deploy_nodes'])}'" + "}"
    #     result, ecs_cpu_usage_datas = self.k8s_prometheus.query_range_prometheus_data(ecs_cpu_usage_expr, start_timestamp, end_timestamp)
    #     if not result:
    #         logger.error(f"ecs_cpu_usage_expr:{ecs_cpu_usage_expr}, query range prometheus data failed")
    #     else:
    #         prometheus_data["cpu_prom_data"] = {
    #             ecs_cpu_usage_data["metric"]["i_host_name"]: ecs_cpu_usage_data["values"]
    #             for ecs_cpu_usage_data in ecs_cpu_usage_datas
    #         }

    #         prometheus_data["cpu_df_data"] = {
    #             host_name: self.k8s_prometheus.handle_values_to_float(values)
    #             for host_name, values in prometheus_data["cpu_prom_data"].items()
    #         }

    #     # 获取物理机部署的服务mem使用率
    #     ecs_mem_usage_expr = "PrometheusRecordECSHostMemUsage{" + f"i_host_name=~'{'|'.join(server_date['deploy_nodes'])}'" + "}"
    #     result, ecs_mem_usage_datas = self.k8s_prometheus.query_range_prometheus_data(ecs_mem_usage_expr, start_timestamp, end_timestamp)
    #     if not result:
    #         logger.error(f"ecs_mem_usage_expr:{ecs_mem_usage_expr}, query range prometheus data failed or None")
    #     else:
    #         prometheus_data["mem_prom_data"] = {
    #             ecs_mem_usage_data["metric"]["i_host_name"]: ecs_mem_usage_data["values"]
    #             for ecs_mem_usage_data in ecs_mem_usage_datas
    #         }

    #         prometheus_data["mem_df_data"] = {
    #             host_name: self.k8s_prometheus.handle_values_to_float(values)
    #             for host_name, values in prometheus_data["mem_prom_data"].items()
    #         }

    #     # 获取物理机部署的服务存活率
    #     # 部署的物理服务器节点和端口
    #     instances = []
    #     server_info = QosServiceInfo.get_object_info_by_name(db, server_date["name"])
    #     if server_info['port'] != 0:
    #         for deploy_node in server_info["deploy_nodes"]:
    #             host_object = PromHostInfo.get_object_by_name(db, deploy_node)
    #             if not host_object:
    #                 logger.error(f"get deploy node:{deploy_node} failed")
    #                 continue
    #             instances.append(f"{host_object.ip}:{server_info['port']}")

    #         # 节点上端口探测数据
    #         livability_expr = "sum(probe_success{" + f"instance=~'{'|'.join(instances)}'" + "})"
    #         _, ecs_livability_data = self.k8s_prometheus.query_range_prometheus_data(livability_expr, start_timestamp, end_timestamp)
    #         if not ecs_livability_data:
    #             logger.error(f"livability_expr:{livability_expr}, query range prometheus data failed or None")
    #         else:
    #             # 服务存活率： 100*(1 - 统计探测结果为0的计数占比)
    #             # 结果为0： 每个节点：端口都探测失败，才认为服务完全不可用。
    #             prometheus_data["livability_prom_data"] = ecs_livability_data[0]["values"]
    #             prometheus_data["livability_df_data"] = self.k8s_prometheus.handle_values_to_int(prometheus_data["livability_prom_data"])
    #             df = pd.DataFrame.from_dict({"livability_df_data": prometheus_data["livability_df_data"]}, orient="columns")
    #             value_counts = df["livability_df_data"].value_counts().to_dict()
    #             prometheus_data["livability"] = round(100*(1 - value_counts.get("0", 0) / len(prometheus_data["livability_df_data"])), 2)
    #     else:
    #         # 服务没有端口，默认存活率为100
    #         prometheus_data["livability"] = 100

    #     logger.info(f"ECS server:{server_date['name']} quality_date:{server_date['quality_date']}, query prometheus data success")
    #     return prometheus_data

    def get_prometheus_data_container(self, server_date: dict):
        """Collect one day's Prometheus metrics for a containerized service.

        server_date must carry at least "name", "classes" and "quality_date".
        Returns a dict that may contain raw/parsed cpu and mem series plus a
        computed "livability" percentage; keys for failed queries are simply
        omitted, so callers must read with .get().
        """
        start_timestamp, end_timestamp = CommonTimeUtils.get_timestamp_oneday(server_date["quality_date"])
        prometheus_data = {}

        # CPU usage per pod of the container service.
        pod_cpu_usage_expr = "PrometheusRecordPodCpuUsage{" + f"container='{server_date['name']}'," + f"namespace='{self.k8s_qos_namespace}'" + "}"
        result, pods_cpu_usage_data = self.k8s_prometheus.query_range_prometheus_data(pod_cpu_usage_expr, start_timestamp,
                                                                              end_timestamp)
        if not result:
            logger.error(f"pod_cpu_usage_expr:{pod_cpu_usage_expr}, query range prometheus data failed")
        else:
            prometheus_data["cpu_prom_data"] = {
                pod_cpu_usage_data["metric"]["pod"]: pod_cpu_usage_data["values"]
                for pod_cpu_usage_data in pods_cpu_usage_data
            }

            prometheus_data["cpu_df_data"] = {
                pod_name: self.k8s_prometheus.handle_values_100_to_float(values)
                for pod_name, values in prometheus_data["cpu_prom_data"].items()
            }

        # Memory usage per pod of the container service.
        pod_mem_usage_expr = "PrometheusRecordPodMemUsage{" + f"container='{server_date['name']}'," + f"namespace='{self.k8s_qos_namespace}'" + "}"
        result, pods_mem_usage_data = self.k8s_prometheus.query_range_prometheus_data(pod_mem_usage_expr, start_timestamp,
                                                                              end_timestamp)
        if not result:
            logger.error(f"pod_mem_usage_expr:{pod_mem_usage_expr}, query range prometheus data failed")
        else:
            prometheus_data["mem_prom_data"] = {
                pod_mem_usage_data["metric"]["pod"]: pod_mem_usage_data["values"]
                for pod_mem_usage_data in pods_mem_usage_data
            }

            prometheus_data["mem_df_data"] = {
                pod_name: self.k8s_prometheus.handle_values_to_float(values)
                for pod_name, values in prometheus_data["mem_prom_data"].items()
            }

        # Livability series: the query matches only timestamps where the
        # service was down, so each returned sample is an "unavailable" point.
        livability_data = None
        livability_expr = None
        result = None
        if server_date["classes"] == "k8s_deployment":
            # A deployment counts as down when spec replicas != 0 but
            # available replicas == 0.
            livability_expr = "kube_deployment_status_replicas_available{job='kube-state-metrics'," \
                              + f"deployment='{server_date['name']}'," + f"namespace='{self.k8s_qos_namespace}'" \
                              + "} == 0 and kube_deployment_spec_replicas{job='kube-state-metrics'," \
                              + f"deployment='{server_date['name']}'," + f"namespace='{self.k8s_qos_namespace}'" \
                              + "} != 0"
            result, livability_data = self.k8s_prometheus.query_range_prometheus_data(livability_expr, start_timestamp, end_timestamp)
        elif server_date["classes"] == "k8s_statefulset":
            # A statefulset counts as down when ready replicas == 0.
            livability_expr = "kube_statefulset_status_replicas_ready{job='kube-state-metrics'," \
                              + f"statefulset='{server_date['name']}'," + f"namespace='{self.k8s_qos_namespace}'" \
                              + "} == 0"
            result, livability_data = self.k8s_prometheus.query_range_prometheus_data(livability_expr, start_timestamp, end_timestamp)

        if not result:
            logger.error(f"livability_expr:{livability_expr}, query range prometheus data failed or None")
        else:
            # availability = 100 - down-sample-count / samples-per-day
            # (one sample every 5 minutes => 288 samples per day).
            # NOTE(review): this subtracts the ratio, not 100*ratio -- a fully
            # down day yields ~99, not 0.  Confirm whether that is intentional.
            prometheus_data["livability_prom_data"] = livability_data[0]["values"] if livability_data else []
            prometheus_data["livability_df_data"] = self.k8s_prometheus.handle_values_to_float(prometheus_data["livability_prom_data"])
            prometheus_data_len = len(pods_mem_usage_data[0]["values"]) if pods_mem_usage_data else 288
            prometheus_data["livability"] = round(100 - len(prometheus_data["livability_df_data"]) / prometheus_data_len, 2)

        logger.info(f"container server:{server_date['name']} quality_date:{server_date['quality_date']}, query prometheus data success")
        return prometheus_data

    def get_prometheus_data(self, db: Session, server_date: dict):
        """Dispatch Prometheus metric collection by deployment class.

        Containerized services are handled; ECS (bare-metal) collection is
        currently disabled and returns None, as does an unknown class.
        """
        if server_date["classes"] in ["k8s_deployment", "k8s_statefulset"]:
            return self.get_prometheus_data_container(server_date)
        if server_date["classes"] == "ECS服务器":
            # ECS collection disabled (see commented-out get_prometheus_data_ecs).
            # result = self.get_prometheus_data_ecs(db, server_date)
            return None
        logger.error(f"classes:{server_date['classes']} is invalid")
        return None

    def get_server_monitor_data(self, db: Session, server_date_infos: list):
        """Enrich each server_date dict in-place with daily CPU/mem/livability stats.

        For each service: fetches its Prometheus data, then records livability
        plus max / min / median (quantile 0.5) of CPU and memory usage and a
        coarse usage level label.  Returns True on success, False on error.
        """
        try:
            for server_date in server_date_infos:
                prometheus_data = self.get_prometheus_data(db, server_date)
                if not prometheus_data:
                    logger.error(f"server:{server_date['name']}, get prometheus data failed")
                    continue

                # Service availability percentage (0 when not computed).
                server_date["livability"] = prometheus_data.get("livability", 0)

                # CPU max / min / median across all pods of the service.
                if "cpu_prom_data" in prometheus_data and prometheus_data["cpu_prom_data"]:
                    cpu_usage_data = []
                    for cpu_usages in prometheus_data["cpu_df_data"].values():
                        cpu_usage_data.extend(cpu_usages)

                    df = pd.DataFrame.from_dict({"cpu_usage_data":cpu_usage_data}, orient="columns")
                    server_date["cpu_usage_max"] = df["cpu_usage_data"].max()
                    server_date["cpu_usage_min"] = df["cpu_usage_data"].min()
                    server_date["cpu_usage_avg"] = df["cpu_usage_data"].quantile(q=0.5)
                    server_date["cpu_prom_data"] = prometheus_data["cpu_prom_data"]
                    if server_date["cpu_usage_avg"] >= 30:
                        server_date["cpu_level"] = "高(cpu平均使用率>=30%)"
                    elif server_date["cpu_usage_avg"] >= 5:
                        server_date["cpu_level"] = "中(cpu平均使用率>=5%)"
                    else:
                        server_date["cpu_level"] = "低(cpu平均使用率<5%)"

                # Memory max / min / median across all pods of the service.
                if "mem_prom_data" in prometheus_data and prometheus_data["mem_prom_data"]:
                    mem_usage_data = []
                    for mem_usages in prometheus_data["mem_df_data"].values():
                        mem_usage_data.extend(mem_usages)
                    df = pd.DataFrame.from_dict({"mem_usage_data": mem_usage_data}, orient="columns")
                    server_date["mem_usage_max"] = df["mem_usage_data"].max()
                    server_date["mem_usage_min"] = df["mem_usage_data"].min()
                    server_date["mem_usage_avg"] = df["mem_usage_data"].quantile(q=0.5)
                    server_date["mem_prom_data"] = prometheus_data["mem_prom_data"]
                    if server_date["mem_usage_avg"] >= 50:
                        server_date["mem_level"] = "高(mem平均使用率>=50%)"
                    elif server_date["mem_usage_avg"] >= 10:
                        server_date["mem_level"] = "中(mem平均使用率>=10%)"
                    else:
                        server_date["mem_level"] = "低(mem平均使用率<10%)"

        except Exception as e:
            # logger.exception keeps the traceback, consistent with siblings.
            logger.exception(f"get hosts quantile error=>{e}")
            return False

        logger.info("get hosts quantile success")
        return True

    def task_update_server_date_handle(self, db: Session, data: dict):
        """Sync one day's QoS records: collect metrics, delete, then re-insert.

        data may carry "quality_date"; when absent (the scheduled-task case)
        the previous calendar day is used.  Returns True/False.
        """
        try:
            # Resolve the target date.
            if data and "quality_date" in data and data["quality_date"]:
                quality_date = data["quality_date"]
            else:
                # The cron job passes no date; the nightly run (around 3 a.m.)
                # syncs the previous day's data.
                quality_date = (date.today() - timedelta(days=1)).strftime("%Y-%m-%d")

            # All currently online services.
            result = QosServiceInfo.get_by_container_state(db)
            if not result:
                logger.error("get server info, result is null")
                return False

            server_date_infos = [
                {
                    "quality_date": quality_date,
                    "name": server_info["name"],
                    "classes": server_info["classes"],
                    "department": server_info["department"],
                    "cpu_requests": server_info["cpu_request"],
                    "cpu_limits": server_info["cpu_limit"],
                    "mem_requests": server_info["mem_request"],
                    "mem_limits": server_info["mem_limit"],
                    "number": server_info["number"],
                    "deploy_nodes": server_info["deploy_nodes"]
                }
                for server_info in result
            ]

            # Attach resource-usage statistics to every row.
            result = self.get_server_monitor_data(db, server_date_infos)
            if not result:
                logger.error("get server monitor data failed")
                return False

            # Replace any existing rows for that date, then persist.
            QosServiceDate.delete_by_date(db, quality_date)
            QosServiceDate.saves(db, server_date_infos)
        except Exception as e:
            logger.exception(f"update server date infos error=>{e}")
            return False

        logger.info("update server date infos success")
        return True

    def task_update_server_date_start(self, db: Session, data: dict):
        """Launch the async server-date update task (single instance only).

        Returns a dict describing the queued Celery task, or False when an
        instance of the task is already running.
        """
        # Import here to avoid a circular import with the task module.
        from app.service_quality.task import qos_update_server_date_async

        # Allow only one running instance of this task; drop this guard if
        # concurrent runs ever become acceptable.
        task_name = qos_update_server_date_async.name.split(".")[-1]
        if CeleryTask.is_progress_on_name(db, task_name):
            # Fixed typo: "stared" -> "started".
            logger.error(f"task: {task_name} was started")
            return False

        task = qos_update_server_date_async.delay(data)
        task_data = {
            "name": task_name,
            "tid": task.id,
            "state": task.state
        }

        logger.info(f"add task: {task_name} success")
        return task_data

    def post_host_date_task(self, db: Session, data: dict):
        """API entry: start the async sync task for service monitoring data.

        Returns the task descriptor dict on success, False when the task is
        already running, and resp_500(message) on exception (same mixed
        contract noted on get_server_date_search).
        """
        try:
            result = self.task_update_server_date_start(db, data)
            if not result:
                logger.error(f"start task:update_server_date failed, data:{data}")
                return False

        except Exception as e:
            message = f"start task:update_server_date error, data:{data}"
            logger.exception(f"{message} => {e}")
            return resp_500(message)

        return result

    def _median_cpu_usage(self, pods_cpu_usage_data):
        """Median of all pod CPU samples, converted via handle_values_100_to_float."""
        cpu_usage_data = []
        for pod_cpu_usage_data in pods_cpu_usage_data:
            values = self.k8s_prometheus.handle_values_100_to_float(pod_cpu_usage_data["values"])
            cpu_usage_data.extend(values)
        df = pd.DataFrame.from_dict({"cpu_usage_data": cpu_usage_data}, orient="columns")
        return df["cpu_usage_data"].quantile(q=0.5)

    def get_container_cpu_change(self, db: Session, quality_date: str):
        """Report container CPU usage change between 11-12h and 12-13h.

        Compares each online container service's median CPU usage in the two
        hourly windows of quality_date and writes the result to an Excel
        file.  Returns True on success, False on any query failure.
        """
        # All currently online container services.
        server_infos = QosServiceInfo.get_by_container_state(db)
        if not server_infos:
            logger.error("get server info, result is null")
            return False

        time1_start = datetime.strptime(f"{quality_date} 11:00:00", "%Y-%m-%d %H:%M:%S").timestamp()
        time1_end = datetime.strptime(f"{quality_date} 12:00:00", "%Y-%m-%d %H:%M:%S").timestamp()
        time2_start = datetime.strptime(f"{quality_date} 12:00:00", "%Y-%m-%d %H:%M:%S").timestamp()
        time2_end = datetime.strptime(f"{quality_date} 13:00:00", "%Y-%m-%d %H:%M:%S").timestamp()

        server_date = []
        for server_info in server_infos:
            server_data = {
                "name": server_info["name"],
                "cpu_requests": server_info["cpu_request"],
                "cpu_limits": server_info["cpu_limit"],
                "number": server_info["number"],
            }

            # CPU usage of the container service.
            # NOTE(review): namespace is hard-coded to 'prod' here, unlike the
            # other queries which use self.k8s_qos_namespace -- confirm.
            pod_cpu_usage_expr = "PrometheusRecordPodCpuUsage{" + f"container='{server_data['name']}', namespace='prod'" + "}"

            # Window 1 (11:00-12:00), sampled at 1-minute resolution.
            ok, pods_cpu_usage_data = self.k8s_prometheus.query_range_prometheus_data(pod_cpu_usage_expr, time1_start, time1_end, '1m')
            if not ok:
                logger.error("pod_cpu_usage_expr query range prometheus data failed")
                return False
            server_data["cpu_usage_avg1"] = self._median_cpu_usage(pods_cpu_usage_data)

            # Window 2 (12:00-13:00), sampled at 1-minute resolution.
            ok, pods_cpu_usage_data = self.k8s_prometheus.query_range_prometheus_data(pod_cpu_usage_expr, time2_start, time2_end, '1m')
            if not ok:
                logger.error("pod_cpu_usage_expr query range prometheus data failed")
                return False
            server_data["cpu_usage_avg2"] = self._median_cpu_usage(pods_cpu_usage_data)

            server_data["cpu_usage_change"] = server_data["cpu_usage_avg2"] - server_data["cpu_usage_avg1"]
            server_date.append(server_data)

        df = pd.DataFrame(server_date)
        df.to_excel("推广容器服务cpu使用率情况.xlsx", index=False)
        logger.info("write info to_excel success")
        return True
                
                
def main(quality_date: str = "2023-09-22"):
    """Ad-hoc entry point: export the CPU-usage-change report for one date.

    quality_date: target date in "YYYY-MM-DD" form (defaults to the
    previously hard-coded date for backward compatibility).
    """
    from common.database import SessionLocal

    db = SessionLocal()
    try:
        qos_server_date_handler = QosServerDateHandler()
        qos_server_date_handler.get_container_cpu_change(db, quality_date)
    finally:
        # Always release the session, even when the report raises.
        db.close()


if __name__ == "__main__":
    main()
