# import json
# import pandas as pd
# import numpy as np

# from sqlalchemy.orm import Session
# from core.response import resp_500
# from core.logger import logger
# from datetime import date, datetime, timedelta

# from common.welab_k8s.k8s_prometheus import K8sPrometheus
# from app.monitor_metrics.models.prom_host_info import PromHostInfo
# from app.monitor_metrics.models.prom_host_group import PromHostGroup
# from app.service_quality.models.qos_host_date import QosHostDate

# from common.welab_k8s import init_k8s
# from common.welab_k8s.k8s_node import K8sNode
# from common.utils import CommonTimeUtils
# from app.celery_task.models.celery_task import CeleryTask


# class QosHostDateHandler():
#     """  HostDate Handler
#     """
#     def __init__(self):
#         init_k8s()
#         self.k8s_prometheus = K8sPrometheus()
#         self.k8s_node = K8sNode()



#     def get_host_date_search(self, db: Session, quality_date: str, page: int, per_page: int, field: str, value: str):
#         """
#         获取日期内服务器的监控数据， 根据表字段-模糊查询
#         """
#         try:
#             result = QosHostDate.get_host_date_search(db, quality_date, page, per_page, field, value)
#             if not result:
#                 logger.warning(f"get host date search failed, quality_date:{quality_date} {field}:{value}")
#                 return False, None

#         except Exception as e:
#             message = f"get host date search error, quality_date:{quality_date} {field}:{value}"
#             logger.exception(f"{message} => {e}")
#             return resp_500(message)

#         return True, result



#     def get_host_date_match(self, db: Session, quality_date: str, page: int, per_page: int, field: str, value: str):
#         """
#         获取日期内服务器的监控数据， 根据表字段-完全查询
#         """
#         try:
#             field_info = {"quality_date": quality_date, field: value}
#             result = QosHostDate.filter_by_field_match(db, page, per_page, field_info)
#             if not result:
#                 logger.warning(f"get host date match, result is null => {quality_date}:{field}:{value}")
#                 return False, None
#         except Exception as e:
#             message = f"get host date error, {quality_date}:{field}:{value}"
#             logger.exception(f"{message} => {e}")
#             return resp_500(message)

#         return True, result




#     def get_prometheus_data(self, host_name: str, date_info: str=None):
#         """
#         获取服务器的prometheus监控数据
#         """
#         start_timestamp, end_timestamp = CommonTimeUtils.get_timestamp_oneday(date_info)
#         prometheus_data = {}

#         # 获取服务器存活率
#         livability_expr = "up{" +f"i_host_name='{host_name}'" + "}"
#         result, livability_prom_data = self.k8s_prometheus.query_range_prometheus_data(livability_expr, start_timestamp, end_timestamp)
#         if not result:
#             logger.error(f"livability_expr:{livability_expr}, query range prometheus data failed")
#         else:
#             prometheus_data["livability_prom_data"] = livability_prom_data[0]["values"] if livability_prom_data else []
#             prometheus_data["livability_df_data"] = self.k8s_prometheus.handle_values_to_float(prometheus_data["livability_prom_data"])

#         # 获取cpu使用率
#         cpu_usage_expr = "PrometheusRecordECSHostCpuUsage{" +f"i_host_name='{host_name}'" + "}"
#         result, cpu_prom_data = self.k8s_prometheus.query_range_prometheus_data(cpu_usage_expr, start_timestamp, end_timestamp)
#         if not result:
#             logger.error(f"cpu_usage_expr:{cpu_usage_expr}, query range prometheus data failed")
#         else:
#             prometheus_data["cpu_prom_data"] = cpu_prom_data[0]["values"] if cpu_prom_data else []
#             prometheus_data["cpu_df_data"] = self.k8s_prometheus.handle_values_to_float(prometheus_data["cpu_prom_data"])

#         # 获取mem使用率
#         mem_usage_expr = "PrometheusRecordECSHostMemUsage{" +f"i_host_name='{host_name}'" + "}"
#         result, mem_prom_data = self.k8s_prometheus.query_range_prometheus_data(mem_usage_expr, start_timestamp, end_timestamp)
#         if not result:
#             logger.error(f"mem_usage_expr:{mem_usage_expr}, query range prometheus data failed")
#         else:
#             prometheus_data["mem_prom_data"] = mem_prom_data[0]["values"] if mem_prom_data else []
#             prometheus_data["mem_df_data"] = self.k8s_prometheus.handle_values_to_float(prometheus_data["mem_prom_data"])

#         # 计算cpu的规格
#         cpu_spec_expr = "count(node_cpu_seconds_total{" +f"i_host_name='{host_name}'" + ", job='node-exporter', mode='idle'})"
#         cpu_spec_data = self.k8s_prometheus.query_prometheus_data(cpu_spec_expr)
#         if not cpu_spec_data:
#             logger.error(f"cpu_spec_expr:{cpu_spec_expr}, query prometheus data failed or None")
#         else:
#             prometheus_data["cpu_spec_data"] = int(float(cpu_spec_data[0]["value"][1])) if cpu_spec_data else 0

#         # 计算内存的规格
#         mem_spec_expr = "node_memory_MemTotal_bytes{" +f"i_host_name='{host_name}'" + "}/1000/1000/1000"
#         mem_spec_data = self.k8s_prometheus.query_prometheus_data(mem_spec_expr)
#         if not mem_spec_data:
#             logger.error(f"mem_spec_expr:{mem_spec_expr}, query prometheus data failed or None")
#         else:
#             # 内存算出来有差额，采用公式： 取整(内存/cpu) * cpu
#             prometheus_data["mem_spec_data"] = int(int(float(mem_spec_data[0]["value"][1]))/prometheus_data["cpu_spec_data"])*prometheus_data["cpu_spec_data"] if mem_spec_data and cpu_spec_data else 0

#         logger.info(f"host_name:{host_name}, query prometheus data success")
#         return prometheus_data


#     def get_hosts_monitor_data(self, host_infos: list):
#         """
#         获取服务器cpu和内存的中位数
#         """
#         try:
#             # 获取服务器的cpu和内存的监控数据
#             for host_info in host_infos:
#                 prometheus_data = self.get_prometheus_data(host_info['name'], host_info['quality_date'])
#                 if not prometheus_data:
#                     logger.error(f"host_name:{host_info['name']}, get prometheus data failed")
#                     continue

#                 # cpu和内存的规格
#                 if "cpu_spec_data" in prometheus_data:
#                     host_info["cpu_spec"] = prometheus_data["cpu_spec_data"]

#                 if "mem_spec_data" in prometheus_data:
#                     host_info["mem_spec"] = prometheus_data["mem_spec_data"]

#                 # 服务器存活率
#                 if "livability_prom_data" in prometheus_data and prometheus_data["livability_prom_data"]:
#                     df = pd.DataFrame.from_dict({"livability_df_data": prometheus_data["livability_df_data"]}, orient="columns")
#                     host_info["livability"] = round(df["livability_df_data"].sum() / len(prometheus_data["livability_df_data"]) * 100)

#                 # 计算cpu的最大值\最小值\中位数
#                 if "cpu_prom_data" in prometheus_data and prometheus_data["cpu_prom_data"]:
#                     df = pd.DataFrame.from_dict({"cpu_df_data":prometheus_data["cpu_df_data"]}, orient="columns")
#                     host_info["cpu_usage_max"] = df["cpu_df_data"].max()
#                     host_info["cpu_usage_min"] = df["cpu_df_data"].min()
#                     host_info["cpu_usage_avg"] = df["cpu_df_data"].quantile(q=0.5)
#                     host_info["cpu_prom_data"] = prometheus_data["cpu_prom_data"]
#                     if host_info["cpu_usage_avg"] >= 30:
#                         host_info["cpu_level"] = "高(cpu平均使用率>=30%)"
#                     elif host_info["cpu_usage_avg"] >= 5:
#                         host_info["cpu_level"] = "中(cpu平均使用率>=5%)"
#                     else:
#                         host_info["cpu_level"] = "低(cpu平均使用率<5%)"

#                 # 计算mem的最大值\最小值\中位数
#                 if "mem_prom_data" in prometheus_data and prometheus_data["mem_prom_data"]:
#                     df = pd.DataFrame.from_dict({"mem_df_data": prometheus_data["mem_df_data"]}, orient="columns")
#                     host_info["mem_usage_max"] = df["mem_df_data"].max()
#                     host_info["mem_usage_min"] = df["mem_df_data"].min()
#                     host_info["mem_usage_avg"] = df["mem_df_data"].quantile(q=0.5)
#                     host_info["mem_prom_data"] = prometheus_data["mem_prom_data"]
#                     if host_info["mem_usage_avg"] >= 50:
#                         host_info["mem_level"] = "高(mem平均使用率>=50%)"
#                     elif host_info["mem_usage_avg"] >= 10:
#                         host_info["mem_level"] = "中(mem平均使用率>=10%)"
#                     else:
#                         host_info["mem_level"] = "低(mem平均使用率<10%)"

#         except Exception as e:
#             logger.error(f"get hosts quantile error=>{e}")
#             return False

#         logger.info(f"get hosts quantile success")
#         return True


#     def get_host_infos(self, db: Session, quality_date: str):
#         result = PromHostInfo.filter_by_field_search(db, 1, 2000,  {"all": None}, {"name": "asc"})
#         if not result:
#             logger.error(f"get hosts info failed, result is null")
#             return False

#         k8s_node_infos = self.k8s_node.get_k8s_node_info()
#         if not k8s_node_infos:
#             logger.error(f"get k8s node info failed, result is null")
#             return False

#         k8s_node_infos = {
#             k8s_node_info["node_ip"]: k8s_node_info
#             for k8s_node_info in k8s_node_infos
#         }

#         host_infos = []
#         for host_info in result["items"]:
#             host = {
#                 "quality_date": quality_date,
#                 "name": host_info["name"],
#                 "ip": host_info["ip"],
#                 "classes": "ECS服务器"
#             }
#             # 获取部门信息
#             host_group_info = PromHostGroup.get_object_info_by_light(db, host_info["host_group_uuid"])
#             host["department"] = host_group_info["department"]

#             # 是否是k8s的node节点
#             if host["ip"] in k8s_node_infos:
#                 host["classes"] = "k8s集群node节点"

#             host_infos.append(host)

#         return host_infos



#     def task_update_host_date_handle(self, db: Session, data: dict):
#         try:
#             # 获取更新的日期
#             if data and "quality_date" in data and data["quality_date"]:
#                 quality_date = data["quality_date"]
#             else:
#                 # 定时任务不会传日期参数， 每天凌晨2点同步前一天的数据
#                 quality_date = (date.today() - timedelta(days=1)).strftime("%Y-%m-%d")

#             # 获取主机信息列表
#             host_infos = self.get_host_infos(db, quality_date)
#             if not host_infos:
#                 logger.error(f"get hosts info failed")
#                 return False

#             # 获取主机的资源使用情况
#             result = self.get_hosts_monitor_data(host_infos)
#             if not result:
#                 logger.error(f"get hosts info failed")
#                 return False

#             # 删除日期的数据
#             QosHostDate.delete_by_date(db, quality_date)

#             # 写入数据库
#             QosHostDate.saves(db, host_infos)
#         except Exception as e:
#             logger.error(f"update host dates info error=>{e}")
#             return False

#         logger.info(f"update host dates info success")
#         return True





#     def task_update_host_date_start(self, db: Session, data: dict):
#         """
#         启动更新服务器监控数据的异步任务
#         """
#         from app.service_quality.task import qos_update_host_date_async

#         # 判断任务是否已经启动，控制只有一个任务在跑。 如果任务可以多个同时进行，则无需这一步判断。
#         task_name = qos_update_host_date_async.name.split(".")[-1]
#         if CeleryTask.is_progress_on_name(db, task_name):
#             logger.error(f"task: {task_name} was already started")
#             return False

#         # 启动异步任务
#         task = qos_update_host_date_async.delay(data)
#         task_data = {
#             "name": task_name,
#             "tid": task.id,
#             "state": task.state
#         }

#         logger.info(f"add task: {task_name} success")
#         return task_data




#     def post_host_date_task(self, db: Session, data: dict):
#         """
#         启动同步服务器监控数据的异步任务
#         """
#         try:
#             result = self.task_update_host_date_start(db, data)
#             if not result:
#                 logger.error(f"start task:update_host_date failed, data:{data}")
#                 return False

#         except Exception as e:
#             message = f"start task:update_host_date error, data:{data}"
#             logger.exception(f"{message} => {e}")
#             return resp_500(message)

#         return result

