from __base__ import set_django_path

# NOTE(review): presumably configures the Django settings path so the
# project imports below (lucommon, k8s) resolve — must run before them; confirm.
set_django_path()

from collections import defaultdict
from typing import List, Dict
from lucommon.logger import lu_logger
from k8s.core_api import CoreAPI
import requests
import random

"""
根据节点资源来重新调度pod

"""

# ---------------------------------------------
env_master_map = {
    "stg11": "172.22.64.224",
    "stg18": "172.16.166.31",
    "stg4": "172.22.75.75",
    "brood-stg4-lufunds": "172.22.64.116",
    "qa6": "172.22.65.85",
    "brood-qa6-lufunds": "172.22.64.108",
    "brood-st27": "172.22.69.211",
    "brood-st29": "172.22.66.36",
    "base-luagt": "172.22.70.252",
    "brood-me196": "172.22.70.120",
}  # environments to operate on: env name -> k8s master IP

namespaces = ["default"]  # target namespaces to scan for pods

no_re_scheduler_apps = [
    "wmc-kmq-app", "wmc-scheduler-app", "wmc-rcs-app", "wmc-lfs-app"
]  # app names that must never be rescheduled (matched as substrings of pod names)

evict_strategy = {
    "thresholds": {
        "memory_rate": 65.0  # below this the node counts as underutilized
    },
    "targetThresholds": {
        "memory_rate": 75.0  # above this the node counts as overutilized
    },
}  # eviction strategy thresholds (memory usage, percent)

node_labels = {
    "host_app_services": "true"
}  # only nodes carrying all of these labels take part in rescheduling

evict_pod_num_per_node = 1  # max pods to evict per node per run


# ---------------------------------------------


def get_prometheus_data(query):
    """Evaluate a PromQL expression against the thanos-query HTTP API.

    :param query: PromQL expression string.
    :return: decoded JSON response dict, or {} on any failure
             (non-200 status, network error, timeout).
    """
    url = "https://lujs.cn/lts/thanos-query/api/v1/query"

    try:
        res = requests.get(
            url=url,
            params={
                "query": query,
                "dedup": "true"
            },
            # bug fix: without a timeout a stuck endpoint blocks the whole
            # rescheduling run indefinitely.
            timeout=30,
        )
    except requests.RequestException:
        # robustness: the contract is "return {} on failure"; let network
        # errors follow the same path instead of propagating.
        lu_logger.error("请求thanos-query失败")
        return {}

    if res.status_code != 200:
        lu_logger.error("请求thanos-query失败")
        return {}
    return res.json()


def get_env_node_memory_rate() -> Dict[str, float]:
    """Fetch per-node memory usage (percent) from Prometheus.

    Queries `1 - MemAvailable/MemTotal` for the node-exporter-brood jobs.

    :return: mapping of node IP -> memory usage rate (0-100 float);
             {} when the query fails.
    """
    node_map: Dict[str, float] = {}

    data = get_prometheus_data(
        "(1 - (node_memory_MemAvailable_bytes{job=~\"node-exporter-brood.*\"} / (node_memory_MemTotal_bytes{job=~\"node-exporter-brood.*\"})))* 100")
    if data.get("status") != "success":
        lu_logger.error("thanos-query return status {}".format(data.get("status")))
        return {}
    for each in data.get("data", {}).get("result", []):
        metrics_address = each.get("metric", {}).get("instance", "")
        # bug fix: the original `split(":")` raised ValueError on unpacking
        # unless the instance label contained exactly one colon; rpartition
        # splits on the last colon (host:port) and never raises.
        node_ip, _, _metrics_port = metrics_address.rpartition(":")
        value = each.get("value", [])
        memory_rate = float(value[1]) if value else 0.0
        node_map[node_ip] = memory_rate

    return node_map


def get_nodes_map(k8s_attr: dict, labels: dict = None):
    """Return the cluster nodes matching every given label.

    :param k8s_attr: keyword arguments used to construct CoreAPI.
    :param labels: required label key/value pairs; None/empty matches all nodes.
    :return: mapping of node IP -> V1Node for nodes carrying all the labels.
    """
    wanted = labels or {}
    matched: Dict[str, CoreAPI.client.V1Node] = {}
    with CoreAPI(**k8s_attr) as api:
        for ip, node in api.get_nodes().items():
            meta_labels = node.metadata.labels
            # keep the node only when every requested label matches exactly
            if all(meta_labels.get(key) == value for key, value in wanted.items()):
                matched[ip] = node

    return matched


def get_node_pods(k8s_attr: dict, namespaces: List[str], node_ips: list) -> Dict[str, List[CoreAPI.client.V1Pod]]:
    """Group pods from the given namespaces by the node (host IP) they run on.

    :param k8s_attr: keyword arguments used to construct CoreAPI.
    :param namespaces: namespaces to scan.
    :param node_ips: only pods hosted on these node IPs are kept.
    :return: mapping of host IP -> list of V1Pod running on that node.
    """
    grouped: Dict[str, List[CoreAPI.client.V1Pod]] = defaultdict(list)

    with CoreAPI(**k8s_attr) as api:
        for ns in namespaces:
            for pod in api.get_pods(ns).values():
                host = pod.status.host_ip
                if host in node_ips:
                    grouped[host].append(pod)

    return grouped


def evict_pod(k8s_attr, V1Pod: CoreAPI.client.V1Pod):
    """Evict a pod by deleting it, letting its controller recreate it elsewhere.

    :param k8s_attr: keyword arguments used to construct CoreAPI.
    :param V1Pod: the pod object to delete.
    :return: the API response from the delete call.
    """
    pod_ns, pod_name = V1Pod.metadata.namespace, V1Pod.metadata.name
    lu_logger.info("evict pod {}/{}".format(pod_ns, pod_name))
    with CoreAPI(**k8s_attr) as api:
        return api.delete_pod(pod_ns, pod_name)


def is_pod_can_scheduler(V1Pod: CoreAPI.client.V1Pod):
    """Decide whether a pod may safely be evicted for rescheduling.

    A pod is NOT evictable when any of the following holds:
      * an owner reference is not a controller;
      * it is owned by a DaemonSet, StatefulSet or Node (eviction would be
        pointless or harmful — the pod is pinned to its node);
      * its name contains one of the names in no_re_scheduler_apps.

    :param V1Pod: the pod to check.
    :return: True when the pod can be evicted.
    """
    pod_name = V1Pod.metadata.name
    # bug fix: owner_references may be None for bare (unmanaged) pods in the
    # k8s API; iterating None raised TypeError.
    for owner in (V1Pod.metadata.owner_references or []):
        if not getattr(owner, "controller", None):
            return False
        if getattr(owner, "kind", None) in ("DaemonSet", "StatefulSet", "Node"):
            return False

    # bug fix: the original returned True early when no_re_scheduler_apps was
    # empty, skipping the owner checks above and allowing DaemonSet/StatefulSet
    # pods to be evicted.
    return not any(app_name in pod_name for app_name in no_re_scheduler_apps)


def run_re_scheduler(k8s_attr, node_memory_rate_map) -> List[str]:
    """Rebalance pods from overutilized onto underutilized nodes.

    Steps:
      1. Classify labeled nodes as overutilized / underutilized by memory
         rate; both kinds must exist, otherwise nothing is done.
      2. Pick evictable pods on the overutilized nodes.
      3. Evict them (highest-load node first), stopping once as many
         overutilized nodes were handled as there are underutilized nodes.

    :param k8s_attr: keyword arguments used to construct CoreAPI.
    :param node_memory_rate_map: node IP -> memory usage percent.
    :return: names of the pods that were evicted.
    """
    node_map = get_nodes_map(k8s_attr, node_labels)
    overutilized_nodes, underutilized_nodes = [], []
    for node_ip, V1Node in node_map.items():
        memory_rate = node_memory_rate_map.get(node_ip)
        if memory_rate is None:
            lu_logger.error(
                "node {} 未获取到节点内存信息,请确认prometheus中配置该节点监控且查询链路正常".format(node_ip))
            continue
        if memory_rate > evict_strategy["targetThresholds"]["memory_rate"]:
            overutilized_nodes.append(node_ip)
            lu_logger.info("overutilized_nodes: {}-{}".format(node_ip, memory_rate))
        elif memory_rate < evict_strategy["thresholds"]["memory_rate"]:
            underutilized_nodes.append(node_ip)
    if not all([overutilized_nodes, underutilized_nodes]):
        lu_logger.info("不符合重调度策略,调度停止")
        return []

    overutilized_nodes.sort(key=lambda x: node_memory_rate_map[x], reverse=True)

    lu_logger.info("低负荷节点:{}".format(",".join(underutilized_nodes)))
    lu_logger.info("高负荷节点:{}".format(",".join(overutilized_nodes)))

    # 从负荷最高的节点开始驱逐
    evicted_pods = []
    overutilized_node_pod_map = get_node_pods(k8s_attr, namespaces, overutilized_nodes)
    underutilized_node_count = len(underutilized_nodes)
    done_count = 0
    # bug fix: iterate in the sorted highest-load-first order; the original
    # iterated the pod map in dict-insertion order, silently ignoring the
    # sort performed above.
    for overutilized_node_ip in overutilized_nodes:
        pod_list = overutilized_node_pod_map.get(overutilized_node_ip)
        if not pod_list:
            # node has no pods in the target namespaces — nothing to evict
            continue
        random.shuffle(pod_list)  # randomize which pods get evicted
        evict_num = 0
        for V1Pod in pod_list:
            if is_pod_can_scheduler(V1Pod):
                evict_pod(k8s_attr, V1Pod)
                evicted_pods.append(V1Pod.metadata.name)
                evict_num += 1
            if evict_num >= evict_pod_num_per_node:
                lu_logger.info("node {} 已驱逐pod数量到达限制数量，驱逐停止".format(overutilized_node_ip))
                break
        done_count += 1
        # handle at most as many overutilized nodes as there are
        # underutilized nodes able to absorb the evicted pods
        if done_count >= underutilized_node_count:
            lu_logger.info(
                "已重新调度高负荷节点{}个,达到低负荷节点数量{},调度停止".format(done_count, underutilized_node_count)
            )
            break
    return evicted_pods


def run():
    """Run the rescheduler once against every configured environment.

    Fetches node memory rates a single time, then processes each environment
    in env_master_map sequentially.
    """
    node_memory_rate_map = get_env_node_memory_rate()
    for env_name, master_ip in env_master_map.items():
        # client credentials + apiserver endpoint for this environment
        k8s_attr = dict(
            ca="pem/env_pem/k8s-ca.pem",
            cert="pem/env_pem/admin.pem",
            key="pem/env_pem/admin-key.pem",
            apiserver="https://{}:6443".format(master_ip),
        )
        evicted = run_re_scheduler(k8s_attr, node_memory_rate_map)
        lu_logger.info("env: {},re scheduler pod: {}".format(env_name, ",".join(evicted)))
        # TODO: process environments concurrently (thread pool)


# Script entry point: run the rescheduler once across all environments.
if __name__ == '__main__':
    run()
