from datetime import datetime, timezone
from pprint import pprint
from kubernetes import client, config
import subprocess
from math import floor
import logging

# Purpose: dynamically set the pod-deletion-cost annotation so that pods with
# long runtimes and pods running on nodepool nodes are scaled down first.


# Configure logging: timestamped INFO-level messages for the whole script.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)

class PodScaler:
    """Adjust controller.kubernetes.io/pod-deletion-cost annotations.

    Pods that have run longer than their app's threshold, and pods on
    nodes labeled scale-down-priority=high, receive a lower deletion
    cost so the deployment controller removes them first on scale-in.
    """

    def __init__(self, context_name):
        """Load kube config for the given kubectl context and build clients.

        Args:
            context_name: kubeconfig context identifying the target cluster.
        """
        self.context_name = context_name
        config.load_kube_config(context=self.context_name)
        self.api_instance = client.CoreV1Api()
        self.v1 = client.AppsV1Api()  # AppsV1 client (unused here; kept for compatibility)

    def calculate_time_passed(self, start_time_iso):
        """Return whole hours elapsed since an ISO-8601 start time.

        Args:
            start_time_iso: ISO-8601 timestamp string, or None/empty.

        Returns:
            int: floor of elapsed hours; 0 for missing or unparseable input.
        """
        try:
            if not start_time_iso:
                return 0

            start_time = datetime.fromisoformat(start_time_iso)
            # Kubernetes reports UTC timestamps. Attach tzinfo only when the
            # parsed value is naive, so an explicit offset in the input is
            # respected instead of being overwritten (bug fix: the old code
            # unconditionally replaced tzinfo, skewing non-UTC inputs).
            if start_time.tzinfo is None:
                start_time = start_time.replace(tzinfo=timezone.utc)
            elapsed = datetime.now(timezone.utc) - start_time
            return floor(elapsed.total_seconds() / 3600)
        except ValueError as e:
            logger.error(f"Error parsing start time: {e}")
            return 0

    def get_node_priority(self, node_name):
        """Return the deletion-cost offset contributed by a pod's node.

        Nodes labeled scale-down-priority=high should be drained first, so
        their pods get a -100 offset; all other nodes contribute 0.
        API errors are logged and treated as "no priority".
        """
        try:
            node = self.api_instance.read_node(node_name)
            # metadata.labels is None when the node carries no labels at all.
            labels = node.metadata.labels or {}
            if labels.get("scale-down-priority") == "high":
                return -100  # prefer deleting pods on this node
            return 0
        except client.exceptions.ApiException as e:
            logger.error(f"Error fetching node {node_name}: {e}")
            return 0

    def list_running_pods(self, namespace, label_selectors):
        """Collect metadata for running pods of each configured app.

        Args:
            namespace: namespace to query.
            label_selectors: list of dicts with "app" (value of the app=
                label) and "threshold_time" (hours before a pod becomes a
                scale-down candidate).

        Returns:
            list[dict]: one entry per running, non-terminating pod with its
            name, namespace, current deletion cost, start time, node name,
            threshold and app label.
        """
        running_pods = []

        for selector in label_selectors:
            try:
                pods = self.api_instance.list_namespaced_pod(
                    namespace,
                    label_selector=f"app={selector['app']}"
                )

                for pod in pods.items:
                    # Skip pods that are terminating or not yet Running.
                    if pod.metadata.deletion_timestamp or pod.status.phase != "Running":
                        continue

                    # annotations is None when the pod has none (bug fix),
                    # and a malformed cost value must not abort the scan.
                    annotations = pod.metadata.annotations or {}
                    try:
                        current_cost = int(annotations.get(
                            "controller.kubernetes.io/pod-deletion-cost", "0"
                        ))
                    except ValueError:
                        current_cost = 0

                    running_pods.append({
                        "name": pod.metadata.name,
                        "namespace": namespace,
                        "current_cost": current_cost,
                        "start_time": pod.status.start_time.isoformat() if pod.status.start_time else None,
                        "node_name": pod.spec.node_name,
                        "threshold_hours": selector["threshold_time"],
                        "app": selector["app"]
                    })

            except client.exceptions.ApiException as e:
                logger.error(f"Error fetching pods for {selector['app']}: {e}")

        return running_pods

    def calculate_pod_costs(self, pods):
        """Compute a new deletion cost for each pod dict (mutates in place).

        Starts from a baseline of 100, subtracts up to 50 once the pod's
        runtime exceeds its threshold, adds the node offset, and clamps the
        result to [-100, 100] under the "new_cost" key.

        Returns:
            The same list, with "new_cost" set on every entry.
        """
        for pod in pods:
            time_passed = self.calculate_time_passed(pod["start_time"])
            node_priority = self.get_node_priority(pod["node_name"])

            new_cost = 100  # baseline: recently started pod on a normal node

            # Longer-running pods get a lower cost (deleted earlier), capped
            # at -50 so age alone cannot dominate the node factor.
            if time_passed > pod["threshold_hours"]:
                new_cost -= min(time_passed, 50)

            new_cost += node_priority

            # Keep the annotation within a bounded, predictable range.
            pod["new_cost"] = max(-100, min(100, new_cost))

        return pods

    def update_pod_annotations(self, pods):
        """Apply each pod's new deletion cost via `kubectl annotate`.

        Pods whose cost is unchanged are skipped. Failures are logged and do
        not stop processing of the remaining pods.
        """
        for pod in pods:
            if pod["new_cost"] == pod["current_cost"]:
                continue

            # Argument-list form avoids shell interpolation of values coming
            # from the API (safer and more robust than shell=True strings).
            cmd = [
                "kubectl", "annotate", "pod", pod["name"],
                "-n", pod["namespace"],
                f"controller.kubernetes.io/pod-deletion-cost={pod['new_cost']}",
                "--overwrite",
                "--context", self.context_name,
            ]

            try:
                subprocess.run(
                    cmd, check=True,
                    capture_output=True, text=True
                )
                logger.info(
                    f"Updated {pod['name']} (node:{pod['node_name']}) "
                    f"cost: {pod['current_cost']} -> {pod['new_cost']}"
                )
            except subprocess.CalledProcessError as e:
                logger.error(
                    f"Failed to update {pod['name']}: {e.stderr}"
                )

if __name__ == '__main__':
    # Target cluster and namespace.
    CONTEXT = "arn:aws:eks:eu-central-1:165263321613:cluster/doli-k8s"
    NAMESPACE = "meetcha-prod"

    # Applications to manage, each with the runtime threshold (hours)
    # after which its pods become preferred scale-down candidates.
    APPS = [
        {"app": "meetcha-prod-java-gateway", "threshold_time": 5},
        {"app": "meetcha-prod-java-chat", "threshold_time": 5},
        {"app": "meetcha-prod-java-user", "threshold_time": 5},
        {"app": "meetcha-prod-java-game", "threshold_time": 5},
        {"app": "meetcha-prod-java-union", "threshold_time": 10},
        {"app": "meetcha-prod-java-op", "threshold_time": 10},
        {"app": "meetcha-prod-java-athena", "threshold_time": 10},
        {"app": "meetcha-prod-java-recharge", "threshold_time": 10},
        {"app": "meetcha-prod-java-job", "threshold_time": 10}
    ]

    logger.info("Starting pod scaling priority adjustment")

    # Pipeline: discover running pods, score them, then push annotations.
    scaler = PodScaler(CONTEXT)
    running = scaler.list_running_pods(NAMESPACE, APPS)
    scaler.update_pod_annotations(scaler.calculate_pod_costs(running))

    logger.info("Completed pod scaling priority adjustment")