from  datetime import datetime, timedelta
from prometheus_api_client import PrometheusConnect
import requests
from kubernetes import client, config
import numpy as np
from scipy.stats import rankdata

class KubePrometheus:
    """Thin client for the cluster's Prometheus HTTP API.

    Range queries return per-series lists of ``[timestamp, value]`` samples;
    most helpers below strip the timestamps and return only the value column
    as a float64 numpy array (or a ``{pod: values}`` dict for per-pod queries).

    NOTE(review): the Prometheus host IP (172.20.216.103) is hard-coded —
    confirm it matches the target cluster before reuse.
    """

    def __init__(self) -> None:
        """Discover the `prometheus-k8s` NodePort and build the base URL.

        Raises:
            Exception: if no service named ``prometheus-k8s`` exists.
        """
        config.load_kube_config('/etc/kubernetes/kubelet.conf')

        v1 = client.CoreV1Api()
        services = v1.list_service_for_all_namespaces()
        port = None
        for service in services.items:
            if service.metadata.name == 'prometheus-k8s':
                port = service.spec.ports[0].node_port
                break
        if port is None:
            raise Exception("Prometheus service not found")

        self.prometheus_url = f'http://172.20.216.103:{port}'
        self.prom = PrometheusConnect(url=self.prometheus_url)

    def _get_metric_range_data(self, query, start_time=None, end_time=None, latest_minutes=None, step='15s'):
        """Run ``/api/v1/query_range`` and return the raw ``result`` list.

        Args:
            query: PromQL expression.
            start_time / end_time: query window.  Default to the last 10
                minutes, evaluated at CALL time.  (The previous signature
                used ``datetime.now()`` as the default value, which Python
                evaluates once at import time — the window was silently
                frozen for the life of the process.)
            latest_minutes: when set, overrides the window with
                ``[now - latest_minutes, now]``.
            step: Prometheus resolution step.

        Raises:
            requests.HTTPError: on a non-2xx API response.
        """
        if end_time is None:
            end_time = datetime.now()
        if start_time is None:
            start_time = end_time - timedelta(minutes=10)
        if latest_minutes:
            end_time = datetime.now()
            start_time = end_time - timedelta(minutes=latest_minutes)
        res = requests.get(
            f'{self.prometheus_url}/api/v1/query_range',
            params={
                "query": query,
                "start": datetime.timestamp(start_time),
                "end": datetime.timestamp(end_time),
                "step": step,
            },
        )
        res.raise_for_status()
        return res.json()['data']['result']

    def _get_metric_data(self, query):
        """Run an instant query (``/api/v1/query``) and return the raw result list."""
        res = requests.get(
            f'{self.prometheus_url}/api/v1/query',
            params={"query": query},
        )
        res.raise_for_status()
        return res.json()['data']['result']

    def _range_values(self, query, start_time=None, end_time=None, step='15s', latest_minutes=None):
        """Range-query and return the value column of the FIRST series as float64."""
        result = self._get_metric_range_data(query=query, start_time=start_time, end_time=end_time, step=step, latest_minutes=latest_minutes)
        return np.array(result[0]['values'], dtype=np.float64)[:, 1]

    def _range_values_by_pod(self, query, start_time=None, end_time=None, step='15s', latest_minutes=None):
        """Range-query and return ``{pod name: float64 value column}`` per series."""
        result = self._get_metric_range_data(query=query, start_time=start_time, end_time=end_time, step=step, latest_minutes=latest_minutes)
        return {item['metric']['pod']: np.array(item['values'], dtype=np.float64)[:, 1] for item in result}

    def get_namespace_pods_names(self, namespace='current-semester', start_time=None, end_time=None, step='15s', latest_minutes=None):
        """Return the names of pods that reported memory samples in `namespace`.

        (The original assigned the query with a trailing comma, turning it
        into a 1-tuple; requests happened to serialize that correctly, but
        it was accidental.  Same for the other namespace-wide queries.)
        """
        query = f'sum(container_memory_working_set_bytes{{cluster="", namespace="{namespace}", container!="", image!=""}}) by (pod)'
        result = self._get_metric_range_data(query=query, start_time=start_time, end_time=end_time, step=step, latest_minutes=latest_minutes)
        return [item['metric']['pod'] for item in result]

    def get_pod_memory(self, deploy_name, namespace='current-semester', start_time=None, end_time=None, step='15s', latest_minutes=None):
        """Working-set memory (bytes) of the first pod of `deploy_name`'s deployment."""
        query = f'''
        sum(
            container_memory_working_set_bytes{{cluster="", namespace="{namespace}", container!="", image!=""}}
        * on(namespace,pod)
            group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{{cluster="", namespace="{namespace}", workload="{deploy_name}", workload_type="deployment"}}
        ) by (pod)'''
        return self._range_values(query, start_time=start_time, end_time=end_time, step=step, latest_minutes=latest_minutes)

    def get_pod_cpu(self, deploy_name, namespace='current-semester', start_time=None, end_time=None, step='15s', latest_minutes=None):
        """CPU usage rate of the first pod of `deploy_name`'s deployment."""
        query = f'''
        sum(
            node_namespace_pod_container:container_cpu_usage_seconds_total:sum_irate{{cluster="", namespace="{namespace}"}}
        * on(namespace,pod)
            group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{{cluster="", namespace="{namespace}", workload="{deploy_name}", workload_type="deployment"}}
        ) by (pod)'''
        return self._range_values(query, start_time=start_time, end_time=end_time, step=step, latest_minutes=latest_minutes)

    def get_namespace_total_memory(self, namespace='current-semester', start_time=None, end_time=None, step='15s', latest_minutes=None):
        """Per-pod working-set memory (bytes) for every pod in `namespace`."""
        query = f'sum(container_memory_working_set_bytes{{cluster="", namespace="{namespace}", container!="", image!=""}}) by (pod)'
        return self._range_values_by_pod(query, start_time=start_time, end_time=end_time, step=step, latest_minutes=latest_minutes)

    def get_namespace_total_cpu(self, namespace='current-semester', start_time=None, end_time=None, step='15s', latest_minutes=None):
        """Per-pod CPU usage rate for every pod in `namespace`."""
        query = f'sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_irate{{cluster="", namespace="{namespace}"}}) by (pod)'
        return self._range_values_by_pod(query, start_time=start_time, end_time=end_time, step=step, latest_minutes=latest_minutes)

    def get_namespace_total_receive_bandwidth(self, namespace='current-semester', start_time=None, end_time=None, step='15s', latest_minutes=None):
        """Per-pod network receive bandwidth (bytes/s)."""
        query = f'sum(irate(container_network_receive_bytes_total{{cluster="",namespace=~"{namespace}"}}[3m:1m])) by (pod)'
        return self._range_values_by_pod(query, start_time=start_time, end_time=end_time, step=step, latest_minutes=latest_minutes)

    def get_namespace_total_transmit_bandwidth(self, namespace='current-semester', start_time=None, end_time=None, step='15s', latest_minutes=None):
        """Per-pod network transmit bandwidth (bytes/s)."""
        query = f'sum(irate(container_network_transmit_bytes_total{{cluster="",namespace=~"{namespace}"}}[3m:1m])) by (pod)'
        return self._range_values_by_pod(query, start_time=start_time, end_time=end_time, step=step, latest_minutes=latest_minutes)

    def get_namespace_total_rate_of_received_packets(self, namespace='current-semester', start_time=None, end_time=None, step='15s', latest_minutes=None):
        """Per-pod received-packet rate (packets/s)."""
        query = f'sum(rate(container_network_receive_packets_total{{cluster="",namespace=~"{namespace}"}}[3m:1m])) by (pod)'
        return self._range_values_by_pod(query, start_time=start_time, end_time=end_time, step=step, latest_minutes=latest_minutes)

    def get_namespace_total_rate_of_transmitted_packets(self, namespace='current-semester', start_time=None, end_time=None, step='15s', latest_minutes=None):
        """Per-pod transmitted-packet rate (packets/s)."""
        query = f'sum(rate(container_network_transmit_packets_total{{cluster="",namespace=~"{namespace}"}}[3m:1m])) by (pod)'
        return self._range_values_by_pod(query, start_time=start_time, end_time=end_time, step=step, latest_minutes=latest_minutes)

    def get_namespace_total_IOPS(self, namespace='current-semester', start_time=None, end_time=None, step='15s', latest_minutes=None):
        """Per-pod disk IOPS (reads + writes per second, rounded up)."""
        query = f'ceil(sum by(pod) (rate(container_fs_reads_total{{container!="", cluster="",namespace=~"{namespace}"}}[5m]) + rate(container_fs_writes_total{{container!="", cluster="",namespace=~"{namespace}"}}[5m])))'
        return self._range_values_by_pod(query, start_time=start_time, end_time=end_time, step=step, latest_minutes=latest_minutes)

    def get_namespace_total_throughput(self, namespace='current-semester', start_time=None, end_time=None, step='15s', latest_minutes=None):
        """Per-pod disk throughput (read + write bytes/s)."""
        query = f'sum by(pod) (rate(container_fs_reads_bytes_total{{container!="", cluster="",namespace=~"{namespace}"}}[5m]) + rate(container_fs_writes_bytes_total{{container!="", cluster="",namespace=~"{namespace}"}}[5m]))'
        return self._range_values_by_pod(query, start_time=start_time, end_time=end_time, step=step, latest_minutes=latest_minutes)

    def get_node_load1(self, node, start_time=None, end_time=None, step='15s', latest_minutes=None):
        """1-minute load average of `node`."""
        query = f'node_load1{{job="node-exporter", instance="{node}"}}'
        return self._range_values(query, start_time=start_time, end_time=end_time, step=step, latest_minutes=latest_minutes)

    def get_node_load5(self, node, start_time=None, end_time=None, step='15s', latest_minutes=None):
        """5-minute load average of `node`."""
        query = f'node_load5{{job="node-exporter", instance="{node}"}}'
        return self._range_values(query, start_time=start_time, end_time=end_time, step=step, latest_minutes=latest_minutes)

    def get_node_load15(self, node, start_time=None, end_time=None, step='15s', latest_minutes=None):
        """15-minute load average of `node`."""
        query = f'node_load15{{job="node-exporter", instance="{node}"}}'
        return self._range_values(query, start_time=start_time, end_time=end_time, step=step, latest_minutes=latest_minutes)

    def get_node_cpu_seconds_total(self, node, start_time=None, end_time=None, step='15s', latest_minutes=None):
        """Cumulative CPU seconds consumed by `node` over the window."""
        query = f'sum(node_cpu_seconds_total{{ instance="{node}"}})'
        return self._range_values(query, start_time=start_time, end_time=end_time, step=step, latest_minutes=latest_minutes)

    def get_node_cpu_rate(self, node, start_time=None, end_time=None, step='15s', latest_minutes=None):
        """CPU usage rate of `node` (recording rule `instance:node_cpu:rate:sum`)."""
        query = f'instance:node_cpu:rate:sum{{instance="{node}"}}'
        return self._range_values(query, start_time=start_time, end_time=end_time, step=step, latest_minutes=latest_minutes)

    def get_node_network_rate(self, node, start_time=None, end_time=None, step='15s', latest_minutes=None):
        """Network receive rate of `node` (bytes/s)."""
        query = f'instance:node_network_receive_bytes:rate:sum{{instance="{node}"}}'
        return self._range_values(query, start_time=start_time, end_time=end_time, step=step, latest_minutes=latest_minutes)

    def get_node_memory_usage_persent(self, node, start_time=None, end_time=None, step='15s', latest_minutes=None):
        """Memory usage of `node` as a percentage (100 - available/total*100).

        (Name keeps the original "persent" misspelling for caller compatibility.)
        """
        query = f'''
            100 -
                (
                avg(node_memory_MemAvailable_bytes{{job="node-exporter", instance="{node}"}})
                /
                avg(node_memory_MemTotal_bytes{{job="node-exporter", instance="{node}"}})
                * 100
                )'''
        return self._range_values(query, start_time=start_time, end_time=end_time, step=step, latest_minutes=latest_minutes)

    def get_node_memory_usage(self, node, start_time=None, end_time=None, step='15s', latest_minutes=None):
        """Used memory of `node` in bytes (total - free - buffers - cached)."""
        query = f'''
        (
        node_memory_MemTotal_bytes{{job="node-exporter", instance="{node}"}}
        -
        node_memory_MemFree_bytes{{job="node-exporter", instance="{node}"}}
        -
        node_memory_Buffers_bytes{{job="node-exporter", instance="{node}"}}
        -
        node_memory_Cached_bytes{{job="node-exporter", instance="{node}"}}
        )'''
        return self._range_values(query, start_time=start_time, end_time=end_time, step=step, latest_minutes=latest_minutes)

    def get_node_memory_Buffers_bytes(self, node, start_time=None, end_time=None, step='15s', latest_minutes=None):
        """Buffer memory of `node` in bytes."""
        query = f'node_memory_Buffers_bytes{{job="node-exporter", instance="{node}"}}'
        return self._range_values(query, start_time=start_time, end_time=end_time, step=step, latest_minutes=latest_minutes)

    def get_node_memory_Cached_bytes(self, node, start_time=None, end_time=None, step='15s', latest_minutes=None):
        """Page-cache memory of `node` in bytes."""
        query = f'node_memory_Cached_bytes{{job="node-exporter", instance="{node}"}}'
        return self._range_values(query, start_time=start_time, end_time=end_time, step=step, latest_minutes=latest_minutes)

    def get_node_memory_MemFree_bytes(self, node, start_time=None, end_time=None, step='15s', latest_minutes=None):
        """Free memory of `node` in bytes."""
        query = f'node_memory_MemFree_bytes{{job="node-exporter", instance="{node}"}}'
        return self._range_values(query, start_time=start_time, end_time=end_time, step=step, latest_minutes=latest_minutes)

    def get_node_disk_io_rate(self, node, start_time=None, end_time=None, step='15s', latest_minutes=None):
        """Disk IO time rate of `node` (recording rule, 5m rate)."""
        query = f'sum by(instance)(instance_device:node_disk_io_time_seconds:rate5m{{instance="{node}"}})'
        return self._range_values(query, start_time=start_time, end_time=end_time, step=step, latest_minutes=latest_minutes)

    def get_node_filesystem_avail_bytes(self, node, start_time=None, end_time=None, step='15s', latest_minutes=None):
        """Available filesystem bytes on `node`.

        Returns the RAW Prometheus result list (unlike the other node
        helpers, which extract the value column) — kept for compatibility.
        """
        query = f'''
            sum(
                max by (device) (
                    node_filesystem_avail_bytes{{job="node-exporter", instance="{node}", fstype!=""}}
                )
            )
        '''
        return self._get_metric_range_data(query=query, start_time=start_time, end_time=end_time, step=step, latest_minutes=latest_minutes)

    def get_node_namespaced_pods(self, namespace, node):
        """Return names of `namespace` pods currently scheduled on `node`."""
        query = f'node_namespace_pod:kube_pod_info:{{namespace="{namespace}", node="{node}"}}'
        pods = self._get_metric_data(query=query)
        return [pod['metric']['pod'] for pod in pods]

    def get_all_nodes_names(self):
        """Return the instance names of all nodes scraped by node-exporter."""
        query = 'instance:node_cpu:ratio{job="node-exporter"}'
        nodes = self._get_metric_data(query=query)
        return [node['metric']['instance'] for node in nodes]

    def get_node_running_pods_count(self, node):
        """Return the current running-pod count on `node` (as a string value).

        Falls back to the legacy `kubelet_running_pod_count` metric name
        via the PromQL OR.
        """
        query = f'sum(kubelet_running_pods{{node="{node}" }}) OR sum(kubelet_running_pod_count{{node="{node}" }})'
        node_info = self._get_metric_data(query=query)
        return node_info[0]['value'][1]

def test_all_function():
    """Smoke-test every KubePrometheus helper against the live cluster.

    Purely exercises the API and prints results; no assertions.
    """
    kube_prometheus = KubePrometheus()
    all_nodes = kube_prometheus.get_all_nodes_names()
    namespace = 'current-semester'
    for node in all_nodes:
        deploy_name = 'homework-52-student'
        pod_memory = kube_prometheus.get_pod_memory(deploy_name=deploy_name, latest_minutes=10)
        print(f"Memory for {deploy_name}: {pod_memory}\n\n")

        pod_names = kube_prometheus.get_namespace_pods_names(namespace=namespace)
        print(f"Pods for {namespace}: {pod_names}\n\n")

        pod_cpu = kube_prometheus.get_pod_cpu(deploy_name=deploy_name, latest_minutes=10)
        print(f"CPU for {deploy_name}: {pod_cpu}\n\n")

        namespace_all_memory = kube_prometheus.get_namespace_total_memory()
        print(f"Memory for {namespace}: {namespace_all_memory}\n\n")

        namespace_all_cpu = kube_prometheus.get_namespace_total_cpu()
        print(f"CPU for {namespace}: {namespace_all_cpu}\n\n")

        namespace_total_transmit_bandwidth = kube_prometheus.get_namespace_total_transmit_bandwidth()
        print(f"Transmit Bandwidth for {namespace}: {namespace_total_transmit_bandwidth}\n\n")

        namespace_total_receive_bandwidth = kube_prometheus.get_namespace_total_receive_bandwidth()
        print(f"Receive Bandwidth for {namespace}: {namespace_total_receive_bandwidth}\n\n")

        namespace_total_rate_of_received_packets = kube_prometheus.get_namespace_total_rate_of_received_packets()
        print(f"Rate of Received Packets for {namespace}: {namespace_total_rate_of_received_packets}\n\n")

        namespace_total_rate_of_transmitted_packets = kube_prometheus.get_namespace_total_rate_of_transmitted_packets()
        print(f"Rate of Transmitted Packets for {namespace}: {namespace_total_rate_of_transmitted_packets}\n\n")

        # Labels below were copy-paste errors in the original
        # ("Total Transmit/Receive Packets"); corrected to match the data.
        namespace_total_IOPS = kube_prometheus.get_namespace_total_IOPS()
        print(f"IOPS for {namespace}: {namespace_total_IOPS}\n\n")

        namespace_total_throughput = kube_prometheus.get_namespace_total_throughput()
        print(f"Throughput for {namespace}: {namespace_total_throughput}\n\n")

        node_load1 = kube_prometheus.get_node_load1(node, latest_minutes=10)
        print(f"Load1 for {node}: {node_load1}\n\n")

        # Original printed "Load1" for all three load metrics; fixed.
        node_load5 = kube_prometheus.get_node_load5(node, latest_minutes=10)
        print(f"Load5 for {node}: {node_load5}\n\n")

        node_load15 = kube_prometheus.get_node_load15(node, latest_minutes=10)
        print(f"Load15 for {node}: {node_load15}\n\n")

        node_cpu_seconds_total = kube_prometheus.get_node_cpu_seconds_total(node, latest_minutes=10)
        print(f"CPU Seconds Total for {node}: {node_cpu_seconds_total}\n\n")

        node_cpu_rate = kube_prometheus.get_node_cpu_rate(node, latest_minutes=10)
        print(f"CPU rate for {node}: {node_cpu_rate}\n\n")

        node_network_rate = kube_prometheus.get_node_network_rate(node, latest_minutes=10)
        print(f"Network rate for {node}: {node_network_rate}\n\n")

        node_memory_usage_persent = kube_prometheus.get_node_memory_usage_persent(node, latest_minutes=10)
        print(f"Memory Usage Persent for {node}: {node_memory_usage_persent}\n\n")

        node_memory_usage = kube_prometheus.get_node_memory_usage(node, latest_minutes=10)
        print(f"Memory Usage for {node}: {node_memory_usage}\n\n")

        node_memory_Buffers_bytes = kube_prometheus.get_node_memory_Buffers_bytes(node, latest_minutes=10)
        print(f"Memory Buffers Bytes for {node}: {node_memory_Buffers_bytes}\n\n")

        node_memory_Cached_bytes = kube_prometheus.get_node_memory_Cached_bytes(node, latest_minutes=10)
        print(f"Memory Cached Bytes for {node}: {node_memory_Cached_bytes}\n\n")

        node_memory_MemFree_bytes = kube_prometheus.get_node_memory_MemFree_bytes(node, latest_minutes=10)
        print(f"Memory MemFree Bytes for {node}: {node_memory_MemFree_bytes}\n\n")

        node_disk_io_rate = kube_prometheus.get_node_disk_io_rate(node, latest_minutes=10)
        print(f"Disk IO Rate for {node}: {node_disk_io_rate}\n\n")

        node_filesystem_avail_bytes = kube_prometheus.get_node_filesystem_avail_bytes(node, latest_minutes=10)
        print(f"Filesystem Avail Bytes for {node}: {node_filesystem_avail_bytes}\n\n")

        node_namespaced_pods = kube_prometheus.get_node_namespaced_pods(namespace=namespace, node=node)
        print(f"Namespaced Pods on {node}: {node_namespaced_pods}\n\n")

        nodes = kube_prometheus.get_all_nodes_names()
        print(f"All Nodes: {nodes}\n\n")

        node_running_pods = kube_prometheus.get_node_running_pods_count(node)
        print(f"Node current Pods: {node_running_pods}\n\n")
        


class KubeScheduling():
    """Score worker nodes and rank student pods from Prometheus metrics.

    A node's score is the weighted sum of its share of the cluster-wide
    CPU rate, memory usage, network rate, disk IO rate and active-pod
    count; a LOWER score means a less-loaded node, and the least-loaded
    one is stored as ``best_node``.
    """

    def __init__(self):
        # Relative weight of each resource in the node score (sums to 1.0).
        self.score_weight = {
            'cpu': 0.3,
            'memory': 0.3,
            'network': 0.2,
            'disk': 0.1,
            'active_node': 0.1,
        }
        self.minutes_span = 60 * 6  # metrics look-back window: 6 hours
        self.stu_namespace = 'current-semester'
        self.kubePrometheus = KubePrometheus()
        self.best_node = None
        self.nodes = None
        self.worker_nodes_names = None
        self.worker_nodes_info = {}
        self.stu_pods_names = None
        self.stu_pods_info = {}
        self.pods_memory = None
        self.pods_cpu = None
        self.pods_received_bandwidth = None
        self.pods_transmit_bandwidth = None
        self.pods_IOPS = None
        self.pods_throughput = None
        print("KubeScheduling init start...")
        self._init_cluster_data()
        print("Init cluster data success!")
        self._calculate_node_score()
        print("Init node score success!")
        self._calculate_pod_score()
        print("Init pod score success!")
        print("KubeScheduling init success!")

    def _init_cluster_data(self):
        """Fetch per-node and per-pod metrics over the last `minutes_span`."""
        self.nodes = self.kubePrometheus.get_all_nodes_names()
        # Workers are identified by hostname substring — assumes worker
        # hostnames contain "node" and the master's does not; TODO confirm.
        self.worker_nodes_names = [node for node in self.nodes if 'node' in node]
        self.worker_nodes_info = {}
        self.stu_pods_names = self.kubePrometheus.get_namespace_pods_names(namespace=self.stu_namespace)
        self.stu_pods_info = {}

        for worker in self.worker_nodes_names:
            self.worker_nodes_info[worker] = {
                'student_pods': self.kubePrometheus.get_node_namespaced_pods(namespace=self.stu_namespace, node=worker),
                'memory_usage_percent': self.kubePrometheus.get_node_memory_usage_persent(node=worker, latest_minutes=self.minutes_span),
                'memory_usage': self.kubePrometheus.get_node_memory_usage(node=worker, latest_minutes=self.minutes_span),
                'memory_free_bytes': self.kubePrometheus.get_node_memory_MemFree_bytes(node=worker, latest_minutes=self.minutes_span),
                'cpu_seconds_total': self.kubePrometheus.get_node_cpu_seconds_total(node=worker, latest_minutes=self.minutes_span),
                'cpu_rate': self.kubePrometheus.get_node_cpu_rate(node=worker, latest_minutes=self.minutes_span),
                'network_rate': self.kubePrometheus.get_node_network_rate(node=worker, latest_minutes=self.minutes_span),
                'disk_io_rate': self.kubePrometheus.get_node_disk_io_rate(node=worker, latest_minutes=self.minutes_span),
                # Score fields filled in by _calculate_node_score().
                'score': 0,
                'cpu_util': 0,
                'memory_util': 0,
                'disk_util': 0,
                'network_util': 0,
                'active_pods_util': 0,
            }
        self.pods_memory = self.kubePrometheus.get_namespace_total_memory(namespace=self.stu_namespace, latest_minutes=self.minutes_span)
        self.pods_cpu = self.kubePrometheus.get_namespace_total_cpu(namespace=self.stu_namespace, latest_minutes=self.minutes_span)
        self.pods_received_bandwidth = self.kubePrometheus.get_namespace_total_receive_bandwidth(namespace=self.stu_namespace, latest_minutes=self.minutes_span)
        self.pods_transmit_bandwidth = self.kubePrometheus.get_namespace_total_transmit_bandwidth(namespace=self.stu_namespace, latest_minutes=self.minutes_span)
        self.pods_IOPS = self.kubePrometheus.get_namespace_total_IOPS(namespace=self.stu_namespace, latest_minutes=self.minutes_span)
        self.pods_throughput = self.kubePrometheus.get_namespace_total_throughput(namespace=self.stu_namespace, latest_minutes=self.minutes_span)

    def _align_array_length(self, arr1, arr2):
        """Pad the shorter array with copies of its last element so lengths match.

        Only touches an array when padding is actually needed (the original
        indexed ``arr[-1]`` unconditionally, which raised IndexError for a
        pair of empty arrays and silently promoted dtype on equal lengths).
        """
        diff = arr1.shape[0] - arr2.shape[0]
        if diff < 0:
            arr1 = np.append(arr1, [arr1[-1]] * (-diff))
        elif diff > 0:
            arr2 = np.append(arr2, [arr2[-1]] * diff)
        return arr1, arr2

    def _divide_array(self, arr1, arr2):
        """Element-wise arr1/arr2 after length alignment; 0 where arr2 is 0."""
        arr1, arr2 = self._align_array_length(arr1, arr2)
        return np.divide(arr1, arr2, out=np.zeros_like(arr1), where=arr2 != 0)

    def _add_array(self, arr1, arr2, dtype=np.float64):
        """Element-wise arr1+arr2 after length alignment."""
        arr1, arr2 = self._align_array_length(arr1, arr2)
        return np.add(arr1, arr2, dtype=dtype)

    def _calculate_node_score(self):
        """Compute each worker's weighted usage share and pick the least-loaded node."""
        nodes_total_cpu_rate = np.array([])
        nodes_total_memory_usage_percent = np.array([])
        nodes_total_disk_rate = np.array([])
        nodes_total_network_rate = np.array([])
        nodes_total_active_pods = 0

        # Pass 1: accumulate cluster-wide totals across all workers.
        for worker in self.worker_nodes_names:
            print(f"Calculate score for {worker}", end="...")
            info = self.worker_nodes_info[worker]
            if len(nodes_total_cpu_rate) != 0:
                nodes_total_cpu_rate = self._add_array(nodes_total_cpu_rate, np.array(info['cpu_rate'], dtype=np.float64))
                nodes_total_memory_usage_percent = self._add_array(nodes_total_memory_usage_percent, np.array(info['memory_usage_percent'], dtype=np.float64))
                nodes_total_disk_rate = self._add_array(nodes_total_disk_rate, np.array(info['disk_io_rate'], dtype=np.float64))
                nodes_total_network_rate = self._add_array(nodes_total_network_rate, np.array(info['network_rate'], dtype=np.float64))
                nodes_total_active_pods += len(info['student_pods'])
            else:
                nodes_total_cpu_rate = np.array(info['cpu_rate'], dtype=np.float64)
                nodes_total_memory_usage_percent = np.array(info['memory_usage_percent'], dtype=np.float64)
                nodes_total_disk_rate = np.array(info['disk_io_rate'], dtype=np.float64)
                nodes_total_network_rate = np.array(info['network_rate'], dtype=np.float64)
                nodes_total_active_pods = len(info['student_pods'])
            print("Done!")

        # Pass 2: per-worker share of each total, weighted into one score.
        best_score = 1.1  # sentinel above any achievable weighted share sum (<= 1.0)
        for worker in self.worker_nodes_names:
            node_info = self.worker_nodes_info[worker]
            print(f"Calculate score for {worker}", end="...")
            node_info['cpu_util'] = self._divide_array(node_info['cpu_rate'], nodes_total_cpu_rate)
            node_info['memory_util'] = self._divide_array(node_info['memory_usage_percent'], nodes_total_memory_usage_percent)
            node_info['disk_util'] = self._divide_array(node_info['disk_io_rate'], nodes_total_disk_rate)
            node_info['network_util'] = self._divide_array(node_info['network_rate'], nodes_total_network_rate)
            # Guard against division by zero when no student pods exist.
            node_info['active_pods_util'] = (len(node_info['student_pods']) / nodes_total_active_pods) if nodes_total_active_pods else 0.0
            node_info['score'] = (
                self.score_weight['cpu'] * node_info['cpu_util']
                + self.score_weight['memory'] * node_info['memory_util']
                + self.score_weight['network'] * node_info['network_util']
                + self.score_weight['disk'] * node_info['disk_util']
                + self.score_weight['active_node'] * node_info['active_pods_util']
            )
            # BUG FIX: the original compared against a constant 1.1 and never
            # updated it, so best_node ended up as the LAST worker iterated.
            # Track the running minimum of the latest score sample instead.
            if node_info['score'][-1] < best_score:
                best_score = node_info['score'][-1]
                self.best_node = worker
            print("Done!")

    def _calculate_pod_score(self):
        """Rank student pods by resource activity.

        Returns a list of ``[pod, worker, active, total_rank]`` sorted by
        total rank ascending (rank 1 on each metric = highest usage, so the
        most active pods come first).  ``active`` is False when the pod's
        memory variance over the window is zero, i.e. it looks defunct.
        """
        data = {
            'variance_cpu': [],
            'variance_IOPS': [],
            'variance_throughput': [],
            'variance_memory': [],
            'variance_received_bandwidth': [],
            'variance_transmit_bandwidth': [],
            'average_cpu': [],
            'average_IOPS': [],
            'average_throughput': [],
            'average_memory': [],
            'average_received_bandwidth': [],
            'average_transmit_bandwidth': [],
        }
        rank_pods = []
        for worker in self.worker_nodes_names:
            for pod in self.worker_nodes_info[worker]['student_pods']:
                data['variance_cpu'].append(np.var(self.pods_cpu[pod]))
                data['variance_memory'].append(np.var(self.pods_memory[pod]))
                data['variance_IOPS'].append(np.var(self.pods_IOPS[pod]))
                data['variance_throughput'].append(np.var(self.pods_throughput[pod]))
                data['variance_received_bandwidth'].append(np.var(self.pods_received_bandwidth[pod]))
                data['variance_transmit_bandwidth'].append(np.var(self.pods_transmit_bandwidth[pod]))
                data['average_cpu'].append(np.mean(self.pods_cpu[pod]))
                data['average_memory'].append(np.mean(self.pods_memory[pod]))
                data['average_received_bandwidth'].append(np.mean(self.pods_received_bandwidth[pod]))
                data['average_transmit_bandwidth'].append(np.mean(self.pods_transmit_bandwidth[pod]))
                data['average_IOPS'].append(np.mean(self.pods_IOPS[pod]))
                data['average_throughput'].append(np.mean(self.pods_throughput[pod]))
                # Zero memory variance over the whole window => pod is idle.
                active = bool(np.var(self.pods_memory[pod]) != 0)
                rank_pods.append([pod, worker, active])
        # Dense-rank every metric descending: rank 1 = highest usage.
        ranked_data = {key: rankdata(-np.array(values), method='dense') for key, values in data.items()}
        total_ranks = np.zeros(len(rank_pods))
        for values in ranked_data.values():
            total_ranks += np.array(values)

        for i, entry in enumerate(rank_pods):
            entry.append(total_ranks[i])
        return sorted(rank_pods, key=lambda x: x[-1])

    def get_best_node(self):
        """Refresh cluster data, recompute scores and return the least-loaded worker."""
        self._init_cluster_data()
        self._calculate_node_score()
        return self.best_node

    def delete_defunct_pods(self, minutes=120):
        """Delete the service and deployment of pods that look idle.

        A pod is considered defunct when its memory variance over the last
        `minutes` is zero.  Failures are printed and swallowed (best effort,
        matching the original behavior).
        """
        try:
            self.minutes_span = minutes
            self._init_cluster_data()
            ranked_pods = self._calculate_pod_score()
            defunct_pods = [pod[0] for pod in ranked_pods if not pod[2]]
            if not defunct_pods:
                return
            # Load admin credentials and build API clients ONCE, not per pod
            # (the original redid this inside the loop).
            config.load_kube_config('/etc/kubernetes/admin.conf')
            core_api = client.CoreV1Api()
            apps_api = client.AppsV1Api()
            for pod in defunct_pods:
                # Pod names look like "<type>-<task>-<user>-..."; the
                # deployment/service name is the first three components.
                task_type, task_id, user_id = pod.split('-')[:3]
                deployment_name = f"{task_type}-{task_id}-{user_id}"
                core_api.delete_namespaced_service(deployment_name, self.stu_namespace)
                apps_api.delete_namespaced_deployment(deployment_name, self.stu_namespace)
        except Exception as e:
            # Best-effort cleanup: report and continue.
            print(e)
        
            


def test_scheduling():
    """Smoke-test KubeScheduling construction (scores computed in __init__)."""
    scheduler = KubeScheduling()
    # scheduler.delete_defunct_pods()
    print()

def get_data_7_days():
    """Fetch one week of per-pod CPU data and print how many pods reported."""
    prom = KubePrometheus()
    week_minutes = 60 * 24 * 7
    data = prom.get_namespace_total_cpu(namespace='current-semester', latest_minutes=week_minutes)
    print(len(data))
if __name__ == '__main__':
    # Check that every metric helper works end to end
    # test_all_function()

    # Exercise the scheduling logic
    # test_scheduling()
    get_data_7_days()