# -*- coding: UTF-8 -*-  
import env_tina as environment  
import config  
import argparse  
import torch  
import numpy as np  
import copy  
import random  
from object import delay  

# 定义全局常量  
REJ_CONT_LIMIT = -2  # 容器数量限制拒绝标记  
REJ_STORE_LIMIT = -3  # 存储空间限制拒绝标记  
MAX_SUM_SIZE = 1424  # 最大总大小  
MIN_SUM_SIZE = 10  # 最小总大小  

# 全局变量  
global_requested_image_id = 0  
global_task_cpu = 0  
data = {}  # 存储实验数据的字典  

# 设备选择  
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")  


class KubeScheduler:
    """Baseline scheduler mimicking the Kubernetes image-locality scoring policy."""

    def __init__(self, env, args):
        """
        Args:
            env: simulation environment (exposes Task/Edge/Image/Layer tables,
                task_queue, reset/step, etc.).
            args: parsed CLI arguments (expects .task, .seed, .episodes).
        """
        self.env = env
        self.args = args
        self.task_list = self.env.allexist_task_list

    def kube_agent(self, task, node_list, lb_ratio=None):
        """
        Kubernetes-like scheduling: pick the candidate node with the highest
        image-locality score (total size of requested-image data already present).

        Args:
            task: task to place (uses requested_image_id, task_cpu, task_mem).
            node_list: candidate edge nodes, visited in random order.
            lb_ratio: optional weight in [0, 1]; when given, the final score is
                lb_ratio * load-balancing score + (1 - lb_ratio) * locality score.

        Returns:
            Selected node id; config.EDGE_NODE_NUM (the cloud) when every
            candidate was rejected.
        """
        max_score, selected = -1, -1
        requested_image_id = task.requested_image_id
        # Random visit order breaks ties between equally scored nodes.
        self.visit_sequence = np.random.permutation(len(node_list))
        task_cpu = task.task_cpu
        task_mem = task.task_mem

        # Total layer size of the requested image depends only on the task,
        # not on the node — compute it once instead of per candidate.
        total_size = sum(
            int(self.env.Layer[self.env.layer_dict[x][-2]].size)
            for x in self.env.Image[requested_image_id].layer_list
        )

        for i in self.visit_sequence:
            node = node_list[i]
            if not self.get_node_conta_free(node, task_cpu, task_mem):
                # Remember *why* we rejected, but only if nothing better found yet.
                if selected < 0:
                    selected = REJ_CONT_LIMIT
                continue

            if total_size > self.get_node_free_disk(node):
                if selected < 0:
                    selected = REJ_STORE_LIMIT
                continue

            # Image-locality score: total size of images already on the node
            # (image_01_list is a 0/1 presence vector indexed by image id).
            score = sum(
                status * int(self.env.Image[index].size)
                for index, status in enumerate(node.image_01_list)
            )

            if lb_ratio is not None:
                # Blend a load-balancing score (free container slots, scaled
                # to 0-10) with the locality score.
                score_locality = self.scaled_score_locality(score)
                score_lb = (
                    (config.node_max_container_number - node.container_number)
                    / config.node_max_container_number
                    * 10
                )
                score = lb_ratio * score_lb + (1 - lb_ratio) * score_locality

            if score > max_score:
                max_score = score
                selected = node.id

        if selected < 0:
            # Every node was rejected — fall back to the cloud node.
            selected = config.EDGE_NODE_NUM

        return selected

    def get_node_conta_free(self, node, task_cpu, task_mem):
        """
        Check whether the node can host one more container running this task.

        Bug fix: the original used `<` and therefore accepted a node already
        AT config.node_max_container_number; use `>=` so a full node is
        rejected, consistent with select_available_nodes_ids.

        Returns:
            True when the node has a free container slot and enough CPU/memory.
        """
        if node.container_number >= config.node_max_container_number:
            return False
        if task_cpu > node.available_cpu or task_mem > node.available_mem:
            return False
        return True

    def get_node_free_disk(self, node):
        """Return the node's available disk space."""
        return node.available_disk

    def scaled_score_locality(self, score):
        """
        Linearly scale a raw locality score into the range 0-10.

        Bug fix: the original middle branch divided by (MAX - MIN) without
        multiplying by 10, so in-range scores landed in ~[0, 1] and then
        jumped discontinuously to 10 above MAX_SUM_SIZE. The mapping is now
        continuous and clamped to [0, 10].

        Args:
            score: raw locality score (sum of image sizes).

        Returns:
            Scaled score in [0, 10].
        """
        if score >= MAX_SUM_SIZE:
            return 10
        if score <= MIN_SUM_SIZE:
            return 0
        return (score - MIN_SUM_SIZE) / (MAX_SUM_SIZE - MIN_SUM_SIZE) * 10

    def run_baseline(self, policy="kube_agent"):
        """
        Run the kube_agent baseline for args.episodes episodes and record the
        per-task averaged metrics into the module-level `data` dict.

        Args:
            policy: policy label; currently only "kube_agent" is implemented
                and this parameter is not consulted when choosing actions.
        """
        task_num = self.args.task
        total_epochs = 0

        # Per-episode averaged metrics, kept for post-hoc inspection.
        rewards_list = []
        migratio_list = []
        total_download_time_list = []
        trans_list = []
        waiting_time_list = []
        total_download_size_list = []
        computation_delay_list = []
        arrange_delay_list = []
        scaling_delay_list = []
        backhaul_delay_list = []

        while total_epochs < self.args.episodes:
            print(f"第 {total_epochs+1} 轮实验")

            # NOTE(review): re-seeding with the same seed every episode makes
            # all episodes identical — presumably intentional for a
            # deterministic baseline; confirm.
            self.env.seed(self.args.seed)
            np.random.seed(self.args.seed)
            torch.manual_seed(self.args.seed)

            obs, _ = self.env.reset(self.args.task)

            self.task_list = self.env.task_queue
            print(obs["users"])
            print(obs["tasks"])

            node_list_id = self.select_available_nodes_ids(self.env, obs)

            # Episode accumulators; scaled to ms by the * 1000 below.
            ep_reward = 0
            ep_migration = 0
            ep_total_download_time = 0
            ep_trans = 0
            ep_waiting_time = 0
            ep_total_download_size = 0
            ep_computation_delay = 0
            ep_arrange_delay = 0
            ep_scaling_delay = 0
            ep_backhaul_delay = 0

            done = False
            cloud_count = 0
            while not done:
                # Peek the next task: get() then put() keeps the queue intact.
                tem_task = self.task_list.get()
                self.task_list.put(tem_task)
                task_id = tem_task[1]
                task = self.env.Task[task_id]

                # Materialize the candidate node objects (skip the cloud slot).
                node_list = []
                node_id = []
                for index, i in enumerate(node_list_id):
                    if i == 1 and index != config.EDGE_NODE_NUM:
                        node_id.append(index)
                        node_list.append(self.env.Edge[index])

                if not node_list:
                    # No feasible edge node: schedule to the cloud.
                    action = config.EDGE_NODE_NUM
                    cloud_count += 1
                else:
                    action = self.kube_agent(task, node_list)

                obs_, reward, info, done, details = self.env.step(action)

                obs = obs_
                # Environment returns a cost; negate so higher is better.
                reward = -reward

                # Refresh the candidate list after the environment advanced.
                node_list_id = self.select_available_nodes_ids(self.env, obs)

                ep_reward += reward * 1000
                ep_migration += details[0] * 1000
                ep_total_download_time += details[1] * 1000
                ep_trans += details[2] * 1000
                ep_waiting_time += details[3] * 1000
                ep_total_download_size += details[4] * 1000
                ep_computation_delay += details[5] * 1000
                ep_arrange_delay += details[6] * 1000
                ep_scaling_delay += details[7] * 1000
                ep_backhaul_delay += details[8] * 1000

            total_epochs += 1

            # Per-task averages (copy.copy on plain numbers was a no-op and
            # has been dropped).
            tmp_r = ep_reward / task_num
            tmp_migration = ep_migration / task_num
            tmp_total_download_time = ep_total_download_time / task_num
            tmp_trans = ep_trans / task_num
            tmp_waiting_time = ep_waiting_time / task_num
            tmp_total_download_size = ep_total_download_size / task_num
            tmp_computation_delay = ep_computation_delay / task_num
            tmp_arrange_delay = ep_arrange_delay / task_num
            tmp_scaling_delay = ep_scaling_delay / task_num
            tmp_backhaul_delay = ep_backhaul_delay / task_num

            rewards_list.append(tmp_r)
            migratio_list.append(tmp_migration)
            total_download_time_list.append(tmp_total_download_time)
            trans_list.append(tmp_trans)
            waiting_time_list.append(tmp_waiting_time)
            total_download_size_list.append(tmp_total_download_size)
            computation_delay_list.append(tmp_computation_delay)
            arrange_delay_list.append(tmp_arrange_delay)
            scaling_delay_list.append(tmp_scaling_delay)
            backhaul_delay_list.append(tmp_backhaul_delay)

            # Print the episode summary.
            print("\n实验结果:")
            print(f"平均奖励: {tmp_r}")
            print(f"平均迁移延迟: {tmp_migration} ms")
            print(f"平均下载时间: {tmp_total_download_time} ms")
            print(f"平均传输延迟: {tmp_trans} ms")
            print(f"平均等待时间: {tmp_waiting_time} ms")
            print(f"平均下载大小: {tmp_total_download_size} MB")
            print(f"平均计算延迟: {tmp_computation_delay} ms")
            print(f"平均调度延迟: {tmp_arrange_delay} ms")
            print(f"平均扩展延迟: {tmp_scaling_delay} ms")
            print(f"平均回程延迟: {tmp_backhaul_delay} ms")
            print(f"云次数：{cloud_count}")

            # Record the last episode's metrics under a fixed policy key.
            data.update(
                {
                    ("kube_agent", self.args.task, config.EDGE_NODE_NUM): [
                        tmp_r,
                        tmp_computation_delay,
                        tmp_arrange_delay,
                        tmp_scaling_delay,
                        tmp_total_download_size,
                        tmp_backhaul_delay,
                    ]
                }
            )

    def select_available_nodes_ids(self, env, obs):
        """
        Build the 0/1 availability vector over edge nodes plus the cloud slot.

        Args:
            env: environment instance.
            obs: observation dict with "users" and "nodes".

        Returns:
            ndarray of length config.EDGE_NODE_NUM + 1; 1 marks an available
            node, index config.EDGE_NODE_NUM is the cloud.
        """
        # User positions: [id, x, y].
        each_use_loc = [[user[0], user[1], user[2]] for user in obs["users"]]

        # Edge node positions: [node_id, x, y].
        each_edge_loc = [
            [i, node[7], node[8]] for i, node in enumerate(obs["nodes"])
        ]

        # Default: no node available.
        node_list_id = np.zeros(config.EDGE_NODE_NUM + 1, dtype=int)

        # No pending tasks: nothing to schedule.
        if not env.allexist_task_list:
            return node_list_id

        # The next task to schedule belongs to the user next in line.
        current_task_idx = [
            item.usr_has_tsk for item in env.User if item.uid == env.next_uid_idx
        ][0][0]
        current_task = env.Task[current_task_idx]

        task_user_map = env.get_task_user_mapping()
        # Bug fix: the original used .get(..., -1) followed by
        # `if not matching_users`, which never caught the -1 "missing"
        # sentinel and wrongly rejected a legitimate user id of 0.
        # Use None as the missing marker and test identity explicitly.
        matching_users = task_user_map.get(current_task.task_id)

        if matching_users is None:
            print(
                "No user found for task_id:",
                current_task.task_id,
                current_task.assigned_node
            )
            return node_list_id

        uid = matching_users

        # Candidate nodes near this user (env-defined pooling).
        tem_next_can = env.pool(uid, each_use_loc, each_edge_loc)

        if current_task.reschedule_count <= 3:
            for node_index in tem_next_can:
                node = env.Edge[node_index]

                # Resource feasibility: a free container slot plus enough
                # memory and CPU for the current task.
                is_resource_sufficient = (
                    node.container_number < config.node_max_container_number
                    and node.available_mem >= current_task.task_mem
                    and node.available_cpu >= current_task.task_cpu
                )

                if is_resource_sufficient:
                    node_list_id[node_index] = 1
        else:
            # Too many reschedules: force the task to the cloud. Kept as an
            # ndarray (the original rebound this to a Python list, making the
            # return type inconsistent).
            node_list_id[:] = 0
            node_list_id[config.EDGE_NODE_NUM] = 1

        # No feasible edge node: fall back to the cloud.
        if np.sum(node_list_id) == 0:
            node_list_id[config.EDGE_NODE_NUM] = 1

        return node_list_id

# 主脚本入口  
# Script entry point.
if __name__ == "__main__":
    cli = argparse.ArgumentParser(description="Kubernetes-like Scheduler")
    cli.add_argument(
        "--task", type=int, default=config.TASK_NUM, metavar="T", help="task number"
    )
    cli.add_argument("--seed", type=int, metavar="S", default=10)
    cli.add_argument("--episodes", type=int, default=1, help="最大训练轮数")
    parsed = cli.parse_args()

    # Build the environment and the scheduler under test.
    scheduler = KubeScheduler(environment.Env(), parsed)

    # Run the baseline experiment.
    scheduler.run_baseline()