# -*- coding: UTF-8 -*-  
import env_tina as environment  
import config  
import argparse  
import torch  
import numpy as np  
import copy  
import random  
from object import delay  

# Module-level globals
data = {}  # experiment results; keyed by ("random", task_num, EDGE_NODE_NUM) tuples written in run_baseline

# Device selection: first CUDA GPU when available, CPU otherwise
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")


class Random:
    """Random-scheduling baseline.

    For each task, picks a node uniformly at random among the nodes that
    currently satisfy the task's resource demands, falling back to the
    cloud node (id == config.EDGE_NODE_NUM) when no edge node qualifies.
    """

    def __init__(self, env, args):
        self.env = env
        self.args = args
        self.task_list = self.env.allexist_task_list

    def random_select(self, node_id_list):
        """
        Pick one available node uniformly at random.

        Parameters:
        - node_id_list: availability mask indexed by node id
          (1 = available, 0 = unavailable)

        Returns:
        - the chosen node id, or config.EDGE_NODE_NUM (the cloud node)
          when the mask contains no available node
        """
        # Indices of all available nodes
        available_nodes = [i for i, available in enumerate(node_id_list) if available == 1]

        # No edge node available: fall back to the cloud node
        if not available_nodes:
            return config.EDGE_NODE_NUM

        # FIX: the previous per-call `random.seed(random.randint(1, 10000))`
        # silently destroyed the reproducibility established by the episode
        # seed in run_baseline() while adding nothing to the randomness.
        return random.choice(available_nodes)

    def select_available_nodes_ids(self, env, obs):
        """
        Build the availability mask over all nodes for the current task.

        Parameters:
        - env: environment instance
        - obs: observation dict; "users" rows start with [id, x, y] and
          "nodes" rows carry x/y in columns 7 and 8 (assumed from the
          indexing below — TODO confirm against the env implementation)

        Returns:
        - numpy int array of length EDGE_NODE_NUM + 1; entry i is 1 when
          node i may host the current task (index EDGE_NODE_NUM = cloud)
        """
        # User positions: [id, x, y]
        each_use_loc = [
            [user[0], user[1], user[2]] for user in obs["users"]
        ]

        # Edge-node positions: [node_id, x, y]
        each_edge_loc = [
            [i, node[7], node[8]]
            for i, node in enumerate(obs["nodes"])
        ]

        # Default: every node unavailable
        node_list_id = np.zeros(config.EDGE_NODE_NUM + 1, dtype=int)

        # Nothing pending: keep the all-zero mask
        if not env.allexist_task_list:
            return node_list_id

        # Current task = first task of the user whose turn is next
        current_task_idx = [
            item.usr_has_tsk for item in env.User if item.uid == env.next_uid_idx
        ][0][0]
        current_task = env.Task[current_task_idx]

        task_user_map = env.get_task_user_mapping()
        # FIX: the old lookup used .get(..., -1) together with
        # `if not matching_users`, which misclassified the valid uid 0 as
        # "missing" and let the truthy -1 sentinel continue as a real uid.
        uid = task_user_map.get(current_task.task_id)

        if uid is None:
            print(
                "No user found for task_id:",
                current_task.task_id,
                current_task.assigned_node
            )
            return node_list_id

        # Candidate nodes near this user, as filtered by the environment
        tem_next_can = env.pool(uid, each_use_loc, each_edge_loc)

        if current_task.reschedule_count <= 3:
            # Mark every candidate whose free resources cover the task
            for node_index in tem_next_can:
                node = env.Edge[node_index]

                # Resource constraints: container slot, memory, CPU
                is_resource_sufficient = (
                    node.container_number < config.node_max_container_number
                    and node.available_mem >= current_task.task_mem
                    and node.available_cpu >= current_task.task_cpu
                )

                if is_resource_sufficient:
                    node_list_id[node_index] = 1
        else:
            # Rescheduled more than 3 times: force the task to the cloud
            node_list_id = np.zeros(config.EDGE_NODE_NUM + 1, dtype=int)
            node_list_id[config.EDGE_NODE_NUM] = 1

        # No edge node qualified: the cloud is always available
        if np.sum(node_list_id) == 0:
            node_list_id[config.EDGE_NODE_NUM] = 1

        return node_list_id

    def run_baseline(self):
        """
        Run the random-scheduling benchmark for args.episodes episodes.

        Per-task averages of each delay component are appended to local
        metric lists; the global `data` entry for this configuration is
        overwritten every episode, so it ends up holding the last one.
        """
        task_num = self.args.task
        total_epochs = 0

        rewards_list = []
        migration_list = []
        total_download_time_list = []
        trans_list = []
        waiting_time_list = []
        total_download_size_list = []
        computation_delay_list = []
        arrange_delay_list = []
        scaling_delay_list = []
        backhaul_delay_list = []

        while total_epochs < self.args.episodes:
            print(f"第 {total_epochs+1} 轮实验")

            # One deterministic seed per episode: runs are reproducible,
            # but randomness still differs between episodes.
            seed = self.args.seed + total_epochs
            self.env.seed(seed)
            np.random.seed(seed)
            torch.manual_seed(seed)
            random.seed(seed)

            obs, _ = self.env.reset(self.args.task)

            self.task_list = self.env.task_queue
            print("用户数量:", len(obs["users"]))
            print("任务数量:", task_num)

            node_list_id = self.select_available_nodes_ids(self.env, obs)

            ep_reward = 0
            ep_migration = 0
            ep_total_download_time = 0
            ep_trans = 0
            ep_waiting_time = 0
            ep_total_download_size = 0
            ep_computation_delay = 0
            ep_arrange_delay = 0
            ep_scaling_delay = 0
            ep_backhaul_delay = 0

            done = False
            cloud_count = 0
            step_count = 0

            print("\n" + "="*80)
            print("| {:<4} | {:<10} | {:<10} | {:<14} | {:<14} | {:<14} |".format(
                "步骤", "动作", "奖励", "累计奖励", "云调用", "可用节点"
            ))
            print("="*80)

            while not done:
                # Rotate the task queue: take the head task and put it
                # straight back (presumably the env consumes tasks itself
                # inside step() — confirm against env.step)
                tem_task = self.task_list.get()
                self.task_list.put(tem_task)

                # Uniformly random action over the currently available nodes
                action = self.random_select(node_list_id)

                if action == config.EDGE_NODE_NUM:
                    cloud_count += 1

                # Execute the action
                obs_, reward, info, done, details = self.env.step(action)

                # Rewards are costs here: negate so smaller is better
                obs = obs_
                reward = -reward

                # Refresh the availability mask for the next task
                node_list_id = self.select_available_nodes_ids(self.env, obs)

                # Accumulate metrics; delays are scaled by 1000 (presumably
                # seconds -> ms — confirm with env), details[4] is a size
                # and stays unscaled
                ep_reward += reward * 1000
                ep_migration += details[0] * 1000
                ep_total_download_time += details[1] * 1000
                ep_trans += details[2] * 1000
                ep_waiting_time += details[3] * 1000
                ep_total_download_size += details[4]
                ep_computation_delay += details[5] * 1000
                ep_arrange_delay += details[6] * 1000
                ep_scaling_delay += details[7] * 1000
                ep_backhaul_delay += details[8] * 1000

                step_count += 1

            # Per-task averages for this episode
            total_epochs += 1
            tmp_r = ep_reward / task_num
            tmp_migration = ep_migration / task_num
            tmp_total_download_time = ep_total_download_time / task_num
            tmp_trans = ep_trans / task_num
            tmp_waiting_time = ep_waiting_time / task_num
            tmp_total_download_size = ep_total_download_size / task_num
            tmp_computation_delay = ep_computation_delay / task_num
            tmp_arrange_delay = ep_arrange_delay / task_num
            tmp_scaling_delay = ep_scaling_delay / task_num
            tmp_backhaul_delay = ep_backhaul_delay / task_num

            # Record the episode's metrics
            rewards_list.append(tmp_r)
            migration_list.append(tmp_migration)
            total_download_time_list.append(tmp_total_download_time)
            trans_list.append(tmp_trans)
            waiting_time_list.append(tmp_waiting_time)
            total_download_size_list.append(tmp_total_download_size)
            computation_delay_list.append(tmp_computation_delay)
            arrange_delay_list.append(tmp_arrange_delay)
            scaling_delay_list.append(tmp_scaling_delay)
            backhaul_delay_list.append(tmp_backhaul_delay)

            # Print the episode summary
            print("\n" + "="*80)
            print("第 {} 轮实验结果:".format(total_epochs))
            print("="*80)
            print(f"总步数: {step_count}")
            print(f"平均奖励: {tmp_r:.2f}")
            print(f"平均迁移延迟: {tmp_migration:.2f} ms")
            print(f"平均下载时间: {tmp_total_download_time:.2f} ms")
            print(f"平均传输延迟: {tmp_trans:.2f} ms")
            print(f"平均等待时间: {tmp_waiting_time:.2f} ms")
            print(f"平均下载大小: {tmp_total_download_size:.2f}")
            print(f"平均计算延迟: {tmp_computation_delay:.2f} ms")
            print(f"平均调度延迟: {tmp_arrange_delay:.2f} ms")
            print(f"平均扩展延迟: {tmp_scaling_delay:.2f} ms")
            print(f"平均回程延迟: {tmp_backhaul_delay:.2f} ms")
            print(f"云节点使用次数: {cloud_count} ({cloud_count/step_count*100:.2f}%)")
            print("="*80)

            # Overwrites the same key each episode — only the last episode's
            # values persist in the global `data` dict
            data.update(
                {
                    ("random", self.args.task, config.EDGE_NODE_NUM): [
                        tmp_r,
                        tmp_computation_delay,
                        tmp_arrange_delay,
                        tmp_scaling_delay,
                        tmp_total_download_size,
                        tmp_backhaul_delay,
                    ]
                }
            )

# Script entry point
if __name__ == "__main__":
    # Command-line interface for the random baseline
    arg_parser = argparse.ArgumentParser(description="Random Baseline")
    arg_parser.add_argument(
        "--task", type=int, default=config.TASK_NUM, metavar="T", help="task number"
    )
    arg_parser.add_argument("--seed", type=int, metavar="S", default=10)
    arg_parser.add_argument("--episodes", type=int, default=1, help="最大训练轮数")
    parsed_args = arg_parser.parse_args()

    # Build the environment and run the random benchmark
    baseline = Random(environment.Env(), parsed_args)
    baseline.run_baseline()