# -*- coding: UTF-8 -*-
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch_geometric.nn as gnn
from torch.nn import MultiheadAttention
import config
import numpy as np
import torch.optim as optim
from torch.utils.data.sampler import BatchSampler, SubsetRandomSampler
from torch.distributions import Categorical
from torch_geometric.nn import GATConv
from object import delay


# Select GPU 0 when CUDA is available, otherwise fall back to the CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Problem dimensions pulled from the config file.
layer_num = config.LAYER_TOTAL_NUM  # total number of container image layers
node_num = config.EDGE_NODE_NUM  # number of edge nodes
user_num = config.USER_NUM  # number of users


class GraphEncoder(nn.Module):
    """Encode user and edge-node features over a fully-connected bipartite graph.

    Users and nodes are projected into a shared hidden space, concatenated
    into one node set per sample (users first), and message-passed through
    two GAT layers over edges connecting every user to every edge node in
    both directions.
    """

    def __init__(self, user_dim=3, node_dim=14, hidden_dim=64):
        """
        Args:
            user_dim: width of each user feature row.
            node_dim: width of each edge-node feature row.
            hidden_dim: shared hidden width for both branches and the GAT layers.
        """
        super().__init__()

        # Linear projections into the shared hidden space.
        self.user_encoder = nn.Sequential(
            nn.Linear(user_dim, hidden_dim), nn.LeakyReLU()
        )
        self.node_encoder = nn.Sequential(
            nn.Linear(node_dim, hidden_dim), nn.LeakyReLU()
        )

        # Two stacked graph-attention layers.
        self.conv1 = GATConv(hidden_dim, hidden_dim)
        self.conv2 = GATConv(hidden_dim, hidden_dim)

    @staticmethod
    def _bipartite_edge_index(num_users, num_nodes):
        """Return the (2, 2*num_users*num_nodes) long tensor of directed edges
        user->node and node->user, with users occupying indices [0, num_users)
        and nodes [num_users, num_users + num_nodes).
        """
        user_idx = torch.arange(num_users).repeat_interleave(num_nodes)
        node_idx = torch.arange(num_nodes).repeat(num_users) + num_users
        forward_edges = torch.stack([user_idx, node_idx])
        backward_edges = torch.stack([node_idx, user_idx])
        return torch.cat([forward_edges, backward_edges], dim=1)

    def forward(self, users, nodes):
        """Encode one sample or a batch of user/node feature matrices.

        Args:
            users: array-like of shape (num_users, user_dim) or
                (batch, num_users, user_dim).
            nodes: array-like of shape (num_nodes, node_dim) or
                (batch, num_nodes, node_dim).

        Returns:
            Tuple (user_graph_features, node_graph_features) of shapes
            (batch, num_users, hidden_dim) and (batch, num_nodes, hidden_dim).
        """
        # as_tensor avoids a copy (and a warning) when the input is already a tensor.
        users = torch.as_tensor(users, dtype=torch.float32).to(device)
        nodes = torch.as_tensor(nodes, dtype=torch.float32).to(device)

        # Promote single samples to a batch of one.
        if users.dim() == 2:
            users = users.unsqueeze(0)
        if nodes.dim() == 2:
            nodes = nodes.unsqueeze(0)

        batch_size, num_users, _ = users.shape
        _, num_nodes, _ = nodes.shape

        user_features = self.user_encoder(users)
        node_features = self.node_encoder(nodes)

        # The edge index depends only on the graph sizes, so build it once:
        # the original rebuilt identical tensors with Python double loops
        # for every batch element.
        edge_index = self._bipartite_edge_index(num_users, num_nodes).to(device)

        # Combine users and nodes into one node set per sample.
        x = torch.cat([user_features, node_features], dim=1)

        # GATConv is not batch-aware here, so run each sample separately.
        graph_features = []
        for b in range(batch_size):
            hidden = self.conv1(x[b], edge_index)
            graph_features.append(self.conv2(hidden, edge_index))
        graph_features = torch.stack(graph_features)

        # Split the combined node set back into user and node parts.
        user_graph_features = graph_features[:, :num_users]
        node_graph_features = graph_features[:, num_users:]
        return user_graph_features, node_graph_features


class DeepCompositeNetwork(nn.Module):
    """Fuse node-layer, task, and task-layer features into one hidden vector.

    Each branch applies an MLP to the last dimension of its input; the
    encoded branches are flattened, concatenated, and mixed by a fusion MLP.
    """

    def __init__(
        self,
        nodes_layer_dim,
        tasks_dim,
        tasks_layer_dim,
        hidden_dim=64,
        nodes_layer_rows=18,
        tasks_layer_rows=2,
    ):
        """
        Args:
            nodes_layer_dim: last-dim width of the ``nodes_layers`` input.
            tasks_dim: width of the task feature vector.
            tasks_layer_dim: last-dim width of the ``tasks_layers`` input.
            hidden_dim: hidden width shared by all branches.
            nodes_layer_rows: rows produced when flattening the node-layer
                branch (node_num * 2 in this project). Default 18 preserves
                the previously hard-coded value — TODO confirm it matches
                config.EDGE_NODE_NUM * 2 in deployment.
            tasks_layer_rows: rows of the task-layer branch (default 2).
        """
        super().__init__()

        # Node-layer branch: encodes the last dimension of each row.
        self.nodes_layer_encoder = nn.Sequential(
            nn.Linear(nodes_layer_dim, hidden_dim * 2),
            nn.LeakyReLU(),
            nn.Linear(hidden_dim * 2, hidden_dim),
        )

        # Task feature branch.
        self.tasks_encoder = nn.Sequential(
            nn.Linear(tasks_dim, hidden_dim // 2),
            nn.LeakyReLU(),
            nn.Linear(hidden_dim // 2, hidden_dim),
        )

        # Task-layer branch.
        self.tasks_layer_encoder = nn.Sequential(
            nn.Linear(tasks_layer_dim, hidden_dim * 2),
            nn.LeakyReLU(),
            nn.Linear(hidden_dim * 2, hidden_dim),
        )

        # Fusion MLP over the concatenated, flattened branch outputs:
        # nodes_layer_rows + 1 (task vector) + tasks_layer_rows rows of
        # hidden_dim each.
        fusion_in = hidden_dim * (nodes_layer_rows + 1 + tasks_layer_rows)
        self.fusion_layer = nn.Sequential(
            nn.Linear(fusion_in, hidden_dim * 2),
            nn.LeakyReLU(),
            nn.Linear(hidden_dim * 2, hidden_dim),
        )

    def forward(self, nodes_layers, tasks, tasks_layers):
        """Fuse the three feature groups.

        Args:
            nodes_layers: array-like whose last dim is ``nodes_layer_dim``;
                a leading batch dim is added when missing (dim == 3).
            tasks: array-like of shape (tasks_dim,) or (batch, tasks_dim).
            tasks_layers: array-like whose last dim is ``tasks_layer_dim``;
                a leading batch dim is added when missing (dim == 2).

        Returns:
            Tensor of shape (batch, hidden_dim).
        """
        # Follow the module's own device rather than a module-level global,
        # so the layer works wherever it has been moved with .to(...).
        dev = next(self.parameters()).device
        nodes_layers = torch.as_tensor(nodes_layers, dtype=torch.float32).to(dev)
        tasks = torch.as_tensor(tasks, dtype=torch.float32).to(dev)
        tasks_layers = torch.as_tensor(tasks_layers, dtype=torch.float32).to(dev)

        # Promote single samples to a batch of one.
        if nodes_layers.dim() == 3:
            nodes_layers = nodes_layers.unsqueeze(0)
        if tasks.dim() == 1:
            tasks = tasks.unsqueeze(0)
        if tasks_layers.dim() == 2:
            tasks_layers = tasks_layers.unsqueeze(0)

        batch_size = nodes_layers.size(0)

        # Encode each branch along its last dimension.
        nodes_layer_features = self.nodes_layer_encoder(nodes_layers)
        tasks_features = self.tasks_encoder(tasks)
        tasks_layer_features = self.tasks_layer_encoder(tasks_layers)

        # Flatten per sample and concatenate for fusion.
        fused_features = torch.cat(
            [
                nodes_layer_features.view(batch_size, -1),
                tasks_features.view(batch_size, -1),
                tasks_layer_features.view(batch_size, -1),
            ],
            dim=-1,
        )

        return self.fusion_layer(fused_features)


class Actor(nn.Module):
    """Policy network: maps the flat environment observation to a softmax
    distribution over node_num edge nodes plus one cloud action.

    The observation is the concatenation of user features, node features,
    per-node layer matrices, the current task vector, and the task layer
    matrix; see forward() for the exact layout.
    """

    def __init__(self, input_dim=None, node_num=node_num):
        """
        Args:
            input_dim: accepted for interface compatibility but unused —
                the real input width is derived from the config constants.
            node_num: number of edge nodes (defaults to the module constant).
        """
        super().__init__()

        # GNN encoder over the user/node bipartite graph.
        self.graph_encoder = GraphEncoder(user_dim=3, node_dim=14, hidden_dim=64)

        # Deep composite network over layer/task features.
        self.dcn = DeepCompositeNetwork(
            nodes_layer_dim=1 * layer_num,
            tasks_dim=4,
            tasks_layer_dim=1 * layer_num,
        )

        # user graph features + node graph features + DCN feature vector,
        # all 64-wide per row.
        combined_dim = user_num * 64 + node_num * 64 + 64

        # Policy head: logits for every edge node plus the cloud.
        self.policy_network = nn.Sequential(
            nn.Linear(combined_dim, 128),
            nn.LeakyReLU(),
            nn.Linear(128, 64),
            nn.LeakyReLU(),
            nn.Linear(64, 16),
            nn.LeakyReLU(),
            nn.Linear(16, node_num + 1),  # +1 for the cloud
        )

    def forward(self, x):
        """Split the flat observation, encode it, and return action
        probabilities of shape (batch, node_num + 1).

        NOTE(review): this method reads the module-level `node_num`,
        while __init__ may have been given a different value — confirm
        the two always agree.
        """
        # Promote a single sample to a batch of one.
        if x.dim() == 1:
            x = x.unsqueeze(0)
        batch_size = x.size(0)

        # Segment widths of the flat observation.
        user_dim = 3 * user_num
        node_dim = 14 * node_num
        nodes_layers_len = 2 * layer_num * node_num
        tasks_dim = 4

        # Walk through the observation with a running offset.
        offset = 0
        users = x[:, offset : offset + user_dim].view(batch_size, user_num, 3)
        offset += user_dim
        nodes = x[:, offset : offset + node_dim].view(batch_size, node_num, 14)
        offset += node_dim
        # (batch, node_num, 2, layer_num): per-node layer matrices.
        nodes_layers = x[:, offset : offset + nodes_layers_len].view(
            batch_size, node_num, 2, layer_num
        )
        offset += nodes_layers_len
        tasks = x[:, offset : offset + tasks_dim]
        offset += tasks_dim
        # (batch, 2, layer_num): task layer matrix.
        tasks_layers = x[:, offset:].view(batch_size, 2, layer_num)

        # Graph encoding of users/nodes.
        user_graph_features, node_graph_features = self.graph_encoder(users, nodes)

        # DCN over layer/task features.
        dcn_features = self.dcn(nodes_layers, tasks, tasks_layers)

        # Concatenate all per-sample features for the policy head.
        combined_features = torch.cat(
            [
                user_graph_features.view(batch_size, -1),
                node_graph_features.view(batch_size, -1),
                dcn_features.view(batch_size, -1),
            ],
            dim=1,
        )

        return F.softmax(self.policy_network(combined_features), dim=-1)


class Critic(nn.Module):
    """Value network: maps the flat environment observation to a scalar
    state value. Mirrors Actor's encoders and observation layout.
    """

    def __init__(self, input_dim=None):
        """
        Args:
            input_dim: accepted for interface compatibility but unused —
                the real input width is derived from the config constants.
        """
        super().__init__()

        # GNN encoder over the user/node bipartite graph.
        self.graph_encoder = GraphEncoder(user_dim=3, node_dim=14, hidden_dim=64)

        # Deep composite network over layer/task features.
        self.dcn = DeepCompositeNetwork(
            nodes_layer_dim=1 * layer_num,
            tasks_dim=4,
            tasks_layer_dim=1 * layer_num,
        )

        # user graph features + node graph features + DCN feature vector,
        # all 64-wide per row.
        combined_dim = user_num * 64 + node_num * 64 + 64

        # Value head: single scalar per sample.
        self.value_network = nn.Sequential(
            nn.Linear(combined_dim, 128),
            nn.LeakyReLU(),
            nn.Linear(128, 64),
            nn.LeakyReLU(),
            nn.Linear(64, 16),
            nn.LeakyReLU(),
            nn.Linear(16, 1),
        )

    def forward(self, x):
        """Return state values of shape (batch, 1).

        The observation layout must match Actor.forward exactly.
        """
        # Consistency fix: Actor promotes single samples to a batch of one,
        # but Critic previously did not and would mis-read x.size(0).
        if x.dim() == 1:
            x = x.unsqueeze(0)
        batch_size = x.size(0)

        # Segment widths of the flat observation.
        user_dim = 3 * user_num
        node_dim = 14 * node_num
        nodes_layers_len = 2 * layer_num * node_num
        tasks_dim = 4

        # Walk through the observation with a running offset.
        offset = 0
        users = x[:, offset : offset + user_dim].view(batch_size, user_num, 3)
        offset += user_dim
        nodes = x[:, offset : offset + node_dim].view(batch_size, node_num, 14)
        offset += node_dim
        # (batch, node_num, 2, layer_num): per-node layer matrices.
        nodes_layers = x[:, offset : offset + nodes_layers_len].view(
            batch_size, node_num, 2, layer_num
        )
        offset += nodes_layers_len
        tasks = x[:, offset : offset + tasks_dim]
        offset += tasks_dim
        # (batch, 2, layer_num): task layer matrix.
        tasks_layers = x[:, offset:].view(batch_size, 2, layer_num)

        # Graph encoding of users/nodes.
        user_graph_features, node_graph_features = self.graph_encoder(users, nodes)

        # DCN over layer/task features.
        dcn_features = self.dcn(nodes_layers, tasks, tasks_layers)

        # Concatenate all per-sample features for the value head.
        combined_features = torch.cat(
            [
                user_graph_features.view(batch_size, -1),
                node_graph_features.view(batch_size, -1),
                dcn_features.view(batch_size, -1),
            ],
            dim=1,
        )

        return self.value_network(combined_features)


class PPO_GNN:
    """PPO agent whose actor and critic share a GNN-based state encoder.

    Action selection blends the learned policy with hand-crafted expert
    scores over the feasible node set; updates use the clipped PPO
    surrogate with an entropy bonus.
    """

    def __init__(self, env, device="cuda", random_seed=42):
        """
        Args:
            env: scheduling environment providing Edge/User/Task/Layer state.
            device: torch device string; falls back to CPU when CUDA is absent.
            random_seed: seed for torch's RNG.
        """
        # Training hyper-parameters.
        self.clip_param = 0.2  # PPO clipping epsilon
        self.max_grad_norm = 0.5  # gradient clipping norm
        self.ppo_epoch = 10  # optimization epochs per update
        self.buffer_capacity = 64 * 22  # replay buffer capacity
        self.batch_size = 64  # minibatch size
        self.gamma = 0.98  # discount factor
        self.learning_rate_actor = 1e-4
        self.learning_rate_critic = 3e-4

        # Environment-derived parameters.
        self.env = env
        self.node_num = len(env.Edge)
        self.max_cpu = env.max_cpu
        self.max_mem = env.max_mem

        # Device and reproducibility.
        self.device = torch.device(device if torch.cuda.is_available() else "cpu")
        torch.manual_seed(random_seed)

        # Networks.
        self.actor_net = Actor().to(self.device)
        self.critic_net = Critic().to(self.device)

        # Optimizers (L2 regularization on the actor only).
        self.actor_optimizer = optim.Adam(
            self.actor_net.parameters(),
            lr=self.learning_rate_actor,
            weight_decay=1e-5,
        )
        self.critic_optimizer = optim.Adam(
            self.critic_net.parameters(), lr=self.learning_rate_critic
        )

        # Learning-rate decay: multiply by 0.9 every 10 scheduler steps.
        # NOTE(review): update() steps these once per minibatch, so the
        # decay cadence is 10 minibatches, not 10 training epochs.
        self.actor_lr_scheduler = optim.lr_scheduler.StepLR(
            self.actor_optimizer, step_size=10, gamma=0.9
        )
        self.critic_lr_scheduler = optim.lr_scheduler.StepLR(
            self.critic_optimizer, step_size=10, gamma=0.9
        )

        # Experience buffer.
        self.buffer = []
        self.counter = 0
        self.training_step = 0

    def expert_guidance(self, env, obs, current_task):
        """Score every placement (edge nodes + cloud) with hand-crafted
        heuristics: resource headroom, load balance, and image-layer
        download time.

        Returns:
            Normalized numpy array of length config.EDGE_NODE_NUM + 1.
        """

        def resource_utilization_score(node):
            # Average of CPU and memory utilization in [0, 1].
            cpu_util = 1 - (node.available_cpu / self.max_cpu)
            mem_util = 1 - (node.available_mem / self.max_mem)
            return (cpu_util + mem_util) / 2

        def load_balance_score(node):
            # Fraction of the node's container slots in use.
            return node.container_number / config.node_max_container_number

        def get_download_time(requested_image_id, node_id):
            """Time to fetch the image layers missing on node_id.

            node_id == config.EDGE_NODE_NUM denotes the cloud, where every
            layer must be downloaded.
            """
            requested_layer_names = env.Image[requested_image_id].layer_list
            requested_layer_ids = [env.layer_dict[x][-2] for x in requested_layer_names]
            size = 0

            if node_id == config.EDGE_NODE_NUM:
                for layer in requested_layer_ids:
                    size += int(env.Layer[layer].size)
                return size / (config.cloud_bandwidth / 8)

            node = env.Edge[node_id]
            node_layers = node.layer_01_list
            for layer in requested_layer_ids:
                if node_layers[layer] == 0:  # layer not cached on this node
                    size += int(env.Layer[layer].size)
            return size / (node.node_bw2 / 8)

        expert_scores = np.zeros(config.EDGE_NODE_NUM + 1)

        # Kept for parity with the original flow; the mapping itself is
        # unused here (the distance heuristic that needed it is disabled,
        # see delay.hop_distance).
        task_user_map = env.get_task_user_mapping()
        current_user_id = task_user_map.get(current_task.task_id, -1)

        for node_index in range(config.EDGE_NODE_NUM):
            node = env.Edge[node_index]

            # Nodes with more free resources score higher.
            resource_score = 1 - resource_utilization_score(node)

            # Less-loaded nodes score higher.
            balance_score = 1 - load_balance_score(node)

            # Shorter downloads score higher (epsilon avoids division by zero).
            download_time = get_download_time(
                current_task.requested_image_id, node_index
            )
            download_time_score = 1 / (download_time + 1e-5)

            # Weighted blend; weights are tunable.
            expert_scores[node_index] = (
                0.3 * resource_score
                + 0.3 * balance_score
                + 0.4 * download_time_score
            )

        # Cloud score is based on download time only.
        cloud_download_time = get_download_time(
            current_task.requested_image_id, config.EDGE_NODE_NUM
        )
        expert_scores[config.EDGE_NODE_NUM] = 1 / (cloud_download_time + 1e-5)

        # Normalize to a probability-like vector.
        return expert_scores / expert_scores.sum()

    def select_action(self, env, obs):
        """Pick a placement for the current task.

        Combines the actor's probabilities with expert heuristic scores
        over the subset of feasible nodes, then samples from the blend.

        Returns:
            (action, log_prob) in the normal case. NOTE(review): when no
            task is pending, the original returned the raw
            `available_actions` array instead of a tuple — preserved here,
            but callers should be audited.
        """
        # User positions: [id, x, y].
        each_use_loc = [[user[0], user[1], user[2]] for user in obs["users"]]

        # Edge node positions: [node_id, x, y].
        each_edge_loc = [
            [i, node[7], node[8]] for i, node in enumerate(obs["nodes"])
        ]

        # Start with every node marked unavailable.
        available_actions = np.zeros(config.EDGE_NODE_NUM + 1, dtype=int)

        # Nothing to schedule.
        if not env.allexist_task_list:
            return available_actions

        # Current task of the next user.
        current_task_idx = [
            item.usr_has_tsk for item in env.User if item.uid == env.next_uid_idx
        ][0][0]
        current_task = env.Task[current_task_idx]

        # Map the task back to its user.
        task_user_map = env.get_task_user_mapping()
        matching_users = task_user_map.get(current_task.task_id, -1)

        if not matching_users:
            uid = -1
            print(
                "No user found for task_id:",
                current_task.task_id,
                current_task.assigned_node,
            )
            return config.EDGE_NODE_NUM, 0
        else:
            uid = matching_users

        # Candidate nodes near the user (env-side pooling).
        tem_next_can = env.pool(uid, each_use_loc, each_edge_loc)

        if current_task.reschedule_count <= 3:
            # Mark candidates that satisfy the resource constraints.
            for node_index in tem_next_can:
                node = env.Edge[node_index]

                is_resource_sufficient = (
                    node.container_number < config.node_max_container_number
                    and node.available_mem >= current_task.task_mem
                    and node.available_cpu >= current_task.task_cpu
                    and node.available_mem - current_task.task_mem
                    >= config.LIMITED_MEMORY * node.mem
                    and node.available_cpu - current_task.task_cpu
                    >= config.LIMITED_CPU * node.cpu
                )

                # Never reschedule onto the node the task already occupies.
                is_not_assigned_node = node_index != current_task.assigned_node

                if is_resource_sufficient and is_not_assigned_node:
                    available_actions[node_index] = 1
        else:
            # Too many reschedules: force the cloud.
            return config.EDGE_NODE_NUM, 0

        # Build the flat state tensor (layout must match Actor.forward).
        # Fixed: previously moved to the module-level `device`, which can
        # differ from self.device when the agent is constructed on CPU.
        s_tensor = (
            torch.tensor(
                np.concatenate(
                    [
                        obs["users"].flatten(),
                        obs["nodes"].flatten(),
                        obs["nodes_layers"].flatten(),
                        obs["tasks"],
                        obs["tasks_layers"].flatten(),
                    ]
                ),
                dtype=torch.float,
            )
            .unsqueeze(0)
            .to(self.device)
        )

        with torch.no_grad():
            # Policy probabilities over all actions.
            a_prob = self.actor_net(s_tensor).squeeze(0)

            available_action_indices = np.where(available_actions == 1)[0]

            if len(available_action_indices) == 0:
                # Degenerate case: nothing feasible, fall back to the cloud.
                action = config.EDGE_NODE_NUM
                log_prob = 0.0
            else:
                # Restrict to feasible actions.
                a_prob_available = a_prob[available_action_indices]

                # Expert heuristic scores for the same task, restricted too.
                expert_scores = self.expert_guidance(env, obs, current_task)
                expert_scores_available = expert_scores[available_action_indices]

                # Blend the renormalized policy with the normalized expert
                # scores (80/20).
                a_prob_normalized = F.softmax(a_prob_available, dim=0)
                hybrid_scores = (
                    0.8 * a_prob_normalized.cpu().numpy()
                    + 0.2 * expert_scores_available / expert_scores_available.sum()
                )

                # Sample within the feasible set.
                dist = Categorical(
                    torch.tensor(hybrid_scores, device=self.device, dtype=torch.float32)
                )
                local_action_index = dist.sample().item()

                # Local (feasible-set) index -> global node index.
                action = available_action_indices[local_action_index]
                log_prob = dist.log_prob(
                    torch.tensor(local_action_index, device=self.device)
                ).item()

        return action, log_prob

    def store_transition(self, transition):
        """Append one transition to the replay buffer.

        Args:
            transition: tuple (state, action, log_prob, reward, next_state, done).

        Returns:
            True when the running counter hits a multiple of
            buffer_capacity, signalling that an update should run.
        """
        self.buffer.append(transition)
        self.counter += 1
        return self.counter % self.buffer_capacity == 0

    def update(self):
        """Run one full PPO update over the buffered transitions.

        Returns:
            (mean actor loss, mean critic loss); (0, 0) when the buffer is
            smaller than one batch.
        """
        if len(self.buffer) < self.batch_size:
            print(f"Buffer too small: {len(self.buffer)} < {self.batch_size}")
            return 0, 0

        # Unpack transitions (next_states at index 4 are unused here).
        states = [t[0] for t in self.buffer]
        actions = [t[1] for t in self.buffer]
        log_probs = [t[2] for t in self.buffer]
        rewards = [t[3] for t in self.buffer]
        dones = [t[5] for t in self.buffer]

        def combine_state_tensor(states):
            """Flatten each observation dict into one vector (layout must
            match Actor/Critic.forward) and stack into an (N, D) tensor."""
            flat = [
                np.concatenate(
                    [
                        state["users"].flatten(),
                        state["nodes"].flatten(),
                        state["nodes_layers"].flatten(),
                        state["tasks"],
                        state["tasks_layers"].flatten(),
                    ]
                )
                for state in states
            ]
            # Stack in numpy first: building a tensor directly from a list
            # of ndarrays is slow and warns.
            return torch.as_tensor(np.stack(flat), dtype=torch.float32).to(self.device)

        states_tensor = combine_state_tensor(states)
        actions_tensor = torch.tensor(actions, dtype=torch.long).to(self.device)
        old_log_probs_tensor = torch.tensor(log_probs, dtype=torch.float32).to(
            self.device
        )

        # Discounted returns, reset at episode boundaries.
        returns = []
        discounted_return = 0
        for reward, done in zip(reversed(rewards), reversed(dones)):
            if done:
                discounted_return = 0
            discounted_return = reward + self.gamma * discounted_return
            returns.insert(0, discounted_return)
        returns_tensor = torch.tensor(returns, dtype=torch.float32).to(self.device)

        actor_losses, critic_losses = [], []

        # Reseed to vary the shuffles between updates (kept from original).
        torch.manual_seed(np.random.randint(1000))

        for _ in range(self.ppo_epoch):
            # Shuffle all tensors with one shared permutation.
            permutation = torch.randperm(len(states_tensor))
            states_tensor = states_tensor[permutation]
            actions_tensor = actions_tensor[permutation]
            returns_tensor = returns_tensor[permutation]
            old_log_probs_tensor = old_log_probs_tensor[permutation]

            num_batches = max(1, len(states_tensor) // self.batch_size)

            for i in range(num_batches):
                start_idx = i * self.batch_size
                end_idx = min((i + 1) * self.batch_size, len(states_tensor))

                batch_states = states_tensor[start_idx:end_idx]
                batch_actions = actions_tensor[start_idx:end_idx]
                batch_returns = returns_tensor[start_idx:end_idx]
                batch_old_log_probs = old_log_probs_tensor[start_idx:end_idx]

                # Critic value estimates, squeezed to shape (B,).
                # Fixed: the original kept shape (B, 1), so
                # `batch_returns - values` broadcast to a (B, B) matrix and
                # corrupted both the advantages and the critic loss.
                values = self.critic_net(batch_states).squeeze(-1)

                # Advantage estimate (no gradient through the critic here).
                advantages = batch_returns - values.detach()

                # Fresh policy probabilities, clamped for numerical safety.
                action_probs = self.actor_net(batch_states)
                action_probs = torch.clamp(action_probs, min=1e-10, max=1.0)
                selected_action_probs = action_probs.gather(
                    1, batch_actions.unsqueeze(1)
                ).squeeze(1)
                action_log_probs = torch.log(selected_action_probs + 1e-10)

                # Clipped PPO surrogate objective.
                ratios = torch.exp(action_log_probs - batch_old_log_probs)
                surr1 = ratios * advantages
                surr2 = (
                    torch.clamp(ratios, 1 - self.clip_param, 1 + self.clip_param)
                    * advantages
                )
                actor_loss = -torch.mean(torch.min(surr1, surr2))

                # Entropy bonus encourages exploration.
                entropy = -torch.mean(action_probs * torch.log(action_probs + 1e-10))
                actor_loss -= 0.01 * entropy

                # Guard against NaN from degenerate ratios.
                if torch.isnan(actor_loss):
                    print("Actor loss is NaN. Skipping this update.")
                    continue

                # Critic regression towards the discounted returns.
                critic_loss = F.smooth_l1_loss(values, batch_returns)

                # Actor step with gradient clipping.
                self.actor_optimizer.zero_grad()
                actor_loss.backward()
                nn.utils.clip_grad_norm_(
                    self.actor_net.parameters(), self.max_grad_norm
                )
                self.actor_optimizer.step()

                # Critic step with gradient clipping.
                self.critic_optimizer.zero_grad()
                critic_loss.backward()
                nn.utils.clip_grad_norm_(
                    self.critic_net.parameters(), self.max_grad_norm
                )
                self.critic_optimizer.step()

                actor_losses.append(actor_loss.item())
                critic_losses.append(critic_loss.item())

                self.training_step += 1

                # LR decay (per minibatch; see NOTE in __init__).
                self.actor_lr_scheduler.step()
                self.critic_lr_scheduler.step()

        # Discard consumed experience.
        self.buffer.clear()
        return np.mean(actor_losses), np.mean(critic_losses)

    def satisfied_constrain(self, edge_id, uidre, obs=None):
        """Check placement constraints for user `uidre` on edge `edge_id`.

        Ported from baseline.py. The cloud (edge_id == EDGE_NODE_NUM)
        always passes.

        Returns:
            True when all constraints pass, False otherwise. Fixed: the
            original fell through returning None on success, which is
            falsy and broke truth-tests on the result.
        """
        usrhas_lyer = [(item.uid, item.usr_has_lay) for item in self.env.User]
        one_ulyr = [j for i, j in usrhas_lyer if i == uidre][0]
        if edge_id == config.EDGE_NODE_NUM:
            return True
        edge_cpumemcondisk = [
            (
                item.id,
                item.available_cpu,
                item.available_mem,
                item.container_number,
                item.available_disk,
            )
            for item in self.env.Edge
        ]
        # Container-count limit.
        edge_con = [d for a, b, c, d, f in edge_cpumemcondisk if a == edge_id][0]
        if config.node_max_container_number - edge_con - 1 < 0:
            print("edge {} container num is {} > 5.".format(edge_id, edge_con))
            return False
        # Per-edge user-count limit.
        tmp_usrnum = [len(item) for item in self.env.user_group]
        if config.node_usrnum_limit - tmp_usrnum[edge_id] - 1 < 0:
            print("edge {} has user num {} > 50.".format(edge_id, tmp_usrnum[edge_id]))
            return False
        # Free storage must cover the task plus the layers to download.
        tmp_downsize = 0
        all_layer_downsiz = [item.size for item in self.env.Layer]
        for item in one_ulyr:
            # NOTE(review): usrhas_lyer[edge_id] is the (uid, layers) tuple
            # of the edge_id-th USER, not the layer set of the edge node —
            # this membership test looks wrong; confirm against baseline.py.
            if item not in usrhas_lyer[edge_id]:
                tmp_downsize += all_layer_downsiz[item]

        edge_disk = [f for a, b, c, d, f in edge_cpumemcondisk if a == edge_id][0]

        # Fixed: the original unpacked 5-tuples into 3 names, which raised
        # ValueError whenever this line was reached.
        # NOTE(review): filtering edge ids by a user id and reading
        # available_mem as the "task size" both look suspicious; verify.
        usr_has_tsksiz = [c for a, b, c, d, f in edge_cpumemcondisk if a == uidre][0]

        if edge_disk - usr_has_tsksiz - tmp_downsize < 0:
            print(
                "edge {a} has disk {b} - user_tsk_size:{c} - layer_download_size:{d} < 0.".format(
                    a=edge_id, b=edge_disk, c=usr_has_tsksiz, d=tmp_downsize
                )
            )
            return False
        # All constraints satisfied.
        return True

    def compute_loss(self, state, action, old_log_prob, reward, next_state, done):
        """Evaluate the PPO losses for one transition without updating.

        Uses a one-step bootstrapped return (reward + gamma * V(next_state))
        rather than the multi-step returns used in update().

        Args:
            state: current observation dict.
            action: action index taken.
            old_log_prob: log-probability of the action under the old policy.
            reward: reward received.
            next_state: following observation dict.
            done: whether the episode ended.

        Returns:
            (policy_loss, value_loss) as Python floats.
        """

        def to_state_tensor(s):
            """Flatten an observation dict to a (1, D) tensor on self.device."""
            return (
                torch.as_tensor(
                    np.concatenate(
                        [
                            s["users"].flatten(),
                            s["nodes"].flatten(),
                            s["nodes_layers"].flatten(),
                            s["tasks"],
                            s["tasks_layers"].flatten(),
                        ]
                    ),
                    dtype=torch.float32,
                )
                .unsqueeze(0)
                .to(self.device)
            )

        with torch.no_grad():  # evaluation only, no gradients
            state_tensor = to_state_tensor(state)

            # Squeeze to (1,) so the arithmetic below stays elementwise
            # (the raw critic output is (1, 1)).
            value = self.critic_net(state_tensor).squeeze(-1)

            # One-step bootstrapped return.
            if done:
                returns = torch.tensor([reward], dtype=torch.float32).to(self.device)
            else:
                next_value = self.critic_net(to_state_tensor(next_state))
                returns = torch.tensor(
                    [reward + self.gamma * next_value.item()], dtype=torch.float32
                ).to(self.device)

            # Advantage estimate.
            advantages = returns - value

            # Current policy's probability for the taken action, clamped
            # for numerical safety.
            action_probs = self.actor_net(state_tensor).squeeze(0)
            action_probs = torch.clamp(action_probs, min=1e-10, max=1.0)
            action_log_prob = torch.log(action_probs[action] + 1e-10)

            # Importance ratio and clipped PPO surrogate.
            ratio = torch.exp(action_log_prob - old_log_prob)
            surr1 = ratio * advantages
            surr2 = (
                torch.clamp(ratio, 1 - self.clip_param, 1 + self.clip_param)
                * advantages
            )
            policy_loss = -torch.min(surr1, surr2).mean()

            # Entropy regularization (same coefficient as update()).
            entropy = -torch.sum(action_probs * torch.log(action_probs + 1e-10))
            policy_loss -= 0.01 * entropy

            # Value regression loss.
            value_loss = F.smooth_l1_loss(value, returns)

            return policy_loss.item(), value_loss.item()
