# -*- coding: UTF-8 -*-
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch_geometric.nn as gnn
from torch.nn import MultiheadAttention
import config
import numpy as np
import torch.optim as optim
from torch.utils.data.sampler import BatchSampler, SubsetRandomSampler
from torch.distributions import Categorical
from torch_geometric.nn import GATConv

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# 从配置文件获取维度信息
layer_num = config.LAYER_TOTAL_NUM
node_num = config.EDGE_NODE_NUM
user_num = config.USER_NUM

class GraphEncoder(nn.Module):
    """Encode user and edge-node features with a bipartite GAT.

    Users and nodes are linearly embedded, connected by a fully bipartite
    edge set (both directions), and refined by two GATConv layers.
    """

    def __init__(self, user_dim=3, node_dim=14, hidden_dim=64):
        """
        Args:
            user_dim: per-user feature width.
            node_dim: per-node feature width.
            hidden_dim: embedding width used by the encoders and GAT layers.
        """
        super().__init__()

        self.user_encoder = nn.Sequential(
            nn.Linear(user_dim, hidden_dim), nn.LeakyReLU()
        )

        self.node_encoder = nn.Sequential(
            nn.Linear(node_dim, hidden_dim), nn.LeakyReLU()
        )

        self.conv1 = GATConv(hidden_dim, hidden_dim)
        self.conv2 = GATConv(hidden_dim, hidden_dim)

    def forward(self, users, nodes):
        """Embed users/nodes and run two GAT layers over the bipartite graph.

        Args:
            users: array-like of shape (num_users, user_dim) or batched
                (batch, num_users, user_dim).
            nodes: array-like of shape (num_nodes, node_dim) or batched
                (batch, num_nodes, node_dim).

        Returns:
            tuple: (user features (batch, num_users, hidden),
                    node features (batch, num_nodes, hidden)).
        """
        # Use the parameters' device rather than a module-level global so the
        # encoder works wherever it has been moved with .to(...).
        dev = next(self.parameters()).device

        # as_tensor avoids a copy (and a warning) when inputs are already tensors.
        users = torch.as_tensor(users, dtype=torch.float32).to(dev)
        nodes = torch.as_tensor(nodes, dtype=torch.float32).to(dev)

        # Promote single samples to batches of one.
        if users.dim() == 2:
            users = users.unsqueeze(0)
        if nodes.dim() == 2:
            nodes = nodes.unsqueeze(0)

        batch_size, num_users, _ = users.shape
        _, num_nodes, _ = nodes.shape

        # Embed user and node features.
        user_features = self.user_encoder(users)
        node_features = self.node_encoder(nodes)

        # The fully bipartite edge index is identical for every sample, so it
        # is built once (vectorized) instead of per batch element in Python.
        # Node ids: users occupy [0, num_users), nodes are offset by num_users.
        u_idx = torch.arange(num_users).repeat_interleave(num_nodes)
        n_idx = torch.arange(num_nodes).repeat(num_users) + num_users
        edge_index = torch.cat(
            [torch.stack([u_idx, n_idx]), torch.stack([n_idx, u_idx])], dim=1
        ).to(dev)

        # Concatenate user and node embeddings into one node-feature matrix.
        x = torch.cat([user_features, node_features], dim=1)

        # GATConv operates on a single graph, so convolve per batch element.
        graph_features = []
        for b in range(batch_size):
            h = self.conv1(x[b], edge_index)
            graph_features.append(self.conv2(h, edge_index))

        # Split the stacked output back into user and node parts.
        graph_features = torch.stack(graph_features)
        user_graph_features = graph_features[:, :num_users]
        node_graph_features = graph_features[:, num_users:]

        return user_graph_features, node_graph_features


class DeepCompositeNetwork(nn.Module):
    """Fuse node-layer, task and task-layer features into one hidden vector.

    Each input group is embedded to ``hidden_dim`` along its last axis, the
    embeddings are flattened per batch element, concatenated and mapped by a
    fusion MLP to a single ``hidden_dim`` vector.

    NOTE: the fusion layer hard-codes 18 + 1 + 2 embedded chunks, i.e. it
    assumes ``nodes_layers`` flattens to 18 * hidden (node_num * 2 == 18)
    and ``tasks_layers`` to 2 * hidden.
    """

    def __init__(self, nodes_layer_dim, tasks_dim, tasks_layer_dim, hidden_dim=64):
        """
        Args:
            nodes_layer_dim: size of the last axis of ``nodes_layers``.
            tasks_dim: size of the task feature vector.
            tasks_layer_dim: size of the last axis of ``tasks_layers``.
            hidden_dim: embedding width used throughout.
        """
        super(DeepCompositeNetwork, self).__init__()

        # Node-layer branch: embeds the last axis of nodes_layers.
        self.nodes_layer_encoder = nn.Sequential(
            nn.Linear(nodes_layer_dim, hidden_dim * 2),
            nn.LeakyReLU(),
            nn.Linear(hidden_dim * 2, hidden_dim),
        )

        # Task branch.
        self.tasks_encoder = nn.Sequential(
            nn.Linear(tasks_dim, hidden_dim // 2),
            nn.LeakyReLU(),
            nn.Linear(hidden_dim // 2, hidden_dim),
        )

        # Task-layer branch.
        self.tasks_layer_encoder = nn.Sequential(
            nn.Linear(tasks_layer_dim, hidden_dim * 2),
            nn.LeakyReLU(),
            nn.Linear(hidden_dim * 2, hidden_dim),
        )

        # Fusion MLP; input width assumes 18 node-layer chunks + 1 task chunk
        # + 2 task-layer chunks (see class docstring).
        self.fusion_layer = nn.Sequential(
            nn.Linear(hidden_dim * (18 + 1 + 2), hidden_dim * 2),
            nn.LeakyReLU(),
            nn.Linear(hidden_dim * 2, hidden_dim),
        )

    def forward(self, nodes_layers, tasks, tasks_layers):
        """Embed, flatten and fuse the three feature groups.

        Accepts numpy arrays, sequences or tensors; single samples (without a
        leading batch axis) are promoted to a batch of one.

        Args:
            nodes_layers: (node_num, 2, last_dim) or batched with a leading axis.
            tasks: (tasks_dim,) or (batch, tasks_dim).
            tasks_layers: (2, last_dim) or (batch, 2, last_dim).

        Returns:
            Tensor of shape (batch, hidden_dim).
        """
        # Use the parameters' device rather than a module-level global, so the
        # module works wherever it has been moved with .to(...).
        dev = next(self.parameters()).device

        # as_tensor avoids a copy (and a warning) when inputs are tensors.
        nodes_layers = torch.as_tensor(nodes_layers, dtype=torch.float32).to(dev)
        tasks = torch.as_tensor(tasks, dtype=torch.float32).to(dev)
        tasks_layers = torch.as_tensor(tasks_layers, dtype=torch.float32).to(dev)

        # Promote single samples to batches of one.
        if nodes_layers.dim() == 3:
            nodes_layers = nodes_layers.unsqueeze(0)
        if tasks.dim() == 1:
            tasks = tasks.unsqueeze(0)
        if tasks_layers.dim() == 2:
            tasks_layers = tasks_layers.unsqueeze(0)

        batch_size = nodes_layers.size(0)

        # Encode each feature group along its last axis.
        nodes_layer_features = self.nodes_layer_encoder(nodes_layers)
        tasks_features = self.tasks_encoder(tasks)
        tasks_layer_features = self.tasks_layer_encoder(tasks_layers)

        # Flatten every branch per batch element and concatenate.
        fused_features = torch.cat(
            [
                nodes_layer_features.view(batch_size, -1),
                tasks_features.view(batch_size, -1),
                tasks_layer_features.view(batch_size, -1),
            ],
            dim=-1,
        )

        return self.fusion_layer(fused_features)


class Actor(nn.Module):
    """Policy network: maps a flat observation to a softmax distribution over
    ``node_num`` edge nodes plus one cloud action.

    The observation vector is split into five segments -- users, nodes, node
    layer matrices, task features and task layer matrices -- encoded by a GNN
    (users/nodes) and a deep composite network (layer/task features), then
    fused by an MLP head.
    """

    def __init__(self, input_dim=None, node_num=node_num):
        """
        Args:
            input_dim: unused; kept for interface compatibility.
            node_num: number of edge nodes (defaults to the module constant).
        """
        super(Actor, self).__init__()

        # Graph encoder over user / node features.
        self.graph_encoder = GraphEncoder(user_dim=3, node_dim=14, hidden_dim=64)

        # Deep composite network over layer / task features.
        self.dcn = DeepCompositeNetwork(
            nodes_layer_dim=1 * layer_num,
            tasks_dim=4,
            tasks_layer_dim=1 * layer_num,
        )

        # Head input: per-user and per-node graph embeddings (64 each) plus
        # the 64-dim DCN vector.
        policy_input_dim = user_num * 64 + node_num * 64 + 64

        # Final policy head.
        self.policy_network = nn.Sequential(
            nn.Linear(policy_input_dim, 128),
            nn.LeakyReLU(),
            nn.Linear(128, 64),
            nn.LeakyReLU(),
            nn.Linear(64, 16),
            nn.LeakyReLU(),
            nn.Linear(16, node_num + 1),  # +1 for the cloud action
        )

    def forward(self, x):
        """Split the flat observation and produce action probabilities.

        Args:
            x: observation of shape (obs_dim,) or (batch, obs_dim), laid out
               as [users | nodes | nodes_layers | tasks | tasks_layers].

        Returns:
            Tensor of shape (batch, node_num + 1) of softmax probabilities.
        """
        user_dim = 3 * user_num
        node_dim = 14 * node_num
        nodes_layer_dim = 2 * layer_num  # one (2, layer_num) block per node
        tasks_dim = 4

        # Promote a single sample to a batch of one.
        if x.dim() == 1:
            x = x.unsqueeze(0)
        batch_size = x.size(0)

        # Walk through the flat vector with a running offset instead of
        # repeating the cumulative-sum arithmetic at every slice.
        offset = 0
        users = x[:, offset : offset + user_dim].view(batch_size, user_num, 3)
        offset += user_dim
        nodes = x[:, offset : offset + node_dim].view(batch_size, node_num, 14)
        offset += node_dim
        nodes_layers = x[:, offset : offset + nodes_layer_dim * node_num].view(
            batch_size, node_num, 2, layer_num
        )
        offset += nodes_layer_dim * node_num
        tasks = x[:, offset : offset + tasks_dim]
        offset += tasks_dim
        tasks_layers = x[:, offset:].view(batch_size, 2, layer_num)

        # Graph features for users and nodes.
        user_graph_features, node_graph_features = self.graph_encoder(users, nodes)

        # DCN features for layer / task information (batched).
        dcn_features = self.dcn(nodes_layers, tasks, tasks_layers)

        # Fuse all features.
        combined_features = torch.cat(
            [
                user_graph_features.view(batch_size, -1),
                node_graph_features.view(batch_size, -1),
                dcn_features.view(batch_size, -1),
            ],
            dim=1,
        )

        # Policy head + softmax over actions.
        return F.softmax(self.policy_network(combined_features), dim=-1)


class Critic(nn.Module):
    """Value network: maps a flat observation to a scalar state value.

    Mirrors Actor's feature pipeline (GNN over users/nodes, deep composite
    network over layer/task features) with a value head instead of a policy
    head.
    """

    def __init__(self, input_dim=None):
        """
        Args:
            input_dim: unused; kept for interface compatibility.
        """
        super(Critic, self).__init__()

        # Graph encoder over user / node features.
        self.graph_encoder = GraphEncoder(user_dim=3, node_dim=14, hidden_dim=64)

        # Deep composite network over layer / task features.
        self.dcn = DeepCompositeNetwork(
            nodes_layer_dim=1 * layer_num,
            tasks_dim=4,
            tasks_layer_dim=1 * layer_num,
        )

        # Head input: per-user and per-node graph embeddings (64 each) plus
        # the 64-dim DCN vector.
        value_input_dim = user_num * 64 + node_num * 64 + 64

        # Value head.
        self.value_network = nn.Sequential(
            nn.Linear(value_input_dim, 128),
            nn.LeakyReLU(),
            nn.Linear(128, 64),
            nn.LeakyReLU(),
            nn.Linear(64, 16),
            nn.LeakyReLU(),
            nn.Linear(16, 1),
        )

    def forward(self, x):
        """Split the flat observation and estimate the state value.

        Args:
            x: observation of shape (obs_dim,) or (batch, obs_dim), laid out
               as [users | nodes | nodes_layers | tasks | tasks_layers].

        Returns:
            Tensor of shape (batch, 1) with state-value estimates.
        """
        user_dim = 3 * user_num
        node_dim = 14 * node_num
        nodes_layer_dim = 2 * layer_num  # one (2, layer_num) block per node
        tasks_dim = 4

        # Promote a single sample to a batch of one (same guard as Actor;
        # the original assumed a 2-D input here).
        if x.dim() == 1:
            x = x.unsqueeze(0)
        batch_size = x.size(0)

        # Walk through the flat vector with a running offset.
        offset = 0
        users = x[:, offset : offset + user_dim].view(batch_size, user_num, 3)
        offset += user_dim
        nodes = x[:, offset : offset + node_dim].view(batch_size, node_num, 14)
        offset += node_dim
        nodes_layers = x[:, offset : offset + nodes_layer_dim * node_num].view(
            batch_size, node_num, 2, layer_num
        )
        offset += nodes_layer_dim * node_num
        tasks = x[:, offset : offset + tasks_dim]
        offset += tasks_dim
        tasks_layers = x[:, offset:].view(batch_size, 2, layer_num)

        # Graph features for users and nodes.
        user_graph_features, node_graph_features = self.graph_encoder(users, nodes)

        # DCN features for layer / task information.
        dcn_features = self.dcn(nodes_layers, tasks, tasks_layers)

        # Fuse all features.
        combined_features = torch.cat(
            [
                user_graph_features.view(batch_size, -1),
                node_graph_features.view(batch_size, -1),
                dcn_features.view(batch_size, -1),
            ],
            dim=1,
        )

        # Value head.
        return self.value_network(combined_features)


class PPO_GNN:
    def __init__(self, env, device="cuda", random_seed=42):
        """PPO agent pairing the GNN-based Actor and Critic.

        Args:
            env: scheduling environment; ``env.Edge``, ``env.max_cpu`` and
                ``env.max_mem`` are read here.
            device: preferred torch device string; falls back to CPU when
                CUDA is unavailable.
            random_seed: seed passed to ``torch.manual_seed``.
        """
        # Training hyper-parameters
        self.clip_param = 0.2  # PPO clipping parameter
        self.max_grad_norm = 0.5  # gradient-clipping norm
        self.ppo_epoch = 10  # PPO passes over the buffer per update
        self.buffer_capacity = 64*15  # experience buffer capacity
        self.batch_size = 64  # minibatch size
        self.gamma = 0.98  # discount factor
        self.learning_rate_actor = 1e-4  # actor learning rate
        self.learning_rate_critic = 3e-4  # critic learning rate

        # Environment-derived parameters
        self.env = env
        self.node_num = len(env.Edge)
        self.max_cpu = env.max_cpu
        self.max_mem = env.max_mem

        # Device and random seed
        self.device = torch.device(device if torch.cuda.is_available() else "cpu")
        torch.manual_seed(random_seed)

        # Networks
        self.actor_net = Actor().to(self.device)
        self.critic_net = Critic().to(self.device)

        # Optimizers
        self.actor_optimizer = optim.Adam(
            self.actor_net.parameters(), lr=self.learning_rate_actor
        )
        self.critic_optimizer = optim.Adam(
            self.critic_net.parameters(), lr=self.learning_rate_critic
        )

        # Step-wise learning-rate decay (multiply by 0.9 every 100 steps).
        # NOTE(review): update() calls .step() once per minibatch, so
        # step_size=100 means "every 100 minibatches", not every 100 epochs
        # as the original comment suggested -- confirm intended cadence.
        self.actor_lr_scheduler = optim.lr_scheduler.StepLR(  
            self.actor_optimizer,   
            step_size=100,
            gamma=0.9
        )  
        
        self.critic_lr_scheduler = optim.lr_scheduler.StepLR(  
            self.critic_optimizer,   
            step_size=100,
            gamma=0.9
        )  

        # Experience buffer and counters
        self.buffer = []
        self.counter = 0
        self.training_step = 0

    def select_action(self, env, obs):
        """Sample a deployment action for the current task.

        Builds an availability mask over edge nodes (resource limits, the
        currently assigned node, reschedule limit), renormalises the actor's
        probabilities over the available actions and samples one.

        Args:
            env: scheduling environment (same object as ``self.env``).
            obs: observation dict with keys "users", "nodes", "nodes_layers",
                "tasks", "tasks_layers".

        Returns:
            Normally a tuple ``(action, log_prob)``.
            NOTE(review): the early exit when ``env.allexist_task_list`` is
            empty returns the raw availability array instead of a tuple --
            callers that unpack two values would break there; confirm intended.
        """
        # Prepare user position info
        # print("obs['tasks_layers'] shape:", np.shape(obs['tasks_layers']))  
        # print("obs['nodes_layers'] shape:", np.shape(obs['nodes_layers']),"\n") 
        each_use_loc = [
            [user[0], user[1], user[2]] for user in obs["users"]  # [id, x, y]
        ]

        # Prepare edge-node position info
        each_edge_loc = [
            [i, node[7], node[8]]  # [node_id, x, y]
            for i, node in enumerate(obs["nodes"])
        ]

        # Mark every node (and the cloud slot) unavailable by default
        available_actions = np.zeros(config.EDGE_NODE_NUM + 1, dtype=int)

        # No pending task list: return immediately (see Returns note above)
        if not env.allexist_task_list:
            return available_actions

        # Current task: first task of the user whose uid == env.next_uid_idx
        current_task_idx = [
            item.usr_has_tsk for item in env.User if item.uid == env.next_uid_idx
        ][0][0]
        current_task = env.Task[current_task_idx]

        # Resolve the user owning this task
        task_user_map = env.get_task_user_mapping()
        matching_users = task_user_map.get(current_task.task_id, -1)

        # NOTE(review): the default -1 is truthy, so this branch only fires
        # when the map stores a falsy value (0/None/empty); a *missing*
        # task_id falls through to uid = -1 below. Confirm which is intended.
        if not matching_users:
            uid = -1
            print(
                "No user found for task_id:",
                current_task.task_id,
                current_task.assigned_node,
            )
            return config.EDGE_NODE_NUM,0
        else:
            uid = matching_users

        # Use env.pool to pre-filter candidate nodes by position
        tem_next_can = env.pool(uid, each_use_loc, each_edge_loc)

        # Mark available nodes
        if current_task.reschedule_count <= 3:  
            for node_index in tem_next_can:  
                node = env.Edge[node_index]  

                # Resource constraints (uses the task's actual demands)
                is_resource_sufficient = (  
                    node.container_number < config.node_max_container_number  
                    and node.available_mem >= current_task.task_mem  
                    and node.available_cpu >= current_task.task_cpu  
                )  

                # Exclude the node the task is already assigned to
                is_not_assigned_node = (  
                    node_index != current_task.assigned_node  
                )  

                if is_resource_sufficient and is_not_assigned_node:  
                    available_actions[node_index] = 1  
        else:  
            # More than 3 reschedules: force the cloud action
            available_actions = np.zeros(config.EDGE_NODE_NUM + 1, dtype=int)  
            available_actions[config.EDGE_NODE_NUM] = 1  
            return config.EDGE_NODE_NUM, 0  

        # if np.sum(available_actions) == 0:  
        #     available_actions[config.EDGE_NODE_NUM] = 1  

        # Build the flat state tensor (same layout Actor.forward expects)
        s_tensor = torch.tensor(  
            np.concatenate(  
                [  
                    obs["users"].flatten(),  
                    obs["nodes"].flatten(),  
                    obs["nodes_layers"].flatten(),  
                    obs["tasks"],  
                    obs["tasks_layers"].flatten(),  
                ]  
            ),  
            dtype=torch.float,  
        ).unsqueeze(0).to(device)  

        with torch.no_grad():  
            # Raw action probabilities from the actor
            a_prob = self.actor_net(s_tensor).squeeze(0)  

            # Indices of the available actions
            available_action_indices = np.where(available_actions == 1)[0]  
            
            if len(available_action_indices) == 0:  
                # Degenerate case: nothing available, fall back to the cloud
                action = config.EDGE_NODE_NUM  
                log_prob = 0.0  
            else:  
                # Keep only the probabilities of available actions
                a_prob_available = a_prob[available_action_indices]  
                
                # Renormalise over the available actions
                # NOTE(review): a_prob is already a softmax output; applying
                # softmax again flattens the distribution. Dividing by the
                # sum may be what was intended -- confirm.
                a_prob_normalized = F.softmax(a_prob_available, dim=0)  
                
                # Sample among the available actions
                dist = Categorical(probs=a_prob_normalized)  
                local_action_index = dist.sample().item()  
                
                # Map the local index back to the global action index
                action = available_action_indices[local_action_index]  
                log_prob = dist.log_prob(torch.tensor(local_action_index, device=device)).item()  

        # Debug output
        # print("available_actions:", available_actions)  
        # print("available_action_indices:", available_action_indices)  
        # print("a_prob_available:", a_prob_available)  
        # print("a_prob_normalized:", a_prob_normalized)  
        # print("task:", current_task.task_id, current_task.reschedule_count, "action:", action,"\n") 

        return action, log_prob  

    def store_transition(self, transition):
        """Append one transition to the experience buffer.

        Args:
            transition (tuple): transition record
                (state, action, log_prob, reward, next_state, done, done).

        Returns:
            bool: True when the running counter reaches a multiple of
            ``buffer_capacity`` (i.e. it is time to run an update).
        """
        self.buffer.append(transition)
        self.counter += 1
        buffer_ready = self.counter % self.buffer_capacity == 0
        return buffer_ready
    

    def update(self):  
        """  
        PPO算法更新函数，增强训练多样性和稳定性  
        """  
        # 检查是否有足够的经验  
        if len(self.buffer) < self.batch_size:  
            print(f"Buffer too small: {len(self.buffer)} < {self.batch_size}")  
            return 0, 0  

        # 准备训练数据  
        states = [t[0] for t in self.buffer]  
        actions = [t[1] for t in self.buffer]  
        log_probs = [t[2] for t in self.buffer]  
        rewards = [t[3] for t in self.buffer]  
        next_states = [t[4] for t in self.buffer]  
        dones = [t[5] for t in self.buffer]  

        # 重新组织状态为单个张量  
        def combine_state_tensor(states):  
            combined_tensors = []  
            for state in states:  
                combined_state = np.concatenate([  
                    state["users"].flatten(),  
                    state["nodes"].flatten(),  
                    state["nodes_layers"].flatten(),  
                    state["tasks"],  
                    state["tasks_layers"].flatten(),  
                ])  
                combined_tensors.append(combined_state)  
            
            return torch.tensor(combined_tensors, dtype=torch.float32).to(self.device)  

        # 转换为张量  
        states_tensor = combine_state_tensor(states)  
        actions_tensor = torch.tensor(actions, dtype=torch.long).to(self.device)  
        rewards_tensor = torch.tensor(rewards, dtype=torch.float32).to(self.device)  
        dones_tensor = torch.tensor(dones, dtype=torch.float32).to(self.device)  
        old_log_probs_tensor = torch.tensor(log_probs, dtype=torch.float32).to(self.device)  

        # 计算折扣回报  
        Returns = []  
        discounted_return = 0  
        for reward, done in zip(reversed(rewards), reversed(dones)):  
            if done:  
                discounted_return = 0  
            discounted_return = reward + self.gamma * discounted_return  
            Returns.insert(0, discounted_return)  
        Returns_tensor = torch.tensor(Returns, dtype=torch.float32).to(self.device)  

        # 损失记录  
        actor_losses, critic_losses = [], []  

        # 添加随机种子增加训练随机性  
        torch.manual_seed(np.random.randint(1000))  

        # PPO更新过程  
        for _ in range(self.ppo_epoch):  
            # 随机打乱数据  
            permutation = torch.randperm(len(states_tensor))  
            states_tensor = states_tensor[permutation]  
            actions_tensor = actions_tensor[permutation]  
            Returns_tensor = Returns_tensor[permutation]  
            old_log_probs_tensor = old_log_probs_tensor[permutation]  

            # 计算需要的批次数  
            num_batches = max(1, len(states_tensor) // self.batch_size)  
            print("num_batches:",num_batches)
            
            # 批次处理  
            for i in range(num_batches):  
                # 确定批次的起始和结束索引  
                start_idx = i * self.batch_size  
                end_idx = min((i + 1) * self.batch_size, len(states_tensor))  
                
                batch_states = states_tensor[start_idx:end_idx]  
                batch_actions = actions_tensor[start_idx:end_idx]  
                batch_returns = Returns_tensor[start_idx:end_idx]  
                batch_old_log_probs = old_log_probs_tensor[start_idx:end_idx]  

                # 评判者网络估值  
                values = self.critic_net(batch_states)  

                # 优势估计  
                advantages = batch_returns - values.detach()  

                # 新策略概率  
                action_probs = self.actor_net(batch_states)  
                
                # 数值稳定性处理  
                action_probs = torch.clamp(action_probs, min=1e-10, max=1.0)   
                selected_action_probs = action_probs.gather(1, batch_actions.unsqueeze(1)).squeeze(1)  
                action_log_probs = torch.log(selected_action_probs + 1e-10)   

                # 重要性采样比率  
                ratios = torch.exp(action_log_probs - batch_old_log_probs)  

                # PPO裁剪  
                surr1 = ratios * advantages  
                surr2 = (  
                    torch.clamp(ratios, 1 - self.clip_param, 1 + self.clip_param)  
                    * advantages  
                )  
                actor_loss = -torch.min(surr1, surr2).mean()  
                
                # 检查损失是否为 NaN  
                if torch.isnan(actor_loss):  
                    print("Actor loss is NaN. Skipping this update.")  
                    continue  

                # 价值网络损失  
                critic_loss = F.smooth_l1_loss(values, batch_returns)  

                # Actor网络更新  
                self.actor_optimizer.zero_grad()  
                actor_loss.backward()  
                nn.utils.clip_grad_norm_(  
                    self.actor_net.parameters(), self.max_grad_norm  
                )  
                self.actor_optimizer.step()  

                # Critic网络更新  
                self.critic_optimizer.zero_grad()  
                critic_loss.backward()  
                nn.utils.clip_grad_norm_(  
                    self.critic_net.parameters(), self.max_grad_norm  
                )  
                self.critic_optimizer.step()  

                # 记录损失  
                actor_losses.append(actor_loss.item())  
                critic_losses.append(critic_loss.item())  

                self.training_step += 1  
        
                self.actor_lr_scheduler.step()  
                self.critic_lr_scheduler.step() 

        # 清空缓冲区  
        self.buffer.clear()  
        print("update over!")  
        return np.mean(actor_losses), np.mean(critic_losses)   


    def save_model(self, path):
        """保存模型检查点"""
        torch.save(
            {
                "actor_state_dict": self.actor_net.state_dict(),
                "critic_state_dict": self.critic_net.state_dict(),
                "actor_optimizer_state_dict": self.actor_optimizer.state_dict(),
                "critic_optimizer_state_dict": self.critic_optimizer.state_dict(),
            },
            path,
        )

    def load_model(self, path):
        """加载模型检查点"""
        checkpoint = torch.load(path)
        self.actor_net.load_state_dict(checkpoint["actor_state_dict"])
        self.critic_net.load_state_dict(checkpoint["critic_state_dict"])
        self.actor_optimizer.load_state_dict(checkpoint["actor_optimizer_state_dict"])
        self.critic_optimizer.load_state_dict(checkpoint["critic_optimizer_state_dict"])

    def satisfied_constrain(self, edge_id, uidre, obs=None):
        """Check whether user ``uidre``'s task can be placed on ``edge_id``.

        Validates, in order: container-count limit, per-node user-count limit
        and disk capacity (task size plus layers still to be downloaded). The
        cloud (``edge_id == config.EDGE_NODE_NUM``) always satisfies.

        Returns:
            True for the cloud, False when a constraint fails.
            NOTE(review): no final ``return True`` is visible in this chunk --
            if the function ends here, the all-constraints-pass path returns
            None (falsy). Confirm against the full file.
        """
        # original from baseline.py
        # print("each edge_id is: ", edge_id)
        # (uid, layers already held) per user
        usrhas_lyer = [(item.uid, item.usr_has_lay) for item in self.env.User]
        one_ulyr = [j for i, j in usrhas_lyer if i == uidre][0]
        if edge_id == config.EDGE_NODE_NUM:
            return True
        # Per-edge snapshot: (id, cpu, mem, container count, disk)
        edge_cpumemcondisk = [
            (
                item.id,
                item.available_cpu,
                item.available_mem,
                item.container_number,
                item.available_disk,
            )
            for item in self.env.Edge
        ]
        edge_con = [
            d for a, b, c, d, f in edge_cpumemcondisk if a == edge_id
        ][0]
        if config.node_max_container_number - edge_con - 1 < 0:
            print("edge {} container num is {} > 5.".format(edge_id, edge_con))
            return False
        # Per-node user-count limit
        tmp_usrnum = []
        for idx,item in enumerate(self.env.user_group):
            tmp_usrnum.append(len(item))
        if config.node_usrnum_limit - tmp_usrnum[edge_id] - 1 < 0:
            print(
                "edge {} has user num {} > 50.".format(
                    edge_id, tmp_usrnum[edge_id]
                )
            )
            return False
        # Disk check: free space - task size - layer download size
        tmp_downsize = 0
        all_layer_downsiz = [item.size for item in self.env.Layer]
        # NOTE(review): usrhas_lyer is a per-USER list of (uid, layers)
        # tuples; indexing it by edge_id and testing layer membership on the
        # resulting tuple looks wrong -- the intent is presumably "layers
        # already present on this edge". Confirm the intended lookup.
        for item in one_ulyr:
            if item not in usrhas_lyer[edge_id]:
                tmp_downsize += all_layer_downsiz[item]

        edge_disk = [
            f for a, b, c, d, f in edge_cpumemcondisk if a == edge_id
        ][0]

        # NOTE(review): this unpacks 3 values from the 5-tuples in
        # edge_cpumemcondisk, which raises ValueError if this line is
        # reached; the task size should presumably come from a user/task
        # list, not the edge snapshot. Confirm and fix the source list.
        usr_has_tsksiz = [k for i, j, k in edge_cpumemcondisk if i == uidre][0]

        # print("edge {a} has disk {b} - layer_download_size:{d}".format(a=edge_id, b=edge_disk, d=tmp_downsize))
        if edge_disk - usr_has_tsksiz - tmp_downsize < 0:
            print(
                "edge {a} has disk {b} - user_tsk_size:{c} - layer_download_size:{d} < 0.".format(
                    a=edge_id, b=edge_disk, c=usr_has_tsksiz, d=tmp_downsize
                )
            )
            return False
