import torch
import torch.nn as nn
import math
from parameter import *
import numpy as np
if not train_mode:
    from test_parameter import *
# a pointer network layer for policy output
class SingleHeadAttention(nn.Module):
    """Pointer-network attention head that returns log-probabilities over keys."""

    def __init__(self, embedding_dim):
        super(SingleHeadAttention, self).__init__()
        self.input_dim = embedding_dim
        self.embedding_dim = embedding_dim
        self.value_dim = embedding_dim
        self.key_dim = self.value_dim
        # Logits are squashed through tanh and scaled to [-10, 10] before the
        # softmax, the usual pointer-network clipping trick.
        self.tanh_clipping = 10
        self.norm_factor = 1 / math.sqrt(self.key_dim)

        self.w_query = nn.Parameter(torch.Tensor(self.input_dim, self.key_dim))
        self.w_key = nn.Parameter(torch.Tensor(self.input_dim, self.key_dim))

        self.init_parameters()

    def init_parameters(self):
        # Uniform init bounded by the inverse square root of each weight's last dim.
        for param in self.parameters():
            bound = 1. / math.sqrt(param.size(-1))
            param.data.uniform_(-bound, bound)

    def forward(self, q, k, mask=None):
        """Return log-softmax attention scores, shape n_batch*n_query*n_key.

        Positions where ``mask == 1`` are excluded (filled with -1e8 before
        the softmax).
        """
        n_batch, n_key, n_dim = k.size()
        n_query = q.size(1)

        # Flatten batch/token dims, project, then restore the batched layout.
        Q = torch.matmul(q.reshape(-1, n_dim), self.w_query).view(n_batch, n_query, -1)
        K = torch.matmul(k.reshape(-1, n_dim), self.w_key).view(n_batch, n_key, -1)

        scores = self.norm_factor * torch.matmul(Q, K.transpose(1, 2))
        scores = self.tanh_clipping * torch.tanh(scores)

        if mask is not None:
            scores = scores.masked_fill(mask == 1, -1e8)
        return torch.log_softmax(scores, dim=-1)  # n_batch*n_query*n_key


# standard multi head attention layer
class MultiHeadAttention(nn.Module):
    """Standard multi-head attention with explicitly managed projection weights.

    Both masks mark positions to EXCLUDE: any entry > 0 in the combined mask
    is filled with -1e8 before the softmax.
    """

    def __init__(self, embedding_dim, n_heads=8):
        super(MultiHeadAttention, self).__init__()
        self.n_heads = n_heads
        self.input_dim = embedding_dim
        self.embedding_dim = embedding_dim
        # Per-head width; assumes embedding_dim is divisible by n_heads.
        self.value_dim = self.embedding_dim // self.n_heads
        self.key_dim = self.value_dim
        self.norm_factor = 1 / math.sqrt(self.key_dim)

        self.w_query = nn.Parameter(torch.Tensor(self.n_heads, self.input_dim, self.key_dim))
        self.w_key = nn.Parameter(torch.Tensor(self.n_heads, self.input_dim, self.key_dim))
        self.w_value = nn.Parameter(torch.Tensor(self.n_heads, self.input_dim, self.value_dim))
        self.w_out = nn.Parameter(torch.Tensor(self.n_heads, self.value_dim, self.embedding_dim))

        self.init_parameters()

    def init_parameters(self):
        # Uniform init bounded by the inverse square root of each weight's last dim.
        for param in self.parameters():
            bound = 1. / math.sqrt(param.size(-1))
            param.data.uniform_(-bound, bound)

    def forward(self, q, k=None, v=None, key_padding_mask=None, attn_mask=None):
        """Return (output, attention) of shapes
        batch*n_query*embedding_dim and n_heads*batch*n_query*n_key."""
        # Self-attention shorthand: missing keys/values default to the queries.
        k = q if k is None else k
        v = q if v is None else v

        n_batch, n_key, n_dim = k.size()
        n_query = q.size(1)
        n_value = v.size(1)

        # Project every token through all heads at once, then split per head.
        Q = torch.matmul(q.contiguous().view(-1, n_dim), self.w_query)
        Q = Q.view(self.n_heads, n_batch, n_query, -1)  # n_heads*batch*n_query*key_dim
        K = torch.matmul(k.contiguous().view(-1, n_dim), self.w_key)
        K = K.view(self.n_heads, n_batch, n_key, -1)  # n_heads*batch*n_key*key_dim
        V = torch.matmul(v.contiguous().view(-1, n_dim), self.w_value)
        V = V.view(self.n_heads, n_batch, n_value, -1)  # n_heads*batch*n_value*value_dim

        # Scaled dot-product scores: n_heads*batch*n_query*n_key.
        U = self.norm_factor * torch.matmul(Q, K.transpose(2, 3))

        # Combine the two optional masks; both are broadcast across heads.
        # assumes key_padding_mask arrives as (batch, 1, n_key) — TODO confirm
        mask = None
        if attn_mask is not None:
            mask = attn_mask.view(1, n_batch, n_query, n_key).expand_as(U)
        if key_padding_mask is not None:
            padding = key_padding_mask.repeat(1, n_query, 1)
            padding = padding.view(1, n_batch, n_query, n_key).expand_as(U)
            mask = padding if mask is None else mask + padding

        if mask is not None:
            U = U.masked_fill(mask > 0, -1e8)

        attention = torch.softmax(U, dim=-1)  # n_heads*batch*n_query*n_key

        heads = torch.matmul(attention, V)  # n_heads*batch*n_query*value_dim

        # Concatenate the heads (via permute+reshape) and project back to
        # embedding_dim with a single matrix multiply.
        out = torch.mm(
            heads.permute(1, 2, 0, 3).reshape(-1, self.n_heads * self.value_dim),
            self.w_out.view(-1, self.embedding_dim),
        ).view(-1, n_query, self.embedding_dim)

        return out, attention  # batch*n_query*embedding_dim


class Normalization(nn.Module):
    """Applies LayerNorm over the trailing (embedding) dimension."""

    def __init__(self, embedding_dim):
        super(Normalization, self).__init__()
        self.normalizer = nn.LayerNorm(embedding_dim)

    def forward(self, input):
        # Flatten all leading dims, normalize each row, then restore the shape.
        flat = input.view(-1, input.size(-1))
        normed = self.normalizer(flat)
        return normed.view(*input.size())


class EncoderLayer(nn.Module):
    """Pre-norm transformer encoder layer: self-attention + feed-forward,
    each wrapped in a residual connection."""

    def __init__(self, embedding_dim, n_head):
        super(EncoderLayer, self).__init__()
        self.multiHeadAttention = MultiHeadAttention(embedding_dim, n_head)
        self.normalization1 = Normalization(embedding_dim)
        # Position-wise feed-forward with a fixed 512-unit hidden layer.
        self.feedForward = nn.Sequential(nn.Linear(embedding_dim, 512), nn.ReLU(inplace=True),
                                         nn.Linear(512, embedding_dim))
        self.normalization2 = Normalization(embedding_dim)

    def forward(self, src, key_padding_mask=None, attn_mask=None):
        # Self-attention sub-layer (attention weights are discarded here).
        residual = src
        x = self.normalization1(src)
        x, _ = self.multiHeadAttention(q=x, key_padding_mask=key_padding_mask, attn_mask=attn_mask)
        x = x + residual
        # Feed-forward sub-layer.
        residual = x
        x = self.feedForward(self.normalization2(x))
        return x + residual


class DecoderLayer(nn.Module):
    """Pre-norm transformer decoder layer: cross-attention from tgt to memory
    plus a feed-forward sub-layer; also returns the attention weights."""

    def __init__(self, embedding_dim, n_head):
        super(DecoderLayer, self).__init__()
        self.multiHeadAttention = MultiHeadAttention(embedding_dim, n_head)
        self.normalization1 = Normalization(embedding_dim)
        self.feedForward = nn.Sequential(nn.Linear(embedding_dim, 512),
                                         nn.ReLU(inplace=True),
                                         nn.Linear(512, embedding_dim))
        self.normalization2 = Normalization(embedding_dim)

    def forward(self, tgt, memory, key_padding_mask=None, attn_mask=None):
        # Cross-attention sub-layer; note tgt and memory share normalization1.
        residual = tgt
        normed_tgt = self.normalization1(tgt)
        normed_memory = self.normalization1(memory)
        x, weights = self.multiHeadAttention(q=normed_tgt, k=normed_memory, v=normed_memory,
                                             key_padding_mask=key_padding_mask, attn_mask=attn_mask)
        x = x + residual
        # Feed-forward sub-layer.
        residual = x
        x = self.feedForward(self.normalization2(x))
        return x + residual, weights


class Encoder(nn.Module):
    """Stack of EncoderLayer modules applied sequentially."""

    def __init__(self, embedding_dim=128, n_head=8, n_layer=1):
        super(Encoder, self).__init__()
        self.layers = nn.ModuleList([EncoderLayer(embedding_dim, n_head) for _ in range(n_layer)])

    def forward(self, src, key_padding_mask=None, attn_mask=None):
        out = src
        for layer in self.layers:
            out = layer(out, key_padding_mask=key_padding_mask, attn_mask=attn_mask)
        return out


class Decoder(nn.Module):
    """Stack of DecoderLayer modules; returns the output together with the
    attention weights of the LAST layer only."""

    def __init__(self, embedding_dim=128, n_head=8, n_layer=1):
        super(Decoder, self).__init__()
        self.layers = nn.ModuleList([DecoderLayer(embedding_dim, n_head) for _ in range(n_layer)])

    def forward(self, tgt, memory, key_padding_mask=None, attn_mask=None):
        # NOTE(review): with n_layer == 0 this would raise on the undefined
        # weights variable, matching the original behavior.
        out = tgt
        for layer in self.layers:
            out, weights = layer(out, memory, key_padding_mask=key_padding_mask, attn_mask=attn_mask)
        return out, weights


class PolicyNet(nn.Module):
    """Policy head: encodes the graph, decodes the current node against it,
    and points to one of the current node's neighbors."""

    def __init__(self, input_dim, embedding_dim):
        super(PolicyNet, self).__init__()
        self.initial_embedding = nn.Linear(input_dim, embedding_dim)  # layer for non-end position
        # Fuses the decoded current-node feature with its raw encoder feature.
        self.current_embedding = nn.Linear(embedding_dim * 2, embedding_dim)

        self.encoder = Encoder(embedding_dim=embedding_dim, n_head=8, n_layer=6)
        self.decoder = Decoder(embedding_dim=embedding_dim, n_head=8, n_layer=1)

        self.pointer = SingleHeadAttention(embedding_dim)

    def encode_graph(self, node_inputs, node_padding_mask, edge_mask):
        """Embed raw node inputs and run the transformer encoder over the graph."""
        embedded = self.initial_embedding(node_inputs)
        return self.encoder(src=embedded, key_padding_mask=node_padding_mask, attn_mask=edge_mask)

    def output_policy(self, enhanced_node_feature, edge_inputs, current_index, edge_padding_mask, node_padding_mask, greedy=False):
        """Return log-probabilities over the current node's neighbors,
        shape batch_size*k_size.

        NOTE(review): ``greedy`` is accepted but never used here — action
        selection presumably happens in the caller; confirm before removing.
        """
        feature_dim = enhanced_node_feature.size(2)
        # assumes edge_inputs holds neighbor node indices as (batch, 1, k) — TODO confirm
        neighbor_index = edge_inputs.permute(0, 2, 1)

        # Look up the encoder features of the neighbors and of the current node.
        neighbor_feature = torch.gather(enhanced_node_feature, 1, neighbor_index.repeat(1, 1, feature_dim))
        current_node_feature = torch.gather(enhanced_node_feature, 1, current_index.repeat(1, 1, feature_dim))

        current_mask = edge_padding_mask if edge_padding_mask is not None else None

        # Cross-attend the current node over the whole graph, then fuse the
        # result with its raw feature before pointing.
        decoded, _ = self.decoder(current_node_feature, enhanced_node_feature, node_padding_mask)
        fused = self.current_embedding(torch.cat((decoded, current_node_feature), dim=-1))

        logp = self.pointer(fused, neighbor_feature, current_mask)
        return logp.squeeze(1)  # batch_size*k_size

    def forward(self, node_inputs, edge_inputs, current_index, node_padding_mask=None, edge_padding_mask=None, edge_mask=None, greedy=False):
        enhanced_node_feature = self.encode_graph(node_inputs, node_padding_mask, edge_mask)
        return self.output_policy(enhanced_node_feature, edge_inputs, current_index, edge_padding_mask, node_padding_mask)


# class QNet(nn.Module):
#     def __init__(self, input_dim, embedding_dim):
#         super(QNet, self).__init__()
#         self.initial_embedding = nn.Linear(input_dim, embedding_dim) # layer for non-end position
#         self.action_embedding = nn.Linear(embedding_dim*3, K_SIZE*N_ROBOTS)
#         # self.action_embedding = nn.Linear(embedding_dim*3, embedding_dim)
#         # self.r_r_encoder = Encoder(embedding_dim=embedding_dim, n_head=8, n_layer=1)
#         # self.r_r_embedding = nn.Linear(embedding_dim * 2, embedding_dim)
#         self.encoder = Encoder(embedding_dim=embedding_dim, n_head=8, n_layer=6)
#         self.decoder = Decoder(embedding_dim=embedding_dim, n_head=8, n_layer=1)

#         self.q_values_layer = nn.Linear(embedding_dim, 1)
#         self.linear_layer = nn.Linear(in_features=(K_SIZE*N_ROBOTS)**2, out_features=K_SIZE*N_ROBOTS)

#     def encode_graph(self, node_inputs, node_padding_mask, edge_mask):
#         embedding_feature = self.initial_embedding(node_inputs)
#         embedding_feature = self.encoder(src=embedding_feature, key_padding_mask=node_padding_mask, attn_mask=edge_mask)

#         return embedding_feature

#     def output_q_values(self, enhanced_node_feature, edge_inputs, current_index, edge_padding_mask, node_padding_mask):
#         # edge_inputs current_index 一个机器人，改为三个机器人
#         # edge_inputs (bs, 1, 27), current_index (bs, 1, 3)
#         # k_size = edge_inputs.size()[2]
#         current_edge = edge_inputs
#         current_edge = current_edge.permute(0, 2, 1)
#         current_index = current_index.permute(0, 2, 1)
#         embedding_dim = enhanced_node_feature.size()[2] 
#         # enhanced_node_feature (bs, 400, 128), current_edge (bs, 27, 1)

#         neigboring_feature = torch.gather(enhanced_node_feature, 1, current_edge.repeat(1, 1, embedding_dim)) # (bs, 27, embedding_dim)
        
#         current_node_feature = torch.gather(enhanced_node_feature, 1, current_index.repeat(1, 1, embedding_dim)) #(batch_size, 3, embedding_dim)

#         enhanced_current_node_feature, attention_weights = self.decoder(current_node_feature, enhanced_node_feature, node_padding_mask)
#         action_features = torch.cat((enhanced_current_node_feature.repeat(1, K_SIZE, 1), current_node_feature.repeat(1, K_SIZE, 1), neigboring_feature), dim=-1)
#         # print(current_node_feature.shape, current_node_feature.shape, neigboring_feature.shape)
#         action_features = self.action_embedding(action_features) # (batch_size, k_size*3, k_size*3)
#         # print('action features: ', action_features.shape)
#         action_features = action_features.flatten(start_dim=1)
#         action_features = self.linear_layer(action_features)
#         # print('action_features: ', action_features.shape)

#         tensor = torch.split(action_features, K_SIZE, dim=1)
#         batch_size = edge_inputs.shape[0]
#         # print('tensor: ', tensor[0].shape)
#         # -----------------------------------------------------------------------------------
#         # # 扩展维度
#         # t1 = tensor[0].unsqueeze(2)  # (batch_size, 25, 1)
#         # t2 = tensor[1].unsqueeze(1)  # (batch_size, 1, 25)
        
#         # # # 广播相加
#         # result = t1 + t2  # (batch_size, 25, 25)
#         # -----------------------------------------------------------------------------------
#         # 循环相加
#         result = torch.zeros((tensor[0].shape[0], tensor[0].shape[1], tensor[1].shape[1]), device=action_features.device)    # (batch_size, K_SIZE, K_SIZE)

#         # 使用for循环相加
#         for i in range(tensor[0].shape[1]):  # 遍历第一个张量的列
#             for j in range(tensor[1].shape[1]):  # 遍历第二个张量的列
#                 result[:, i, j] = tensor[0][:, i] + tensor[1][:, j]
#         # -----------------------------------------------------------------------------------
#         result = torch.zeros((batch_size, K_SIZE, K_SIZE), device=action_features.device)    # (batch_size, K_SIZE, K_SIZE)
        
#         # # 创建所有的 i, j 组合
#         # i_indices, j_indices = np.meshgrid(range(K_SIZE), range(K_SIZE), indexing='ij')

#         # # 将 (i, j) 转换为一维索引
#         # flat_indices = np.ravel_multi_index((i_indices.ravel(), j_indices.ravel()), dims=(K_SIZE, K_SIZE))

#         # # 计算 next_logp 的值
#         # result[:, flat_indices, 0] = tensor[0][:, i_indices.ravel()] + tensor[1][:, j_indices.ravel()]
        
#         # # 创建所有的 i, j 组合
#         # i_indices, j_indices = torch.meshgrid(
#         #     torch.arange(K_SIZE, device=tensor[0].device),
#         #     torch.arange(K_SIZE, device=tensor[0].device),
#         #     indexing='ij'
#         # )

#         # # 将 (i, j) 转换为一维索引
#         # flat_indices = (i_indices * K_SIZE + j_indices).flatten()

#         # # 计算 result 的值
#         # result[:, flat_indices, 0] = tensor[0][:, i_indices.flatten()] + tensor[1][:, j_indices.flatten()]


#         # -----------------------------------------------------------------------------------
#         action_features = result.reshape(batch_size, -1)  # reshape成(batch_size, 25*25)

#         q_values = action_features.unsqueeze(2) # (batch_size, k_size^3, 1)
#         # q_values = self.q_values_layer(action_features) # batch_size * k_size * 1
#         # print('q_values', q_values.size())

#         if edge_padding_mask is not None:
#             current_mask = edge_padding_mask
#         else:
#             current_mask = None
        
#         #assert 0 in current_mask
#         current_mask = current_mask.permute(0, 2, 1) # (batch_size, k_size*3, 1)
#         # current_mask = current_mask.repeat(1, K_SIZE*3, 1)
#         zero = torch.zeros_like(q_values).to(q_values.device)
#         new_current_mask = zero
#         # -----------------------------------------------------------------------------------
#         # 3v1
#         # for i in range(K_SIZE):
#         #     for j in range(K_SIZE):
#         #         for k in range(K_SIZE):
#         #             new_current_mask[:, i*1 + j*K_SIZE + k*K_SIZE**2, :] = current_mask[:, i, :] * current_mask[:, j + K_SIZE, :] * current_mask[:, k + K_SIZE*2, :]
#         # 2v1
#         for i in range(K_SIZE):
#             for j in range(K_SIZE):
#                 new_current_mask[:, i*1 + j*K_SIZE, :] = current_mask[:, i, :] * current_mask[:, j + K_SIZE, :]
#         # -----------------------------------------------------------------------------------
#         # # 假设 current_mask 的形状为 (batch_size, 2 * K_SIZE, feature_dim)
#         # batch_size, total_K, feature_dim = current_mask.shape
#         # # K_SIZE = total_K // 2  # 确保总维度是 2 * K_SIZE

#         # # 将 current_mask 分成两部分：前 K_SIZE 和后 K_SIZE
#         # part1 = current_mask[:, :K_SIZE, :]  # (batch_size, K_SIZE, feature_dim)
#         # part2 = current_mask[:, K_SIZE:, :]  # (batch_size, K_SIZE, feature_dim)

#         # # 扩展维度以实现广播相乘
#         # part1_exp = part1.unsqueeze(2)  # (batch_size, K_SIZE, 1, feature_dim)
#         # part2_exp = part2.unsqueeze(1)  # (batch_size, 1, K_SIZE, feature_dim)

#         # # 广播相乘
#         # result = part1_exp * part2_exp  # (batch_size, K_SIZE, K_SIZE, feature_dim)

#         # # 重新调整形状为目标形状
#         # new_current_mask = result.reshape(batch_size, K_SIZE**2, feature_dim)  # (batch_size, K_SIZE**2, feature_dim)
#         # -----------------------------------------------------------------------------------
#         # 确保索引生成和计算均在 GPU 上
#         # i_indices, j_indices = torch.meshgrid(
#         #     torch.arange(K_SIZE, device=current_mask.device),
#         #     torch.arange(K_SIZE, device=current_mask.device),
#         #     indexing="ij",
#         # )

#         # # 展平索引
#         # i_indices = i_indices.flatten()
#         # j_indices = j_indices.flatten()

#         # # 检查索引范围
#         # # assert i_indices.max() < current_mask.shape[1], "i_indices 超出范围"
#         # # assert (j_indices + K_SIZE).max() < current_mask.shape[1], "j_indices + K_SIZE 超出范围"

#         # # 计算 new_current_mask 的值
#         # new_current_mask[:, :K_SIZE**2, :] = (
#         #     current_mask[:, i_indices, :] * current_mask[:, j_indices + K_SIZE, :]
#         # )

#         # -----------------------------------------------------------------------------------
#         q_values = torch.where(new_current_mask == 1, zero, q_values)

#         return q_values, attention_weights

#     def forward(self, node_inputs, edge_inputs, current_index, node_padding_mask=None, edge_padding_mask=None,
#                 edge_mask=None):
#         enhanced_node_feature = self.encode_graph(node_inputs, node_padding_mask, edge_mask)
#         q_values, attention_weights = self.output_q_values(enhanced_node_feature, edge_inputs, current_index, edge_padding_mask, node_padding_mask)
#         return q_values, attention_weights

class QNet(nn.Module):
    """Q-network over the joint action space of N_ROBOTS robots.

    Each robot chooses one of its K_SIZE neighbor candidates; per-robot scores
    are broadcast-summed into a (batch, K_SIZE**N_ROBOTS, 1) joint Q tensor.
    """

    def __init__(self, input_dim, embedding_dim):
        super(QNet, self).__init__()
        self.initial_embedding = nn.Linear(input_dim, embedding_dim) # layer for non-end position
        self.action_embedding = nn.Linear(embedding_dim*3, K_SIZE*N_ROBOTS)
        self.encoder = Encoder(embedding_dim=embedding_dim, n_head=8, n_layer=6)
        self.decoder = Decoder(embedding_dim=embedding_dim, n_head=8, n_layer=1)

        # NOTE(review): q_values_layer is unused on the current forward path.
        self.q_values_layer = nn.Linear(embedding_dim, 1)
        self.linear_layer = nn.Linear(in_features=(K_SIZE*N_ROBOTS)**2, out_features=K_SIZE*N_ROBOTS)

    def encode_graph(self, node_inputs, node_padding_mask, edge_mask):
        """Embed raw node inputs and run the transformer encoder over the graph."""
        embedding_feature = self.initial_embedding(node_inputs)
        embedding_feature = self.encoder(src=embedding_feature, key_padding_mask=node_padding_mask, attn_mask=edge_mask)

        return embedding_feature

    def output_q_values(self, enhanced_node_feature, edge_inputs, current_index, edge_padding_mask, node_padding_mask):
        """Compute joint Q-values and return (q_values, attention_weights).

        ``edge_padding_mask`` is currently unused: masking of invalid joint
        actions was disabled upstream; the parameter is kept for interface
        compatibility.
        """
        # assumes edge_inputs is (batch, 1, K_SIZE*N_ROBOTS) node indices and
        # current_index is (batch, 1, N_ROBOTS) — TODO confirm with caller
        current_edge = edge_inputs.permute(0, 2, 1)
        current_index = current_index.permute(0, 2, 1)
        embedding_dim = enhanced_node_feature.size()[2]

        # Encoder features of each robot's neighbor candidates and of the
        # robots' current nodes.
        neigboring_feature = torch.gather(enhanced_node_feature, 1, current_edge.repeat(1, 1, embedding_dim))
        current_node_feature = torch.gather(enhanced_node_feature, 1, current_index.repeat(1, 1, embedding_dim))

        # Cross-attend the current nodes over the whole graph.
        enhanced_current_node_feature, attention_weights = self.decoder(current_node_feature, enhanced_node_feature, node_padding_mask)
        # Tile the per-robot features K_SIZE times so they align with the
        # flattened neighbor list, then score every (robot, neighbor) pair.
        action_features = torch.cat((enhanced_current_node_feature.repeat(1, K_SIZE, 1),
                                     current_node_feature.repeat(1, K_SIZE, 1),
                                     neigboring_feature), dim=-1)
        action_features = self.action_embedding(action_features)
        action_features = action_features.flatten(start_dim=1)
        action_features = self.linear_layer(action_features)  # (batch, K_SIZE*N_ROBOTS)

        batch_size = edge_inputs.shape[0]
        # One (batch, K_SIZE) score chunk per robot.
        per_robot_scores = torch.split(action_features, K_SIZE, dim=1)

        # Broadcast-sum the per-robot scores into the joint action tensor:
        #   result[b, i_0, ..., i_{R-1}] = sum_r per_robot_scores[r][b, i_r]
        # Generalized to any N_ROBOTS: the previous code hard-coded exactly
        # five robots (tensor[0]..tensor[4] with chained unsqueeze calls) and
        # crashed for any other robot count.
        result = None
        for r in range(N_ROBOTS):
            shape = [batch_size] + [1] * N_ROBOTS
            shape[r + 1] = K_SIZE
            term = per_robot_scores[r].reshape(shape)
            result = term if result is None else result + term

        action_features = result.reshape(batch_size, K_SIZE**N_ROBOTS)
        q_values = action_features.unsqueeze(2)  # (batch, K_SIZE**N_ROBOTS, 1)

        return q_values, attention_weights

    def forward(self, node_inputs, edge_inputs, current_index, node_padding_mask=None, edge_padding_mask=None,
                edge_mask=None):
        enhanced_node_feature = self.encode_graph(node_inputs, node_padding_mask, edge_mask)
        q_values, attention_weights = self.output_q_values(enhanced_node_feature, edge_inputs, current_index, edge_padding_mask, node_padding_mask)
        return q_values, attention_weights
  