class GraphTransformerLayer(nn.Module):
    """Pre-norm Transformer encoder layer whose self-attention can be
    restricted to graph edges via a dense adjacency mask.

    Args:
        d_model: width of the node embeddings.
        nhead: number of attention heads.
        dim_feedforward: hidden width of the position-wise MLP.
        dropout: dropout probability used throughout the layer.
    """

    def __init__(self, d_model=256, nhead=8, dim_feedforward=768, dropout=0.1):
        super().__init__()
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.self_attn = nn.MultiheadAttention(
            embed_dim=d_model,
            num_heads=nhead,
            dropout=dropout,
            batch_first=True
        )
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.activation = nn.ReLU()

    def forward(self, src, adj_mask=None):
        """Run one pre-norm attention + feed-forward block.

        Args:
            src: node features, ``(N, d_model)`` or ``(B, N, d_model)``.
            adj_mask: optional dense adjacency, ``(N, N)`` or ``(B, N, N)``;
                zero entries are *blocked* from attending. ``None`` means
                full (unmasked) attention.

        Returns:
            Tensor of the same shape as ``src``.
        """
        attn_mask = self._create_attention_mask(adj_mask)

        # Pre-norm self-attention sublayer with residual connection.
        src2 = self.norm1(src)
        src2, attn_weights = self.self_attn(
            src2, src2, src2,
            attn_mask=attn_mask
        )
        src = src + self.dropout1(src2)

        # Pre-norm feed-forward sublayer with residual connection.
        src2 = self.norm2(src)
        src2 = self.linear2(self.dropout(self.activation(self.linear1(src2))))
        src = src + self.dropout2(src2)
        return src

    def _create_attention_mask(self, adj_mask):
        """Expand a dense adjacency into a per-head boolean attention mask.

        ``True`` entries mark pairs attention may NOT attend to, i.e.
        pairs whose adjacency value is 0.

        NOTE(review): a node whose adjacency row is all zeros has every key
        masked, which makes the attention softmax produce NaNs — confirm
        upstream graphs guarantee at least one edge (or self-loop) per node.
        """
        if adj_mask is None:
            return None
        blocked = (adj_mask == 0)
        num_heads = self.self_attn.num_heads
        if blocked.dim() == 2:
            # Unbatched (N, N): replicate per head -> (num_heads, N, N).
            return blocked.unsqueeze(0).repeat(num_heads, 1, 1)
        # Batched (B, N, N): MultiheadAttention expects (B * num_heads, N, N)
        # laid out batch-major / head-minor (it is reshaped internally to
        # (B, num_heads, N, N)). The previous ``repeat(num_heads, 1, 1)``
        # produced a head-major tiling, silently pairing masks with the
        # wrong batch items; ``repeat_interleave`` gives the required order.
        return blocked.repeat_interleave(num_heads, dim=0)


class multi_HOGRL_Transformer(nn.Module):
    """Multi-relation graph transformer with gated spanning-tree branches.

    For each relation, node features pass through a stack of
    ``GraphTransformerLayer``s over the relation's dense adjacency. In
    parallel, each tree subgraph is processed by a small MLP projection
    followed by the relation's last transformer layer restricted to the
    tree's adjacency; the tree branches are fused with a learned softmax
    gate and added to the relation output. Per-relation outputs are
    concatenated and classified.

    Args:
        in_feat: input node feature width.
        out_feat: number of output classes.
        relation_nums: number of edge relations.
        d_model: transformer embedding width.
        nhead: attention heads per transformer layer.
        num_layers: transformer layers per relation.
        dim_feedforward: hidden width of the MLPs.
        drop_rate: dropout probability.
        layers_tree: number of tree branches per relation.
        tsne_weight: stored coefficient for downstream embedding losses
            (not used inside ``forward`` itself).
    """

    def __init__(self, in_feat, out_feat, relation_nums=3, d_model=256,
                 nhead=8, num_layers=5, dim_feedforward=768,
                 drop_rate=0.6, layers_tree=2, tsne_weight=0.1):
        super().__init__()
        self.relation_nums = relation_nums
        self.d_model = d_model

        # Shared projection from raw features into the model width.
        self.feature_proj = nn.Linear(in_feat, d_model)

        # One transformer stack per relation.
        self.transformer_layers = nn.ModuleList([
            nn.ModuleList([GraphTransformerLayer(
                d_model=d_model,
                nhead=nhead,
                dim_feedforward=dim_feedforward,
                dropout=drop_rate
            ) for _ in range(num_layers)])
            for _ in range(relation_nums)
        ])

        # Per-relation, per-tree MLP projections applied before the tree
        # attention pass.
        self.tree_projs = nn.ModuleList([
            nn.ModuleList([nn.Sequential(
                nn.Linear(d_model, dim_feedforward),
                nn.ReLU(),
                nn.Linear(dim_feedforward, d_model)
            ) for _ in range(layers_tree)])
            for _ in range(relation_nums)
        ])

        # Scalar gate per tree branch; softmaxed across branches in forward.
        self.gating_networks = nn.ModuleList([
            nn.ModuleList([nn.Linear(d_model, 1)
                           for _ in range(layers_tree)])
            for _ in range(relation_nums)
        ])

        self.classifier = nn.Sequential(
            nn.Linear(relation_nums * d_model, 512),
            nn.ReLU(),
            nn.Dropout(drop_rate),
            nn.Linear(512, out_feat)
        )

        self.tsne_weight = tsne_weight

    def forward(self, x, edge_indexs, sub_nodes=None):
        """Compute class log-probabilities and fused node embeddings.

        Args:
            x: node features, ``(num_nodes, in_feat)``.
            edge_indexs: per-relation containers indexed as
                ``edge_indexs[rel][0]`` (COO edge index of the relation
                graph) and ``edge_indexs[rel][1]`` (iterable of per-tree
                edge indices) — inferred from usage; confirm against caller.
            sub_nodes: optional node indices restricting the batch.

        Returns:
            ``(log_probs, tsne_feats)`` where ``log_probs`` holds
            log-softmax class scores of shape ``(num_nodes, out_feat)`` and
            ``tsne_feats`` is the mean of the per-relation embeddings,
            shape ``(num_nodes, d_model)``.
        """
        if sub_nodes is not None:
            x = x[sub_nodes]

        x = self.feature_proj(x)
        num_nodes = x.size(0)

        relation_outputs = []
        for rel_idx in range(self.relation_nums):
            edge_index = edge_indexs[rel_idx][0]
            adj_matrix = to_dense_adj(edge_index, max_num_nodes=num_nodes)[0]
            tree_indices = edge_indexs[rel_idx][1]

            # Main branch: full relation graph through the transformer stack.
            h = x
            for layer in self.transformer_layers[rel_idx]:
                h = layer(h, adj_matrix.bool())

            # Tree branches: MLP projection of the shared features, then the
            # last transformer layer restricted to each tree's adjacency.
            tree_features = []
            for tree_idx, tree_edges in enumerate(tree_indices):
                tree_adj = to_dense_adj(tree_edges, max_num_nodes=num_nodes)[0]
                # nn.Sequential is callable; no need to iterate its sublayers.
                h_tree = self.tree_projs[rel_idx][tree_idx](x)
                h_tree = self.transformer_layers[rel_idx][-1](h_tree, tree_adj.bool())
                tree_features.append(h_tree)

            # Learned soft gate over tree branches: (N, 1, n_trees).
            gates = torch.stack([
                self.gating_networks[rel_idx][i](feat)
                for i, feat in enumerate(tree_features)
            ], dim=-1)
            alpha = F.softmax(gates, dim=-1)
            fused_tree = sum(feat * alpha[:, :, i]
                             for i, feat in enumerate(tree_features))

            relation_outputs.append(h + fused_tree)

        combined = torch.cat(relation_outputs, dim=-1)

        logits = self.classifier(combined)
        logits = F.log_softmax(logits, dim=-1)

        # Mean over relations gives one embedding per node for visualization.
        tsne_feats = torch.stack(relation_outputs, dim=1).mean(dim=1)

        return logits, tsne_feats