# -*- coding: utf-8 -*-
import torch
from torch import nn
from egnn_module import ZEOEGNN_Sparse
from fp_module import FingerPrintModule
from craft_module import CraftModule
from torch_geometric.nn import global_mean_pool, TopKPooling
from bicrossattention import BidirectionalCrossAttentionSimple
from mlp_module import MLP

# from torch_geometric.nn.pool import TopKPooling

class ZEOEGNN(nn.Module):
    """Baseline multi-modal classifier.

    Three feature branches — molecular fingerprints (MACCS + ECFP), an
    E(n)-equivariant GNN over the 3D structure, and synthesis ("craft")
    parameters — are concatenated and fed to a single linear read-out.

    Args mirror the sub-module constructors; ``dropout`` and ``aggr`` are
    accepted for interface compatibility but unused here.
    """

    def __init__(
            self,
            n_layers,
            node_dim,
            out_node_nf,
            pos_dim=3,
            edge_attr_dim=0,
            hidden_f=128,
            dropout=0.,
            aggr="add",
            maccs_input_dim=167,
            maccs_hidden_dim=334,  # 167 * 2
            maccs_output_dim=256,
            ecfp_input_dim=2048,
            ecfp_hidden_dims=(1024, 512),
            ecfp_output_dim=256,
            use_attention=True,
            craft_dim=29,
            craft_hidden=58,
            craft_out_feat=29,
    ):
        super().__init__()
        # Keep the full configuration on the instance for introspection.
        self.n_layers = n_layers
        self.node_dim = node_dim
        self.pos_dim = pos_dim
        self.edge_attr_dim = edge_attr_dim
        self.hidden_f = hidden_f
        self.out_node_nf = out_node_nf
        self.maccs_input_dim = maccs_input_dim
        self.maccs_hidden_dim = maccs_hidden_dim
        self.maccs_output_dim = maccs_output_dim
        self.ecfp_input_dim = ecfp_input_dim
        self.ecfp_hidden_dims = ecfp_hidden_dims
        self.ecfp_output_dim = ecfp_output_dim
        self.use_attention = use_attention
        self.craft_dim = craft_dim
        self.craft_hidden = craft_hidden
        self.craft_out_feat = craft_out_feat

        # Fingerprint branch (MACCS + ECFP)
        self.fp_module = FingerPrintModule(maccs_input_dim, maccs_hidden_dim, maccs_output_dim,
                                           ecfp_input_dim, ecfp_hidden_dims, ecfp_output_dim, use_attention)

        # EGNN branch (3D graph features)
        self.egnn = ZEOEGNN_Sparse(node_dim=node_dim, out_node_nf=out_node_nf, n_layers=n_layers, pos_dim=pos_dim,
                                   edge_attr_dim=edge_attr_dim, hidden_f=hidden_f)

        # Global (craft / process parameter) branch
        self.craft = CraftModule(input_size=craft_dim, hidden_size=craft_hidden, output_size=craft_out_feat)

        # Gated fusion network. NOTE: not used in forward() (plain concat is
        # used instead); kept so existing checkpoints still load.
        # FIX: input dim was the hard-coded ``256 + hidden_f``; derive the
        # fingerprint part from ecfp_output_dim (identical under defaults).
        self.fusion = nn.Sequential(
            nn.Linear(ecfp_output_dim + hidden_f, hidden_f),
            nn.ReLU(),
            nn.Linear(hidden_f, 1),
            nn.Sigmoid()
        )

        # Output layer.
        # FIX: input dim was hard-coded ``256 + 128 + craft_out_feat``, which
        # silently breaks whenever ecfp_output_dim or hidden_f differ from
        # their defaults; derive it from the configured dimensions instead
        # (identical value under the default configuration).
        self.classifier = nn.Linear(ecfp_output_dim + hidden_f + craft_out_feat, out_node_nf)

    def forward(self, node_attr, pos, edge_index, edge_attr, batch, ecfp, maccs, craft_feat):
        """Return class logits of shape [B, out_node_nf]."""
        # Fingerprint branch
        fp_feat = self.fp_module(maccs_feat=maccs, ecfp_feat=ecfp)  # [B, ecfp_output_dim]

        # EGNN branch (second return value unused here)
        egnn_feat, _ = self.egnn(node_attr=node_attr, pos=pos, edge_index=edge_index, batch=batch,
                                 edge_attr=edge_attr, mode="conv")  # [B, hidden_f]

        # Craft branch
        craft_feat = self.craft(craft_feat)  # [B, craft_out_feat]

        # Plain concatenation fusion
        fused_feat = torch.cat([craft_feat, fp_feat, egnn_feat], dim=1)

        return self.classifier(fused_feat)


class CrossModalAttention(nn.Module):
    """Single-head cross-modal attention (currently unused).

    Lets information flow from a context modality into a query modality via
    standard scaled-free dot-product attention (no scaling factor applied).
    """

    def __init__(self, dim):
        super().__init__()
        # Separate projections for query, key and value; all map dim -> dim.
        self.query = nn.Linear(dim, dim)
        self.key = nn.Linear(dim, dim)
        self.value = nn.Linear(dim, dim)
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, x1, x2):
        """Attend x1 over x2 (information flows x2 -> x1).

        x1: [batch, seq_len_1, dim] (query side)
        x2: [batch, seq_len_2, dim] (key/value side)
        returns: [batch, seq_len_1, dim]
        """
        queries = self.query(x1)
        keys = self.key(x2)
        values = self.value(x2)

        # Attention weights: [batch, seq_len_1, seq_len_2]
        scores = queries @ keys.transpose(-2, -1)
        weights = self.softmax(scores)

        return weights @ values


class ZEOEGNN2(nn.Module):
    """Variant of ``ZEOEGNN`` that adds a bidirectional cross-attention
    module between the graph branch and the concatenated craft/fingerprint
    branch before the linear read-out (reaches roughly 0.988 accuracy).
    """

    def __init__(
            self,
            n_layers,
            node_dim,
            out_node_nf,
            pos_dim=3,
            edge_attr_dim=0,
            hidden_f=128,
            dropout=0.,
            aggr="add",
            maccs_input_dim=167,
            maccs_hidden_dim=334,  # 167 * 2
            maccs_output_dim=256,
            ecfp_input_dim=2048,
            ecfp_hidden_dims=(1024, 512),
            ecfp_output_dim=256,
            use_attention=True,
            craft_dim=29,
            craft_hidden=58,
            craft_out_feat=29,
    ):
        super().__init__()
        # Keep the full configuration on the instance for introspection.
        self.n_layers = n_layers
        self.node_dim = node_dim
        self.pos_dim = pos_dim
        self.edge_attr_dim = edge_attr_dim
        self.hidden_f = hidden_f
        self.out_node_nf = out_node_nf
        self.maccs_input_dim = maccs_input_dim
        self.maccs_hidden_dim = maccs_hidden_dim
        self.maccs_output_dim = maccs_output_dim
        self.ecfp_input_dim = ecfp_input_dim
        self.ecfp_hidden_dims = ecfp_hidden_dims
        self.ecfp_output_dim = ecfp_output_dim
        self.use_attention = use_attention
        self.craft_dim = craft_dim
        self.craft_hidden = craft_hidden
        self.craft_out_feat = craft_out_feat

        # Fingerprint branch (MACCS + ECFP).
        self.fp_module = FingerPrintModule(maccs_input_dim, maccs_hidden_dim, maccs_output_dim,
                                           ecfp_input_dim, ecfp_hidden_dims, ecfp_output_dim, use_attention)
        # 3D graph branch.
        self.egnn = ZEOEGNN_Sparse(node_dim=node_dim, out_node_nf=out_node_nf, n_layers=n_layers,
                                   pos_dim=pos_dim, edge_attr_dim=edge_attr_dim, hidden_f=hidden_f)
        # Synthesis / process-parameter branch.
        self.craft = CraftModule(craft_dim, craft_hidden, craft_out_feat)

        # Bidirectional cross-attention between the graph embedding and the
        # concatenated [craft | fingerprint] embedding.
        self.bi_cross_module = BidirectionalCrossAttentionSimple(dim=hidden_f,
                                                                 context_dim=craft_out_feat + ecfp_output_dim,
                                                                 heads=1,
                                                                 dim_head=hidden_f)

        # Linear read-out over the concatenated attended features.
        self.classifier = nn.Linear(craft_out_feat + ecfp_output_dim + hidden_f, out_node_nf)

    def forward(self, node_attr, pos, edge_index, edge_attr, batch, ecfp, maccs, craft_feat):
        """Return class logits of shape [B, out_node_nf]."""
        # Per-modality feature extraction.
        fingerprint = self.fp_module(maccs, ecfp)  # [B, ecfp_output_dim]
        craft = self.craft(craft_feat)             # [B, craft_out_feat]
        graph, _ = self.egnn(node_attr, pos, edge_index, batch, edge_attr, mode="conv")  # [B, hidden_f]

        # Cross-attend the graph embedding against [craft | fingerprint];
        # both sides are lifted to a length-1 sequence for the attention op.
        context = torch.cat([craft, fingerprint], dim=1)  # [B, craft_out_feat + ecfp_output_dim]
        graph_attn, context_attn = self.bi_cross_module(graph.unsqueeze(1), context.unsqueeze(1))

        # Concatenate both attended views and classify.
        fused = torch.cat([graph_attn.squeeze(1), context_attn.squeeze(1)], dim=1)
        return self.classifier(fused)


class ZEOEGNN3(nn.Module):
    """Variant of ``ZEOEGNN2`` with an additional self-attention step over
    the fused feature vector before classification (roughly 0.980 accuracy).
    """

    def __init__(
            self,
            n_layers,
            node_dim,
            out_node_nf,
            pos_dim=3,
            edge_attr_dim=0,
            hidden_f=128,
            dropout=0.,
            aggr="add",
            maccs_input_dim=167,
            maccs_hidden_dim=334,  # 167 * 2
            maccs_output_dim=256,
            ecfp_input_dim=2048,
            ecfp_hidden_dims=(1024, 512),
            ecfp_output_dim=256,
            use_attention=True,
            craft_dim=29,
            craft_hidden=58,
            craft_out_feat=29,
    ):
        super().__init__()
        # Keep the full configuration on the instance for introspection.
        self.n_layers = n_layers
        self.node_dim = node_dim
        self.pos_dim = pos_dim
        self.edge_attr_dim = edge_attr_dim
        self.hidden_f = hidden_f
        self.out_node_nf = out_node_nf
        self.maccs_input_dim = maccs_input_dim
        self.maccs_hidden_dim = maccs_hidden_dim
        self.maccs_output_dim = maccs_output_dim
        self.ecfp_input_dim = ecfp_input_dim
        self.ecfp_hidden_dims = ecfp_hidden_dims
        self.ecfp_output_dim = ecfp_output_dim
        self.use_attention = use_attention
        self.craft_dim = craft_dim
        self.craft_hidden = craft_hidden
        self.craft_out_feat = craft_out_feat

        # Fingerprint branch (MACCS + ECFP).
        self.fp_module = FingerPrintModule(maccs_input_dim, maccs_hidden_dim, maccs_output_dim,
                                           ecfp_input_dim, ecfp_hidden_dims, ecfp_output_dim, use_attention)
        # 3D graph branch.
        self.egnn = ZEOEGNN_Sparse(node_dim=node_dim, out_node_nf=out_node_nf, n_layers=n_layers,
                                   pos_dim=pos_dim, edge_attr_dim=edge_attr_dim, hidden_f=hidden_f)
        # Synthesis / process-parameter branch.
        self.craft = CraftModule(craft_dim, craft_hidden, craft_out_feat)

        # Bidirectional cross-attention between the graph embedding and the
        # concatenated [craft | fingerprint] embedding.
        self.bi_cross_module = BidirectionalCrossAttentionSimple(dim=hidden_f,
                                                                 context_dim=craft_out_feat + ecfp_output_dim,
                                                                 heads=1,
                                                                 dim_head=hidden_f)

        # Self-attention enhancement over the fused vector (single head,
        # no bias; expects [seq, batch, embed] inputs — batch_first is False).
        self.self_attn = nn.MultiheadAttention(embed_dim=craft_out_feat + ecfp_output_dim + hidden_f,
                                               num_heads=1,
                                               bias=False)

        # Linear read-out.
        self.classifier = nn.Linear(craft_out_feat + ecfp_output_dim + hidden_f, out_node_nf)

    def forward(self, node_attr, pos, edge_index, edge_attr, batch, ecfp, maccs, craft_feat):
        """Return class logits of shape [B, out_node_nf]."""
        # Per-modality feature extraction.
        fingerprint = self.fp_module(maccs, ecfp)  # [B, ecfp_output_dim]
        craft = self.craft(craft_feat)             # [B, craft_out_feat]
        graph, _ = self.egnn(node_attr, pos, edge_index, batch, edge_attr, mode="conv")  # [B, hidden_f]

        # Cross-attend the graph embedding against [craft | fingerprint].
        context = torch.cat([craft, fingerprint], dim=1)
        graph_attn, context_attn = self.bi_cross_module(graph.unsqueeze(1), context.unsqueeze(1))
        fused = torch.cat([graph_attn.squeeze(1), context_attn.squeeze(1)], dim=1)

        # Self-attention over a length-1 sequence: lift to [1, B, E], attend,
        # and drop the sequence axis again.
        seq = fused.unsqueeze(0)
        enhanced, _ = self.self_attn(seq, seq, seq)
        enhanced = enhanced.squeeze(0)

        return self.classifier(enhanced)


class ZEOEGNN32(nn.Module):
    """Sibling of ``ZEOEGNN3``: no self-attention step, but a deeper (MLP)
    classification head on top of the concatenated cross-attended features
    (roughly 0.980 accuracy).
    """

    def __init__(
            self,
            n_layers,
            node_dim,
            out_node_nf,
            pos_dim=3,
            edge_attr_dim=0,
            hidden_f=128,
            dropout=0.,
            aggr="add",
            maccs_input_dim=167,
            maccs_hidden_dim=334,  # 167 * 2
            maccs_output_dim=256,
            ecfp_input_dim=2048,
            ecfp_hidden_dims=(1024, 512),
            ecfp_output_dim=256,
            use_attention=True,
            craft_dim=29,
            craft_hidden=58,
            craft_out_feat=29,
    ):
        super().__init__()
        # Keep the full configuration on the instance for introspection.
        self.n_layers = n_layers
        self.node_dim = node_dim
        self.pos_dim = pos_dim
        self.edge_attr_dim = edge_attr_dim
        self.hidden_f = hidden_f
        self.out_node_nf = out_node_nf
        self.maccs_input_dim = maccs_input_dim
        self.maccs_hidden_dim = maccs_hidden_dim
        self.maccs_output_dim = maccs_output_dim
        self.ecfp_input_dim = ecfp_input_dim
        self.ecfp_hidden_dims = ecfp_hidden_dims
        self.ecfp_output_dim = ecfp_output_dim
        self.use_attention = use_attention
        self.craft_dim = craft_dim
        self.craft_hidden = craft_hidden
        self.craft_out_feat = craft_out_feat

        # Fingerprint branch (MACCS + ECFP)
        self.fp_module = FingerPrintModule(maccs_input_dim, maccs_hidden_dim, maccs_output_dim,
                                           ecfp_input_dim, ecfp_hidden_dims, ecfp_output_dim, use_attention)
        # 3D graph branch
        self.egnn = ZEOEGNN_Sparse(node_dim=node_dim, out_node_nf=out_node_nf, n_layers=n_layers,
                                   pos_dim=pos_dim, edge_attr_dim=edge_attr_dim, hidden_f=hidden_f)
        # Synthesis / process-parameter branch
        self.craft = CraftModule(craft_dim, craft_hidden, craft_out_feat)

        # Bidirectional cross-attention between the graph embedding and the
        # concatenated [craft | fingerprint] embedding.
        self.bi_cross_module = BidirectionalCrossAttentionSimple(dim=hidden_f,
                                                                 context_dim=craft_out_feat + ecfp_output_dim, heads=1,
                                                                 dim_head=hidden_f)

        # Classification head.
        # FIX: input dim previously used ``maccs_output_dim``, but the fused
        # feature's fingerprint component is ``ecfp_output_dim`` (the
        # cross-attention context is craft_out_feat + ecfp_output_dim, as in
        # ZEOEGNN2/ZEOEGNN3). The two only coincide at their default value
        # of 256; derive the dim from ecfp_output_dim so non-default
        # configurations work.
        self.classifier = nn.Sequential(
            nn.Linear(ecfp_output_dim + hidden_f + craft_out_feat, 256),
            nn.ReLU(),
            nn.Linear(256, out_node_nf)
        )

    def forward(self, node_attr, pos, edge_index, edge_attr, batch, ecfp, maccs, craft_feat):
        """Return class logits of shape [B, out_node_nf]."""
        # Per-modality feature extraction
        fp_feat = self.fp_module(maccs, ecfp)  # [B, ecfp_output_dim]
        craft_feat = self.craft(craft_feat)    # [B, craft_out_feat]
        egnn_feat, _ = self.egnn(node_attr, pos, edge_index, batch, edge_attr, mode="conv")  # [B, hidden_f]

        # Cross-attention between graph features and [craft | fingerprint]
        craft_fp_feat = torch.cat([craft_feat, fp_feat], dim=1)  # [B, craft_out_feat + ecfp_output_dim]
        egnn_feat_attn, fp_craft_feat_attn = self.bi_cross_module(egnn_feat.unsqueeze(1), craft_fp_feat.unsqueeze(1))

        # [B, hidden_f + craft_out_feat + ecfp_output_dim]
        fused_feat = torch.cat([egnn_feat_attn.squeeze(1), fp_craft_feat_attn.squeeze(1)], dim=1)

        return self.classifier(fused_feat)


class ZEOEGNN4(nn.Module):
    """Variant with cross-attention, BatchNorm on the attended features,
    residual connections back onto the raw branch features, and an MLP head.
    """

    def __init__(
            self,
            n_layers,
            node_dim,
            out_node_nf,
            pos_dim=3,
            edge_attr_dim=0,
            hidden_f=128,
            dropout=0.,
            aggr="add",
            maccs_input_dim=167,
            maccs_hidden_dim=334,  # 167 * 2
            maccs_output_dim=256,
            ecfp_input_dim=2048,
            ecfp_hidden_dims=(1024, 512),
            ecfp_output_dim=256,
            use_attention=True,
            craft_dim=29,
            craft_hidden=58,
            craft_out_feat=29,
    ):
        super().__init__()
        # Keep the full configuration on the instance for introspection.
        self.n_layers = n_layers
        self.node_dim = node_dim
        self.pos_dim = pos_dim
        self.edge_attr_dim = edge_attr_dim
        self.hidden_f = hidden_f
        self.out_node_nf = out_node_nf
        self.maccs_input_dim = maccs_input_dim
        self.maccs_hidden_dim = maccs_hidden_dim
        self.maccs_output_dim = maccs_output_dim
        self.ecfp_input_dim = ecfp_input_dim
        self.ecfp_hidden_dims = ecfp_hidden_dims
        self.ecfp_output_dim = ecfp_output_dim
        self.use_attention = use_attention
        self.craft_dim = craft_dim
        self.craft_hidden = craft_hidden
        self.craft_out_feat = craft_out_feat

        # Fingerprint branch (MACCS + ECFP)
        self.fp_module = FingerPrintModule(maccs_input_dim, maccs_hidden_dim, maccs_output_dim,
                                           ecfp_input_dim, ecfp_hidden_dims, ecfp_output_dim, use_attention)
        # 3D graph branch
        self.egnn = ZEOEGNN_Sparse(node_dim=node_dim, out_node_nf=out_node_nf, n_layers=n_layers,
                                   pos_dim=pos_dim, edge_attr_dim=edge_attr_dim, hidden_f=hidden_f)
        # Synthesis / process-parameter branch
        self.craft = CraftModule(craft_dim, craft_hidden, craft_out_feat)

        # Bidirectional cross-attention between the graph embedding and the
        # concatenated [craft | fingerprint] embedding.
        self.bi_cross_module = BidirectionalCrossAttentionSimple(dim=hidden_f,
                                                                 context_dim=craft_out_feat + ecfp_output_dim, heads=1,
                                                                 dim_head=hidden_f)

        # Normalization of the attended features before the residual add
        self.norm_egnn = nn.BatchNorm1d(hidden_f)
        self.norm_craft_fp = nn.BatchNorm1d(craft_out_feat + ecfp_output_dim)

        # MLP classification head
        self.classifier = MLP(input_size=craft_out_feat + ecfp_output_dim + hidden_f,
                              hidden_size=256,
                              output_size=out_node_nf)

    def forward(self, node_attr, pos, edge_index, edge_attr, batch, ecfp, maccs, craft_feat):
        """Return class logits of shape [B, out_node_nf]."""
        # Per-modality feature extraction
        fp_feat = self.fp_module(maccs, ecfp)  # [B, ecfp_output_dim]
        craft_feat = self.craft(craft_feat)    # [B, craft_out_feat]
        egnn_feat, _ = self.egnn(node_attr, pos, edge_index, batch, edge_attr, mode="conv")  # [B, hidden_f]

        # Cross-attention between graph features and [craft | fingerprint]
        craft_fp_feat = torch.cat([craft_feat, fp_feat], dim=1)  # [B, craft_out_feat + ecfp_output_dim]
        egnn_feat_attn, craft_fp_feat_attn = self.bi_cross_module(egnn_feat.unsqueeze(1), craft_fp_feat.unsqueeze(1))

        # Normalize the attended features (squeeze the length-1 sequence axis
        # exactly once — the original code applied redundant extra
        # ``.squeeze(1)`` calls afterwards, which would wrongly drop a
        # feature dimension equal to 1)
        egnn_feat_attn = self.norm_egnn(egnn_feat_attn.squeeze(1))
        craft_fp_feat_attn = self.norm_craft_fp(craft_fp_feat_attn.squeeze(1))

        # Residual connections onto the raw branch features
        egnn_feat = egnn_feat + egnn_feat_attn
        craft_fp_feat = craft_fp_feat + craft_fp_feat_attn

        # [B, hidden_f + craft_out_feat + ecfp_output_dim]
        concat_feat = torch.cat([egnn_feat, craft_fp_feat], dim=1)

        return self.classifier(concat_feat)

class ZEOEGNN5(nn.Module):
    """Ablation-study model.

    Flags:
    - use_graph: enable the 3D graph-structure branch (default True)
    - use_fp:    enable the fingerprint branch (default True)
    - use_craft: enable the process-parameter branch (default True)

    Sub-modules are only instantiated for enabled branches; the classifier
    input width is computed from whichever branches are active.
    """

    def __init__(
            self,
            n_layers,
            node_dim,
            out_node_nf,
            pos_dim=3,
            edge_attr_dim=0,
            hidden_f=128,
            dropout=0.,
            aggr="add",
            maccs_input_dim=167,
            maccs_hidden_dim=334,
            maccs_output_dim=256,
            ecfp_input_dim=2048,
            ecfp_hidden_dims=(1024, 512),
            ecfp_output_dim=256,
            use_attention=True,
            craft_dim=29,
            craft_hidden=58,
            craft_out_feat=29,
            use_graph=True,
            use_fp=True,
            use_craft=True
    ):
        super().__init__()
        self.use_graph = use_graph
        self.use_fp = use_fp
        self.use_craft = use_craft

        # Fingerprint branch (only when enabled)
        if self.use_fp:
            self.fp_module = FingerPrintModule(maccs_input_dim, maccs_hidden_dim, maccs_output_dim,
                                               ecfp_input_dim, ecfp_hidden_dims, ecfp_output_dim, use_attention)

        # 3D graph branch (only when enabled)
        if self.use_graph:
            self.egnn = ZEOEGNN_Sparse(node_dim=node_dim, out_node_nf=out_node_nf, n_layers=n_layers,
                                       pos_dim=pos_dim, edge_attr_dim=edge_attr_dim, hidden_f=hidden_f)

        # Process-parameter branch (only when enabled)
        if self.use_craft:
            self.craft = CraftModule(craft_dim, craft_hidden, craft_out_feat)

        # Classifier input width depends on the active branches.
        input_dim = (hidden_f if self.use_graph else 0) \
            + (ecfp_output_dim if self.use_fp else 0) \
            + (craft_out_feat if self.use_craft else 0)

        # Cross-attention is only meaningful when the graph branch can attend
        # over at least one context branch.
        if self.use_graph and (self.use_fp or self.use_craft):
            context_dim = (ecfp_output_dim if self.use_fp else 0) \
                + (craft_out_feat if self.use_craft else 0)
            self.bi_cross_module = BidirectionalCrossAttentionSimple(
                dim=hidden_f,
                context_dim=context_dim,
                heads=1,
                dim_head=hidden_f
            )

        # MLP classification head
        self.classifier = MLP(
            input_size=input_dim,
            hidden_size=256,
            output_size=out_node_nf
        )

    def forward(self, node_attr, pos, edge_index, edge_attr, batch, ecfp, maccs, craft_feat):
        """Return class logits over the concatenation of the active branches."""
        parts = []
        graph_feat = fp_feat = craft_out = None

        # Graph branch
        if self.use_graph:
            graph_feat, _ = self.egnn(node_attr, pos, edge_index, batch, edge_attr, mode="conv")
            parts.append(graph_feat)

        # Fingerprint branch
        if self.use_fp:
            fp_feat = self.fp_module(maccs, ecfp)
            parts.append(fp_feat)

        # Process-parameter branch
        if self.use_craft:
            craft_out = self.craft(craft_feat)
            parts.append(craft_out)

        # Cross-attention: replace the raw graph features with the attended
        # ones, using whichever context branches are active.
        if self.use_graph and (self.use_fp or self.use_craft):
            context = torch.cat([t for t in (fp_feat, craft_out) if t is not None], dim=1)
            attended, _ = self.bi_cross_module(graph_feat.unsqueeze(1), context.unsqueeze(1))
            parts[0] = attended.squeeze(1)

        return self.classifier(torch.cat(parts, dim=1))
    
if __name__ == "__main__":
    # Smoke test: build ablation variants on random data and check shapes.
    # Fix the random seed for reproducibility.
    torch.manual_seed(42)

    device = "cpu"
    batch_size = 16

    # Fingerprint test data
    maccs_data = torch.randn(batch_size, 167)  # MACCS fingerprint input
    ecfp_data = torch.randn(batch_size, 2048)  # ECFP fingerprint input

    # Global (craft / process parameter) test data
    glob_n_feat = 256
    craft_feat = torch.randn(batch_size, glob_n_feat)

    # Graph test-data dimensions
    n_nodes = 10
    n_feat = 7
    pos_dim = 3
    edge_attr_dim = 3
    hidden_f = 256
    classes = 227

    # Node features and 3D coordinates (batch_size * n_nodes rows)
    node_attr = torch.randn(batch_size * n_nodes, n_feat)
    pos = torch.randn(batch_size * n_nodes, pos_dim)

    # Edge connectivity (2, num_edges)
    edge_index = torch.tensor([[0, 1, 2, 3, 4, 5, 6, 7],
                               [1, 2, 3, 0, 5, 6, 7, 4]])

    # Edge features (num_edges, edge_attr_dim)
    edge_attr = torch.randn(edge_index.size(1), edge_attr_dim)

    # Batch assignment vector (batch_size * n_nodes,)
    batch = torch.repeat_interleave(torch.arange(batch_size), n_nodes)

    # Earlier single-model check, kept for reference:
    # model = ZEOEGNN2(n_layers=1, node_dim=n_feat, out_node_nf=classes, pos_dim=pos_dim, edge_attr_dim=edge_attr_dim,
    #                  hidden_f=hidden_f, craft_dim=glob_n_feat, craft_hidden=glob_n_feat * 2,
    #                  craft_out_feat=glob_n_feat).to(device)
    # output = model(node_attr, pos, edge_index, edge_attr, batch, ecfp_data, maccs_data, craft_feat=craft_feat)

    # One ZEOEGNN5 ablation variant per disabled branch.
    model_test = {"use_graph=false":ZEOEGNN5(n_layers=1, node_dim=n_feat, out_node_nf=classes, pos_dim=pos_dim, edge_attr_dim=edge_attr_dim,
                     hidden_f=hidden_f, craft_dim=glob_n_feat, craft_hidden=glob_n_feat * 2,
                     craft_out_feat=glob_n_feat, use_graph=False, use_fp=True, use_craft=True).to(device),
                  "use_fp=false":ZEOEGNN5(n_layers=1, node_dim=n_feat, out_node_nf=classes, pos_dim=pos_dim, edge_attr_dim=edge_attr_dim,
                     hidden_f=hidden_f, craft_dim=glob_n_feat, craft_hidden=glob_n_feat * 2,
                     craft_out_feat=glob_n_feat, use_graph=True, use_fp=False, use_craft=True).to(device),
                  "use_craft=false":ZEOEGNN5(n_layers=1, node_dim=n_feat, out_node_nf=classes, pos_dim=pos_dim, edge_attr_dim=edge_attr_dim,
                     hidden_f=hidden_f, craft_dim=glob_n_feat, craft_hidden=glob_n_feat * 2,
                     craft_out_feat=glob_n_feat, use_graph=True, use_fp=True, use_craft=False).to(device),
        }
    # Each variant should emit logits of shape (batch_size, classes).
    for model in model_test.values():
        output = model(node_attr, pos, edge_index, edge_attr, batch, ecfp_data, maccs_data, craft_feat=craft_feat)
        print(output.size())
