import torch
import torch.nn as nn
import itertools

# Debug aid: print tensors in full, without element truncation.
torch.set_printoptions(profile="full")




class EDEE(nn.Module):
    """Entity-pair role classifier.

    Embeds tokens (frozen pretrained vectors) plus word-type tags, encodes the
    sequence with a Transformer encoder, forms every ordered pair of encoded
    token vectors, and classifies each pair into one of ``args.role_role_num``
    role labels.

    Args:
        args: configuration namespace providing ``token_embedding``,
            ``word_embedding_dim``, ``word_type_embedding_dim``, ``dropout``,
            ``nhead``, ``transformer_hidden_size``, ``num_transformer_layers``,
            ``num_mlps``, ``final_hidden_size`` and ``role_role_num``.
            ``args.transformer_input_dim`` is (re)computed here as a side
            effect, as in the original code.
        word_type_tag_num: vocabulary size of the word-type tag set.
    """

    def __init__(self, args, word_type_tag_num):
        super(EDEE, self).__init__()
        self.args = args

        # Token embedding layer: frozen pretrained vectors.
        num_embeddings, embed_dim = args.token_embedding.shape
        self.embed = nn.Embedding.from_pretrained(args.token_embedding, freeze=True)

        # Word-type (tag) embedding layer, trained from scratch.
        self.word_type_embed = nn.Embedding(word_type_tag_num, args.word_type_embedding_dim)

        self.dropout = nn.Dropout(args.dropout)

        # Raw per-token feature width: word embedding + type embedding.
        in_dim = args.word_embedding_dim + args.word_type_embedding_dim

        # d_model must be divisible by nhead; round up to the next multiple.
        if in_dim % args.nhead != 0:
            args.transformer_input_dim = (in_dim // args.nhead + 1) * args.nhead
        else:
            args.transformer_input_dim = in_dim

        # BUG FIX: the original passed d_model=in_dim to the encoder layer, so
        # whenever in_dim was not divisible by nhead the model either failed to
        # construct or (downstream) fed in_dim-wide features to an MLP sized
        # for transformer_input_dim. Project up to the padded width when
        # needed; Identity keeps behavior byte-identical in the divisible case.
        if in_dim == args.transformer_input_dim:
            self.input_proj = nn.Identity()
        else:
            self.input_proj = nn.Linear(in_dim, args.transformer_input_dim)

        # Transformer encoder stack.
        encoder_layer = nn.TransformerEncoderLayer(
            d_model=args.transformer_input_dim,
            nhead=args.nhead,
            dim_feedforward=args.transformer_hidden_size,
            dropout=args.dropout
        )
        self.transformer_encoder = nn.TransformerEncoder(encoder_layer, num_layers=args.num_transformer_layers)

        # MLP classifier over concatenated (head, tail) pair features.
        last_hidden_size = 2 * args.transformer_input_dim
        layers = [nn.Linear(last_hidden_size, args.final_hidden_size), nn.LeakyReLU()]
        for _ in range(args.num_mlps - 1):
            layers += [nn.Linear(args.final_hidden_size, args.final_hidden_size), nn.LeakyReLU()]
        self.fcs = nn.Sequential(*layers)
        self.fc_final = nn.Linear(args.final_hidden_size, args.role_role_num)

    def forward(self, word_ids, wType_ids):
        """Classify every ordered pair of tokens.

        Args:
            word_ids: 1-D LongTensor of token ids, shape ``[n]``.
            wType_ids: 1-D LongTensor of word-type ids, shape ``[n]``.

        Returns:
            Logits of shape ``[n * n, role_role_num]``; row ``i * n + j``
            scores the ordered pair (token i, token j).
        """
        # Token embeddings.
        token_feature = self.embed(word_ids)
        token_feature = self.dropout(token_feature)

        # Word-type embeddings.
        token_type_feature = self.word_type_embed(wType_ids)
        token_type_feature = self.dropout(token_type_feature)

        # Per-token feature: [n, in_dim].
        all_token_feature = torch.cat([token_feature, token_type_feature], dim=1)

        # Pad features up to d_model when in_dim is not a multiple of nhead
        # (Identity otherwise), then add the leading dim the encoder expects.
        # NOTE(review): with batch_first=False this is [seq=1, batch=n, dim],
        # so self-attention runs over a length-1 sequence and never mixes
        # tokens; kept as-is to preserve behavior — confirm intent upstream.
        transformer_input = self.input_proj(all_token_feature).unsqueeze(0)

        # Transformer encoding -> [n, d_model].
        transformer_output = self.transformer_encoder(transformer_input)
        transformer_output = self.dropout(transformer_output).squeeze(0)

        # Build all ordered-pair features in one vectorized step instead of the
        # original O(n^2) Python loop; row order (i*n + j) matches
        # itertools.product(..., repeat=2) exactly.
        n = transformer_output.size(0)
        head = transformer_output.unsqueeze(1).expand(n, n, -1)
        tail = transformer_output.unsqueeze(0).expand(n, n, -1)
        ent_ent_feature = torch.cat([head, tail], dim=-1).reshape(n * n, -1)

        # MLP classification.
        out = self.fcs(ent_ent_feature)
        logits = self.fc_final(out)

        return logits



