import torch
import torch.nn as nn
import torch.nn.functional as F

class WordRepresentationTransformer(nn.Module):
    """Denoising autoencoder over bag-of-words (BoW) document vectors.

    A corrupted (masked) BoW vector is projected into a dense embedding,
    refined by a Transformer encoder (treating each document as a length-1
    sequence), and projected back to vocabulary space. Training pushes the
    reconstruction toward the original, uncorrupted BoW distribution via a
    soft-target cross-entropy (denoising) loss.
    """

    def __init__(self, vocab_size, embed_size=200, num_layers=2, nhead=4, dropout=0.2):
        """
        Args:
            vocab_size: size of the vocabulary (BoW dimensionality).
            embed_size: dense embedding dimensionality (Transformer d_model).
            num_layers: number of stacked Transformer encoder layers.
            nhead: number of attention heads (must evenly divide embed_size).
            dropout: dropout rate inside the encoder layers.
        """
        super().__init__()
        self.vocab_size = vocab_size

        # A simple linear layer mapping the high-dimensional sparse BoW
        # vector into a low-dimensional dense space.
        self.input_projection = nn.Linear(vocab_size, embed_size)

        # Standard Transformer encoder; batch_first so inputs are (B, S, E).
        encoder_layer = nn.TransformerEncoderLayer(
            d_model=embed_size,
            nhead=nhead,
            dim_feedforward=embed_size * 4,
            dropout=dropout,
            activation='relu',
            batch_first=True,
        )
        self.transformer_encoder = nn.TransformerEncoder(
            encoder_layer,
            num_layers=num_layers,
        )

        # Output layer mapping the Transformer output back to vocabulary space.
        self.output_projection = nn.Linear(embed_size, vocab_size)

        # NOTE: the previous unused `nn.CrossEntropyLoss` member was removed —
        # the denoising loss is computed manually in forward() because the
        # target `bow` is a soft distribution, not class indices.

    def forward(self, bow, channel_mask):
        """Run the denoising forward pass.

        Args:
            bow: original bag-of-words input, shape [batch_size, vocab_size].
            channel_mask: 0/1 corruption mask, shape [batch_size, vocab_size];
                zeros mark the vocabulary entries to drop (noise injection).

        Returns:
            Tuple of (recon, dcloss, doc_embed):
                recon: softmax-normalized reconstruction, [batch_size, vocab_size].
                dcloss: scalar denoising loss against the original `bow`.
                doc_embed: dense document embedding, [batch_size, embed_size].
        """
        # 1. Inject noise: zero out the entries where the mask is 0.
        masked_bow = bow * channel_mask

        # 2. Project into embedding space:
        # [batch_size, vocab_size] -> [batch_size, embed_size]
        input_embed = self.input_projection(masked_bow)

        # 3. Transformer encoding. The encoder expects (batch, seq_len,
        # features); each document is treated as a length-1 sequence.
        # [B, E] -> [B, 1, E] -> encoder -> [B, 1, E] -> [B, E]
        transformer_output = self.transformer_encoder(input_embed.unsqueeze(1)).squeeze(1)

        # 4. Project back to vocabulary space: reconstructed logits z.
        # [batch_size, embed_size] -> [batch_size, vocab_size]
        z = self.output_projection(transformer_output)

        # 5. Denoising loss (dcloss): soft-target cross-entropy between the
        # reconstruction and the original (uncorrupted) bow, i.e.
        # -sum(bow * log_softmax(z)) averaged over the batch.
        log_softmax_z = F.log_softmax(z, dim=1)
        dcloss = -torch.sum(log_softmax_z * bow, dim=1).mean()

        # Return the reconstructed distribution, the denoising loss, and the
        # document embedding.
        return F.softmax(z, dim=1), dcloss, transformer_output
