import torch
import torch.nn as nn
import torch.nn.functional as F


class LSTMSequenceTagger(nn.Module):
    """Bidirectional-LSTM sequence tagging model.

    Pipeline: embedding -> (optionally packed) LSTM -> additive attention
    re-weighting over positions -> per-position tag classifier.
    """
    def __init__(self,
                 vocab_size,         # vocabulary size
                 embed_dim=128,      # word-embedding dimension
                 hidden_dim=256,     # LSTM hidden size (per direction)
                 num_tags=5,         # number of tag classes (e.g. entities + O)
                 num_layers=2,       # number of stacked LSTM layers
                 bidirectional=True, # use a bidirectional LSTM
                 dropout=0.1):       # dropout probability
        super().__init__()

        # 1. Embedding layer; index 0 is assumed to be the padding token.
        self.embedding = nn.Embedding(
            num_embeddings=vocab_size,
            embedding_dim=embed_dim,
            padding_idx=0
        )

        # Feature width coming out of the LSTM (doubled when bidirectional).
        lstm_out_dim = hidden_dim * (2 if bidirectional else 1)

        # 2. LSTM encoder.
        self.lstm = nn.LSTM(
            input_size=embed_dim,
            hidden_size=hidden_dim,
            num_layers=num_layers,
            bidirectional=bidirectional,
            batch_first=True,  # inputs are [batch_size, seq_len, feature]
            # Inter-layer dropout is only defined for >1 layers
            # (PyTorch warns otherwise).
            dropout=dropout if num_layers > 1 else 0
        )

        # 3. Attention over sequence positions (re-weights key features).
        self.attention = AttentionLayer(hidden_dim=lstm_out_dim)

        # 4. Per-position classification head.
        self.classifier = nn.Sequential(
            nn.Linear(
                in_features=lstm_out_dim,
                out_features=hidden_dim
            ),
            nn.ReLU(),
            nn.Dropout(dropout),
            nn.Linear(hidden_dim, num_tags)
        )

        self.dropout = nn.Dropout(dropout)

    def forward(self, x, lengths=None, mask=None):
        """
        Args:
            x: token indices, shape [batch_size, seq_len]
            lengths: true sequence lengths (enables packed LSTM input),
                shape [batch_size]
            mask: padding mask, shape [batch_size, seq_len]; 1 marks
                valid positions
        Returns:
            logits: per-position tag scores,
                shape [batch_size, seq_len, num_tags]
        """
        seq_len = x.size(1)

        # 1. Embed tokens and apply input dropout.
        embed = self.dropout(self.embedding(x))  # [B, T, embed_dim]

        # 2. LSTM encoding (packed for variable-length batches).
        if lengths is not None:
            # pack_padded_sequence requires lengths on the CPU as int64;
            # accept either a tensor (any device) or a plain list.
            lengths_cpu = torch.as_tensor(lengths, dtype=torch.int64).cpu()
            packed_embed = nn.utils.rnn.pack_padded_sequence(
                embed, lengths_cpu, batch_first=True, enforce_sorted=False
            )
            packed_lstm_out, _ = self.lstm(packed_embed)
            # BUGFIX: without total_length, the unpacked output is padded
            # only to max(lengths); if every sequence in the batch is
            # shorter than seq_len, its time dimension no longer matches
            # `mask` and the attention masking crashes.
            lstm_out, _ = nn.utils.rnn.pad_packed_sequence(
                packed_lstm_out, batch_first=True, total_length=seq_len
            )
        else:
            # Fixed-length batch: feed directly.
            lstm_out, _ = self.lstm(embed)  # [B, T, hidden*dirs]

        # 3. Attention re-weighting of positions.
        attn_out = self.attention(lstm_out, mask)  # [B, T, hidden*dirs]

        # 4. Classify each position.
        logits = self.classifier(attn_out)  # [B, T, num_tags]

        return logits


class AttentionLayer(nn.Module):
    """Additive attention that re-weights each sequence position.

    Produces a scalar score per position, normalizes the scores with a
    softmax over the time axis, and scales the input features by the
    resulting weights (padding positions get ~0 weight when masked).
    """
    def __init__(self, hidden_dim):
        super().__init__()
        # Two-layer scorer: hidden_dim -> hidden_dim//2 -> scalar score.
        self.w1 = nn.Linear(hidden_dim, hidden_dim // 2)
        self.w2 = nn.Linear(hidden_dim // 2, 1)

    def forward(self, x, mask=None):
        """Scale ``x`` per position by its attention weight.

        Args:
            x: features, shape [batch_size, seq_len, hidden_dim]
            mask: optional [batch_size, seq_len]; 0 marks padding
        Returns:
            ``x`` multiplied by its per-position weights, same shape as
            ``x``.
        """
        # Scalar score per position: tanh projection, then linear read-out.
        projected = torch.tanh(self.w1(x))
        scores = self.w2(projected).squeeze(-1)  # [batch_size, seq_len]

        # Push padded positions to a huge negative score so the softmax
        # assigns them (numerically) zero weight.
        if mask is not None:
            scores = scores.masked_fill(mask == 0, -1e9)

        # Normalize over the time axis; weights sum to 1 per sequence.
        attn_weights = F.softmax(scores, dim=1)

        # Broadcast the [B, T] weights across the feature dimension.
        return x * attn_weights.unsqueeze(-1)


# 测试代码
# Quick smoke test: build the tagger, run a random batch, check shapes.
if __name__ == "__main__":
    # Toy hyper-parameters.
    vocab = 5000
    n_batch = 16
    n_steps = 20
    n_labels = 5  # e.g. B-LOC, I-LOC, B-PER, I-PER, O

    # Build the model.
    tagger = LSTMSequenceTagger(
        vocab_size=vocab,
        embed_dim=128,
        hidden_dim=256,
        num_tags=n_labels,
        num_layers=2,
        bidirectional=True
    )

    # Random token ids and per-sequence lengths in [5, n_steps].
    x = torch.randint(0, vocab, (n_batch, n_steps))
    lengths = torch.randint(5, n_steps + 1, (n_batch,))
    # Padding mask: 1.0 on valid positions, 0.0 on padding.
    positions = torch.arange(n_steps).unsqueeze(0)  # [1, n_steps]
    mask = (positions < lengths.unsqueeze(1)).float()

    # Forward pass.
    logits = tagger(x, lengths, mask)
    print(f"输入形状: {x.shape}")
    print(f"输出形状: {logits.shape}")  # expected: [16, 20, 5]