#!/usr/bin/env python3
"""
Transformer基础模型
定义Transformer的核心架构和组件
支持多种输入输出维度的配置
"""

import torch
import torch.nn as nn
import torch.nn.functional as F

class TransformerModel(nn.Module):
    """Transformer encoder with a linear projection head.

    Maps sequences of shape (batch_size, seq_len, input_dim) to
    (batch_size, seq_len, output_dim) via a stack of batch-first
    ``nn.TransformerEncoderLayer`` blocks followed by ``nn.Linear``.
    """

    def __init__(self, input_dim, output_dim, nhead, num_layers, dropout=0.1):
        """
        Args:
            input_dim: model dimension (d_model); must be divisible by nhead.
            output_dim: size of the final linear projection.
            nhead: number of attention heads.
            num_layers: number of stacked encoder layers.
            dropout: dropout probability (default 0.1).
        """
        super(TransformerModel, self).__init__()
        self.encoder_layer = nn.TransformerEncoderLayer(
            d_model=input_dim, nhead=nhead, dropout=dropout, batch_first=True
        )
        self.transformer_encoder = nn.TransformerEncoder(
            self.encoder_layer, num_layers=num_layers, enable_nested_tensor=False
        )
        self.decoder = nn.Linear(input_dim, output_dim)

    def forward(self, src, src_mask=None):
        """Run the encoder stack and project to output_dim.

        Args:
            src: input sequence, shape (batch_size, seq_len, input_dim).
            src_mask: optional attention mask of shape (seq_len, seq_len),
                passed through to the encoder. None means full
                (bidirectional) attention.

        Returns:
            Tensor of shape (batch_size, seq_len, output_dim).
        """
        # mask=None is the encoder's own default, so no branch is needed.
        output = self.transformer_encoder(src, mask=src_mask)
        return self.decoder(output)

    def create_causal_mask(self, seq_len, device=None, dtype=None):
        """Build an additive causal mask for autoregressive attention.

        Position i may attend only to positions <= i; disallowed entries
        are -inf so they vanish after the attention softmax.

        Args:
            seq_len: sequence length.
            device: optional device for the mask (default CPU, as before).
                Pass ``src.device`` so the mask matches the input tensor
                instead of always landing on CPU.
            dtype: optional floating dtype (default: torch default, float32).

        Returns:
            mask: (seq_len, seq_len) tensor with 0 on and below the
                diagonal and -inf strictly above it.
        """
        # NOTE: this is an UPPER-triangular -inf mask (blocking future
        # positions); the *allowed* region is the lower triangle. The
        # previous comment described it the other way around.
        full = torch.full(
            (seq_len, seq_len), float('-inf'), device=device, dtype=dtype
        )
        # triu(diagonal=1) zeroes everything on/below the diagonal,
        # leaving -inf only above it.
        return torch.triu(full, diagonal=1)
