"""
Transformer模型完整测试脚本
测试所有模块化组件的功能和集成
"""

import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

import torch
import torch.nn as nn
from Transformer.module import (
    PositionWiseFFN, AddNorm, DotProductAttention, MultiHeadAttention,
    PositionalEncoding, EncoderLayer, Encoder, DecoderLayer, Decoder, Transformer
)

def test_positional_encoding():
    """Test the positional-encoding module.

    Feeds tensors in both common layouts ([seq_len, batch, d_model] and
    [batch, seq_len, d_model]) and asserts the module is shape-preserving:
    positional encoding is additive, so the output shape must equal the
    input shape in either layout.
    """
    print("=== 测试位置编码 ===")

    d_model = 512
    max_len = 100
    pe = PositionalEncoding(d_model, max_len)

    # Two input layouts; the module should accept either.
    x1 = torch.randn(10, 32, d_model)  # [seq_len, batch_size, d_model]
    x2 = torch.randn(32, 10, d_model)  # [batch_size, seq_len, d_model]

    output1 = pe(x1)
    output2 = pe(x2)

    # An additive encoding must not change the tensor shape.
    assert output1.shape == x1.shape, f"shape mismatch: {output1.shape} != {x1.shape}"
    assert output2.shape == x2.shape, f"shape mismatch: {output2.shape} != {x2.shape}"

    print(f"输入形状1: {x1.shape} -> 输出形状: {output1.shape}")
    print(f"输入形状2: {x2.shape} -> 输出形状: {output2.shape}")
    print("位置编码测试通过！\n")

def test_encoder_decoder_layers():
    """Test a single encoder layer and a single decoder layer.

    Both layer types are residual (self-attention / cross-attention + FFN
    with add&norm), so their outputs must have exactly the same shape as
    their primary input; this is asserted rather than only printed.
    """
    print("=== 测试编码器层 ===")

    batch_size, seq_len, d_model = 2, 10, 512
    num_heads, d_ff = 8, 2048

    # Encoder layer under test.
    encoder_layer = EncoderLayer(d_model, num_heads, d_ff)
    x = torch.randn(batch_size, seq_len, d_model)

    # Source padding-style mask: zero out the last 5 key positions.
    mask = torch.ones(batch_size, seq_len, seq_len)
    mask[:, :, 5:] = 0

    output = encoder_layer(x, mask)
    # Residual structure: output shape must equal input shape.
    assert output.shape == x.shape, f"encoder layer changed shape: {output.shape} != {x.shape}"
    print(f"编码器层输入形状: {x.shape}")
    print(f"编码器层输出形状: {output.shape}")

    print("=== 测试解码器层 ===")

    # Decoder layer under test (shorter target sequence than source).
    decoder_layer = DecoderLayer(d_model, num_heads, d_ff)
    tgt_seq_len = 8

    x = torch.randn(batch_size, tgt_seq_len, d_model)
    memory = torch.randn(batch_size, seq_len, d_model)

    # Causal (autoregressive) mask: upper triangle marks future positions.
    # NOTE(review): whether 1 means "masked" or "keep" depends on the
    # MultiHeadAttention convention in Transformer.module — confirm there.
    tgt_mask = torch.triu(torch.ones(tgt_seq_len, tgt_seq_len), diagonal=1)
    tgt_mask = tgt_mask.unsqueeze(0).expand(batch_size, -1, -1)

    output = decoder_layer(x, memory, src_mask=mask, tgt_mask=tgt_mask)
    assert output.shape == x.shape, f"decoder layer changed shape: {output.shape} != {x.shape}"
    print(f"解码器层输入形状: x{x.shape}, memory{memory.shape}")
    print(f"解码器层输出形状: {output.shape}")
    print("编码器/解码器层测试通过！\n")

def test_encoder_decoder():
    """Test the full (multi-layer) encoder and decoder stacks.

    Runs token-id inputs through the stacks and asserts the batch and
    sequence dimensions of the outputs. The feature dimension is defined
    by the project module (embedding size vs. vocab projection), so only
    the leading dimensions are pinned here.
    """
    print("=== 测试编码器 ===")

    vocab_size = 1000
    batch_size, seq_len = 2, 10
    d_model, num_heads, d_ff, num_layers = 512, 8, 2048, 2

    encoder = Encoder(vocab_size, d_model, num_heads, d_ff, num_layers)

    # Random token ids as the source sequence.
    src = torch.randint(0, vocab_size, (batch_size, seq_len))

    # Source mask: zero out the last 5 key positions.
    src_mask = torch.ones(batch_size, seq_len, seq_len)
    src_mask[:, :, 5:] = 0

    encoder_output = encoder(src, src_mask)
    # Batch and sequence dims must be preserved; last dim is presumably
    # d_model — TODO confirm against Encoder's implementation.
    assert encoder_output.shape[:2] == (batch_size, seq_len), (
        f"unexpected encoder output dims: {encoder_output.shape}"
    )
    print(f"编码器输入形状: {src.shape}")
    print(f"编码器输出形状: {encoder_output.shape}")

    print("=== 测试解码器 ===")

    decoder = Decoder(vocab_size, d_model, num_heads, d_ff, num_layers)
    tgt_seq_len = 8

    # Random token ids as the target sequence.
    tgt = torch.randint(0, vocab_size, (batch_size, tgt_seq_len))

    # Causal (autoregressive) target mask.
    tgt_mask = torch.triu(torch.ones(tgt_seq_len, tgt_seq_len), diagonal=1)
    tgt_mask = tgt_mask.unsqueeze(0).expand(batch_size, -1, -1)

    decoder_output = decoder(tgt, encoder_output, src_mask=src_mask, tgt_mask=tgt_mask)
    assert decoder_output.shape[:2] == (batch_size, tgt_seq_len), (
        f"unexpected decoder output dims: {decoder_output.shape}"
    )
    print(f"解码器输入形状: tgt{tgt.shape}, memory{encoder_output.shape}")
    print(f"解码器输出形状: {decoder_output.shape}")
    print("编码器/解码器测试通过！\n")

def test_transformer():
    """Test the end-to-end Transformer model.

    Asserts that the model emits one logit vector per target position
    ([batch, tgt_seq_len, tgt_vocab_size]) and that softmax over the
    last dimension yields rows summing to 1 (valid probability
    distributions), instead of merely printing the mean sum.
    """
    print("=== 测试完整Transformer模型 ===")

    src_vocab_size, tgt_vocab_size = 1000, 1000
    batch_size, src_seq_len, tgt_seq_len = 2, 10, 8
    d_model, num_heads, d_ff, num_layers = 512, 8, 2048, 2

    transformer = Transformer(src_vocab_size, tgt_vocab_size, d_model,
                             num_heads, d_ff, num_layers)

    # Random token ids for source and target.
    src = torch.randint(0, src_vocab_size, (batch_size, src_seq_len))
    tgt = torch.randint(0, tgt_vocab_size, (batch_size, tgt_seq_len))

    # Source mask: zero out the last 5 key positions.
    src_mask = torch.ones(batch_size, src_seq_len, src_seq_len)
    src_mask[:, :, 5:] = 0

    # Causal mask produced by the model's own helper, broadcast to batch.
    tgt_mask = transformer.generate_square_subsequent_mask(tgt_seq_len)
    tgt_mask = tgt_mask.unsqueeze(0).expand(batch_size, -1, -1)

    # Forward pass.
    output = transformer(src, tgt, src_mask=src_mask, tgt_mask=tgt_mask)

    # One logit per target-vocab entry for every target position.
    assert output.shape == (batch_size, tgt_seq_len, tgt_vocab_size), (
        f"unexpected output shape: {output.shape}"
    )

    print(f"Transformer输入形状: src{src.shape}, tgt{tgt.shape}")
    print(f"Transformer输出形状: {output.shape}")
    print(f"源序列掩码形状: {src_mask.shape}")
    print(f"目标序列掩码形状: {tgt_mask.shape}")

    # Softmax over logits must give rows that each sum to 1.
    probs = torch.softmax(output, dim=-1)
    row_sums = probs.sum(dim=-1)
    assert torch.allclose(row_sums, torch.ones_like(row_sums), atol=1e-5), (
        "softmax rows do not sum to 1"
    )
    print(f"输出概率分布形状: {probs.shape}")
    print(f"概率分布和: {row_sums.mean().item():.4f}")

    print("Transformer模型测试通过！\n")

def test_training_workflow():
    """Test one full optimizer step (forward, loss, backward, update).

    Improvements over the original: the vocabulary size is read from the
    model output (`output.size(-1)`) instead of being hard-coded a second
    time, the loss is asserted to be finite, and gradient flow is checked
    so a silently-detached graph fails the test.
    """
    print("=== 测试训练流程 ===")

    # Small model so the test runs quickly.
    transformer = Transformer(src_vocab_size=100, tgt_vocab_size=100,
                             d_model=64, num_heads=4, d_ff=128, num_layers=2)

    # Optimizer and loss; index 0 is treated as the padding token.
    optimizer = torch.optim.Adam(transformer.parameters(), lr=0.001)
    criterion = nn.CrossEntropyLoss(ignore_index=0)

    # Synthetic training batch; ids start at 1 to avoid the padding index.
    batch_size, src_len, tgt_len = 4, 6, 5
    src = torch.randint(1, 100, (batch_size, src_len))
    tgt_input = torch.randint(1, 100, (batch_size, tgt_len))
    tgt_output = torch.randint(1, 100, (batch_size, tgt_len))

    # Causal target mask from the model's helper, broadcast to batch.
    tgt_mask = transformer.generate_square_subsequent_mask(tgt_len)
    tgt_mask = tgt_mask.unsqueeze(0).expand(batch_size, -1, -1)

    # One training step.
    transformer.train()
    optimizer.zero_grad()

    # Forward pass.
    output = transformer(src, tgt_input, tgt_mask=tgt_mask)

    # Flatten to [batch*tgt_len, vocab]; vocab size taken from the output
    # itself so this stays correct if the model config above changes.
    loss = criterion(output.view(-1, output.size(-1)), tgt_output.view(-1))

    # Backward pass and parameter update.
    loss.backward()
    optimizer.step()

    # The loss must be a finite scalar and gradients must have reached
    # at least one parameter (i.e. the graph was not detached).
    assert torch.isfinite(loss), f"non-finite loss: {loss.item()}"
    assert any(p.grad is not None for p in transformer.parameters()), (
        "no gradients flowed to the model parameters"
    )

    print(f"训练损失: {loss.item():.4f}")
    print(f"模型参数数量: {sum(p.numel() for p in transformer.parameters())}")
    print("训练流程测试通过！\n")

def main():
    """Run every component test in order.

    On any failure, prints the traceback and exits with status 1 so that
    CI (or any caller checking the exit code) sees the failure — the
    original version swallowed exceptions and always exited 0.
    """
    print("开始测试Transformer模型所有组件...\n")

    try:
        # Run all component tests; each raises on failure.
        test_positional_encoding()
        test_encoder_decoder_layers()
        test_encoder_decoder()
        test_transformer()
        test_training_workflow()

        print("🎉 所有测试通过！Transformer模型实现完整且功能正常。")
        print("\n模型包含以下模块化组件：")
        print("1. PositionalEncoding - 位置编码")
        print("2. EncoderLayer/Encoder - 编码器层和编码器")
        print("3. DecoderLayer/Decoder - 解码器层和解码器")
        print("4. Transformer - 完整Transformer模型")
        print("5. MultiHeadAttention - 多头注意力机制")
        print("6. DotProductAttention - 缩放点积注意力")
        print("7. PositionWiseFFN - 前馈网络")
        print("8. AddNorm - 残差连接和层归一化")

    except Exception as e:
        print(f"❌ 测试失败: {e}")
        import traceback
        traceback.print_exc()
        # Propagate failure through the process exit code for CI.
        sys.exit(1)

# Run the full test suite only when executed as a script, not on import.
if __name__ == "__main__":
    main()