import torch
import torch.nn as nn
from Transformer.module import MultiHeadAttention

def test_multihead_attention():
    """End-to-end smoke test for MultiHeadAttention.

    Walks through the Q/K/V projections, the head-splitting reshape,
    a full forward pass (cross- and self-attention), and masking.
    """
    print("=== 多头注意力机制测试 ===\n")

    # Hyperparameters for the attention module under test.
    d_model, num_heads, dropout = 64, 8, 0.1

    mha = MultiHeadAttention(d_model, num_heads, dropout)
    print("多头注意力模块创建成功")
    print(f"输入维度: {d_model}")
    print(f"注意力头数: {num_heads}")
    print(f"每个头维度: {d_model // num_heads}\n")

    # Random inputs; key/value share a length that differs from the query's
    # so the cross-attention shape handling is exercised.
    batch_size, seq_len_q, seq_len_k = 2, 5, 6
    query = torch.randn(batch_size, seq_len_q, d_model)
    key = torch.randn(batch_size, seq_len_k, d_model)
    value = torch.randn(batch_size, seq_len_k, d_model)

    print("输入张量形状:")
    print(f"query: {query.shape}")
    print(f"key: {key.shape}")
    print(f"value: {value.shape}\n")

    # Step 1: apply the module's learned projections directly.
    print("步骤1: 线性变换得到Q、K、V")
    Q, K, V = mha.W_q(query), mha.W_k(key), mha.W_v(value)
    print(f"Q形状: {Q.shape}")
    print(f"K形状: {K.shape}")
    print(f"V形状: {V.shape}\n")

    # Step 2: reshape Q to (batch, heads, seq, head_dim).
    print("步骤2: 分割成多个头")
    head_dim = d_model // num_heads
    Q_heads = Q.view(batch_size, seq_len_q, num_heads, head_dim).transpose(1, 2)
    print(f"多头Q形状: {Q_heads.shape}")
    print(f"每个头维度: {Q_heads.shape[-1]}\n")

    # Step 3: full forward pass through the module.
    print("步骤3: 完整前向传播")
    output, attention_weights = mha(query, key, value)

    print("输出结果:")
    print(f"输出形状: {output.shape}")
    print(f"注意力权重形状: {attention_weights.shape}")
    print(f"输出与输入形状一致: {output.shape == query.shape}\n")

    # Self-attention: query, key and value are the same tensor.
    print("=== 自注意力测试 ===")
    x = torch.randn(batch_size, seq_len_q, d_model)
    output_self, weights_self = mha(x, x, x)

    print(f"自注意力输出形状: {output_self.shape}")
    print(f"自注意力权重形状: {weights_self.shape}")

    # Inspect the per-head attention matrices
    # (assumes weights are (batch, heads, q_len, k_len) — TODO confirm in module).
    print("\n=== 多头机制验证 ===")
    print(f"注意力权重头数: {weights_self.shape[1]}")
    print(f"每个头的注意力矩阵形状: {weights_self.shape[2:]}")

    # Masking: zero out the last two key positions for every query row.
    print("\n=== 掩码功能测试 ===")
    mask = torch.ones(seq_len_q, seq_len_k)
    mask[:, -2:] = 0
    print(f"掩码矩阵:\n{mask}")

    output_masked, weights_masked = mha(query, key, value, mask)
    print(f"带掩码输出形状: {output_masked.shape}")
    print("掩码功能测试完成!")

def test_parameter_validation():
    """Verify the constructor's divisibility check.

    A d_model that is not divisible by num_heads must raise
    AssertionError; a divisible pair must construct cleanly.
    """
    print("\n=== 参数验证测试 ===")

    # 64 is not divisible by 7, so construction should raise.
    try:
        MultiHeadAttention(d_model=64, num_heads=7)
        print("参数验证失败!")
    except AssertionError as e:
        print(f"参数验证成功: {e}")

    # 64 is divisible by 8, so construction should succeed.
    try:
        MultiHeadAttention(d_model=64, num_heads=8)
        print("有效参数测试通过!")
    except AssertionError as e:
        print(f"有效参数测试失败: {e}")

def test_gradient_flow():
    """Check that gradients propagate through the attention module
    back to its input tensor."""
    print("\n=== 梯度流动测试 ===")

    attention = MultiHeadAttention(d_model=64, num_heads=8)

    # Leaf tensor with requires_grad so .grad is populated by backward().
    inputs = torch.randn(2, 5, 64, requires_grad=True)

    # Self-attention forward pass; attention weights are unused here.
    out, _ = attention(inputs, inputs, inputs)

    # Reduce to a scalar so backward() needs no explicit gradient argument.
    out.sum().backward()

    print(f"输入梯度存在: {inputs.grad is not None}")
    print(f"梯度形状: {inputs.grad.shape}")
    print("梯度流动测试完成!")

if __name__ == "__main__":
    # Run every demo in order, then report overall completion.
    for demo in (test_multihead_attention, test_parameter_validation, test_gradient_flow):
        demo()

    print("\n=== 所有测试完成 ===")
    print("多头注意力机制实现成功!")