#!/usr/bin/env python3
"""
Transformer位置编码测试套件
验证位置编码算法的正确性
"""

import torch
import pytest
import sys
import os
import math

# Add the src directory to the path
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'src'))

from models.text_generator import TextGenerator

class TestPositionalEncoding:
    """Unit tests for ``TextGenerator._create_positional_encoding``.

    The helper is expected to return a ``(max_length, d_model)`` float32
    tensor following the interleaved sinusoidal scheme from "Attention Is
    All You Need": even columns are sines, odd columns are cosines, and
    each (even, odd) column pair shares one frequency.
    """

    def test_positional_encoding_shape(self):
        """The encoding tensor must be a Tensor of shape (max_length, d_model)."""
        generator = TextGenerator(vocab_size=1000, embedding_dim=512, nhead=8, num_layers=6)

        # (max_length, d_model): short, medium, and maximal sequence lengths.
        test_cases = [
            (10, 512),
            (100, 512),
            (512, 512),
        ]

        for max_length, d_model in test_cases:
            pos_encoding = generator._create_positional_encoding(max_length, d_model)

            assert isinstance(pos_encoding, torch.Tensor)
            assert pos_encoding.shape == (max_length, d_model)

    def test_positional_encoding_values(self):
        """Numeric properties: value range, sin/cos identity, position 0."""
        generator = TextGenerator(vocab_size=1000, embedding_dim=64, nhead=8, num_layers=6)

        max_length = 10
        d_model = 64
        pos_encoding = generator._create_positional_encoding(max_length, d_model)

        # Every entry is a sine or cosine, hence bounded by [-1, 1].
        assert torch.all(pos_encoding >= -1.0)
        assert torch.all(pos_encoding <= 1.0)

        # Each (even, odd) column pair uses the *same* frequency, so
        # sin^2 + cos^2 == 1 holds exactly up to float rounding.  The
        # previous tolerance of 0.1 was loose enough to hide real bugs;
        # a vectorized check with a tight tolerance replaces the double loop.
        sin_part = pos_encoding[:, 0::2]
        cos_part = pos_encoding[:, 1::2]
        assert torch.allclose(
            sin_part**2 + cos_part**2, torch.ones_like(sin_part), atol=1e-5
        )

        # At position 0: sin(0) == 0 and cos(0) == 1 *exactly* in float32,
        # so both checks can use a tight tolerance (was 0.1 for the cosines).
        assert torch.allclose(pos_encoding[0, 0::2], torch.zeros(d_model // 2), atol=1e-6)
        assert torch.allclose(pos_encoding[0, 1::2], torch.ones(d_model // 2), atol=1e-6)

    def test_positional_encoding_consistency(self):
        """The encoding is deterministic: repeated calls agree element-wise."""
        generator = TextGenerator(vocab_size=1000, embedding_dim=128, nhead=8, num_layers=6)

        # Two calls with identical arguments must produce identical tensors.
        pos_encoding1 = generator._create_positional_encoding(20, 128)
        pos_encoding2 = generator._create_positional_encoding(20, 128)
        assert torch.allclose(pos_encoding1, pos_encoding2)

        # A different model dimension yields a tensor of a different shape.
        pos_encoding3 = generator._create_positional_encoding(20, 256)
        assert pos_encoding1.shape != pos_encoding3.shape

    def test_positional_encoding_edge_cases(self):
        """Boundary cases: single position, tiny and odd model dimensions."""
        generator = TextGenerator(vocab_size=1000, embedding_dim=512, nhead=8, num_layers=6)

        # Sequence length of 1.
        pos_encoding = generator._create_positional_encoding(1, 512)
        assert pos_encoding.shape == (1, 512)

        # Minimal even dimension.
        pos_encoding = generator._create_positional_encoding(10, 2)
        assert pos_encoding.shape == (10, 2)

        # Odd dimension (last sine column has no cosine partner).
        pos_encoding = generator._create_positional_encoding(5, 3)
        assert pos_encoding.shape == (5, 3)

    def test_positional_encoding_mathematical_correctness(self):
        """Compare position 0 against a hand-computed reference encoding."""
        generator = TextGenerator(vocab_size=1000, embedding_dim=16, nhead=8, num_layers=6)

        max_length = 5
        d_model = 16
        pos_encoding = generator._create_positional_encoding(max_length, d_model)

        # Reference: PE[pos, 2i]   = sin(pos * w_i)
        #            PE[pos, 2i+1] = cos(pos * w_i)
        # with w_i = exp(-2i * ln(10000) / d_model).
        position = 0
        expected_encoding = torch.zeros(d_model)
        for i in range(0, d_model, 2):
            # NOTE(review): the original guarded this with a dead
            # `if i < d_model` check (always true inside the range) and let
            # `div_term` leak into the next branch; computed unconditionally
            # here so both columns visibly share one frequency.
            div_term = math.exp(i * -(math.log(10000.0) / d_model))
            expected_encoding[i] = math.sin(position * div_term)
            if i + 1 < d_model:  # guard for odd d_model (not hit with d_model=16)
                expected_encoding[i + 1] = math.cos(position * div_term)

        assert torch.allclose(pos_encoding[0], expected_encoding, atol=1e-6)

    def test_positional_encoding_gradient(self):
        """The encoding is not trainable by default but supports opting in."""
        generator = TextGenerator(vocab_size=1000, embedding_dim=32, nhead=8, num_layers=6)

        pos_encoding = generator._create_positional_encoding(10, 32)

        # Positional encodings are fixed, not learned parameters.
        assert not pos_encoding.requires_grad

        # Opting in to autograd must still work.  `requires_grad_()` is the
        # idiomatic in-place form and raises a clear error on non-leaf tensors.
        pos_encoding.requires_grad_(True)

        # Minimal computation graph to exercise backprop.
        pos_encoding.sum().backward()

        assert pos_encoding.grad is not None
        assert pos_encoding.grad.shape == pos_encoding.shape

    def test_positional_encoding_device_consistency(self):
        """The encoding is created on CPU by default."""
        generator = TextGenerator(vocab_size=1000, embedding_dim=64, nhead=8, num_layers=6)

        pos_encoding_cpu = generator._create_positional_encoding(10, 64)
        assert pos_encoding_cpu.device.type == 'cpu'

        # With a GPU available, moving the model must not fail.
        # NOTE(review): `_create_positional_encoding` may not follow the
        # model's device automatically — confirm and extend this test once
        # the intended device behavior is specified.
        if torch.cuda.is_available():
            generator.model = generator.model.cuda()

    def test_positional_encoding_in_integration(self):
        """Positional encoding works end-to-end inside the generate loop."""
        generator = TextGenerator(vocab_size=20, embedding_dim=64, nhead=8, num_layers=6)

        # batch_size=1, seq_len=5 prompt.
        input_tokens = torch.tensor([[1, 2, 3, 4, 5]])

        generated = generator.generate(input_tokens, max_length=10)

        # Output is the prompt extended to max_length tokens.
        assert generated.shape == (1, 10)

        # The prompt prefix must be preserved verbatim.
        assert torch.equal(generated[:, :5], input_tokens)

    def test_positional_encoding_extreme_values(self):
        """Very long sequences and a degenerate 1-dimensional encoding."""
        generator = TextGenerator(vocab_size=1000, embedding_dim=512, nhead=8, num_layers=6)

        # ~400 MB tensor; skip gracefully on memory-constrained machines.
        try:
            pos_encoding = generator._create_positional_encoding(200000, 512)
            assert pos_encoding.shape == (200000, 512)
        except (RuntimeError, MemoryError):
            pytest.skip("Memory insufficient for large positional encoding test")

        # Smallest possible model dimension.
        pos_encoding = generator._create_positional_encoding(5, 1)
        assert pos_encoding.shape == (5, 1)

    def test_positional_encoding_dtype(self):
        """The encoding defaults to float32, matching the model's dtype."""
        generator = TextGenerator(vocab_size=1000, embedding_dim=32, nhead=8, num_layers=6)

        pos_encoding = generator._create_positional_encoding(10, 32)
        assert pos_encoding.dtype == torch.float32

        # Explicitly casting the model to float32 must not change this.
        generator.model = generator.model.float()
        pos_encoding_float = generator._create_positional_encoding(10, 32)
        assert pos_encoding_float.dtype == torch.float32

if __name__ == "__main__":
    pytest.main([__file__])