#!/usr/bin/env python3
"""
Transformer文本生成器测试套件
验证文本生成模型的正确性和性能
"""

import torch
import pytest
import sys
import os
from unittest.mock import patch, MagicMock

# Add the models directory to the path
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'models'))

from models.text_generator import TextGenerator

# 简单的词汇表映射（模拟中文文本）
class SimpleVocab:
    """Minimal character-level vocabulary for testing Chinese text generation.

    Maps each unique character to an integer ID and back. Characters not in
    the vocabulary encode to ID 0; unknown IDs decode to '?'.
    """

    def __init__(self):
        # Base vocabulary of common Chinese characters.
        # dict.fromkeys deduplicates while preserving insertion order: the
        # original literal contained '天' twice, which left one ID unreachable
        # via encode() and made vocab_size disagree with the number of
        # distinct encodable characters.
        self.chars = list(dict.fromkeys([
            '今', '天', '天', '气', '很', '好', '，', '。', '我', '们',
            '去', '公', '园', '玩', '吧', '！', '这', '是', '一', '个',
            '测', '试', '文', '本', '生', '成', '器', '的', '例', '子'
        ]))
        self.char_to_id = {char: idx for idx, char in enumerate(self.chars)}
        self.id_to_char = {idx: char for idx, char in enumerate(self.chars)}
        self.vocab_size = len(self.chars)

    def encode(self, text):
        """Encode a string into a 1-D LongTensor of token IDs.

        Characters missing from the vocabulary fall back to ID 0.
        """
        tokens = [self.char_to_id.get(char, 0) for char in text]
        return torch.tensor(tokens, dtype=torch.long)

    def decode(self, tokens):
        """Decode a tensor or list of token IDs back into a string.

        Unknown token IDs are rendered as '?'.
        """
        if isinstance(tokens, torch.Tensor):
            tokens = tokens.tolist()
        return ''.join(self.id_to_char.get(token_id, '?') for token_id in tokens)

class TestTextGenerator:
    """Test suite validating correctness and behaviour of TextGenerator."""

    def test_generator_initialization(self):
        """Test that the text generator initializes correctly"""
        generator = TextGenerator(
            vocab_size=10000,
            embedding_dim=512,
            nhead=8,
            num_layers=6
        )

        assert generator is not None
        assert isinstance(generator, TextGenerator)
        assert hasattr(generator, 'model')

    def test_generate_method_exists(self):
        """Test that the generate method exists"""
        generator = TextGenerator(
            vocab_size=10000,
            embedding_dim=512,
            nhead=8,
            num_layers=6
        )

        assert hasattr(generator, 'generate')
        assert callable(generator.generate)

    def test_model_architecture(self):
        """Test the underlying model architecture"""
        generator = TextGenerator(
            vocab_size=10000,
            embedding_dim=512,
            nhead=8,
            num_layers=6
        )

        # Check model parameters
        model = generator.model
        assert model is not None

        # Test forward pass
        batch_size = 2
        seq_length = 10
        x = torch.randn(batch_size, seq_length, 512)  # batch_first=True
        output = model(x)

        assert output.shape == (batch_size, seq_length, 10000)

    def test_preprocess_input_valid_tensor(self):
        """Test _preprocess_input with valid tensor inputs"""
        generator = TextGenerator(vocab_size=100, embedding_dim=64, nhead=4, num_layers=2)

        # Test 1D input: should be promoted to a batch of size 1
        input_1d = torch.tensor([1, 2, 3, 4])
        result_1d = generator._preprocess_input(input_1d)
        assert result_1d.shape == (1, 4)

        # Test 2D input: should pass through unchanged
        input_2d = torch.tensor([[1, 2, 3], [4, 5, 6]])
        result_2d = generator._preprocess_input(input_2d)
        assert result_2d.shape == (2, 3)
        assert torch.equal(result_2d, input_2d)

    def test_preprocess_input_invalid_input(self):
        """Test _preprocess_input with invalid inputs"""
        generator = TextGenerator(vocab_size=100, embedding_dim=64, nhead=4, num_layers=2)

        # Non-tensor inputs must raise TypeError
        with pytest.raises(TypeError, match='Unsupported input type:'):
            generator._preprocess_input(123)
        with pytest.raises(TypeError, match='Unsupported input type:'):
            generator._preprocess_input("invalid input")
        with pytest.raises(TypeError, match='Unsupported input type:'):
            generator._preprocess_input({"invalid": "input"})
        with pytest.raises(TypeError, match='Unsupported input type:'):
            generator._preprocess_input(['1', '2', 3])
        with pytest.raises(TypeError, match='Unsupported input type:'):
            generator._preprocess_input(None)

        # 3D tensors are not valid token batches
        input_3d = torch.randn(2, 3, 4)
        with pytest.raises(ValueError, match="Input tensor must have shape"):
            generator._preprocess_input(input_3d)

    def test_preprocess_input_token_range_validation(self):
        """Test token ID range validation in _preprocess_input"""
        generator = TextGenerator(vocab_size=10, embedding_dim=64, nhead=4, num_layers=2)

        # Valid token IDs (0-9 for vocab_size=10) pass through unchanged
        valid_tokens = torch.tensor([[0, 1, 2, 9]])
        result = generator._preprocess_input(valid_tokens)
        assert torch.equal(result, valid_tokens)

        # Token IDs below 0 are rejected
        invalid_low = torch.tensor([[-1, 0, 1]])
        with pytest.raises(ValueError, match="Token IDs must be in range"):
            generator._preprocess_input(invalid_low)

        # Token IDs at or above vocab_size are rejected
        invalid_high = torch.tensor([[0, 1, 10]])  # vocab_size=10, so 10 is invalid
        with pytest.raises(ValueError, match="Token IDs must be in range"):
            generator._preprocess_input(invalid_high)

    def test_create_positional_encoding(self):
        """Test _create_positional_encoding method"""
        generator = TextGenerator(vocab_size=100, embedding_dim=64, nhead=4, num_layers=2)

        max_length = 10
        d_model = 64
        pos_encoding = generator._create_positional_encoding(max_length, d_model)

        assert pos_encoding.shape == (max_length, d_model)

        # Verify value range (sine and cosine both lie in [-1, 1])
        assert torch.all(pos_encoding >= -1.0)
        assert torch.all(pos_encoding <= 1.0)

        # Verify the alternating sine/cosine pattern
        for pos in range(max_length):
            for i in range(0, d_model, 2):
                # Even indices hold the sine component; sin is 0 at 0,
                # 1 at pi/2 and 0 at pi.
                if i + 1 < d_model:  # ensure a matching cosine component exists
                    # Verify the sine/cosine pair relationship
                    sin_val = pos_encoding[pos, i]
                    cos_val = pos_encoding[pos, i + 1]

                    # sin^2 + cos^2 should be close to 1 (Pythagorean identity)
                    sin_cos_sum = sin_val**2 + cos_val**2
                    assert abs(sin_cos_sum - 1.0) < 0.1  # allow some tolerance

        # Special case for the first position (position 0):
        # all sine values should be 0 and all cosine values should be 1.
        assert torch.allclose(pos_encoding[0, 0::2], torch.zeros(d_model // 2), atol=1e-6)
        assert torch.allclose(pos_encoding[0, 1::2], torch.ones(d_model // 2), atol=0.1)

    @patch('models.text_generator.F.softmax')
    @patch('models.text_generator.torch.multinomial')
    def test_generate_basic_functionality(self, mock_multinomial, mock_softmax):
        """Test basic generate functionality with mocked model"""
        vocab_size = 100
        generator = TextGenerator(vocab_size=vocab_size, embedding_dim=64, nhead=4, num_layers=2)

        # Mock the model to return predictable logits
        def mock_model_forward(x):
            batch_size, seq_len, _ = x.shape  # batch_first=True
            # Return logits where the last token is always the most likely
            logits = torch.zeros(batch_size, seq_len, vocab_size)
            logits[:, -1, 5] = 10.0  # Make token 5 the most likely
            return logits

        generator.model.forward = mock_model_forward

        # Mock softmax to return predictable probabilities
        mock_softmax.return_value = torch.tensor([[0.1] * vocab_size])
        mock_softmax.return_value[:, 5] = 0.9  # The fifth token has high probability

        # Mock multinomial to always return the fifth token
        mock_multinomial.return_value = torch.tensor([[5]])

        # Test generation
        input_tokens = torch.tensor([[1, 2, 3]])
        max_length = 6

        result = generator.generate(input_tokens, max_length=max_length)

        assert result.shape == (1, max_length)
        assert torch.equal(result[:, :3], input_tokens)  # Original tokens preserved
        assert torch.equal(result[:, 3:], torch.tensor([[5, 5, 5]]))  # Generated tokens

    def test_generate_edge_cases(self):
        """Test generate method with edge cases"""
        generator = TextGenerator(vocab_size=100, embedding_dim=64, nhead=4, num_layers=2)

        # Test when input length equals max_length
        input_tokens = torch.tensor([[1, 2, 3, 4, 5]])
        max_length = 5
        result = generator.generate(input_tokens, max_length=max_length)
        assert torch.equal(result, input_tokens)

        # Test when input length exceeds max_length
        input_tokens = torch.tensor([[1, 2, 3, 4, 5, 6]])
        max_length = 5
        result = generator.generate(input_tokens, max_length=max_length)
        assert torch.equal(result, input_tokens)

        # Test invalid max_length
        with pytest.raises(ValueError, match="max_length must be positive"):
            generator.generate(input_tokens, max_length=0)

        with pytest.raises(ValueError, match="max_length must be positive"):
            generator.generate(input_tokens, max_length=-1)

        # Test None max_length (falls back to the generator default)
        # and an excessively large max_length
        result = generator.generate(input_tokens, max_length=None)
        assert result.shape == (1, generator.max_length)

        max_length = 1000000
        with pytest.raises(ValueError, match="max_length must be less than or equal to"):
            result = generator.generate(input_tokens, max_length=max_length)

    @patch('models.text_generator.F.softmax')
    @patch('models.text_generator.torch.multinomial')
    def test_generate_temperature_effect(self, mock_multinomial, mock_softmax):
        """Test temperature parameter effect on generation"""
        vocab_size = 100
        generator = TextGenerator(vocab_size=vocab_size, embedding_dim=64, nhead=4, num_layers=2)

        # Mock model to return specific logits
        def mock_model_forward(x):
            batch_size, seq_len, _ = x.shape  # batch_first=True
            logits = torch.ones(batch_size, seq_len, vocab_size)
            return logits

        generator.model.forward = mock_model_forward

        input_tokens = torch.tensor([[1, 2, 3]])
        max_length = 5

        # Track temperature values passed to softmax
        temperature_values = []

        def mock_softmax_side_effect(logits, dim=None):
            # Infer from the magnitude of the logits whether temperature
            # scaling was applied before softmax.
            max_logit = torch.max(logits).item()

            if max_logit > 5.0:  # logits still large -> unscaled
                temperature_values.append(1.0)  # default temperature (no scaling)
            else:
                temperature_values.append(2.0)  # test temperature (scaled down)

            return torch.ones_like(logits) / logits.size(-1)

        mock_softmax.side_effect = mock_softmax_side_effect
        mock_multinomial.return_value = torch.tensor([[5]])

        # Test with temperature=2.0
        generator.generate(input_tokens, max_length=max_length, temperature=2.0)

        # Verify temperature was applied
        assert 2.0 in temperature_values

    @patch('models.text_generator.F.softmax')
    @patch('models.text_generator.torch.multinomial')
    def test_generate_top_k_filtering(self, mock_multinomial, mock_softmax):
        """Test top-k filtering functionality"""
        vocab_size = 100
        generator = TextGenerator(vocab_size=vocab_size, embedding_dim=64, nhead=4, num_layers=2)

        # Mock model to return specific logits
        def mock_model_forward(x):
            batch_size, seq_len, _ = x.shape  # batch_first=True
            logits = torch.arange(vocab_size, dtype=torch.float32).repeat(batch_size, seq_len, 1)
            return logits

        generator.model.forward = mock_model_forward
        mock_multinomial.return_value = torch.tensor([[5]])

        input_tokens = torch.tensor([[1, 2, 3]])
        max_length = 5
        top_k = 10

        # Mock softmax to verify top-k filtering
        def mock_softmax_side_effect(logits, dim=None):
            # Verify that only top-k values are not -inf
            num_valid = (logits > -float('inf')).sum(dim=-1)
            assert torch.all(num_valid <= top_k)
            return torch.ones_like(logits) / logits.size(-1)

        mock_softmax.side_effect = mock_softmax_side_effect

        # Test with top-k filtering
        result = generator.generate(input_tokens, max_length=max_length, top_k=top_k)

        assert result.shape == (1, max_length)

    def test_generate_greedy_method(self):
        """Test generate_greedy method"""
        generator = TextGenerator(vocab_size=100, embedding_dim=64, nhead=4, num_layers=2)

        # Mock the generate method to verify parameters
        original_generate = generator.generate

        def mock_generate(input_tokens, max_length=None, temperature=1.0, top_k=None):
            # Verify greedy parameters
            assert temperature == 0.0
            assert top_k == 1
            return torch.cat([input_tokens, torch.tensor([[5, 5]])], dim=1)

        generator.generate = mock_generate

        input_tokens = torch.tensor([[1, 2, 3]])
        result = generator.generate_greedy(input_tokens, max_length=5)

        assert result.shape == (1, 5)

        # Restore original method
        generator.generate = original_generate

    def test_generate_batch_processing(self):
        """Test generate method with batch inputs"""
        generator = TextGenerator(vocab_size=100, embedding_dim=64, nhead=4, num_layers=2)

        # Mock model to handle batch processing
        def mock_model_forward(x):
            batch_size, seq_len, _ = x.shape  # batch_first=True
            logits = torch.zeros(batch_size, seq_len, 100)
            logits[:, -1, 5] = 10.0  # Token 5 is most likely
            return logits

        generator.model.forward = mock_model_forward

        # Mock sampling to return predictable results
        with patch('models.text_generator.torch.multinomial') as mock_multinomial, \
             patch('models.text_generator.F.softmax') as mock_softmax:

            mock_softmax.return_value = torch.tensor([[[0.01] * 100] * 2])
            mock_softmax.return_value[:, :, 5] = 0.9
            mock_multinomial.return_value = torch.tensor([[5], [5]])

            # Test batch input
            batch_input = torch.tensor([[1, 2, 3], [4, 5, 6]])
            max_length = 5

            result = generator.generate(batch_input, max_length=max_length)

            assert result.shape == (2, max_length)
            assert torch.equal(result[:, :3], batch_input)

    def test_generate_no_grad_context(self):
        """Test that generate runs in no_grad context"""
        generator = TextGenerator(vocab_size=100, embedding_dim=64, nhead=4, num_layers=2)

        # Mock model to track gradient computation
        gradient_tracked = []

        class TrackedModel:
            def __call__(self, x):
                gradient_tracked.append(x.requires_grad)
                return torch.zeros(x.shape[0], x.shape[1], 100)

        generator.model = TrackedModel()

        input_tokens = torch.tensor([[1, 2, 3]])

        # This should run without gradients
        result = generator.generate(input_tokens, max_length=5)

        # Verify that gradients were not tracked during generation
        assert all(not requires_grad for requires_grad in gradient_tracked)

    def test_chinese_text_generation_basic(self):
        """Basic Chinese generation: feed '今天天气' and expect a continuation."""
        vocab = SimpleVocab()
        generator = TextGenerator(
            vocab_size=vocab.vocab_size,
            embedding_dim=128,
            nhead=4,
            num_layers=2,
            max_length=200
        )

        # Encode the input text
        input_text = "今天天气"
        input_tokens = vocab.encode(input_text).unsqueeze(0)  # (1, seq_len)

        # Mock the model to return plausible logits (biased towards "很好，...")
        def mock_model_forward(x):
            batch_size, seq_len, _ = x.shape
            logits = torch.zeros(batch_size, seq_len, vocab.vocab_size)

            # At the final position, give the continuation characters
            # high logits to simulate generating "很好，..."
            if seq_len >= len(input_text):
                continuation_tokens = [vocab.char_to_id['很'], vocab.char_to_id['好'],
                                    vocab.char_to_id['，'], vocab.char_to_id['我']]

                for token_id in continuation_tokens:
                    logits[:, -1, token_id] = 10.0  # high probability

            return logits

        generator.model.forward = mock_model_forward

        # Mock sampling to simulate generating a sensible continuation
        with patch('models.text_generator.torch.multinomial') as mock_multinomial, \
             patch('models.text_generator.F.softmax') as mock_softmax:

            # softmax returns a proper probability distribution
            def softmax_side_effect(logits, dim=None):
                probs = torch.softmax(logits, dim=-1)
                return probs

            mock_softmax.side_effect = softmax_side_effect

            # Simulated generated sequence: "很好，我们去公园玩吧！"
            continuation_sequence = [
                vocab.char_to_id['很'], vocab.char_to_id['好'], vocab.char_to_id['，'],
                vocab.char_to_id['我'], vocab.char_to_id['们'], vocab.char_to_id['去'],
                vocab.char_to_id['公'], vocab.char_to_id['园'], vocab.char_to_id['玩'],
                vocab.char_to_id['吧'], vocab.char_to_id['！']
            ]

            # Cycle through continuation_sequence so repeated multinomial
            # calls never exhaust the sequence (avoids StopIteration).
            def multinomial_side_effect(probs, num_samples=1):
                token_id = continuation_sequence[multinomial_side_effect.call_count % len(continuation_sequence)]
                multinomial_side_effect.call_count += 1
                return torch.tensor([[token_id]])

            multinomial_side_effect.call_count = 0
            mock_multinomial.side_effect = multinomial_side_effect

            # Generate text, capped well below the 200-token model limit
            max_length = min(len(input_text) + 10, 200)  # keep the test small
            result_tokens = generator.generate(input_tokens, max_length=max_length)

            # Verify output shape and that something was generated
            assert result_tokens.shape[0] == 1  # batch_size=1
            assert result_tokens.shape[1] <= max_length  # within the length cap
            assert result_tokens.shape[1] > len(input_text)  # continuation exists

            # Decode the result
            result_text = vocab.decode(result_tokens[0])
            print(f"\nresult_tokens: {result_tokens}")

            # Verify the input text is preserved as the prefix
            assert result_text.startswith(input_text)

            # Verify a continuation was produced
            assert len(result_text) > len(input_text)

            print(f"输入: {input_text}")
            print(f"输出: {result_text}")
            print(f"生成长度: {len(result_text)} 字符")

    def test_chinese_text_generation_edge_cases(self):
        """Edge cases for Chinese text generation."""
        vocab = SimpleVocab()
        generator = TextGenerator(
            vocab_size=vocab.vocab_size,
            embedding_dim=128,
            nhead=4,
            num_layers=2,
            max_length=200
        )

        # Case 1: empty input
        empty_input = torch.tensor([[]], dtype=torch.long)

        with patch('models.text_generator.torch.multinomial') as mock_multinomial, \
             patch('models.text_generator.F.softmax') as mock_softmax:

            mock_softmax.return_value = torch.ones(1, 1, vocab.vocab_size) / vocab.vocab_size

            # side_effect that never exhausts
            def multinomial_side_effect(probs, num_samples=1):
                return torch.tensor([[0]])  # always return token 0

            mock_multinomial.side_effect = multinomial_side_effect

            # Mock the model
            def mock_model_forward(x):
                batch_size, seq_len, _ = x.shape
                return torch.zeros(batch_size, seq_len, vocab.vocab_size)

            generator.model.forward = mock_model_forward

            result = generator.generate(empty_input, max_length=10)
            print(f"\n空输入结果: {result}")
            assert result.shape == (1, 10)

        # Case 2: long input sequence
        long_input = "今天天气很好，我们一起去公园玩吧！这是一个测试文本生成器的例子。"
        long_tokens = vocab.encode(long_input).unsqueeze(0)

        # When the input length is close to the maximum length.
        # BUG FIX: the assertion below previously checked `result` (the
        # tensor from Case 1) instead of `result_tokens`.
        result_tokens = generator.generate(long_tokens, max_length=len(long_input) + 5)
        print(f"长输入结果: {result_tokens}")
        assert result_tokens.shape[1] <= len(long_input) + 5

        # Decode the result
        result_text = vocab.decode(result_tokens[0])

        # Prefix check intentionally disabled: sampling here is unmocked.
        # assert result_text.startswith(long_input)

        # Verify a continuation was produced
        assert len(result_text) > len(long_input)

        print(f"输入: {long_input}")
        print(f"输出: {result_text}")
        print(f"生成长度: {len(result_text)} 字符")

    def test_chinese_text_generation_with_temperature(self):
        """Chinese text generation with different temperature values."""
        vocab = SimpleVocab()
        generator = TextGenerator(
            vocab_size=vocab.vocab_size,
            embedding_dim=128,
            nhead=4,
            num_layers=2
        )

        input_text = "今天"
        input_tokens = vocab.encode(input_text).unsqueeze(0)

        # Try several temperature values
        temperatures = [0.5, 1.0, 2.0]

        for temperature in temperatures:
            with patch('models.text_generator.torch.multinomial') as mock_multinomial, \
                 patch('models.text_generator.F.softmax') as mock_softmax:

                # Record whether temperature scaling appears to be applied
                temperature_applied = []

                def softmax_side_effect(logits, dim=None):
                    # Small logits suggest they were divided by the temperature
                    if torch.max(logits) < 10:
                        temperature_applied.append(True)
                    return torch.softmax(logits, dim=-1)

                mock_softmax.side_effect = softmax_side_effect

                # side_effect that never exhausts
                def multinomial_side_effect(probs, num_samples=1):
                    return torch.tensor([[vocab.char_to_id['天']]])  # always '天'

                mock_multinomial.side_effect = multinomial_side_effect

                # Mock the model
                def mock_model_forward(x):
                    batch_size, seq_len, _ = x.shape
                    logits = torch.ones(batch_size, seq_len, vocab.vocab_size) * 5.0
                    return logits

                generator.model.forward = mock_model_forward

                result = generator.generate(input_tokens, max_length=5, temperature=temperature)

                # Verify text was generated
                assert result.shape == (1, 5)
                result_text = vocab.decode(result[0])
                assert len(result_text) >= len(input_text)

    def test_max_length_constraint(self):
        """Test the maximum-length constraint (no more than 200 characters)."""
        vocab = SimpleVocab()
        generator = TextGenerator(
            vocab_size=vocab.vocab_size,
            embedding_dim=128,
            nhead=4,
            num_layers=2,
            max_length=200
        )

        input_text = "测试"
        input_tokens = vocab.encode(input_text).unsqueeze(0)

        # Try a range of maximum lengths
        test_max_lengths = [10, 50, 100, 200]

        print(f"\n测试的最大长度: {test_max_lengths}")
        for max_len in test_max_lengths:
            with patch('models.text_generator.torch.multinomial') as mock_multinomial, \
                 patch('models.text_generator.F.softmax') as mock_softmax:

                mock_softmax.return_value = torch.ones(1, 1, vocab.vocab_size) / vocab.vocab_size

                # side_effect that never exhausts
                def multinomial_side_effect(probs, num_samples=1):
                    return torch.tensor([[vocab.char_to_id['文']]])  # always '文'

                mock_multinomial.side_effect = multinomial_side_effect

                # Mock the model with constant logits
                def mock_model_forward(x):
                    return torch.ones(x.shape[0], x.shape[1], vocab.vocab_size)

                generator.model.forward = mock_model_forward

                result = generator.generate(input_tokens, max_length=max_len)

                # Verify the output does not exceed the maximum length
                assert result.shape[1] <= max_len

                # Decode and verify the character count
                result_text = vocab.decode(result[0])
                assert len(result_text) <= max_len

                # BUG FIX: previously printed a hard-coded bogus value
                # ("51,970") instead of the actual max_len.
                print(f"最大长度: {max_len}, 实际长度: {len(result_text)}")

if __name__ == "__main__":
    pytest.main([__file__])