#!/usr/bin/env python3
"""
Transformer基础模型测试套件
验证Transformer核心组件的功能
"""
import torch
import pytest
import sys
import os

# Make the project root importable so the `models.model` package import below
# resolves regardless of the directory pytest is launched from.
# NOTE(review): the original inserted the `models` directory itself, which does
# not satisfy `from models.model import ...` (that needs the *parent* of the
# `models` package on sys.path) — confirm against the actual repo layout.
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))

from models.model import TransformerModel

class TestTransformerModel:
    """Unit tests for the TransformerModel's core behaviour.

    Covers construction, batch-first forward shapes, causal masking,
    gradient flow, parameter accounting and optional GPU execution.
    """

    @staticmethod
    def _make_model(input_dim=512, output_dim=256, nhead=8, num_layers=6, **kwargs):
        """Build a TransformerModel; defaults match the common test config."""
        return TransformerModel(
            input_dim=input_dim,
            output_dim=output_dim,
            nhead=nhead,
            num_layers=num_layers,
            **kwargs,
        )

    def test_model_initialization(self):
        """The model builds and exposes its expected submodules."""
        model = self._make_model()

        assert model is not None
        assert isinstance(model, TransformerModel)
        assert hasattr(model, 'transformer_encoder')
        assert hasattr(model, 'decoder')
        assert hasattr(model, 'encoder_layer')

    def test_forward_pass_batch_first(self):
        """Forward pass preserves (batch, seq, out_dim) layout and is finite."""
        torch.manual_seed(0)  # seeded inputs -> reproducible failures
        model = self._make_model()

        batch_size, seq_length, input_dim = 2, 10, 512
        x = torch.randn(batch_size, seq_length, input_dim)

        output = model(x)

        # batch_first layout must be preserved by the model.
        assert output.shape == (batch_size, seq_length, 256)
        assert not torch.isnan(output).any()
        assert not torch.isinf(output).any()

    def test_forward_pass_with_mask(self):
        """Forward pass accepts a causal attention mask and stays finite."""
        torch.manual_seed(0)
        model = self._make_model()

        batch_size, seq_length, input_dim = 2, 10, 512
        x = torch.randn(batch_size, seq_length, input_dim)

        src_mask = model.create_causal_mask(seq_length)
        output = model(x, src_mask=src_mask)

        assert output.shape == (batch_size, seq_length, 256)
        assert not torch.isnan(output).any()
        assert not torch.isinf(output).any()

    def test_create_causal_mask(self):
        """Causal mask is -inf strictly above the diagonal, 0 on/below it."""
        model = self._make_model()

        seq_length = 5
        mask = model.create_causal_mask(seq_length)

        assert mask.shape == (seq_length, seq_length)

        # Strict upper triangle carries -inf (masked future positions).
        expected_mask = torch.triu(
            torch.ones(seq_length, seq_length) * float('-inf'), diagonal=1
        )
        assert torch.allclose(mask, expected_mask)

        # Diagonal and below must be exactly 0 (unmasked positions);
        # torch.tril keeps exactly those entries, so they must all be 0.
        assert (torch.tril(mask) == 0).all()

    def test_different_configurations(self):
        """Model works across several (dims, heads, layers, dropout) configs."""
        torch.manual_seed(0)
        configs = [
            {'input_dim': 256, 'output_dim': 128, 'nhead': 4, 'num_layers': 3},
            {'input_dim': 1024, 'output_dim': 512, 'nhead': 16, 'num_layers': 12},
            {'input_dim': 64, 'output_dim': 32, 'nhead': 2, 'num_layers': 2, 'dropout': 0.2},
        ]

        for config in configs:
            model = TransformerModel(**config)

            batch_size, seq_length = 2, 5
            x = torch.randn(batch_size, seq_length, config['input_dim'])
            output = model(x)

            assert output.shape == (batch_size, seq_length, config['output_dim'])

    def test_gradient_flow(self):
        """Backward pass delivers finite gradients to the input tensor."""
        torch.manual_seed(0)
        model = self._make_model(input_dim=128, output_dim=64, nhead=4, num_layers=3)

        batch_size, seq_length = 2, 8
        x = torch.randn(batch_size, seq_length, 128, requires_grad=True)

        output = model(x)

        # Dummy regression loss just to drive backward().
        target = torch.randn_like(output)
        loss = torch.nn.functional.mse_loss(output, target)
        loss.backward()

        assert x.grad is not None
        assert x.grad.shape == x.shape
        assert not torch.isnan(x.grad).any()

    def test_model_parameters(self):
        """All parameters are trainable and the model is non-trivial."""
        model = self._make_model(input_dim=256, output_dim=128, nhead=8, num_layers=4)

        total_params = sum(p.numel() for p in model.parameters())
        trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)

        # Nothing is frozen by default, and the model actually has weights.
        assert total_params == trainable_params
        assert total_params > 0

    def test_model_on_gpu_if_available(self):
        """Model runs end-to-end on CUDA when a GPU is present."""
        if not torch.cuda.is_available():
            pytest.skip("CUDA not available")

        torch.manual_seed(0)
        model = self._make_model(input_dim=128, output_dim=64, nhead=4, num_layers=3).cuda()

        batch_size, seq_length = 2, 5
        x = torch.randn(batch_size, seq_length, 128).cuda()

        output = model(x)

        assert output.shape == (batch_size, seq_length, 64)
        assert output.device.type == 'cuda'

if __name__ == "__main__":
    pytest.main([__file__])