#!/usr/bin/env python3
"""
Transformer集成测试套件
验证多个Transformer组件的协同工作
"""
import torch
import pytest
import sys
import os

# Make the project root importable so the `models.*` absolute imports
# below resolve regardless of where pytest is launched from.
# NOTE: inserting `../models` (as before) would only support bare imports
# like `import model`; `from models.x import ...` needs the parent of
# `models/` on sys.path.
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))

from models.model import TransformerModel
from models.text_generator import TextGenerator
from models.audio_generator import AudioGenerator
from models.image_generator import ImageGenerator
from models.video_generator import VideoGenerator

class TestIntegration:
    """Integration tests for the generator family.

    Verifies that the text, audio, image and video generators can be
    constructed, share the same TransformerModel backbone, and accept
    inputs / produce outputs of the expected shapes.
    """

    # Hyper-parameters shared by every generator constructed in this suite.
    EMBEDDING_DIM = 512
    NHEAD = 8
    NUM_LAYERS = 6

    def _shared_kwargs(self):
        """Constructor keyword arguments common to all generators."""
        return {
            'embedding_dim': self.EMBEDDING_DIM,
            'nhead': self.NHEAD,
            'num_layers': self.NUM_LAYERS,
        }

    def _make_text_gen(self):
        """Build the text generator configuration used throughout the suite."""
        return TextGenerator(vocab_size=10000, **self._shared_kwargs())

    def _make_audio_gen(self):
        """Build the audio generator configuration used throughout the suite."""
        return AudioGenerator(sample_rate=22050, **self._shared_kwargs())

    def _make_image_gen(self):
        """Build the image generator configuration used throughout the suite."""
        return ImageGenerator(image_size=(64, 64), **self._shared_kwargs())

    def _make_video_gen(self):
        """Build the video generator configuration used throughout the suite."""
        return VideoGenerator(frame_size=(32, 32), num_frames=4, **self._shared_kwargs())

    def test_all_generators_importable(self):
        """Test that all generators can be imported and initialized"""
        assert self._make_text_gen() is not None
        assert self._make_audio_gen() is not None
        assert self._make_image_gen() is not None
        assert self._make_video_gen() is not None

    def test_shared_model_architecture(self):
        """Test that all generators use the same base model architecture"""
        generators = [
            self._make_text_gen(),
            self._make_audio_gen(),
            self._make_image_gen(),
            self._make_video_gen(),
        ]

        for generator in generators:
            model = generator.model
            assert isinstance(model, TransformerModel)
            assert hasattr(model, 'transformer_encoder')
            assert hasattr(model, 'decoder')

    def test_generator_specific_functionality(self):
        """Test specific functionality for each generator type"""
        # Text: token ids embed to (batch_size, seq_length, embedding_dim).
        text_gen = self._make_text_gen()
        tokens = torch.randint(0, 10000, (2, 10))  # (batch_size, seq_length)
        embeddings = text_gen.token_embedding(tokens)
        assert embeddings.shape == (2, 10, self.EMBEDDING_DIM)

        # Image: an image tensor is split into a 3-D batch of flat patches.
        image_gen = self._make_image_gen()
        dummy_image = torch.randn(2, 3, 64, 64)  # (batch_size, channels, height, width)
        patches = image_gen._image_to_patches(dummy_image)
        assert patches.dim() == 3  # (batch_size, num_patches, patch_dim)

        # Video: the patching round-trip helpers exist.
        video_gen = self._make_video_gen()
        assert hasattr(video_gen, '_video_to_patches')
        assert hasattr(video_gen, '_patches_to_video')

    def test_forward_pass_consistency(self):
        """Test that all generators can perform forward passes with correct input formats"""
        # Text: embeddings in, per-token vocabulary logits out.
        text_gen = self._make_text_gen()
        text_input = torch.randint(0, 10000, (2, 10))  # (batch_size, seq_length)
        text_input = text_gen.token_embedding(text_input)  # (batch_size, seq_length, embedding_dim)
        text_output = text_gen.model(text_input)  # (batch_size, seq_length, vocab_size) logits
        assert text_output.shape == (2, 10, 10000)

        # Image: patch vectors in, reconstructed patch vectors out.
        image_gen = self._make_image_gen()
        num_patches = (64 // 8) * (64 // 8)  # (image_size / patch_size) per axis
        patch_dim = 3 * 8 * 8  # channels * patch_size * patch_size
        image_input = torch.randn(2, num_patches, patch_dim)  # (batch_size, num_patches, patch_dim)
        image_input = image_gen.patch_embedding(image_input)  # (batch_size, num_patches, embedding_dim)
        image_output = image_gen.model(image_input)
        assert image_output.shape == (2, num_patches, patch_dim)

        # Video: same contract as image, with patches drawn from every frame.
        video_gen = self._make_video_gen()
        patches_per_frame = (32 // 8) * (32 // 8)  # (frame_size / patch_size) per axis
        total_patches = 4 * patches_per_frame  # num_frames * patches_per_frame
        patch_dim = 3 * 8 * 8  # channels * patch_size * patch_size
        video_input = torch.randn(2, total_patches, patch_dim)
        video_input = video_gen.patch_embedding(video_input)  # (batch_size, total_patches, embedding_dim)
        video_output = video_gen.model(video_input)
        assert video_output.shape == (2, total_patches, patch_dim)

    def test_generator_methods_exist(self):
        """Test that all generators have expected methods"""
        assert hasattr(self._make_text_gen(), 'generate')

        assert hasattr(self._make_image_gen(), 'generate')

        video_gen = self._make_video_gen()
        assert hasattr(video_gen, 'generate')
        assert hasattr(video_gen, 'generate_continuation')

    def test_model_configuration_consistency(self):
        """Test that all generators use consistent model configurations"""
        # NOTE(review): the audio generator is deliberately absent here, as in
        # the original suite -- confirm whether that omission is intentional.
        generators = [
            self._make_text_gen(),
            self._make_image_gen(),
            self._make_video_gen(),
        ]

        for generator in generators:
            model = generator.model
            # Check that all models have the same basic architecture
            assert hasattr(model, 'encoder_layer')
            assert hasattr(model, 'transformer_encoder')
            assert hasattr(model, 'decoder')
            assert hasattr(model, 'create_causal_mask')

if __name__ == "__main__":
    pytest.main([__file__])