#!/usr/bin/env python3
"""
Test YOLOv9 model modifications for StatA integration
Verifies: model loading, StatA setup, feature extraction, CLIP projection
"""

import torch
import sys
from pathlib import Path

# Make repo-local packages (models/, utils/) importable regardless of the
# directory the script is launched from.
ROOT = Path(__file__).resolve().parent
if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT))

from models.yolo import DetectionModel


def _load_model(model_config):
    """[Test 1/6] Build the detection model from its YAML config.

    Returns the model in eval mode, or None if construction failed.
    """
    print("\n[Test 1/6] Loading model...")
    try:
        model = DetectionModel(model_config, ch=3, nc=80)
        model.eval()
        print("           ✓ Model loaded successfully")
        print(f"           Total parameters: {sum(p.numel() for p in model.parameters())/1e6:.1f}M")
        return model
    except Exception as e:
        print(f"           ✗ Model loading failed: {e}")
        return None


def _check_stata_attributes(model):
    """[Test 2/6] Verify the StatA hook attributes exist on the model."""
    print("\n[Test 2/6] Checking StatA attributes...")
    required_attrs = ['enable_stata', 'clip_projector', 'clip_prototypes']
    all_present = True

    for attr in required_attrs:
        if hasattr(model, attr):
            print(f"           ✓ {attr} exists")
        else:
            print(f"           ✗ {attr} missing")
            all_present = False

    if not all_present:
        print("\n           ERROR: Missing StatA attributes!")
        print("           Make sure you added them in __init__():")
        print("           self.enable_stata = False")
        print("           self.clip_projector = None")
        print("           self.clip_prototypes = None")
    return all_present


def _setup_stata(model, test_classes):
    """[Test 3/6] Initialize StatA (CLIP prototypes + projector) on the model."""
    print("\n[Test 3/6] Setting up StatA...")
    try:
        model.setup_stata(test_classes, 'ViT-B-16.pt')
        print("           ✓ StatA setup successful")
        print(f"           CLIP prototypes: {model.clip_prototypes.shape}")
        return True
    except Exception as e:
        print(f"           ✗ StatA setup failed: {e}")
        import traceback
        traceback.print_exc()
        return False


def _extract_features(model, batch_size, img_size):
    """[Test 4/6] Extract multi-scale features and validate spatial shapes.

    Returns (features, actual_channels) on success, or None on failure.
    """
    print("\n[Test 4/6] Testing feature extraction...")
    dummy_input = torch.randn(batch_size, 3, img_size, img_size)

    try:
        with torch.no_grad():
            features = model.extract_features_for_stata(dummy_input)

        print(f"           ✓ Extracted {len(features)} feature scales")

        if len(features) != 3:
            print(f"           ⚠ Expected 3 scales, got {len(features)}")
            print(f"           Run find_layer_indices.py to find correct indices")
            return None

        # Expected spatial sizes for YOLOv9-C at 640x640 (strides 8/16/32)
        expected_h = [80, 40, 20]
        expected_w = [80, 40, 20]

        all_correct = True
        actual_channels = []

        for i, feat in enumerate(features):
            scale_name = ['P3', 'P4', 'P5'][i]
            _, C, H, W = feat.shape
            actual_channels.append(C)

            if H == expected_h[i] and W == expected_w[i]:
                print(f"           ✓ {scale_name}: {feat.shape}")
            else:
                print(f"           ✗ {scale_name}: {feat.shape} (expected H={expected_h[i]}, W={expected_w[i]})")
                all_correct = False

        if not all_correct:
            print("\n           ERROR: Feature shapes incorrect!")
            print("           Run find_layer_indices.py to find correct layer indices")
            return None

        print(f"\n           Channel dimensions: {actual_channels}")
        return features, actual_channels

    except Exception as e:
        print(f"           ✗ Feature extraction failed: {e}")
        import traceback
        traceback.print_exc()
        return None


def _project_to_clip(model, features, actual_channels, batch_size):
    """[Test 5/6] Project YOLO features into CLIP space.

    Rebuilds the projector when its configured input channels disagree with
    the channels of the extracted features. Returns the projected features
    ([batch_size * 3, 512]), or None on failure.
    """
    print("\n[Test 5/6] Testing CLIP projection...")
    try:
        # Read each per-scale projector's expected input width from its first layer.
        projector_channels = []
        for proj in model.clip_projector.projectors:
            first_layer = next(iter(proj._modules.values()))
            projector_channels.append(first_layer.in_channels)

        if projector_channels != actual_channels:
            print(f"           ⚠ Channel mismatch!")
            print(f"           Projector expects: {projector_channels}")
            print(f"           Features provide: {actual_channels}")
            print(f"\n           Recreating projector with correct channels...")

            from utils.clip_integration import YOLOCLIPProjector
            model.clip_projector = YOLOCLIPProjector(actual_channels, 512)
            print(f"           ✓ Projector recreated")
        else:
            print(f"           ✓ Projector channels match: {projector_channels}")

        with torch.no_grad():
            clip_features = model.clip_projector(features)

        expected_shape = (batch_size * 3, 512)  # one 512-d vector per scale per image

        if tuple(clip_features.shape) != expected_shape:
            print(f"           ✗ CLIP features: {clip_features.shape} (expected {expected_shape})")
            return None
        print(f"           ✓ CLIP features: {clip_features.shape}")

        # CLIP cosine similarity assumes unit-norm features; warn (don't fail)
        # if the projector output is not normalized.
        norms = clip_features.norm(dim=-1)
        mean_norm = norms.mean().item()
        std_norm = norms.std().item()

        if 0.99 <= mean_norm <= 1.01:
            print(f"           ✓ Features normalized: {mean_norm:.4f} ± {std_norm:.4f}")
        else:
            print(f"           ⚠ Normalization: {mean_norm:.4f} ± {std_norm:.4f} (should be ~1.0)")

        return clip_features

    except Exception as e:
        print(f"           ✗ CLIP projection failed: {e}")
        import traceback
        traceback.print_exc()
        return None


def _check_stata_compatibility(model, clip_features):
    """[Test 6/6] Verify the prototypes work in StatA's similarity step."""
    print("\n[Test 6/6] Testing StatA compatibility...")
    try:
        # model.clip_prototypes is assumed [num_classes, 512] (matches the bmm
        # shapes below — TODO confirm against setup_stata). Transpose and add a
        # leading batch dim so it broadcasts over every feature vector:
        # [num_classes, 512] -> [1, 512, num_classes].
        # BUGFIX: the previous `.T.unsqueeze(-1)` produced [512, C, 1], whose
        # leading dim is non-singleton; Tensor.expand() only expands size-1
        # dims, so the expand below raised a RuntimeError.
        clip_prototypes = model.clip_prototypes.T.unsqueeze(0)

        # Test similarity computation (StatA's first step)
        with torch.no_grad():
            # Expand prototypes for batch (view only — no copy)
            prototypes_expanded = clip_prototypes.expand(clip_features.shape[0], -1, -1)

            # Compute similarity
            similarity = torch.bmm(
                clip_features.unsqueeze(1),  # [B*3, 1, 512]
                prototypes_expanded           # [B*3, 512, 3]
            ).squeeze(1)                      # [B*3, 3]

        print(f"           ✓ Similarity matrix: {similarity.shape}")
        print(f"           ✓ Similarity range: [{similarity.min():.3f}, {similarity.max():.3f}]")
        print(f"           ✓ StatA-compatible format verified")
        return True

    except Exception as e:
        print(f"           ✗ StatA compatibility test failed: {e}")
        import traceback
        traceback.print_exc()
        return False


def _print_summary(actual_channels, test_classes):
    """Print the all-tests-passed banner, configuration recap, and next steps."""
    print("\n" + "="*70)
    print("✅ ALL TESTS PASSED!")
    print("="*70)
    print("\nYour YOLOv9 model is ready for StatA!")
    print("\nConfiguration:")
    print("  Model:           YOLOv9-C")
    print("  Feature scales:  3 (P3, P4, P5)")
    print(f"  Channels:        {actual_channels}")
    print("  CLIP dimension:  512")
    print(f"  Test classes:    {len(test_classes)}")
    print("\nNext steps:")
    print("  1. Run val_stata.py for full evaluation")
    print("  2. Use --load-cache for faster subsequent runs")
    print("="*70 + "\n")


def test_model_modifications():
    """Complete test of all model modifications.

    Runs six sequential checks (model load, StatA attributes, StatA setup,
    feature extraction, CLIP projection, StatA similarity format) and stops
    at the first failure.

    Returns:
        bool: True when every check passes, False otherwise.
    """
    print("\n" + "="*70)
    print("Testing YOLOv9 Model Modifications for StatA")
    print("="*70)

    # Test configuration
    model_config = 'models/detect/yolov9-c.yaml'
    test_classes = ['person', 'car', 'dog']
    batch_size = 2
    img_size = 640

    model = _load_model(model_config)
    if model is None:
        return False

    if not _check_stata_attributes(model):
        return False

    if not _setup_stata(model, test_classes):
        return False

    extracted = _extract_features(model, batch_size, img_size)
    if extracted is None:
        return False
    features, actual_channels = extracted

    clip_features = _project_to_clip(model, features, actual_channels, batch_size)
    if clip_features is None:
        return False

    if not _check_stata_compatibility(model, clip_features):
        return False

    _print_summary(actual_channels, test_classes)
    return True


def main():
    """Entry point: run the test suite and map its outcome to an exit code."""
    try:
        if test_model_modifications():
            print("✅ Model modifications verified and working!\n")
            sys.exit(0)
        # At least one check reported failure.
        print("\n❌ Some tests failed. Please fix the issues and run again.\n")
        sys.exit(1)

    except KeyboardInterrupt:
        print("\n\nTests cancelled by user")
        sys.exit(1)

    except Exception as err:
        print("\n❌ Unexpected error:")
        print(f"   {type(err).__name__}: {err}")
        import traceback
        traceback.print_exc()
        sys.exit(1)

# Script entry point — main() sets the process exit code via sys.exit.
if __name__ == '__main__':
    main()