#!/usr/bin/env python3
"""Smoke test for the YOLOv9 + StatA integration.

Loads a DetectionModel, verifies the StatA attributes exist, runs
feature extraction on a dummy 640x640 input, checks (and if necessary
rebuilds) the CLIP projector so its per-scale input channels match the
extracted features, and finally runs the CLIP projection itself.
"""
import torch
from models.yolo import DetectionModel

SEPARATOR = "=" * 60


def _check_stata_attributes(model):
    """Raise RuntimeError if any required StatA attribute is missing.

    Uses an explicit exception rather than ``assert`` so the check
    survives ``python -O`` (asserts are stripped under optimization).
    """
    for attr in ("enable_stata", "clip_projector", "clip_prototypes"):
        if not hasattr(model, attr):
            raise RuntimeError(f"Missing {attr}")


def _projector_in_channels(projector):
    """Return the input-channel size of each per-scale sub-projector.

    Walks ``projector.projectors`` and reads ``in_channels`` from the
    first submodule of each entry. Entries without the expected layout
    are skipped, mirroring the original best-effort inspection.
    """
    current_config = []
    for proj in projector.projectors:
        if hasattr(proj, "_modules"):
            first_layer = next(iter(proj._modules.values()))
            if hasattr(first_layer, "in_channels"):
                current_config.append(first_layer.in_channels)
    return current_config


def main():
    """Run all six integration checks; raises on the first failure."""
    print("Testing YOLOv9 + StatA integration...")
    print(SEPARATOR)

    # Test 1: construct the model from the yolov9-c config.
    print("\n[Test 1] Loading model...")
    model = DetectionModel('models/detect/yolov9-c.yaml', ch=3, nc=80)
    print("✓ Model loaded")

    # Test 2: the StatA attributes must be present on the model.
    print("\n[Test 2] Checking StatA attributes...")
    _check_stata_attributes(model)
    print("✓ Attributes present")

    # Test 3: bind class names and CLIP weights.
    print("\n[Test 3] Setting up StatA...")
    test_classes = ['person', 'car', 'dog']
    model.setup_stata(test_classes, 'ViT-B-16.pt')
    print("✓ StatA setup complete")

    # Test 4: multi-scale feature extraction on a dummy image.
    print("\n[Test 4] Testing feature extraction...")
    dummy_input = torch.randn(1, 3, 640, 640)
    features = model.extract_features_for_stata(dummy_input)
    print(f"✓ Extracted {len(features)} scales")

    # Record the channel dimension (dim 1) of every scale; these must
    # match what the CLIP projector was built for.
    actual_channels = []
    for i, feat in enumerate(features):
        print(f"   P{i+3}: {feat.shape}")
        actual_channels.append(feat.shape[1])

    print(f"\n   Actual channel dimensions: {actual_channels}")

    # Test 5: compare the projector's configured input channels against
    # the features actually produced; rebuild on mismatch.
    print("\n[Test 5] Checking CLIP projector configuration...")
    print(f"   Expected channels: {actual_channels}")

    if hasattr(model.clip_projector, 'projectors'):
        current_config = _projector_in_channels(model.clip_projector)
        print(f"   Current projector input channels: {current_config}")

        if current_config != actual_channels:
            print(f"\n   ⚠ MISMATCH DETECTED!")
            print(f"   └─ Projector expects: {current_config}")
            print(f"   └─ Features provide: {actual_channels}")

            # Recreate the projectors with the observed dimensions so
            # Test 6 can run against the correct configuration.
            print("\n   Recreating projectors with correct dimensions...")
            from utils.clip_integration import YOLOCLIPProjector
            model.clip_projector = YOLOCLIPProjector(actual_channels, 512)
            print(f"   ✓ Projectors recreated for {actual_channels}")
        else:
            print(f"   ✓ Projector channels match!")

    # Test 6: run the projection and sanity-check L2 normalization.
    print("\n[Test 6] Testing CLIP projection...")
    try:
        clip_features = model.clip_projector(features)
        print(f"✓ CLIP features shape: {clip_features.shape}")

        # CLIP-style features are expected to be unit-normalized.
        norms = clip_features.norm(dim=-1)
        print(f"✓ Feature norms (should be ~1.0): {norms.mean():.4f} ± {norms.std():.4f}")

    except RuntimeError as e:
        print(f"✗ CLIP projection failed: {e}")
        print("\n   Debugging info:")
        print(f"   - Input features shapes: {[f.shape for f in features]}")
        print(f"   - Projector config: {actual_channels}")
        raise

    print("\n" + SEPARATOR)
    print("All tests passed! ✅")
    print(SEPARATOR)


if __name__ == "__main__":
    main()


# --- Sample output (pasted console log; commented out so the file stays valid Python) ---
# [Test 4] Testing feature extraction...
# ✓ Extracted 3 scales
#    P3: torch.Size([1, 1024, 80, 80])
#    P4: torch.Size([1, 768, 40, 40])
#    P5: torch.Size([1, 1024, 20, 20])
#
#    Actual channel dimensions: [1024, 768, 1024]