Perfect! 🎉 CLIP is working now! Let's continue with the integration.

---

# STEP 3: Modify YOLOv9 Model (Find Layer Indices)

First, let's find the correct layer indices for P3, P4, P5 in your YOLOv9 model:

```bash
cd ~/Dara/yolov9-main

# Create script to find layer indices
cat > find_layers.py << 'EOF'
"""Probe a YOLOv9 model with one dummy forward pass to locate the P3/P4/P5
feature maps (80x80 / 40x40 / 20x20 at a 640x640 input) and print the layer
indices and channel counts needed for the StatA integration."""
import torch
from models.yolo import DetectionModel

print("Finding P3, P4, P5 layer indices for YOLOv9...")
print("="*70)

try:
    # Random weights are fine here — we only care about output shapes.
    model = DetectionModel('models/yolov9-c.yaml')
    dummy_input = torch.randn(1, 3, 640, 640)

    y = []  # saved per-layer outputs for skip connections (mirrors the model's own forward)
    candidates = {'P3': [], 'P4': [], 'P5': []}  # scale -> [(layer index, channels)]

    for m in model.model:
        # Layers with m.f != -1 consume earlier outputs; re-route inputs
        # exactly as YOLOv9's forward pass does.
        if m.f != -1:
            dummy_input = y[m.f] if isinstance(m.f, int) else \
                [dummy_input if j == -1 else y[j] for j in m.f]
        dummy_input = m(dummy_input)
        y.append(dummy_input if m.i in model.save else None)

        # Classify 4-D outputs by spatial size: strides 8/16/32 at a 640
        # input yield 80x80 (P3), 40x40 (P4) and 20x20 (P5) maps.
        if isinstance(dummy_input, torch.Tensor) and len(dummy_input.shape) == 4:
            H, W = dummy_input.shape[2:]
            C = dummy_input.shape[1]

            if H == 80 and W == 80:
                candidates['P3'].append((m.i, C))
                print(f"Layer {m.i:3d}: shape={dummy_input.shape} | P3 CANDIDATE (80×80)")
            elif H == 40 and W == 40:
                candidates['P4'].append((m.i, C))
                print(f"Layer {m.i:3d}: shape={dummy_input.shape} | P4 CANDIDATE (40×40)")
            elif H == 20 and W == 20:
                candidates['P5'].append((m.i, C))
                print(f"Layer {m.i:3d}: shape={dummy_input.shape} | P5 CANDIDATE (20×20)")

    print("="*70)
    print("\n📋 SUMMARY:")
    print("-"*70)

    # Heuristic: report the last candidate seen at each scale.
    if candidates['P3']:
        idx, ch = candidates['P3'][-1]  # Take last one
        print(f"P3: Layer {idx:3d} with {ch:4d} channels")

    if candidates['P4']:
        idx, ch = candidates['P4'][-1]
        print(f"P4: Layer {idx:3d} with {ch:4d} channels")

    if candidates['P5']:
        idx, ch = candidates['P5'][-1]
        print(f"P5: Layer {idx:3d} with {ch:4d} channels")

    print("-"*70)

    # Generate copy-pastable integration snippets for models/yolo.py.
    print("\n📝 UPDATE models/yolo.py with these indices:")
    print("-"*70)
    if candidates['P3'] and candidates['P4'] and candidates['P5']:
        p3_idx = candidates['P3'][-1][0]
        p4_idx = candidates['P4'][-1][0]
        p5_idx = candidates['P5'][-1][0]

        p3_ch = candidates['P3'][-1][1]
        p4_ch = candidates['P4'][-1][1]
        p5_ch = candidates['P5'][-1][1]

        print(f"""
# In extract_features_for_stata():
if m.i == {p3_idx}:  # P3
    features.append(x)
elif m.i == {p4_idx}:  # P4
    features.append(x)
elif m.i == {p5_idx}:  # P5
    features.append(x)

# In setup_stata():
yolo_channels = [{p3_ch}, {p4_ch}, {p5_ch}]  # P3, P4, P5
""")
    else:
        print("⚠️  Could not find all scales. Check your model configuration.")

    print("-"*70)

except Exception as e:
    # Broad catch is deliberate: this is a one-shot diagnostic script, so
    # print guidance instead of a bare traceback.
    print(f"\n✗ Error: {e}")
    print("\nMake sure:")
    print("  1. models/yolov9-c.yaml exists")
    print("  2. YOLOv9 model is properly installed")
EOF

python find_layers.py
```

**Copy the output** — you'll need those layer indices and channel counts in Step 4!


(yolov9) wrf@wrf:~/Dara/yolov9-main$ python find_layers.py
Finding P3, P4, P5 layer indices for YOLOv9...
======================================================================

                 from  n    params  module                                  arguments                     
  0                -1  1         0  models.common.Silence                   []                            
  1                -1  1      1856  models.common.Conv                      [3, 64, 3, 2]                 
  2                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               
  3                -1  1    212864  models.common.RepNCSPELAN4              [128, 256, 128, 64, 1]        
  4                -1  1    164352  models.common.ADown                     [256, 256]                    
  5                -1  1    847616  models.common.RepNCSPELAN4              [256, 512, 256, 128, 1]       
  6                -1  1    656384  models.common.ADown                     [512, 512]                    
  7                -1  1   2857472  models.common.RepNCSPELAN4              [512, 512, 512, 256, 1]       
  8                -1  1    656384  models.common.ADown                     [512, 512]                    
  9                -1  1   2857472  models.common.RepNCSPELAN4              [512, 512, 512, 256, 1]       
 10                -1  1    656896  models.common.SPPELAN                   [512, 512, 256]               
 11                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          
 12           [-1, 7]  1         0  models.common.Concat                    [1]                           
 13                -1  1   3119616  models.common.RepNCSPELAN4              [1024, 512, 512, 256, 1]      
 14                -1  1         0  torch.nn.modules.upsampling.Upsample    [None, 2, 'nearest']          
 15           [-1, 5]  1         0  models.common.Concat                    [1]                           
 16                -1  1    912640  models.common.RepNCSPELAN4              [1024, 256, 256, 128, 1]      
 17                -1  1    164352  models.common.ADown                     [256, 256]                    
 18          [-1, 13]  1         0  models.common.Concat                    [1]                           
 19                -1  1   2988544  models.common.RepNCSPELAN4              [768, 512, 512, 256, 1]       
 20                -1  1    656384  models.common.ADown                     [512, 512]                    
 21          [-1, 10]  1         0  models.common.Concat                    [1]                           
 22                -1  1   3119616  models.common.RepNCSPELAN4              [1024, 512, 512, 256, 1]      
 23                 5  1    131328  models.common.CBLinear                  [512, [256]]                  
 24                 7  1    393984  models.common.CBLinear                  [512, [256, 512]]             
 25                 9  1    656640  models.common.CBLinear                  [512, [256, 512, 512]]        
 26                 0  1      1856  models.common.Conv                      [3, 64, 3, 2]                 
 27                -1  1     73984  models.common.Conv                      [64, 128, 3, 2]               
 28                -1  1    212864  models.common.RepNCSPELAN4              [128, 256, 128, 64, 1]        
 29                -1  1    164352  models.common.ADown                     [256, 256]                    
 30  [23, 24, 25, -1]  1         0  models.common.CBFuse                    [[0, 0, 0]]                   
 31                -1  1    847616  models.common.RepNCSPELAN4              [256, 512, 256, 128, 1]       
 32                -1  1    656384  models.common.ADown                     [512, 512]                    
 33      [24, 25, -1]  1         0  models.common.CBFuse                    [[1, 1]]                      
 34                -1  1   2857472  models.common.RepNCSPELAN4              [512, 512, 512, 256, 1]       
 35                -1  1    656384  models.common.ADown                     [512, 512]                    
 36          [25, -1]  1         0  models.common.CBFuse                    [[2]]                         
 37                -1  1   2857472  models.common.RepNCSPELAN4              [512, 512, 512, 256, 1]       
 38[31, 34, 37, 16, 19, 22]  1  21725312  models.yolo.DualDDetect                 [80, [512, 512, 512, 256, 512, 512]]
yolov9-c summary: 962 layers, 51182080 parameters, 51182048 gradients, 239.9 GFLOPs

Layer   4: shape=torch.Size([1, 256, 80, 80]) | P3 CANDIDATE (80×80)
Layer   5: shape=torch.Size([1, 512, 80, 80]) | P3 CANDIDATE (80×80)
Layer   6: shape=torch.Size([1, 512, 40, 40]) | P4 CANDIDATE (40×40)
Layer   7: shape=torch.Size([1, 512, 40, 40]) | P4 CANDIDATE (40×40)
Layer   8: shape=torch.Size([1, 512, 20, 20]) | P5 CANDIDATE (20×20)
Layer   9: shape=torch.Size([1, 512, 20, 20]) | P5 CANDIDATE (20×20)
Layer  10: shape=torch.Size([1, 512, 20, 20]) | P5 CANDIDATE (20×20)
Layer  11: shape=torch.Size([1, 512, 40, 40]) | P4 CANDIDATE (40×40)
Layer  12: shape=torch.Size([1, 1024, 40, 40]) | P4 CANDIDATE (40×40)
Layer  13: shape=torch.Size([1, 512, 40, 40]) | P4 CANDIDATE (40×40)
Layer  14: shape=torch.Size([1, 512, 80, 80]) | P3 CANDIDATE (80×80)
Layer  15: shape=torch.Size([1, 1024, 80, 80]) | P3 CANDIDATE (80×80)
Layer  16: shape=torch.Size([1, 256, 80, 80]) | P3 CANDIDATE (80×80)
Layer  17: shape=torch.Size([1, 256, 40, 40]) | P4 CANDIDATE (40×40)
Layer  18: shape=torch.Size([1, 768, 40, 40]) | P4 CANDIDATE (40×40)
Layer  19: shape=torch.Size([1, 512, 40, 40]) | P4 CANDIDATE (40×40)
Layer  20: shape=torch.Size([1, 512, 20, 20]) | P5 CANDIDATE (20×20)
Layer  21: shape=torch.Size([1, 1024, 20, 20]) | P5 CANDIDATE (20×20)
Layer  22: shape=torch.Size([1, 512, 20, 20]) | P5 CANDIDATE (20×20)
Layer  29: shape=torch.Size([1, 256, 80, 80]) | P3 CANDIDATE (80×80)
Layer  30: shape=torch.Size([1, 256, 80, 80]) | P3 CANDIDATE (80×80)
Layer  31: shape=torch.Size([1, 512, 80, 80]) | P3 CANDIDATE (80×80)
Layer  32: shape=torch.Size([1, 512, 40, 40]) | P4 CANDIDATE (40×40)
Layer  33: shape=torch.Size([1, 512, 40, 40]) | P4 CANDIDATE (40×40)
Layer  34: shape=torch.Size([1, 512, 40, 40]) | P4 CANDIDATE (40×40)
Layer  35: shape=torch.Size([1, 512, 20, 20]) | P5 CANDIDATE (20×20)
Layer  36: shape=torch.Size([1, 512, 20, 20]) | P5 CANDIDATE (20×20)
Layer  37: shape=torch.Size([1, 512, 20, 20]) | P5 CANDIDATE (20×20)
======================================================================

📋 SUMMARY:
----------------------------------------------------------------------
P3: Layer  31 with  512 channels
P4: Layer  34 with  512 channels
P5: Layer  37 with  512 channels
----------------------------------------------------------------------

📝 UPDATE models/yolo.py with these indices:
----------------------------------------------------------------------

# In extract_features_for_stata():
if m.i == 31:  # P3
    features.append(x)
elif m.i == 34:  # P4
    features.append(x)
elif m.i == 37:  # P5
    features.append(x)

# In setup_stata():
yolo_channels = [512, 512, 512]  # P3, P4, P5
For this yolov9-c run, the summary above gives **P3 = layer 31, P4 = layer 34, P5 = layer 37**, each with **512 channels** — use exactly these values when editing the files in Step 4.

---

# STEP 4: Update models/yolo.py

Now let's modify the YOLOv9 model file:

```bash
cd ~/Dara/yolov9-main

# Backup original file
cp models/yolo.py models/yolo.py.backup

# Open for editing
nano models/yolo.py
```

## 4.1: Find the DetectionModel class

Search for: `class DetectionModel(BaseModel):`

## 4.2: Add attributes to `__init__`

Scroll to the **END** of the `__init__` method and add:

```python
        # ... existing YOLOv9 code ...
        
        # ============= ADD THESE 3 LINES AT THE END =============
        self.enable_stata = False
        self.clip_projector = None
        self.clip_prototypes = None
        # ========================================================
```

## 4.3: Add new methods

After the `__init__` method, add these two new methods:

```python
    def setup_stata(self, class_names, clip_model_name='ViT-B/16', yolo_channels=None):
        """Initialize StatA for test-time adaptation.

        Builds the YOLO→CLIP feature projector (once) and encodes the class
        names into CLIP text prototypes.

        Args:
            class_names: list of class-name strings to encode as prototypes.
            clip_model_name: CLIP backbone identifier, e.g. 'ViT-B/16'.
            yolo_channels: channel counts of the P3/P4/P5 feature maps fed to
                the projector. Defaults to [256, 512, 1024]; for yolov9-c,
                find_layers.py reports [512, 512, 512] — pass that here
                instead of editing this file.
        """
        from utils.clip_integration import YOLOCLIPProjector, CLIPTextEncoder

        print("\n" + "="*60)
        print("Setting up StatA")
        print("="*60)

        self.enable_stata = True
        device = next(self.parameters()).device

        if self.clip_projector is None:
            if yolo_channels is None:
                # Backward-compatible default; override via the parameter
                # with the values printed by find_layers.py.
                yolo_channels = [256, 512, 1024]  # P3, P4, P5 channels
            self.clip_projector = YOLOCLIPProjector(yolo_channels, 512).to(device)

        clip_encoder = CLIPTextEncoder(clip_model_name, device)
        self.clip_prototypes = clip_encoder.encode_classes(class_names)

        print(f"[StatA] ✓ Ready with {len(class_names)} classes")
        print("="*60 + "\n")
    
    def extract_features_for_stata(self, x, feature_layers=(15, 18, 21)):
        """Run a forward pass and collect the multi-scale (P3, P4, P5) features.

        Args:
            x: input image batch tensor.
            feature_layers: layer indices whose outputs are collected, in
                (P3, P4, P5) order. Defaults to (15, 18, 21); for yolov9-c,
                find_layers.py reports (31, 34, 37) — pass those here instead
                of editing this file.

        Returns:
            List of collected feature tensors (expected length 3; a warning
            is printed otherwise).
        """
        features = []
        y = []  # saved intermediate outputs for skip connections

        for m in self.model:
            # Layers with m.f != -1 consume earlier outputs; re-route inputs
            # the same way the model's own forward pass does.
            if m.f != -1:
                x = y[m.f] if isinstance(m.f, int) else \
                    [x if j == -1 else y[j] for j in m.f]
            x = m(x)
            y.append(x if m.i in self.save else None)

            # Collect the requested scales as they are produced; since the
            # layer indices are ascending, order is P3, P4, P5.
            if m.i in feature_layers:
                features.append(x)

        if len(features) != 3:
            print(f"[WARNING] Expected 3 scales, got {len(features)}")
            print(f"[INFO] Update indices in extract_features_for_stata()")

        return features
```

Save the file (Ctrl+O, Enter, Ctrl+X).

---

# STEP 5: Test Model Modifications

```bash
cd ~/Dara/yolov9-main

# Create test script
cat > test_model_stata.py << 'EOF'
"""Smoke-test the YOLOv9 + StatA integration end to end: model construction,
StatA attributes, StatA setup, multi-scale feature extraction, and the CLIP
projection head."""
import torch
from models.yolo import DetectionModel

SEP = "=" * 60

print("Testing YOLOv9 + StatA integration...")
print(SEP)

# Test 1: Create model
print("\n[Test 1] Loading model...")
model = DetectionModel('models/yolov9-c.yaml', ch=3, nc=80)
print("✓ Model loaded")

# Test 2: Check StatA attributes
print("\n[Test 2] Checking StatA attributes...")
for attr in ('enable_stata', 'clip_projector', 'clip_prototypes'):
    assert hasattr(model, attr), f"Missing {attr}"
print("✓ Attributes present")

# Test 3: Setup StatA
print("\n[Test 3] Setting up StatA...")
model.setup_stata(['person', 'car', 'dog'], 'ViT-B/16')
print("✓ StatA setup complete")

# Test 4: Feature extraction
print("\n[Test 4] Testing feature extraction...")
batch = torch.randn(1, 3, 640, 640)
feats = model.extract_features_for_stata(batch)
print(f"✓ Extracted {len(feats)} scales")

for scale, feat in enumerate(feats, start=3):
    print(f"   P{scale}: {feat.shape}")

# Test 5: CLIP projection
print("\n[Test 5] Testing CLIP projection...")
embeddings = model.clip_projector(feats)
print(f"✓ CLIP features: {embeddings.shape}")
print(f"✓ Normalized: {embeddings.norm(dim=-1).mean():.3f}")

print("\n" + SEP)
print("All tests passed! ✅")
print(SEP)

python test_model_stata.py
```

---

## If Tests Fail

### Error: "Expected 3 scales, got X"

Update the indices in `extract_features_for_stata()` based on `find_layers.py` output.

### Error: "Channel mismatch"

Update `yolo_channels` in `setup_stata()` based on `find_layers.py` output.

---

# STEP 6: Create val_stata.py

Now create the main validation script:

```bash
cd ~/Dara/yolov9-main
nano val_stata.py
```

Paste this complete script:

```python
"""
YOLOv9 + StatA Validation Script
"""

import argparse
import os
import sys
import yaml
from pathlib import Path
from tqdm import tqdm
import torch
import torch.nn.functional as F

FILE = Path(__file__).resolve()
ROOT = FILE.parents[0]
if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT))

from models.yolo import DetectionModel
from utils.datasets import create_dataloader
from utils.general import check_dataset
from utils.torch_utils import select_device

from stata import StatA_solver, BatchSampler, OnlineSampler, cls_acc


def get_arguments(argv=None):
    """Parse command-line options for the StatA validation run.

    Args:
        argv: optional list of argument strings; defaults to sys.argv[1:].
            Passing an explicit list makes the parser unit-testable.

    Returns:
        argparse.Namespace with the parsed options.
    """
    parser = argparse.ArgumentParser()
    # Data / model
    parser.add_argument('--data', type=str, default='data/coco.yaml')
    parser.add_argument('--weights', type=str, default='yolov9-c.pt')
    parser.add_argument('--img-size', type=int, default=640)
    parser.add_argument('--device', default='0')
    parser.add_argument('--backbone', type=str, default='vit_b16')
    # StatA task setup
    parser.add_argument('--batch-size', type=int, default=64)
    parser.add_argument('--n-tasks', type=int, default=10)
    parser.add_argument('--alpha', type=float, default=1.0)
    parser.add_argument('--lambda-laplacian', type=float, default=1.0)
    parser.add_argument('--max-iter', type=int, default=10)
    parser.add_argument('--online', action='store_true')
    parser.add_argument('--gamma', type=float, default=1.0)
    # Caching / reproducibility
    parser.add_argument('--cache-dir', type=str, default='./cache')
    parser.add_argument('--load-cache', action='store_true')
    parser.add_argument('--seed', type=int, default=1)
    return parser.parse_args(argv)


def extract_features(model, dataloader, args):
    """Run the model over the dataloader and collect CLIP-space features.

    Features and labels are cached under args.cache_dir; when --load-cache
    is set and BOTH cache files exist, they are loaded instead of recomputed.

    Args:
        model: DetectionModel with StatA set up (clip_projector attached).
        dataloader: YOLO-style loader yielding (imgs, targets, paths, shapes).
        args: parsed options (uses cache_dir, load_cache).

    Returns:
        (features, labels) tensors on CPU.
    """
    cache_path = Path(args.cache_dir)
    cache_path.mkdir(parents=True, exist_ok=True)

    cache_features = cache_path / 'features.pt'
    cache_labels = cache_path / 'labels.pt'

    # Only reuse the cache when both halves are present; a lone features
    # file (e.g. from an interrupted run) would otherwise crash on load.
    if args.load_cache and cache_features.exists() and cache_labels.exists():
        print(f"[Cache] Loading from {cache_features}")
        return torch.load(cache_features), torch.load(cache_labels)

    print("[Extract] Extracting features...")
    features_list = []
    labels_list = []

    model.eval()
    with torch.no_grad():
        for imgs, targets, _, _ in tqdm(dataloader, desc='Extracting'):
            imgs = imgs.to(next(model.parameters()).device).float() / 255.0

            yolo_features = model.extract_features_for_stata(imgs)
            clip_features = model.clip_projector(yolo_features)

            # Column 1 of targets is presumably the per-object class id
            # (YOLO target layout) — confirm against create_dataloader.
            batch_labels = targets[:, 1].long()
            # NOTE(review): repeat_interleave(3) assumes exactly 3 projected
            # rows per label (one per scale). targets are per-object, not
            # per-image, so verify the row counts of clip_features and
            # expanded_labels actually match for multi-object images.
            expanded_labels = batch_labels.repeat_interleave(3)

            features_list.append(clip_features.cpu())
            labels_list.append(expanded_labels.cpu())

    features = torch.cat(features_list)
    labels = torch.cat(labels_list)

    print(f"[Extract] ✓ Features: {features.shape}")

    torch.save(features, cache_features)
    torch.save(labels, cache_labels)
    print(f"[Cache] ✓ Saved")

    return features, labels


def run_evaluation(features, labels, clip_prototypes, args):
    """Evaluate zero-shot vs. StatA-adapted accuracy over sampled tasks.

    Args:
        features: (N, D) CLIP-space feature tensor.
        labels: (N,) ground-truth class ids.
        clip_prototypes: class text prototypes in the layout StatA_solver expects.
        args: parsed options (uses online, n_tasks, batch_size, alpha,
            lambda_laplacian, max_iter).

    Returns:
        (zero-shot accuracy, StatA accuracy), averaged over the tasks that
        actually ran. Returns (0.0, 0.0) when no task ran — including the
        --online path, which is not implemented here.
    """
    acc_tot = 0.0
    acc_zs_tot = 0.0
    n_done = 0  # tasks actually evaluated; the sampler may run out early

    if not args.online:
        sampler = BatchSampler(features, labels, args.batch_size)

        for _ in tqdm(range(args.n_tasks), desc='StatA'):
            indices = sampler.generate_indices()
            if indices is None:
                break

            preds_zs, preds = StatA_solver(
                features[indices, :],
                labels[indices],
                clip_prototypes,
                alpha=args.alpha,
                lambda_laplacian=args.lambda_laplacian,
                max_iter=args.max_iter,
            )

            acc_zs_tot += cls_acc(preds_zs, labels[indices])
            acc_tot += cls_acc(preds, labels[indices])
            n_done += 1

        # Average over the tasks that completed; dividing by args.n_tasks
        # would silently understate accuracy when the sampler exhausts the
        # data before n_tasks iterations.
        if n_done:
            acc_zs_tot /= n_done
            acc_tot /= n_done
    else:
        # Don't return zeros silently for a mode that was never implemented.
        print("[WARNING] --online evaluation is not implemented; returning 0.0")

    return acc_zs_tot, acc_tot


def main():
    args = get_arguments()
    
    import random, numpy as np
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    
    device = select_device(args.device)
    
    with open(args.data) as f:
        data_dict = yaml.safe_load(f)
    data_dict = check_dataset(data_dict)
    
    nc = int(data_dict['nc'])
    names = data_dict['names']
    
    print(f"\n[Dataset] {nc} classes")
    
    print(f"[Model] Loading {args.weights}...")
    model = DetectionModel(cfg=None, ch=3, nc=nc)
    ckpt = torch.load(args.weights, map_location=device)
    model.load_state_dict(ckpt['model'].float().state_dict() if 'model' in ckpt else ckpt)
    model = model.to(device).eval()
    
    clip_models = {'vit_b16': 'ViT-B/16'}
    model.setup_stata(names, clip_models[args.backbone])
    
    clip_prototypes = model.clip_prototypes.T.unsqueeze(-1)
    
    dataloader = create_dataloader(
        data_dict['val'], args.img_size, args.batch_size,
        stride=32, pad=0.5, rect=False, workers=8
    )[0]
    
    features, labels = extract_features(model, dataloader, args)
    acc_zs, acc_stata = run_evaluation(features, labels, clip_prototypes, args)
    
    print("\n" + "="*70)
    print("                    Final Results                             ")
    print("="*70)
    print(f"ZERO-shot Accuracy:   {acc_zs:.2f}%")
    print(f"FINAL Accuracy:       {acc_stata:.2f}%")
    print(f"Improvement:          +{acc_stata - acc_zs:.2f}%")
    print("="*70 + "\n")


if __name__ == '__main__':
    main()
```

Save the file.

---

# STEP 7: Quick Test Run

```bash
cd ~/Dara/yolov9-main

# Download YOLOv9 weights if you don't have them
# wget https://github.com/WongKinYiu/yolov9/releases/download/v0.1/yolov9-c.pt

# Quick test (1 task)
python val_stata.py \
    --data data/coco.yaml \
    --weights yolov9-c.pt \
    --batch-size 10 \
    --n-tasks 1 \
    --device cpu
```

---

## Progress Check ✅

What we've completed:
- ✅ StatA module setup
- ✅ CLIP integration working
- ✅ Model modifications
- ✅ Validation script created

**What's your status now?**
1. Did `test_model_stata.py` pass all tests?
2. What were the P3, P4, P5 layer indices from `find_layers.py`?
3. Ready to run the first real evaluation?

Let me know and we'll continue! 🚀