"""Fine-tuning script with depth supervision: freeze layer_4 and layer_11, train layer_17 and layer_23 using depth loss.

This script loads pretrained weights for the first two layers and VGGT's DPT head,
then trains the last two layers by comparing predicted depth with ground truth depth maps.

Usage:
CUDA_VISIBLE_DEVICES=0,5,6,7 torchrun \
    --nproc_per_node=4 \
    --master_port=29505 \
    /data0/liqifeng/ZYC/vggt/mini_vggt/Distill/train_mini_depth_finetune.py \
    --data-root /data1/datasets/distill_libero_10_goal \
    --epochs 50 \
    --lr 1e-5 \
    --batch-size 1 \
    --num-workers 8 \
    --accumulation-steps 50 \
    --pretrained-path /data0/liqifeng/ZYC/mini_vggt_distilled.pt \
    --teacher-checkpoint /data0/liqifeng/ZYC/model.pt \
    --save-path /data0/liqifeng/ZYC/vggt/mini_vggt/Distill/mini_vggt_depth_finetuned.pt \
    --log-dir /data0/liqifeng/ZYC/vggt/mini_vggt/Distill/runs/mini_vggt_depth_finetune \
    --lr-scheduler cosine \
    --warmup-epochs 5 \
    --min-lr 1e-6 \
    --grad-clip 1.0 \
    --weight-decay 0.01 \
    --num-samples-per-scene 5

    

tensorboard --logdir /data0/liqifeng/ZYC/vggt/mini_vggt/Distill/runs/mini_vggt_depth_finetune --port 6011
pkill -f train_mini_depth_finetune.py
"""

from __future__ import annotations

import argparse
import glob
import logging
import os
import random
import signal
import sys
from pathlib import Path
from typing import List, Dict

# Disable HDF5 file locking before any HDF5-backed library is loaded.
# NOTE(review): presumably set to avoid stale-lock errors on shared/NFS
# storage when multiple DataLoader workers read the same files — confirm.
os.environ.setdefault("HDF5_USE_FILE_LOCKING", "FALSE")

# Global flag for graceful shutdown; set by signal_handler, polled by the
# training loop so an interrupted run can still save its checkpoint.
_SHUTDOWN_FLAG = False

def signal_handler(signum, frame):
    """Mark the global shutdown flag so the training loop can exit cleanly.

    Registered for SIGINT/SIGTERM in ``train()``. It only flips a flag and
    logs; the training loop performs the actual checkpoint-and-exit.

    Args:
        signum: Signal number delivered by the OS.
        frame: Current stack frame at delivery time (unused).
    """
    global _SHUTDOWN_FLAG
    _SHUTDOWN_FLAG = True
    # Lazy %-style args: the message is only formatted if the record is emitted,
    # and formatting errors cannot raise inside the (fragile) signal context.
    logger.info("Received signal %s, initiating graceful shutdown...", signum)

try:
    from torch.utils.tensorboard import SummaryWriter
except ImportError:
    SummaryWriter = None

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data import DataLoader, Dataset
from torch.utils.data.distributed import DistributedSampler
from tqdm.auto import tqdm

REPO_ROOT = Path(__file__).resolve().parents[2]
if str(REPO_ROOT) not in sys.path:
    sys.path.insert(0, str(REPO_ROOT))

from mini_vggt.Distill.vggt_mini import MiniVGGT, MiniVGGTConfig
from mini_vggt.Distill.vggt_mini_gqa import MiniVGGTGQA, MiniVGGTGQAConfig
from vggt.models.vggt import VGGT
from vggt.utils.load_fn import load_and_preprocess_images

logger = logging.getLogger(__name__)

# Define which layers to freeze (already converged).
# Indices refer to positions in student.blocks (the 4-block mini backbone);
# train() uses these lists to toggle requires_grad on the matching blocks
# and on the corresponding FeatureProjector projections.
_FROZEN_STUDENT_LAYERS = [0, 1]  # Freeze first two layers
_TRAINABLE_STUDENT_LAYERS = [2, 3]  # Train last two layers


class DepthDataset(Dataset):
    """Dataset that loads RGB images and corresponding depth maps.

    Expected layout: ``data_root/<split>/<scene>/sample_*/`` directories
    holding ``cam_*_rgb.{png,jpg,jpeg}`` images with matching
    ``cam_*_depth.npy`` depth files.

    Each sample directory is expanded into ``COMBINATIONS_PER_SAMPLE``
    deterministic random view subsets, so the reported dataset length is
    ``len(self.samples) * COMBINATIONS_PER_SAMPLE``.
    """

    # How many random view-combinations each sample directory contributes.
    COMBINATIONS_PER_SAMPLE = 50

    def __init__(
        self,
        data_root: str,
        num_samples_per_scene: int = 5,
        min_images: int = 3,
        max_images: int = 8,
        preprocess_mode: str = "crop",
        is_main_process: bool = True
    ) -> None:
        self.data_root = data_root
        self.num_samples_per_scene = num_samples_per_scene
        self.min_images = min_images
        self.max_images = max_images
        self.preprocess_mode = preprocess_mode

        # Collect all samples
        self.samples = self._collect_samples()

        if is_main_process:
            logger.info(f"Dataset initialized with {len(self.samples)} sample directories")
            logger.info(
                f"Each sample will generate {self.COMBINATIONS_PER_SAMPLE} random combinations, "
                f"total dataset size: {len(self.samples) * self.COMBINATIONS_PER_SAMPLE}"
            )

    def _collect_samples(self) -> List[str]:
        """Collect all sample directories.

        A directory qualifies if it is named ``sample_*`` and contains at
        least ``min_images`` RGB files. At most ``num_samples_per_scene``
        directories are kept per scene, chosen with the module-global RNG
        (which train() seeds for reproducibility).
        """
        samples = []

        if not os.path.isdir(self.data_root):
            raise FileNotFoundError(f"Data root does not exist: {self.data_root}")

        # Walk through split/scene/sample structure
        for split_dir in sorted(os.listdir(self.data_root)):
            split_path = os.path.join(self.data_root, split_dir)
            if not os.path.isdir(split_path):
                continue

            for scene_dir in sorted(os.listdir(split_path)):
                scene_path = os.path.join(split_path, scene_dir)
                if not os.path.isdir(scene_path):
                    continue

                # Collect sample directories from this scene
                scene_samples = []
                for sample_dir in sorted(os.listdir(scene_path)):
                    if not sample_dir.startswith("sample_"):
                        continue
                    sample_path = os.path.join(scene_path, sample_dir)
                    if os.path.isdir(sample_path):
                        # Check if the sample has enough images
                        rgb_paths = []
                        for ext in ("png", "jpg", "jpeg"):
                            pattern = os.path.join(sample_path, f"cam_*_rgb.{ext}")
                            rgb_paths.extend(glob.glob(pattern))

                        if len(rgb_paths) >= self.min_images:
                            scene_samples.append(sample_path)

                # Randomly select num_samples_per_scene from this scene
                if len(scene_samples) > self.num_samples_per_scene:
                    scene_samples = random.sample(scene_samples, self.num_samples_per_scene)

                samples.extend(scene_samples)

        return sorted(samples)

    def __len__(self) -> int:
        # Each sample directory is expanded into multiple view combinations.
        return len(self.samples) * self.COMBINATIONS_PER_SAMPLE

    def __getitem__(self, idx: int) -> Dict:
        # Map flat idx to (sample directory, combination index within it).
        sample_idx = idx // self.COMBINATIONS_PER_SAMPLE
        combination_idx = idx % self.COMBINATIONS_PER_SAMPLE

        sample_path = self.samples[sample_idx]

        # Find all RGB images
        rgb_paths = []
        for ext in ("png", "jpg", "jpeg"):
            pattern = os.path.join(sample_path, f"cam_*_rgb.{ext}")
            rgb_paths.extend(sorted(glob.glob(pattern)))

        if not rgb_paths:
            raise ValueError(f"No RGB images found in {sample_path}")

        # Deterministic per-index RNG: the same (sample, combination) pair
        # always yields the same view subset. A private Random instance
        # (rather than random.seed on the global RNG) leaves the global RNG
        # state untouched for DataLoader workers and other consumers, while
        # producing the exact same MT19937 stream as the previous
        # global-seed implementation.
        rng = random.Random(combination_idx * 1000 + sample_idx)

        num_images = rng.randint(
            min(self.min_images, len(rgb_paths)),
            min(self.max_images, len(rgb_paths))
        )
        selected_indices = sorted(rng.sample(range(len(rgb_paths)), num_images))
        selected_rgb_paths = [rgb_paths[i] for i in selected_indices]

        # Load RGB images (will be resized to 518x518 by VGGT's preprocessing)
        images = load_and_preprocess_images(selected_rgb_paths, mode=self.preprocess_mode)

        # Get the target size from preprocessed images (should be 518x518)
        target_h, target_w = images.shape[-2:]

        # Load corresponding depth maps and resize to match image size
        depth_maps = []
        for rgb_path in selected_rgb_paths:
            # Convert cam_XXX_rgb.png -> cam_XXX_depth.npy
            depth_path = rgb_path.replace("_rgb.", "_depth.").rsplit(".", 1)[0] + ".npy"
            if not os.path.exists(depth_path):
                raise FileNotFoundError(f"Depth map not found: {depth_path}")

            depth_map = np.load(depth_path)  # Shape: (H, W), typically 480x480

            # Resize depth map to match preprocessed image size (518x518).
            # Nearest-neighbor preserves exact depth values — no blending of
            # foreground/background depths across object boundaries.
            depth_tensor = torch.from_numpy(depth_map).float().unsqueeze(0).unsqueeze(0)  # (1, 1, H, W)
            depth_resized = F.interpolate(
                depth_tensor,
                size=(target_h, target_w),
                mode='nearest'
            )
            depth_map = depth_resized.squeeze(0).squeeze(0).numpy()  # (H, W)

            depth_maps.append(depth_map)

        # Stack depth maps: (S, H, W)
        depth_maps = np.stack(depth_maps, axis=0)

        return {
            "images": images,  # (S, 3, 518, 518)
            "depth": torch.from_numpy(depth_maps).float(),  # (S, 518, 518)
            "sample_path": sample_path,
        }


class DepthLoss(nn.Module):
    """Depth loss with L1 distance on valid pixels.

    Following VGGT's depth loss implementation from training/loss.py.

    Args:
        loss_weight: Scalar multiplier applied to the final loss.
    """

    def __init__(self, loss_weight: float = 1.0):
        super().__init__()
        self.loss_weight = loss_weight

    def forward(self, pred_depth: torch.Tensor, gt_depth: torch.Tensor) -> torch.Tensor:
        """Compute masked L1 depth loss.

        Args:
            pred_depth: (B, S, H, W, 1) or (B, S, H, W) - predicted depth from DPT head
            gt_depth: (B, S, H, W) - ground truth depth, resized to match pred_depth size

        Returns:
            Weighted scalar loss. When no pixel is valid, a zero that is
            still connected to the autograd graph.

        Raises:
            ValueError: If prediction and GT spatial sizes differ.

        Note: Sizes should already match since we resize GT depth in dataset to 518x518
        """
        # Handle different shapes - squeeze channel dimension if present
        if pred_depth.dim() == 5 and pred_depth.shape[-1] == 1:
            pred_depth = pred_depth.squeeze(-1)  # (B, S, H, W)

        # Verify sizes match (should be 518x518 for both)
        if pred_depth.shape[-2:] != gt_depth.shape[-2:]:
            raise ValueError(
                f"Size mismatch: pred {pred_depth.shape[-2:]} vs gt {gt_depth.shape[-2:]}. "
                f"GT depth should be resized to 518x518 in dataset preprocessing."
            )

        # Only supervise pixels with a valid (positive) GT depth.
        valid_mask = gt_depth > 0.0

        if not valid_mask.any():
            # Return a graph-connected zero rather than a detached leaf
            # tensor: a loss with no path to the model parameters produces
            # no gradients, which can stall DDP's gradient allreduce when
            # find_unused_parameters=False.
            return pred_depth.sum() * 0.0

        # Compute L1 loss on valid pixels
        pred_valid = pred_depth[valid_mask]
        gt_valid = gt_depth[valid_mask]

        loss = F.l1_loss(pred_valid, gt_valid)

        return loss * self.loss_weight


def parse_args() -> argparse.Namespace:
    """Parse command-line options for the depth fine-tuning run."""
    p = argparse.ArgumentParser(description="Fine-tune MiniVGGT with depth supervision")

    # Data and checkpoints
    p.add_argument("--data-root", default="/data1/datasets/distill_libero_10_goal", help="Root directory of the dataset")
    p.add_argument("--pretrained-path", required=True, help="Path to pretrained student checkpoint")
    p.add_argument("--teacher-checkpoint", required=True, help="Path to teacher VGGT checkpoint (for DPT head)")

    # Core optimization settings
    p.add_argument("--epochs", type=int, default=50)
    p.add_argument("--lr", type=float, default=1e-5, help="Learning rate")
    p.add_argument("--batch-size", type=int, default=1)
    p.add_argument("--accumulation-steps", type=int, default=50)
    p.add_argument("--num-workers", type=int, default=8)
    p.add_argument("--device", default="cuda:0")

    # Dataset sampling
    p.add_argument("--num-samples-per-scene", type=int, default=5, help="Number of samples to use per scene")
    p.add_argument("--min-images", type=int, default=3, help="Minimum images per sample")
    p.add_argument("--max-images", type=int, default=8, help="Maximum images per sample")
    p.add_argument("--preprocess-mode", choices=["crop", "pad"], default="crop")

    # Logging and outputs
    p.add_argument("--log-every", type=int, default=10)
    p.add_argument("--save-path", default="/data0/liqifeng/ZYC/vggt/mini_vggt/Distill/mini_vggt_depth_finetuned.pt")
    p.add_argument("--log-dir", default="/data0/liqifeng/ZYC/vggt/mini_vggt/Distill/logs/mini_vggt_depth_finetune")
    p.add_argument("--save-epoch-plot", action="store_true")

    # LR schedule and regularization
    p.add_argument("--lr-scheduler", type=str, default=None, choices=["cosine", "step", "exponential", "constant"])
    p.add_argument("--warmup-epochs", type=int, default=0)
    p.add_argument("--min-lr", type=float, default=1e-6)
    p.add_argument("--grad-clip", type=float, default=1.0, help="Gradient clipping max norm")
    p.add_argument("--weight-decay", type=float, default=0.01, help="Weight decay for AdamW optimizer")

    # Misc
    p.add_argument("--seed", type=int, default=2025, help="Random seed")
    p.add_argument("--local_rank", type=int, default=-1)

    return p.parse_args()


def configure_logging() -> None:
    """Configure root logging: INFO level with a timestamped message format."""
    log_format = "%(asctime)s | %(levelname)s | %(message)s"
    logging.basicConfig(format=log_format, level=logging.INFO)


class FeatureProjector(nn.Module):
    """Projects student layer tokens to teacher width and scatters them into a
    teacher-length token list.

    With the default configuration this maps the 4 student layers (1536-dim)
    to teacher indices [4, 11, 17, 23] (2048-dim), matching the layers VGGT's
    DPT head reads.

    Args:
        student_dim: Feature width of each incoming student token tensor.
        teacher_dim: Feature width expected by the teacher head.
        layer_mapping: Teacher indices that receive the projected student
            layers, one per student layer. Defaults to [4, 11, 17, 23].
        num_teacher_layers: Length of the returned token list. Defaults to 24.
    """

    def __init__(
        self,
        student_dim: int = 1536,
        teacher_dim: int = 2048,
        layer_mapping: Optional[List[int]] = None,
        num_teacher_layers: int = 24,
    ):
        super().__init__()
        # Student layer i is projected and placed at teacher index layer_mapping[i].
        self.layer_mapping = list(layer_mapping) if layer_mapping is not None else [4, 11, 17, 23]
        self.num_teacher_layers = num_teacher_layers

        # One independent linear projection per mapped student layer.
        self.projections = nn.ModuleList(
            nn.Linear(student_dim, teacher_dim) for _ in self.layer_mapping
        )

    def forward(self, student_tokens: List[torch.Tensor]) -> List[torch.Tensor]:
        """Project student tokens into teacher-format positions.

        Args:
            student_tokens: List of len(layer_mapping) tensors, each (S, N, student_dim).

        Returns:
            List of num_teacher_layers tensors where the mapped indices hold
            projected features (S, N, teacher_dim); all other positions share
            a single dummy zero tensor (never read by the DPT head).
        """
        teacher_tokens = [None] * self.num_teacher_layers

        # Project and place student tokens at their teacher indices.
        for proj, tokens, teacher_idx in zip(self.projections, student_tokens, self.layer_mapping):
            teacher_tokens[teacher_idx] = proj(tokens)

        # Fill unused positions with one shared zero tensor; the consumer only
        # accesses the mapped indices, so the filler contents never matter.
        dummy = torch.zeros_like(teacher_tokens[self.layer_mapping[0]])
        for idx in range(self.num_teacher_layers):
            if teacher_tokens[idx] is None:
                teacher_tokens[idx] = dummy

        return teacher_tokens


def load_teacher_depth_head(teacher_checkpoint: str, device: str):
    """Build a frozen, eval-mode DPT depth head from a teacher VGGT checkpoint.

    The head keeps its original layer_indices ([4, 11, 17, 23]); projected
    student features are expected at exactly those positions.
    """
    logger.info(f"Loading teacher DPT head from {teacher_checkpoint}")

    # Instantiate the full teacher and restore its weights (non-strict: we
    # only need the depth head).
    ckpt = torch.load(teacher_checkpoint, map_location="cpu")
    weights = ckpt.get("model", ckpt)
    teacher = VGGT()
    teacher.load_state_dict(weights, strict=False)

    head = teacher.depth_head
    if head is None:
        raise ValueError("Teacher model does not have a depth head!")

    # Keep DPT head's original layer_indices [4, 11, 17, 23]; the student's
    # 4 layers will be projected and placed at these indices.
    if hasattr(head, 'layer_indices'):
        logger.info(f"DPT head using layer indices: {head.layer_indices}")

    # Freeze every parameter — the head is supervision-only.
    for p in head.parameters():
        p.requires_grad = False

    head.eval()
    return head.to(device)


def train() -> None:
    """Run depth-supervised fine-tuning of MiniVGGT.

    Flow: parse args -> optional DDP init -> build dataset/dataloader ->
    load pretrained student (freeze backbone blocks [0, 1], train [2, 3]) ->
    build FeatureProjector (freeze projections [0, 1]) -> load frozen teacher
    DPT head -> AdamW with optional warmup/scheduler -> gradient-accumulated
    training loop. Only rank 0 logs, writes TensorBoard summaries, and saves
    checkpoints; SIGINT/SIGTERM trigger a graceful save-and-exit.
    """
    args = parse_args()
    
    # Register signal handlers
    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)
    
    # Set random seeds
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    
    # Prefer the torchrun-provided LOCAL_RANK env var over the legacy flag.
    if args.local_rank == -1:
        local_rank = int(os.environ.get("LOCAL_RANK", -1))
    else:
        local_rank = args.local_rank
    
    # Setup distributed training
    if local_rank != -1:
        torch.cuda.set_device(local_rank)
        dist.init_process_group(backend='nccl')
        device = torch.device(f'cuda:{local_rank}')
        world_size = dist.get_world_size()
        is_main_process = dist.get_rank() == 0
        
        if is_main_process:
            configure_logging()
            logger.info(f"Distributed depth fine-tuning initialized: world_size={world_size}, rank={dist.get_rank()}")
    else:
        device = torch.device(args.device if torch.cuda.is_available() else "cpu")
        world_size = 1
        is_main_process = True
        configure_logging()
    
    if args.batch_size != 1:
        raise ValueError("This script currently supports only batch_size=1")
    
    # Load dataset
    dataset = DepthDataset(
        data_root=args.data_root,
        num_samples_per_scene=args.num_samples_per_scene,
        min_images=args.min_images,
        max_images=args.max_images,
        preprocess_mode=args.preprocess_mode,
        is_main_process=is_main_process
    )
    
    # `sampler` exists only in the DDP branch; the epoch loop below guards
    # its use with the same local_rank check.
    if local_rank != -1:
        sampler = DistributedSampler(dataset, shuffle=True)
        dataloader = DataLoader(
            dataset,
            batch_size=args.batch_size,
            sampler=sampler,
            num_workers=args.num_workers,
            pin_memory=True,
        )
    else:
        dataloader = DataLoader(
            dataset,
            batch_size=args.batch_size,
            shuffle=True,
            num_workers=args.num_workers,
            pin_memory=True,
        )
    
    # Load pretrained student checkpoint
    if is_main_process:
        logger.info(f"Loading pretrained student from {args.pretrained_path}")
    
    checkpoint = torch.load(args.pretrained_path, map_location="cpu")
    
    if "model" in checkpoint:
        model_state = checkpoint["model"]
    else:
        model_state = checkpoint
    
    # Detect model type from state-dict key names (GQA variant has query_proj).
    if any("query_proj" in k for k in model_state.keys()):
        if is_main_process:
            logger.info("Detected MiniVGGTGQA model")
        student = MiniVGGTGQA(MiniVGGTGQAConfig())
    else:
        if is_main_process:
            logger.info("Detected MiniVGGT model")
        student = MiniVGGT(MiniVGGTConfig())
    
    # Load weights
    student.load_state_dict(model_state, strict=True)
    student = student.to(device)
    
    # Freeze first two layers
    if is_main_process:
        logger.info(f"Freezing student backbone layers: {_FROZEN_STUDENT_LAYERS}")
        logger.info(f"Trainable student backbone layers: {_TRAINABLE_STUDENT_LAYERS}")
    
    for layer_idx in _FROZEN_STUDENT_LAYERS:
        if hasattr(student, 'blocks') and layer_idx < len(student.blocks):
            for param in student.blocks[layer_idx].parameters():
                param.requires_grad = False
            if is_main_process:
                logger.info(f"  Frozen backbone block {layer_idx}")
    
    # Ensure trainable layers are not frozen
    for layer_idx in _TRAINABLE_STUDENT_LAYERS:
        if hasattr(student, 'blocks') and layer_idx < len(student.blocks):
            for param in student.blocks[layer_idx].parameters():
                param.requires_grad = True
            if is_main_process:
                logger.info(f"  Trainable backbone block {layer_idx}")
    
    # Count trainable parameters
    if is_main_process:
        num_trainable = sum(p.numel() for p in student.parameters() if p.requires_grad)
        num_total = sum(p.numel() for p in student.parameters())
        logger.info(f"Student trainable params: {num_trainable:,} / {num_total:,} ({100*num_trainable/num_total:.1f}%)")
    
    # Create feature projector (1536 -> 2048 for 4 layers)
    student_dim = student.config.embed_dim * 2  # 768 * 2 = 1536
    teacher_dim = 2048
    projector = FeatureProjector(student_dim=student_dim, teacher_dim=teacher_dim)
    
    if is_main_process:
        logger.info(f"Created FeatureProjector: {student_dim} -> {teacher_dim}")
        logger.info("Projector will place student layers [0,1,2,3] at teacher indices [4,11,17,23]")
    
    # Optionally load pretrained projector weights from distillation checkpoint
    if "projector" in checkpoint:
        projector.load_state_dict(checkpoint["projector"], strict=False)
        if is_main_process:
            logger.info("Loaded pretrained projector weights")
    elif "criterion" in checkpoint:
        # Try loading from criterion's layer_projs (from feature distillation)
        criterion_state = checkpoint["criterion"]
        layer_names = ["layer_4", "layer_11", "layer_17", "layer_23"]
        loaded_count = 0
        for i, layer_name in enumerate(layer_names):
            weight_key = f"layer_projs.{layer_name}.weight"
            bias_key = f"layer_projs.{layer_name}.bias"
            if weight_key in criterion_state and bias_key in criterion_state:
                projector.projections[i].weight.data = criterion_state[weight_key]
                projector.projections[i].bias.data = criterion_state[bias_key]
                loaded_count += 1
        if is_main_process:
            logger.info(f"Loaded {loaded_count}/4 projector weights from criterion.layer_projs")
    
    # Move projector to device after loading weights
    projector = projector.to(device)
    
    # Freeze projections for converged layers [0, 1], train projections for [2, 3]
    for i, proj in enumerate(projector.projections):
        if i in [0, 1]:  # Freeze first two projections
            for param in proj.parameters():
                param.requires_grad = False
            if is_main_process:
                logger.info(f"  Frozen projection layer {i}")
        else:  # Keep last two projections trainable
            for param in proj.parameters():
                param.requires_grad = True
            if is_main_process:
                logger.info(f"  Trainable projection layer {i}")
    
    # Load teacher depth head
    depth_head = load_teacher_depth_head(args.teacher_checkpoint, device)
    
    # Wrap with DDP
    if local_rank != -1:
        student = DDP(student, device_ids=[local_rank], output_device=local_rank, find_unused_parameters=False)
        projector = DDP(projector, device_ids=[local_rank], output_device=local_rank, find_unused_parameters=False)
        # Don't wrap depth_head with DDP since it's frozen
    
    # Create depth loss
    depth_criterion = DepthLoss(loss_weight=1.0).to(device)
    
    # Optimizer - trainable student parameters + trainable projector parameters
    trainable_params = []
    trainable_params.extend([p for p in student.parameters() if p.requires_grad])
    trainable_params.extend([p for p in projector.parameters() if p.requires_grad])
    
    if is_main_process:
        num_student = sum(p.numel() for p in student.parameters() if p.requires_grad)
        num_projector = sum(p.numel() for p in projector.parameters() if p.requires_grad)
        num_trainable = num_student + num_projector
        logger.info(f"Trainable params - Student: {num_student:,}, Projector: {num_projector:,}, Total: {num_trainable:,}")
    
    optimizer = torch.optim.AdamW(trainable_params, lr=args.lr, weight_decay=args.weight_decay)
    
    if is_main_process:
        logger.info(f"Optimizer: AdamW(lr={args.lr}, weight_decay={args.weight_decay})")
        if args.grad_clip is not None and args.grad_clip > 0:
            logger.info(f"Gradient clipping enabled: max_norm={args.grad_clip}")
    
    # Learning rate scheduler
    scheduler = None
    if args.lr_scheduler == "cosine":
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
            optimizer, 
            T_max=args.epochs - args.warmup_epochs,
            eta_min=args.min_lr
        )
        if is_main_process:
            logger.info(f"Using Cosine Annealing LR scheduler")
    elif args.lr_scheduler == "step":
        scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=15, gamma=0.5)
        if is_main_process:
            logger.info("Using Step LR scheduler")
    elif args.lr_scheduler == "exponential":
        scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.95)
        if is_main_process:
            logger.info("Using Exponential LR scheduler")
    elif args.lr_scheduler == "constant":
        if is_main_process:
            logger.info(f"Using Constant LR: {args.lr}")
    
    start_epoch = 0
    global_step = 0
    epoch_losses: List[float] = []
    
    writer = None
    if is_main_process:
        if args.log_dir and SummaryWriter is None:
            logger.warning("torch.utils.tensorboard is unavailable")
        writer = SummaryWriter(log_dir=args.log_dir) if args.log_dir and SummaryWriter is not None else None
    
    # Training loop
    for epoch in range(start_epoch, args.epochs):
        student.train()
        projector.train()  # Set projector to training mode
        depth_head.eval()  # Keep depth head in eval mode
        
        optimizer.zero_grad()
        
        # Apply warmup: linear per-epoch ramp that overrides the scheduler's
        # LR; scheduler.step() below is skipped until warmup ends.
        if args.warmup_epochs > 0 and epoch < args.warmup_epochs:
            warmup_lr = args.lr * (epoch + 1) / args.warmup_epochs
            for param_group in optimizer.param_groups:
                param_group['lr'] = warmup_lr
        
        if local_rank != -1:
            sampler.set_epoch(epoch)
        
        running_loss = 0.0
        epoch_loss_total = 0.0
        accumulation_counter = 0
        skipped_batches = 0
        
        if is_main_process:
            progress = tqdm(dataloader, desc=f"Epoch {epoch+1}/{args.epochs} [Depth Fine-tune]")
        else:
            progress = dataloader
        
        for batch_idx, batch in enumerate(progress):
            # Check shutdown signal
            if _SHUTDOWN_FLAG:
                if is_main_process:
                    logger.info("Shutdown flag detected, saving checkpoint and exiting...")
                break
            
            images = batch["images"].to(device, non_blocking=True)
            gt_depth = batch["depth"].to(device, non_blocking=True)
            
            if images.dim() == 4:
                images = images.unsqueeze(0)  # (1, S, 3, H, W)
            
            if gt_depth.dim() == 3:
                gt_depth = gt_depth.unsqueeze(0)  # (1, S, H, W)
            
            # Forward through student backbone
            # NOTE(review): unwrapping and calling `.module` directly bypasses
            # the DDP forward hook that prepares gradient bucketing — verify
            # gradients actually sync across ranks under DDP.
            model = student.module if isinstance(student, DDP) else student
            student_tokens, patch_start_idx = model.forward_features(images)
            
            # Forward through projector to get teacher-format tokens
            # student_tokens: List[4] of (S, N, 1536)
            # teacher_tokens: List[24] with indices [4,11,17,23] filled with (S, N, 2048)
            proj_model = projector.module if isinstance(projector, DDP) else projector
            teacher_tokens = proj_model(student_tokens)
            
            # Forward through frozen depth head
            pred_depth_trainable, _ = depth_head(
                teacher_tokens,
                images=images,
                patch_start_idx=patch_start_idx
            )
            
            loss = depth_criterion(pred_depth_trainable, gt_depth)
            
            # Check for NaN or Inf; discard any partially accumulated grads too.
            if torch.isnan(loss) or torch.isinf(loss):
                if is_main_process:
                    logger.warning(f"⚠️  Invalid loss detected at step {global_step} (epoch {epoch+1}, batch {batch_idx}): {loss.item()}")
                optimizer.zero_grad()
                skipped_batches += 1
                continue
            
            # Check for large loss
            if loss.item() > 100.0:
                if is_main_process:
                    logger.warning(f"⚠️  Large loss detected: {loss.item():.4f} at step {global_step}")
            
            # Scale for gradient accumulation so the effective gradient matches
            # a single large batch.
            loss = loss / args.accumulation_steps
            loss.backward()
            
            accumulation_counter += 1
            
            if accumulation_counter % args.accumulation_steps == 0 or batch_idx == len(dataloader) - 1:
                # Gradient clipping
                if args.grad_clip is not None and args.grad_clip > 0:
                    total_norm = torch.nn.utils.clip_grad_norm_(trainable_params, args.grad_clip)
                    
                    if total_norm > args.grad_clip * 2 and is_main_process:
                        logger.warning(f"⚠️  Large gradient norm: {total_norm:.4f} (clipped to {args.grad_clip})")
                    
                    if writer is not None and is_main_process:
                        writer.add_scalar("train/grad_norm", total_norm, global_step)
                
                optimizer.step()
                optimizer.zero_grad()
                
                # Log to TensorBoard (un-scale the accumulated loss for display)
                if writer is not None and is_main_process:
                    writer.add_scalar("train/depth_loss", loss.item() * args.accumulation_steps, global_step)
                    writer.add_scalar("train/lr", float(optimizer.param_groups[0].get("lr", 0.0)), global_step)
                
                accumulation_counter = 0
                global_step += 1
            
            running_loss += loss.item() * args.accumulation_steps
            epoch_loss_total += loss.item() * args.accumulation_steps
            
            # NOTE(review): running_loss accumulates one value per batch but is
            # divided by log_every optimizer steps — the displayed average is
            # only approximate when accumulation_steps > 1.
            if is_main_process and accumulation_counter == 0 and global_step % args.log_every == 0:
                avg_loss = running_loss / args.log_every
                progress.set_postfix({"loss": f"{avg_loss:.4f}"})
                running_loss = 0.0
        
        # NOTE(review): denominator counts all batches, including any skipped
        # for invalid loss, so epoch_loss is slightly underestimated then.
        epoch_loss = epoch_loss_total / max(len(dataloader), 1)
        epoch_losses.append(float(epoch_loss))
        if writer is not None and is_main_process:
            writer.add_scalar("train/epoch_loss", epoch_loss, epoch + 1)
        
        if is_main_process and skipped_batches > 0:
            logger.warning(f"⚠️  Skipped {skipped_batches} batches due to invalid loss in epoch {epoch+1}")
        
        # Scheduler only advances after the warmup window; note the epoch
        # summary log below is emitted only when a scheduler is configured.
        if scheduler is not None and epoch >= args.warmup_epochs:
            scheduler.step()
            if is_main_process:
                current_lr = optimizer.param_groups[0]['lr']
                logger.info(f"Epoch {epoch+1}/{args.epochs} - Loss: {epoch_loss:.6f} - LR: {current_lr:.6f}")
        
        # Save checkpoint (rank 0 only; overwrites the same path each epoch,
        # so only the latest state is kept)
        if is_main_process:
            model = student.module if isinstance(student, DDP) else student
            proj_model = projector.module if isinstance(projector, DDP) else projector
            
            checkpoint = {
                "model": model.state_dict(),
                "projector": proj_model.state_dict(),
                "optimizer": optimizer.state_dict(),
                "epoch": epoch,
                "global_step": global_step,
                "epoch_losses": epoch_losses,
                "args": vars(args),
                "frozen_layers": _FROZEN_STUDENT_LAYERS,
                "trainable_layers": _TRAINABLE_STUDENT_LAYERS,
            }
            if scheduler is not None:
                checkpoint["scheduler"] = scheduler.state_dict()
            
            torch.save(checkpoint, args.save_path)
            logger.info(f"Saved depth fine-tuned checkpoint to {args.save_path}")
        
        # Check for shutdown signal
        if _SHUTDOWN_FLAG:
            if is_main_process:
                logger.info("Shutdown signal received, exiting training loop...")
            break
    
    # Save epoch loss plot
    if is_main_process and args.log_dir and args.save_epoch_plot and len(epoch_losses) > 0:
        try:
            import matplotlib.pyplot as plt
            
            os.makedirs(args.log_dir, exist_ok=True)
            plt.figure(figsize=(5, 3))
            plt.plot(range(1, len(epoch_losses) + 1), epoch_losses, marker="o")
            plt.xlabel("Epoch")
            plt.ylabel("Depth Loss")
            plt.title("MiniVGGT Depth Fine-tuning - Epoch Loss")
            plt.grid(True, alpha=0.3)
            out_path = os.path.join(args.log_dir, "loss_epoch_depth_finetune.png")
            plt.tight_layout()
            plt.savefig(out_path, dpi=150)
            plt.close()
            logger.info("Saved epoch loss plot to %s", out_path)
        except Exception as e:
            logger.warning("Could not save loss plot: %s", e)
    
    if writer is not None and is_main_process:
        writer.close()
    
    if local_rank != -1:
        dist.destroy_process_group()


# Script entry point; all setup (argparse, DDP init, logging) happens in train().
if __name__ == "__main__":
    train()
