"""Fine-tuning script: align camera token from last layer and depth reconstruction loss.

This script loads the pretrained checkpoint and trains with:
1. Camera token alignment loss (last layer only)
2. Depth reconstruction loss

Usage:
CUDA_VISIBLE_DEVICES=0,5 torchrun \
    --nproc_per_node=2 \
    --master_port=29503 \
    /data0/liqifeng/ZYC/vggt/mini_vggt/Distill/train_mini_finetune.py \
    --feature-root /data1/datasets/VGGT_features \
    --epochs 20 \
    --lr 5e-6 \
    --batch-size 1 \
    --num-workers 32 \
    --accumulation-steps 50 \
    --pretrained-path /data0/liqifeng/ZYC/mini_vggt_distilled.pt \
    --teacher-checkpoint /data0/liqifeng/ZYC/model.pt \
    --save-path /data0/liqifeng/ZYC/vggt/mini_vggt/Distill/mini_vggt_finetuned.pt \
    --log-dir /data0/liqifeng/ZYC/vggt/mini_vggt/Distill/runs/mini_vggt_finetune_v2 \
    --lr-scheduler cosine \
    --warmup-epochs 10 \
    --min-lr 5e-6 \
    --grad-clip 1.0 \
    --weight-decay 0.01 \
    --camera-token-weight 1.0 \
    --depth-weight 1.0

tensorboard --logdir /data0/liqifeng/ZYC/vggt/mini_vggt/Distill/runs/mini_vggt_finetune_v2 --port 6010
"""


from __future__ import annotations

import argparse
import json
import logging
import os
import random
import signal
import sys
import numpy as np
from dataclasses import dataclass
from typing import Dict, Iterable, List, Sequence
from PIL import Image

os.environ.setdefault("HDF5_USE_FILE_LOCKING", "FALSE")

# Global flag for graceful shutdown
_SHUTDOWN_FLAG = False

def signal_handler(signum, frame):
    """Handle SIGINT/SIGTERM by flagging the training loop for graceful shutdown.

    Args:
        signum: Signal number delivered by the OS.
        frame: Current stack frame (unused; required by the ``signal`` callback API).
    """
    global _SHUTDOWN_FLAG
    _SHUTDOWN_FLAG = True
    # NOTE: relies on module-level `logger`; it exists by the time the handler
    # can fire, since handlers are only registered inside train().
    logger.info(f"Received signal {signum}, initiating graceful shutdown...")

try:
    import h5py
except ImportError:
    h5py = None

try:
    from torch.utils.tensorboard import SummaryWriter
except ImportError:
    SummaryWriter = None

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data import DataLoader, Dataset
from torch.utils.data.distributed import DistributedSampler
from tqdm.auto import tqdm
import sys
from pathlib import Path

REPO_ROOT = Path(__file__).resolve().parents[2]
if str(REPO_ROOT) not in sys.path:
    sys.path.insert(0, str(REPO_ROOT))

from mini_vggt.Distill.vggt_mini import MiniVGGT, MiniVGGTConfig
from mini_vggt.Distill.vggt_mini_gqa import MiniVGGTGQA, MiniVGGTGQAConfig
from vggt.models.vggt import VGGT
from vggt.utils.load_fn import load_and_preprocess_images

logger = logging.getLogger(__name__)

# Define which student backbone layers to freeze (0-indexed)
_FROZEN_STUDENT_LAYERS = [0, 1]
_TRAINABLE_STUDENT_LAYERS = [2, 3]


def _decode_h5_strings(dataset) -> List[str]:
    values = dataset[()]
    if isinstance(values, (bytes, str, torch.Tensor)):
        values = [values]
    elif hasattr(values, "tolist"):
        values = values.tolist()

    decoded: List[str] = []
    for item in values:
        if isinstance(item, (bytes, bytearray)):
            decoded.append(item.decode("utf-8"))
        else:
            decoded.append(str(item))
    return decoded


@dataclass
class TeacherFeatures:
    """Container for one sample's cached teacher outputs loaded from disk."""

    metadata: Dict  # parsed metadata.json contents for the sample
    final: torch.Tensor  # final-layer teacher features (unit leading batch dim squeezed by the loader)
    layers: Dict[str, torch.Tensor]  # cached intermediate features keyed by layer name
    image_paths: List[str]  # RGB image paths that produced these features
    preprocess_mode: str  # mode string forwarded to load_and_preprocess_images (e.g. "crop")


def load_teacher_features(metadata_path: str) -> TeacherFeatures:
    """Load cached teacher features (final + per-layer) for one sample.

    Reads the metadata JSON, resolves the referenced HDF5 scene file, locates
    the sample/iteration groups, and dequantizes the stored arrays. The two
    storage formats ("scene_hdf5_int8" and float) share the exact same layout
    (raw array + scale attribute, falling back to the metadata-level scale,
    then 1.0), so the previously duplicated per-format branches are unified
    into a single code path.

    Args:
        metadata_path: Path to the sample's metadata.json.

    Returns:
        TeacherFeatures with final features, per-layer features, image paths,
        and the preprocessing mode.

    Raises:
        RuntimeError: If h5py is not installed.
        KeyError: If required metadata keys or HDF5 groups are missing.
        FileNotFoundError: If the referenced scene HDF5 file does not exist.
    """
    if h5py is None:
        raise RuntimeError("h5py is required to read HDF5 feature caches. Please install h5py to continue.")

    with open(metadata_path, "r", encoding="utf-8") as f:
        metadata = json.load(f)

    scene_ref = metadata.get("scene_file")
    if not scene_ref:
        raise KeyError(f"Metadata missing 'scene_file': {metadata_path}")

    # The scene file may be stored relative to the metadata's directory.
    if os.path.isabs(scene_ref):
        scene_file = scene_ref
    else:
        scene_file = os.path.normpath(os.path.join(os.path.dirname(metadata_path), scene_ref))

    if not os.path.exists(scene_file):
        raise FileNotFoundError(f"Scene HDF5 file not found: {scene_file}")

    sample_group = metadata.get("sample_group")
    iteration_group = metadata.get("iteration_group")
    if not sample_group or not iteration_group:
        raise KeyError(f"Metadata missing group identifiers: {metadata_path}")

    layers: Dict[str, torch.Tensor] = {}

    def _dequant(dataset, scale: float):
        # Cast the stored (possibly int8-quantized) array to float32 and apply its scale.
        return dataset[...].astype("float32") * scale

    def _drop_unit_batch(arr):
        # Remove a leading batch dimension of size 1, if present.
        return arr.squeeze(axis=0) if arr.shape[0] == 1 else arr

    with h5py.File(scene_file, "r", locking=False) as scene_h5:
        if sample_group not in scene_h5:
            raise KeyError(f"Sample group '{sample_group}' not present in {scene_file}")
        sample_grp = scene_h5[sample_group]

        if iteration_group not in sample_grp:
            raise KeyError(f"Iteration group '{iteration_group}' not present in scene HDF5")
        iter_grp = sample_grp[iteration_group]

        # Per-dataset scale attribute wins; fall back to metadata-level scale, then 1.0.
        feature_scale = float(iter_grp.attrs.get("feature_scale", metadata.get("feature_scale", 1.0)))
        final_arr = _dequant(iter_grp["features"], feature_scale)

        layer_grp = iter_grp.get("layer_features")
        layer_scales_meta = metadata.get("layer_scales", {})
        for layer_name in metadata.get("saved_layers", []):
            if layer_grp is None or layer_name not in layer_grp:
                continue
            layer_ds = layer_grp[layer_name]
            layer_scale = float(layer_ds.attrs.get("scale", layer_scales_meta.get(layer_name, feature_scale)))
            layers[layer_name] = torch.from_numpy(_drop_unit_batch(_dequant(layer_ds, layer_scale)))

        final_arr = _drop_unit_batch(final_arr)

        # Prefer image paths embedded in the HDF5 file when metadata lacks them.
        if "image_paths" in iter_grp and not metadata.get("image_paths"):
            metadata["image_paths"] = _decode_h5_strings(iter_grp["image_paths"])

    image_paths = metadata.get("image_paths", [])
    if not image_paths:
        # Last resort: a sibling image_paths.json file next to the metadata.
        image_paths_file = os.path.join(os.path.dirname(metadata_path), "image_paths.json")
        if os.path.exists(image_paths_file):
            with open(image_paths_file, "r", encoding="utf-8") as f:
                image_paths = json.load(f)

    preprocess_mode = metadata.get("preprocess_mode", "crop")

    return TeacherFeatures(
        metadata=metadata,
        final=torch.from_numpy(final_arr),
        layers=layers,
        image_paths=image_paths,
        preprocess_mode=preprocess_mode,
    )


class SceneFeatureDataset(Dataset):
    """Dataset pairing cached teacher features with RGB frames and depth maps.

    Each sample is identified by a ``metadata.json`` file found under
    ``feature_root``. ``__getitem__`` loads the teacher features, the
    preprocessed RGB images, and per-frame depth maps resized to the RGB
    resolution.
    """
    
    def __init__(self, feature_root: str, max_samples: int | None = None, is_main_process: bool = True) -> None:
        """Index all metadata.json files and probe the teacher feature shape.

        Args:
            feature_root: Directory scanned recursively for metadata.json files.
            max_samples: Optional cap on the number of samples (applied after sorting).
            is_main_process: When False, info logging is suppressed (DDP workers).

        Raises:
            FileNotFoundError: If no metadata.json is found under feature_root.
        """
        metadata_paths = []
        for root, _dirs, files in os.walk(feature_root):
            if "metadata.json" in files:
                metadata_paths.append(os.path.join(root, "metadata.json"))
        # Sorted for a deterministic sample order across runs and ranks.
        metadata_paths.sort()

        if not metadata_paths:
            raise FileNotFoundError(f"No metadata.json found under {feature_root}")

        if max_samples is not None:
            metadata_paths = metadata_paths[:max_samples]

        self.samples = metadata_paths
        
        if is_main_process:
            logger.info(f"Loading first sample to infer dimensions: {self.samples[0]}")
        # Load one sample eagerly to discover the teacher feature width and
        # which intermediate layers were cached.
        first_teacher = load_teacher_features(self.samples[0])
        self.teacher_dim = int(first_teacher.final.shape[-1])
        self.saved_layers = list(first_teacher.layers.keys())
        
        if is_main_process:
            logger.info(f"Dataset initialized with {len(self.samples)} samples")
            logger.info(f"Teacher feature dimension: {self.teacher_dim}")
            logger.info(f"Saved layers: {self.saved_layers}")

    def __len__(self) -> int:
        """Return the number of indexed samples."""
        return len(self.samples)
    
    def _replace_third_level_path(self, path: str) -> str:
        """Replace third level directory with 'distill_libero_10_goal'.

        Rewrites the directory immediately following a 'datasets' component so
        cached paths point at the current dataset location.
        
        Example:
            Input:  /data1/datasets/distill_liero_obj/libero_spatial/pick_up.../sample_00000/cam_015_rgb.png
            Output: /data1/datasets/distill_libero_10_goal/libero_spatial/pick_up.../sample_00000/cam_015_rgb.png
        """
        parts = path.split(os.sep)
        if len(parts) >= 4:
            # Find 'datasets' index and replace the next directory
            try:
                datasets_idx = parts.index('datasets')
                if datasets_idx + 1 < len(parts):
                    parts[datasets_idx + 1] = 'distill_libero_10_goal'
            except ValueError:
                # 'datasets' not found in path, return original
                pass
        return os.sep.join(parts)
    
    def _get_depth_path_from_rgb(self, rgb_path: str) -> str:
        """Convert RGB image path to depth image path.
        
        RGB path format: /data1/datasets/distill_libero_10_goal/.../sample_00000/cam_000_rgb.png
        Depth path format: /data1/datasets/distill_libero_10_goal/.../sample_00000/cam_000_depth.npy
        """
        # First replace third level path
        rgb_path = self._replace_third_level_path(rgb_path)
        
        rgb_dir = os.path.dirname(rgb_path)
        rgb_filename = os.path.basename(rgb_path)
        
        # Replace '_rgb.png' with '_depth.npy'
        depth_filename = rgb_filename.replace('_rgb.png', '_depth.npy')
        
        # Depth is in the same directory as RGB
        depth_path = os.path.join(rgb_dir, depth_filename)
        return depth_path

    def __getitem__(self, idx: int) -> Dict:
        """Load one sample: RGB images, teacher features, and depth maps.

        Transient HDF5 read failures are retried a few times; after the final
        failure the neighboring sample is returned instead.

        Returns:
            Dict with keys "images", "teacher_final", "teacher_layers",
            "depth" (S, H, W float tensor), and "metadata".
        """
        metadata_path = self.samples[idx]
        max_retries = 3
        
        for attempt in range(max_retries):
            try:
                teacher = load_teacher_features(metadata_path)
                
                if not teacher.image_paths:
                    raise ValueError(f"No image paths found for sample: {metadata_path}")
                
                # Replace third level path for all image paths
                corrected_image_paths = [self._replace_third_level_path(p) for p in teacher.image_paths]
                
                images = load_and_preprocess_images(corrected_image_paths, mode=teacher.preprocess_mode)
                
                # Get target size from preprocessed images (should be 518x518)
                target_h, target_w = images.shape[-2:]
                
                # Load corresponding depth maps (using corrected paths)
                depth_maps = []
                for rgb_path in corrected_image_paths:
                    depth_path = self._get_depth_path_from_rgb(rgb_path)
                    
                    if not os.path.exists(depth_path):
                        # If depth doesn't exist, create zero depth map
                        # (zeros are masked out later because DepthLoss ignores gt <= 0).
                        logger.warning(f"Depth not found: {depth_path}, using zeros")
                        depth_map = np.zeros((target_h, target_w), dtype=np.float32)
                    else:
                        # Load depth from .npy file
                        depth_map = np.load(depth_path).astype(np.float32)
                        
                        # Normalize depth if needed (assuming depth is in mm or similar)
                        # NOTE(review): heuristic — assumes values > 100 mean millimeters;
                        # confirm against the actual depth export format.
                        if depth_map.max() > 100:  # If depth is in mm
                            depth_map = depth_map / 1000.0  # Convert to meters
                        
                        # Resize depth to match image size (518x518)
                        if depth_map.shape != (target_h, target_w):
                            depth_img_resized = Image.fromarray(depth_map).resize(
                                (target_w, target_h), Image.BILINEAR
                            )
                            depth_map = np.array(depth_img_resized, dtype=np.float32)
                    
                    depth_maps.append(depth_map)
                
                # Stack depth maps: (S, H, W)
                depth_maps = np.stack(depth_maps, axis=0)
                
                return {
                    "images": images,
                    "teacher_final": teacher.final,
                    "teacher_layers": teacher.layers,
                    "depth": torch.from_numpy(depth_maps).float(),
                    "metadata": teacher.metadata,
                }
            except OSError as e:
                # Retry only known-transient HDF5 read failures.
                if "filter returned failure" in str(e) or "Can't synchronously read" in str(e):
                    if attempt < max_retries - 1:
                        logger.warning(f"HDF5 read error on attempt {attempt+1}/{max_retries} for sample {idx}: {e}. Retrying...")
                        import time
                        time.sleep(0.1 * (attempt + 1))
                        continue
                    else:
                        # NOTE(review): falls back to a neighboring sample; if that
                        # neighbor also fails persistently this recurses further —
                        # acceptable only if corruption is rare. Verify.
                        logger.error(f"Failed to load sample {idx} after {max_retries} attempts. Using next sample.")
                        if idx + 1 < len(self.samples):
                            return self.__getitem__(idx + 1)
                        else:
                            return self.__getitem__(max(0, idx - 1))
                else:
                    raise
            except Exception as e:
                logger.error(f"Error loading sample {idx} from {metadata_path}: {e}")
                raise
        
        raise RuntimeError(f"Failed to load sample {idx} after all retries")


class CameraTokenCriterion(nn.Module):
    """Loss aligning the camera token (token index 0) of the last student layer.

    Projects the student camera token to the teacher dimension with a single
    linear layer and penalizes the MSE against the teacher's camera token.

    Bug fix: ``_squeeze_if_batched`` was previously decorated with
    ``@torch.no_grad()``. Squeezing a grad-requiring tensor under no_grad
    detaches it from the autograd graph, so whenever student tokens arrived
    with a leading batch dimension of 1 the camera loss produced no gradients
    for the student. The decorator is removed; the squeeze is a pure shape op
    and needs no grad suppression.
    """

    def __init__(
        self,
        student_dim: int,
        teacher_dim: int,
        camera_token_weight: float = 1.0,
    ) -> None:
        """
        Args:
            student_dim: Channel dimension of the student tokens.
            teacher_dim: Channel dimension of the teacher features.
            camera_token_weight: Scalar weight applied to the camera token loss.
        """
        super().__init__()
        self.camera_token_weight = camera_token_weight

        # Single projection layer for camera token (from last student layer)
        self.camera_proj = nn.Linear(student_dim, teacher_dim)

    def _squeeze_if_batched(self, t: torch.Tensor) -> torch.Tensor:
        """Drop a leading unit batch dim: (1, S, N, C) -> (S, N, C); else pass through.

        Must run with grad enabled so student gradients keep flowing.
        """
        if t.dim() == 4 and t.shape[0] == 1:
            return t.squeeze(0)
        return t

    def compute_losses(
        self,
        student_tokens: Sequence[torch.Tensor],
        teacher_final: torch.Tensor,
    ) -> tuple[torch.Tensor, Dict[str, object]]:
        """Compute camera token alignment loss from the last student layer.

        Args:
            student_tokens: Sequence of student layer outputs; only the last
                entry is used, shaped (S, N, student_dim) or (1, S, N, student_dim).
            teacher_final: Teacher final features, (S, N, teacher_dim) or
                (1, S, N, teacher_dim).

        Returns:
            (total_loss, details_dict) where details holds float scalars for logging.
        """
        # Use last layer's tokens; camera token is at index 0 along the token axis.
        last_student_tokens = self._squeeze_if_batched(student_tokens[-1])  # (S, N, student_dim)
        teacher_final = self._squeeze_if_batched(teacher_final)  # (S, N, teacher_dim)

        student_camera = last_student_tokens[:, 0, :]  # (S, student_dim)
        teacher_camera = teacher_final[:, 0, :]  # (S, teacher_dim)

        # Project student camera token into the teacher space, then L2 loss.
        student_camera_proj = self.camera_proj(student_camera)  # (S, teacher_dim)
        camera_loss = F.mse_loss(student_camera_proj, teacher_camera)

        total_loss = camera_loss * self.camera_token_weight

        details: Dict[str, object] = {
            "camera_token_loss": float(camera_loss.detach().item()),
            "total": float(total_loss.detach().item())
        }

        return total_loss, details

    def forward(
        self,
        student_tokens: Sequence[torch.Tensor],
        teacher_final: torch.Tensor,
    ) -> torch.Tensor:
        """Return only the total loss (convenience wrapper around compute_losses)."""
        total, _details = self.compute_losses(student_tokens, teacher_final)
        return total


class DepthLoss(nn.Module):
    """Masked L1 depth-reconstruction loss over pixels with positive ground truth."""

    def __init__(self, loss_weight: float = 1.0):
        super().__init__()
        # Scalar multiplier applied to the final L1 loss.
        self.loss_weight = loss_weight

    def forward(self, pred_depth: torch.Tensor, gt_depth: torch.Tensor) -> torch.Tensor:
        """Compute the weighted L1 loss on valid (gt > 0) pixels.

        Args:
            pred_depth: Predicted depth, (B, S, H, W, 1) or (B, S, H, W).
            gt_depth: Ground-truth depth, (B, S, H, W); non-positive pixels are ignored.

        Raises:
            ValueError: If shapes differ after the channel squeeze.
        """
        # Drop a trailing singleton channel so both tensors are (B, S, H, W).
        if pred_depth.dim() == 5 and pred_depth.shape[-1] == 1:
            pred_depth = pred_depth.squeeze(-1)

        if pred_depth.shape != gt_depth.shape:
            raise ValueError(
                f"Shape mismatch: pred_depth {pred_depth.shape} vs gt_depth {gt_depth.shape}"
            )

        # Supervise only pixels where a positive ground-truth depth exists.
        valid_mask = gt_depth > 0.0
        if not valid_mask.any():
            # No valid supervision: return a zero that still participates in autograd.
            return torch.tensor(0.0, device=pred_depth.device, requires_grad=True)

        l1 = F.l1_loss(pred_depth[valid_mask], gt_depth[valid_mask])
        return l1 * self.loss_weight


class FeatureProjector(nn.Module):
    """Projects 4 student layers to teacher format (2048-dim) at indices [4, 11, 17, 23].
    
    Note: student_dim depends on model type:
    - MiniVGGTGQA: embed_dim * 2 = 384 * 2 = 768
    - MiniVGGT: embed_dim * 2 (varies by config)
    """

    def __init__(self, student_dim: int = 768, teacher_dim: int = 2048):
        super().__init__()
        # The 4 student layers line up with these teacher layer indices.
        self.layer_mapping = [4, 11, 17, 23]
        # One independent linear projection per mapped student layer.
        self.projections = nn.ModuleList(
            nn.Linear(student_dim, teacher_dim) for _ in range(4)
        )

    def forward(self, student_tokens: List[torch.Tensor]) -> List[torch.Tensor]:
        """Project student tokens into a 24-slot list of teacher-layer features.

        Args:
            student_tokens: List of 4 tensors, each (S, N, student_dim).

        Returns:
            List of 24 tensors; slots [4, 11, 17, 23] hold projected features
            and every other slot shares one all-zero placeholder tensor.
        """
        slots: List[torch.Tensor] = [None] * 24  # type: ignore[list-item]
        for projection, tokens, teacher_idx in zip(self.projections, student_tokens, self.layer_mapping):
            slots[teacher_idx] = projection(tokens)

        # Unused slots all reference the same zero tensor shaped like slot 4.
        placeholder = torch.zeros_like(slots[4])
        return [placeholder if slot is None else slot for slot in slots]


def load_teacher_depth_head(teacher_checkpoint: str, device: str):
    """Load the frozen DPT depth head out of a teacher VGGT checkpoint.

    A full teacher model is instantiated only to extract its depth head; the
    head is frozen (requires_grad=False), switched to eval mode, and moved to
    the requested device.
    """
    logger.info(f"Loading teacher DPT head from {teacher_checkpoint}")

    teacher = VGGT()
    checkpoint = torch.load(teacher_checkpoint, map_location="cpu")
    # Checkpoints may wrap the weights under a "model" key.
    teacher.load_state_dict(checkpoint.get("model", checkpoint), strict=False)

    depth_head = teacher.depth_head
    if depth_head is None:
        raise ValueError("Teacher model does not have a depth head!")

    if hasattr(depth_head, 'layer_indices'):
        logger.info(f"DPT head using layer indices: {depth_head.layer_indices}")

    # Freeze every parameter of the head so it stays a fixed decoder.
    head_params = list(depth_head.parameters())
    for param in head_params:
        param.requires_grad = False

    # Sanity-check the freeze before handing the head back.
    num_frozen = sum(1 for p in head_params if not p.requires_grad)
    num_total = len(head_params)
    logger.info(f"DPT head frozen: {num_frozen}/{num_total} parameters (requires_grad=False)")

    depth_head.eval()
    return depth_head.to(device)


def parse_args() -> argparse.Namespace:
    """Build and parse the command-line arguments for the fine-tuning run."""
    ap = argparse.ArgumentParser(description="Fine-tune MiniVGGT with camera token and depth loss")
    # Data and checkpoint locations
    ap.add_argument("--feature-root", default="/data1/datasets/VGGT_features")
    ap.add_argument("--pretrained-path", required=True, help="Path to pretrained checkpoint to load")
    ap.add_argument("--teacher-checkpoint", required=True, help="Path to teacher VGGT checkpoint (for DPT head)")
    # Core optimization settings
    ap.add_argument("--epochs", type=int, default=50)
    ap.add_argument("--lr", type=float, default=1e-4, help="Learning rate")
    ap.add_argument("--batch-size", type=int, default=1)
    ap.add_argument("--accumulation-steps", type=int, default=8)
    ap.add_argument("--num-workers", type=int, default=8)
    ap.add_argument("--device", default="cuda:0")
    ap.add_argument("--max-samples", type=int, default=None)
    # Logging / output
    ap.add_argument("--log-every", type=int, default=10)
    ap.add_argument("--save-path", default="/data0/liqifeng/ZYC/vggt/mini_vggt/Distill/mini_vggt_finetuned.pt")
    ap.add_argument("--log-dir", default="/data0/liqifeng/ZYC/vggt/mini_vggt/Distill/logs/mini_vggt_finetune")
    ap.add_argument("--save-epoch-plot", action="store_true")
    # Learning-rate schedule and regularization
    ap.add_argument("--lr-scheduler", type=str, default=None, choices=["cosine", "step", "exponential", "constant"])
    ap.add_argument("--warmup-epochs", type=int, default=0)
    ap.add_argument("--min-lr", type=float, default=1e-6)
    ap.add_argument("--grad-clip", type=float, default=1.0, help="Gradient clipping max norm")
    ap.add_argument("--weight-decay", type=float, default=0.01, help="Weight decay for AdamW optimizer")
    # Loss weights
    ap.add_argument("--camera-token-weight", type=float, default=1.0, help="Weight for camera token loss")
    ap.add_argument("--depth-weight", type=float, default=1.0, help="Weight for depth reconstruction loss")
    # Reproducibility / distributed
    ap.add_argument("--seed", type=int, default=2025, help="Random seed")
    ap.add_argument("--local_rank", type=int, default=-1)
    return ap.parse_args()


def configure_logging() -> None:
    """Configure root logging with a timestamped, INFO-level format."""
    log_format = "%(asctime)s | %(levelname)s | %(message)s"
    logging.basicConfig(level=logging.INFO, format=log_format)


def train() -> None:
    args = parse_args()
    
    # Register signal handlers for graceful shutdown
    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)
    
    # Set random seeds
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    
    if args.local_rank == -1:
        local_rank = int(os.environ.get("LOCAL_RANK", -1))
    else:
        local_rank = args.local_rank
    
    # Setup distributed training
    if local_rank != -1:
        torch.cuda.set_device(local_rank)
        dist.init_process_group(backend='nccl')
        device = torch.device(f'cuda:{local_rank}')
        world_size = dist.get_world_size()
        is_main_process = dist.get_rank() == 0
        
        if is_main_process:
            configure_logging()
            logger.info(f"Distributed fine-tuning initialized: world_size={world_size}, rank={dist.get_rank()}")
    else:
        device = torch.device(args.device if torch.cuda.is_available() else "cpu")
        world_size = 1
        is_main_process = True
        configure_logging()
    
    if args.batch_size != 1:
        raise ValueError("This distillation script currently supports only batch_size=1")

    # Load dataset
    dataset = SceneFeatureDataset(args.feature_root, max_samples=args.max_samples, is_main_process=is_main_process)
    
    if local_rank != -1:
        sampler = DistributedSampler(dataset, shuffle=True)
        dataloader = DataLoader(
            dataset,
            batch_size=args.batch_size,
            sampler=sampler,
            num_workers=args.num_workers,
            pin_memory=True,
        )
    else:
        dataloader = DataLoader(
            dataset,
            batch_size=args.batch_size,
            shuffle=True,
            num_workers=args.num_workers,
            pin_memory=True,
        )

    # Load pretrained checkpoint
    if is_main_process:
        logger.info(f"Loading pretrained checkpoint from {args.pretrained_path}")
    
    checkpoint = torch.load(args.pretrained_path, map_location="cpu")
    
    # Determine model type from checkpoint
    if "model" in checkpoint:
        model_state = checkpoint["model"]
    else:
        model_state = checkpoint
    
    # Check if it's GQA or regular MiniVGGT
    # You can check a key that's unique to GQA model
    if any("query_proj" in k for k in model_state.keys()):
        if is_main_process:
            logger.info("Detected MiniVGGTGQA model")
        student = MiniVGGTGQA(MiniVGGTGQAConfig())
    else:
        if is_main_process:
            logger.info("Detected MiniVGGT model")
        student = MiniVGGT(MiniVGGTConfig())
    
    # Load model weights
    student.load_state_dict(model_state, strict=True)
    student = student.to(device)
    
    # Freeze only the first two layers of the backbone (layer 0 and 1)
    # Keep layers 2 and 3 trainable for fine-tuning
    if is_main_process:
        logger.info(f"Freezing student backbone layers: {_FROZEN_STUDENT_LAYERS}")
        logger.info(f"Trainable student backbone layers: {_TRAINABLE_STUDENT_LAYERS}")
    
    # Determine which blocks to freeze based on model type
    # GQA model uses frame_blocks + global_blocks, regular model uses blocks
    if hasattr(student, 'frame_blocks') and hasattr(student, 'global_blocks'):
        # MiniVGGTGQA: freeze both frame and global blocks
        for layer_idx in _FROZEN_STUDENT_LAYERS:
            if layer_idx < len(student.frame_blocks):
                for param in student.frame_blocks[layer_idx].parameters():
                    param.requires_grad = False
                for param in student.global_blocks[layer_idx].parameters():
                    param.requires_grad = False
                if is_main_process:
                    logger.info(f"  Frozen GQA frame_blocks[{layer_idx}] and global_blocks[{layer_idx}]")
        
        # Ensure trainable layers are not frozen
        for layer_idx in _TRAINABLE_STUDENT_LAYERS:
            if layer_idx < len(student.frame_blocks):
                for param in student.frame_blocks[layer_idx].parameters():
                    param.requires_grad = True
                for param in student.global_blocks[layer_idx].parameters():
                    param.requires_grad = True
                if is_main_process:
                    logger.info(f"  Trainable GQA frame_blocks[{layer_idx}] and global_blocks[{layer_idx}]")
        
        # Verify freeze status
        if is_main_process:
            for layer_idx in range(len(student.frame_blocks)):
                frame_params = list(student.frame_blocks[layer_idx].parameters())
                global_params = list(student.global_blocks[layer_idx].parameters())
                
                num_frozen_frame = sum(1 for p in frame_params if not p.requires_grad)
                num_frozen_global = sum(1 for p in global_params if not p.requires_grad)
                num_total_frame = len(frame_params)
                num_total_global = len(global_params)
                
                is_frozen = (num_frozen_frame == num_total_frame and num_frozen_global == num_total_global)
                status = "FROZEN" if is_frozen else f"TRAINABLE"
                logger.info(f"  Layer {layer_idx}: {status}")
                
    elif hasattr(student, 'blocks'):
        # Regular MiniVGGT
        for layer_idx in _FROZEN_STUDENT_LAYERS:
            if layer_idx < len(student.blocks):
                for param in student.blocks[layer_idx].parameters():
                    param.requires_grad = False
                if is_main_process:
                    logger.info(f"  Frozen backbone block {layer_idx}")
        
        for layer_idx in _TRAINABLE_STUDENT_LAYERS:
            if layer_idx < len(student.blocks):
                for param in student.blocks[layer_idx].parameters():
                    param.requires_grad = True
                if is_main_process:
                    logger.info(f"  Trainable backbone block {layer_idx}")
        
        if is_main_process:
            for layer_idx in range(len(student.blocks)):
                layer_params = list(student.blocks[layer_idx].parameters())
                num_frozen = sum(1 for p in layer_params if not p.requires_grad)
                num_total = len(layer_params)
                status = "FROZEN" if num_frozen == num_total else f"TRAINABLE"
                logger.info(f"  Layer {layer_idx}: {status}")
    else:
        if is_main_process:
            logger.warning("⚠️  Model has neither 'blocks' nor 'frame_blocks'+'global_blocks' attributes!")
    
    # Count trainable backbone parameters
    if is_main_process:
        num_trainable_backbone = sum(p.numel() for p in student.parameters() if p.requires_grad)
        num_total_backbone = sum(p.numel() for p in student.parameters())
        logger.info(f"Backbone trainable params: {num_trainable_backbone:,} / {num_total_backbone:,} ({100*num_trainable_backbone/num_total_backbone:.1f}%)")
    
    # Use actual embed_dim from config instead of hardcoded value
    student_dim = student.config.embed_dim * 2  # For GQA: 384*2=768, for regular: depends on config
    teacher_dim = dataset.teacher_dim  # 2048
    
    if is_main_process:
        logger.info(f"Student dimension: {student_dim} (embed_dim={student.config.embed_dim})")
        logger.info(f"Teacher dimension: {teacher_dim}")

    # --- Loss modules and auxiliary heads -------------------------------
    # Camera-token alignment loss: matches the student's camera token
    # (projected from student_dim) against the teacher's last-layer token.
    camera_criterion = CameraTokenCriterion(
        student_dim=student_dim,
        teacher_dim=teacher_dim,
        camera_token_weight=args.camera_token_weight,
    )
    
    # Warm-start camera_proj from the pretraining checkpoint's last-layer
    # projection ("layer_23") if the old multi-layer criterion state exists.
    if "criterion" in checkpoint:
        criterion_state = checkpoint["criterion"]
        # Try to load from old criterion's layer_projs (last layer)
        if "layer_projs.layer_23.weight" in criterion_state:
            camera_criterion.camera_proj.weight.data = criterion_state["layer_projs.layer_23.weight"].clone()
            camera_criterion.camera_proj.bias.data = criterion_state["layer_projs.layer_23.bias"].clone()
            if is_main_process:
                logger.info("Initialized camera_proj from pretrained layer_23 projection")
    
    camera_criterion = camera_criterion.to(device)
    
    # Projector maps student tokens into the teacher's token space so the
    # frozen teacher DPT depth head can consume them.
    projector = FeatureProjector(student_dim=student_dim, teacher_dim=teacher_dim)
    
    # Warm-start the projector's four per-layer projections from the same
    # pretraining checkpoint (layers 4/11/17/23 of the teacher), where present.
    if "criterion" in checkpoint:
        criterion_state = checkpoint["criterion"]
        layer_names = ["layer_4", "layer_11", "layer_17", "layer_23"]
        loaded_count = 0
        for i, layer_name in enumerate(layer_names):
            weight_key = f"layer_projs.{layer_name}.weight"
            bias_key = f"layer_projs.{layer_name}.bias"
            if weight_key in criterion_state and bias_key in criterion_state:
                projector.projections[i].weight.data = criterion_state[weight_key].clone()
                projector.projections[i].bias.data = criterion_state[bias_key].clone()
                loaded_count += 1
        if is_main_process and loaded_count > 0:
            logger.info(f"Loaded {loaded_count}/4 pretrained projection layers")
    
    projector = projector.to(device)
    
    # Depth reconstruction loss (weight configurable via --depth-weight).
    depth_criterion = DepthLoss(loss_weight=args.depth_weight).to(device)
    
    # Frozen teacher DPT depth head; used only in eval mode below.
    depth_head = load_teacher_depth_head(args.teacher_checkpoint, device)
    
    # --- Distributed wrapping, optimizer, scheduler, logging ------------
    # Wrap trainable modules with DDP when launched under torchrun.
    if local_rank != -1:
        student = DDP(student, device_ids=[local_rank], output_device=local_rank, find_unused_parameters=False)
        camera_criterion = DDP(camera_criterion, device_ids=[local_rank], output_device=local_rank)
        projector = DDP(projector, device_ids=[local_rank], output_device=local_rank)
        # Don't wrap depth_head and depth_criterion with DDP (frozen)

    # Collect all trainable parameters. Iterating the DDP wrappers yields
    # the same underlying tensors as the wrapped modules, so this list is
    # valid for both the optimizer and manual grad-norm computation.
    trainable_params = []
    trainable_params.extend([p for p in student.parameters() if p.requires_grad])
    trainable_params.extend([p for p in camera_criterion.parameters() if p.requires_grad])
    trainable_params.extend([p for p in projector.parameters() if p.requires_grad])
    
    if is_main_process:
        num_student = sum(p.numel() for p in student.parameters() if p.requires_grad)
        num_camera = sum(p.numel() for p in camera_criterion.parameters() if p.requires_grad)
        num_projector = sum(p.numel() for p in projector.parameters() if p.requires_grad)
        num_trainable = num_student + num_camera + num_projector
        logger.info(f"Trainable params - Student: {num_student:,}, Camera: {num_camera:,}, Projector: {num_projector:,}, Total: {num_trainable:,}")
    
    optimizer = torch.optim.AdamW(trainable_params, lr=args.lr, weight_decay=args.weight_decay)
    
    if is_main_process:
        logger.info(f"Optimizer: AdamW(lr={args.lr}, weight_decay={args.weight_decay})")
        if args.grad_clip is not None and args.grad_clip > 0:
            logger.info(f"Gradient clipping enabled: max_norm={args.grad_clip}")
        else:
            logger.info("Gradient clipping disabled")
    
    # LR scheduler selection. The cosine schedule spans only the
    # post-warmup epochs (T_max = epochs - warmup_epochs); warmup itself
    # is applied manually at the top of each epoch in the training loop.
    scheduler = None
    if args.lr_scheduler == "cosine":
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
            optimizer, 
            T_max=args.epochs - args.warmup_epochs,
            eta_min=args.min_lr
        )
        if is_main_process:
            logger.info(f"Using Cosine Annealing LR scheduler")
    elif args.lr_scheduler == "step":
        scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=15, gamma=0.5)
        if is_main_process:
            logger.info("Using Step LR scheduler")
    elif args.lr_scheduler == "exponential":
        scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.95)
        if is_main_process:
            logger.info("Using Exponential LR scheduler")
    elif args.lr_scheduler == "constant":
        # Constant learning rate - no scheduler needed
        if is_main_process:
            logger.info(f"Using Constant LR: {args.lr}")
    
    # Training-state bookkeeping. NOTE(review): start_epoch/global_step are
    # always reset to 0 here — resuming mid-run is not restored from the
    # checkpoint even though it stores "epoch"/"global_step"; confirm intended.
    start_epoch = 0
    global_step = 0
    epoch_losses: List[float] = []
    
    # TensorBoard writer on rank 0 only (None if --log-dir unset or
    # tensorboard is unavailable).
    writer = None
    if is_main_process:
        if args.log_dir and SummaryWriter is None:
            logger.warning("torch.utils.tensorboard is unavailable")
        writer = SummaryWriter(log_dir=args.log_dir) if args.log_dir and SummaryWriter is not None else None

    # --- Main training loop ---------------------------------------------
    # Per epoch: linear LR warmup, gradient accumulation over
    # args.accumulation_steps micro-batches, optional grad clipping,
    # NaN/Inf skipping, TensorBoard logging, and a full checkpoint save.
    for epoch in range(start_epoch, args.epochs):
        student.train()
        camera_criterion.train()
        projector.train()
        depth_head.eval()  # Keep depth head frozen
        
        optimizer.zero_grad()
        
        # Linear warmup: override the optimizer LR for the first
        # args.warmup_epochs epochs; the scheduler takes over afterwards.
        if args.warmup_epochs > 0 and epoch < args.warmup_epochs:
            warmup_lr = args.lr * (epoch + 1) / args.warmup_epochs
            for param_group in optimizer.param_groups:
                param_group['lr'] = warmup_lr
        
        # Reshuffle the distributed sampler so each epoch sees a new order.
        if local_rank != -1:
            sampler.set_epoch(epoch)
        
        running_loss = 0.0
        epoch_loss_total = 0.0
        accumulation_counter = 0
        skipped_batches = 0
        
        # Per-accumulation-window sums of each loss component (for logging).
        accumulated_losses = {
            "camera_token": 0.0,
            "depth": 0.0,
            "total": 0.0
        }
        
        if is_main_process:
            progress = tqdm(dataloader, desc=f"Epoch {epoch+1}/{args.epochs} [Camera+Depth]")
        else:
            progress = dataloader
            
        for batch_idx, batch in enumerate(progress):
            # Check for shutdown signal (set by the SIGINT/SIGTERM handler).
            if _SHUTDOWN_FLAG:
                if is_main_process:
                    logger.info("Shutdown flag detected, saving checkpoint and exiting...")
                break
            
            images = batch["images"].to(device, non_blocking=True)
            teacher_final = batch["teacher_final"].to(device, non_blocking=True)
            gt_depth = batch["depth"].to(device, non_blocking=True)

            # Normalize tensor ranks: add a leading batch dim to images/depth,
            # drop a singleton batch dim from the teacher features.
            # NOTE(review): assumes images arrive as (S, C, H, W) per scene and
            # gt_depth as (S, H, W) — confirm against the dataset's collate.
            if images.dim() == 4:
                images = images.unsqueeze(0)
            
            if teacher_final.dim() == 4 and teacher_final.shape[0] == 1:
                teacher_final = teacher_final.squeeze(0)
            
            if gt_depth.dim() == 3:
                gt_depth = gt_depth.unsqueeze(0)
            
            # Forward through student.
            # NOTE(review): calling forward_features via `.module` bypasses
            # DDP's __call__ hooks, so gradients are NOT all-reduced across
            # ranks for this backward pass — each rank trains independently.
            # Confirm this is intended; otherwise call the DDP wrapper itself.
            model = student.module if isinstance(student, DDP) else student
            student_tokens, _ = model.forward_features(images)
            
            # Camera token loss (same `.module` caveat as above).
            cam_crit = camera_criterion.module if isinstance(camera_criterion, DDP) else camera_criterion
            camera_loss, camera_details = cam_crit.compute_losses(student_tokens, teacher_final)
            
            # Depth loss
            # Project student tokens to teacher format for DPT head
            proj_model = projector.module if isinstance(projector, DDP) else projector
            teacher_format_tokens = proj_model(student_tokens)
            
            # Predict depth using teacher's DPT head
            # DPT head expects: (tokens, images, patch_start_idx)
            # Note: projector ensures gradients flow, but depth_head is frozen
            patch_start_idx = model.patch_start_idx  # Get patch_start_idx from student model
            depth_output = depth_head(teacher_format_tokens, images, patch_start_idx)
            
            # DPT head returns a tuple (depth, aux_outputs), extract depth
            if isinstance(depth_output, tuple):
                pred_depth = depth_output[0]
            else:
                pred_depth = depth_output
            
            # Compute depth reconstruction loss
            depth_loss = depth_criterion(pred_depth, gt_depth)
            
            # Total loss (component weights are applied inside the criteria).
            loss = camera_loss + depth_loss
            
            # Skip the batch entirely on NaN/Inf, discarding any gradients
            # already accumulated in this window to avoid contaminating them.
            if torch.isnan(loss) or torch.isinf(loss):
                if is_main_process:
                    logger.warning(f"⚠️  Invalid loss detected at step {global_step} (epoch {epoch+1}, batch {batch_idx}): {loss.item()}")
                    logger.warning(f"   Camera loss: {camera_loss.item():.4f}, Depth loss: {depth_loss.item():.4f}")
                optimizer.zero_grad()
                skipped_batches += 1
                continue
            
            # Warn (but don't skip) on suspiciously large losses — an early
            # indicator of gradient explosion.
            if loss.item() > 10.0:
                if is_main_process:
                    logger.warning(f"⚠️  Large loss detected: {loss.item():.4f} at step {global_step} (epoch {epoch+1}, batch {batch_idx})")
                    logger.warning(f"   Camera loss: {camera_loss.item():.4f}, Depth loss: {depth_loss.item():.4f}")
            
            # Scale down so accumulated gradients average over the window.
            loss = loss / args.accumulation_steps
            loss.backward()
            
            accumulation_counter += 1
            
            # Accumulate unscaled component losses for window-averaged logging
            # (multiply "total" back up to undo the accumulation scaling).
            accumulated_losses["camera_token"] += float(camera_loss.detach().item())
            accumulated_losses["depth"] += float(depth_loss.detach().item())
            accumulated_losses["total"] += float(loss.detach().item()) * args.accumulation_steps
            
            # Step the optimizer at the end of each accumulation window, or on
            # the epoch's final batch (which may close a partial window).
            if accumulation_counter % args.accumulation_steps == 0 or batch_idx == len(dataloader) - 1:
                # Apply gradient clipping if enabled
                if args.grad_clip is not None and args.grad_clip > 0:
                    # Compute gradient norm before clipping (for monitoring).
                    # (clip_grad_norm_ also computes this internally; the manual
                    # pass here is purely so the pre-clip value can be logged.)
                    total_norm = 0.0
                    for p in trainable_params:
                        if p.grad is not None:
                            param_norm = p.grad.data.norm(2)
                            total_norm += param_norm.item() ** 2
                    total_norm = total_norm ** 0.5
                    
                    # Clip gradients
                    torch.nn.utils.clip_grad_norm_(trainable_params, args.grad_clip)
                    
                    # Log gradient norm if it's large
                    if total_norm > args.grad_clip * 2 and is_main_process:
                        logger.warning(f"⚠️  Large gradient norm detected: {total_norm:.4f} (clipped to {args.grad_clip}) at step {global_step}")
                    
                    # Log gradient norm to tensorboard
                    if writer is not None and is_main_process:
                        writer.add_scalar("train/grad_norm", total_norm, global_step)
                
                optimizer.step()
                optimizer.zero_grad()
                
                # Log to TensorBoard after the optimizer step.
                if writer is not None and is_main_process:
                    # NOTE(review): this logs only the LAST micro-batch's
                    # unscaled loss, not the window average — for the true
                    # average use accumulated_losses["total"]/num_accumulated.
                    writer.add_scalar("train/loss", loss.item() * args.accumulation_steps, global_step)
                    writer.add_scalar("train/lr", float(optimizer.param_groups[0].get("lr", 0.0)), global_step)
                    
                    # Log averaged component losses over accumulation steps
                    num_accumulated = min(accumulation_counter, args.accumulation_steps)
                    avg_camera_loss = accumulated_losses["camera_token"] / num_accumulated
                    avg_depth_loss = accumulated_losses["depth"] / num_accumulated
                    writer.add_scalar("train/loss_camera_token", avg_camera_loss, global_step)
                    writer.add_scalar("train/loss_depth", avg_depth_loss, global_step)
                
                # Reset accumulation window.
                accumulated_losses = {
                    "camera_token": 0.0,
                    "depth": 0.0,
                    "total": 0.0
                }
                accumulation_counter = 0
                global_step += 1

            running_loss += loss.item() * args.accumulation_steps
            epoch_loss_total += loss.item() * args.accumulation_steps

            # NOTE(review): running_loss accumulates one term per BATCH but is
            # divided by log_every which counts optimizer STEPS — with
            # accumulation_steps > 1 this overstates the average; confirm.
            if is_main_process and accumulation_counter == 0 and global_step % args.log_every == 0:
                avg_loss = running_loss / args.log_every
                progress.set_postfix({"loss": f"{avg_loss:.4f}"})
                running_loss = 0.0

        # Mean per-batch loss for the epoch (max(...) guards empty loaders).
        epoch_loss = epoch_loss_total / max(len(dataloader), 1)
        epoch_losses.append(float(epoch_loss))
        if writer is not None and is_main_process:
            writer.add_scalar("train/epoch_loss", epoch_loss, epoch + 1)
        
        if is_main_process and skipped_batches > 0:
            logger.warning(f"⚠️  Skipped {skipped_batches} batches due to invalid loss in epoch {epoch+1}")
        
        # Scheduler only steps after warmup finishes.
        # NOTE(review): the epoch summary log below is emitted only when the
        # scheduler steps, so warmup epochs produce no summary line; confirm.
        if scheduler is not None and epoch >= args.warmup_epochs:
            scheduler.step()
            if is_main_process:
                current_lr = optimizer.param_groups[0]['lr']
                logger.info(f"Epoch {epoch+1}/{args.epochs} - Loss: {epoch_loss:.6f} - LR: {current_lr:.6f}")

        # Save checkpoint (rank 0), overwriting args.save_path each epoch.
        # NOTE(review): this local `checkpoint` shadows the pretrained
        # checkpoint dict loaded earlier in train(); harmless after this
        # point, but rename if the loaded dict is ever needed again below.
        if is_main_process:
            model = student.module if isinstance(student, DDP) else student
            cam_crit = camera_criterion.module if isinstance(camera_criterion, DDP) else camera_criterion
            proj_model = projector.module if isinstance(projector, DDP) else projector
            
            checkpoint = {
                "model": model.state_dict(),
                "camera_criterion": cam_crit.state_dict(),
                "projector": proj_model.state_dict(),
                "optimizer": optimizer.state_dict(),
                "epoch": epoch,
                "global_step": global_step,
                "epoch_losses": epoch_losses,
                "args": vars(args),
            }
            if scheduler is not None:
                checkpoint["scheduler"] = scheduler.state_dict()
            
            torch.save(checkpoint, args.save_path)
            logger.info(f"Saved fine-tuned checkpoint to {args.save_path}")
        
        # Check for shutdown signal after each epoch
        if _SHUTDOWN_FLAG:
            if is_main_process:
                logger.info("Shutdown signal received, exiting training loop...")
            break

    # Optional matplotlib loss-curve export; best-effort (matplotlib may be
    # missing), so failures are logged and ignored.
    if is_main_process and args.log_dir and args.save_epoch_plot and len(epoch_losses) > 0:
        try:
            import matplotlib.pyplot as plt

            os.makedirs(args.log_dir, exist_ok=True)
            plt.figure(figsize=(5, 3))
            plt.plot(range(1, len(epoch_losses) + 1), epoch_losses, marker="o")
            plt.xlabel("Epoch")
            plt.ylabel("Loss")
            plt.title("MiniVGGT Fine-tuning - Epoch Loss (Layer 17 & 23 only)")
            plt.grid(True, alpha=0.3)
            out_path = os.path.join(args.log_dir, "loss_epoch_finetune.png")
            plt.tight_layout()
            plt.savefig(out_path, dpi=150)
            plt.close()
            logger.info("Saved epoch loss plot to %s", out_path)
        except Exception as e:
            logger.warning("Could not save loss plot: %s", e)

    # Final cleanup: flush TensorBoard and tear down the process group.
    if writer is not None and is_main_process:
        writer.close()
    
    if local_rank != -1:
        dist.destroy_process_group()


# Script entry point.
# NOTE(review): the module-level signal_handler is never registered via
# signal.signal() in the visible code — presumably train() registers it;
# confirm, otherwise _SHUTDOWN_FLAG can never be set.
if __name__ == "__main__":
    train()
