"""Combined fine-tuning: alignment loss (to teacher features) + depth loss (via teacher DPT head).

This script merges the logic of:
- train_mini_finetune.py (feature alignment on layers 17 & 23, freeze 4 & 11)
- train_mini_depth_finetune.py (use teacher VGGT depth head with projections)

Workflow per batch:
1) Load teacher features (metadata.json + scene h5) and corresponding image paths
2) Load and preprocess images, and find matching depth maps for the same images
3) Forward student to get 4 layer tokens (student_dim = 1536)
4) Compute alignment loss: project student layer tokens to teacher_dim (from HDF5) for trainable layers [17, 23]
5) Compute depth loss: project 4 student layers to teacher format (2048) at indices [4, 11, 17, 23],
   feed frozen teacher DPT head to get depth, compare with ground truth depth
6) Total loss = alignment_weight * alignment_loss + depth_weight * depth_loss

Usage example:
CUDA_VISIBLE_DEVICES=0,5,6,7 torchrun \
  --nproc_per_node=4 \
  --master_port=29505 \
  /data0/liqifeng/ZYC/vggt/mini_vggt/Distill/train_mini_combined_finetune.py \
  --feature-root /data1/datasets/VGGT_features \
  --image-root /data1/datasets/distill_libero_10_goal \
  --epochs 50 \
  --lr 1e-5 \
  --batch-size 1 \
  --num-workers 8 \
  --accumulation-steps 50 \
  --pretrained-path /data0/liqifeng/ZYC/mini_vggt_distilled.pt \
  --teacher-checkpoint /data0/liqifeng/ZYC/model.pt \
  --save-path /data0/liqifeng/ZYC/vggt/mini_vggt/Distill/mini_vggt_combined_finetuned.pt \
  --log-dir /data0/liqifeng/ZYC/vggt/mini_vggt/Distill/runs/mini_vggt_combined_finetune \
  --lr-scheduler cosine \
  --warmup-epochs 5 \
  --min-lr 1e-6 \
  --grad-clip 1.0 \
  --weight-decay 0.01 \
  --depth-weight 1.0 \
  --alignment-weight 1.0 \
  --num-samples-per-scene 5

  tensorboard --logdir /data0/liqifeng/ZYC/vggt/mini_vggt/Distill/runs/mini_vggt_combined_finetune --port 6011

"""
from __future__ import annotations

import argparse
import glob
import json
import logging
import os
import random
import signal
import sys
from dataclasses import dataclass
import re
from pathlib import Path
from typing import Dict, Iterable, List, Sequence, Tuple

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data import DataLoader, Dataset
from torch.utils.data.distributed import DistributedSampler
from tqdm.auto import tqdm

os.environ.setdefault("HDF5_USE_FILE_LOCKING", "FALSE")

try:
    import h5py  # type: ignore
except Exception:
    h5py = None

try:
    from torch.utils.tensorboard import SummaryWriter
except Exception:
    SummaryWriter = None

REPO_ROOT = Path(__file__).resolve().parents[2]
if str(REPO_ROOT) not in sys.path:
    sys.path.insert(0, str(REPO_ROOT))

from mini_vggt.Distill.vggt_mini import MiniVGGT, MiniVGGTConfig
from mini_vggt.Distill.vggt_mini_gqa import MiniVGGTGQA, MiniVGGTGQAConfig
from vggt.models.vggt import VGGT
from vggt.utils.load_fn import load_and_preprocess_images

logger = logging.getLogger(__name__)

# Student backbone freezing policy: freeze first two blocks, train last two
_FROZEN_STUDENT_LAYERS = [0, 1]
_TRAINABLE_STUDENT_LAYERS = [2, 3]

# Alignment pairs: teacher named layers -> student block indices.
# The 4-block student mirrors teacher layers 4/11/17/23 (same mapping as
# FeatureProjector.layer_mapping below).
_LAYER_PAIRS: Sequence[Tuple[str, int]] = (
    ("layer_4", 0),
    ("layer_11", 1),
    ("layer_17", 2),
    ("layer_23", 3),
)
# For alignment loss: only train 17 & 23; freeze 4 & 11 projection layers
_FROZEN_ALIGN_LAYERS = ["layer_4", "layer_11"]
_TRAINABLE_ALIGN_LAYERS = ["layer_17", "layer_23"]


# -----------------------------
# Teacher features I/O helpers
# -----------------------------

def _decode_h5_strings(dataset) -> List[str]:
    values = dataset[()]
    if isinstance(values, (bytes, str, torch.Tensor)):
        values = [values]
    elif hasattr(values, "tolist"):
        values = values.tolist()
    decoded: List[str] = []
    for item in values:
        if isinstance(item, (bytes, bytearray)):
            decoded.append(item.decode("utf-8"))
        else:
            decoded.append(str(item))
    return decoded


@dataclass
class TeacherFeatures:
    """Cached teacher outputs for one sample, loaded from disk."""

    metadata: Dict  # parsed contents of the sample's metadata.json
    final: torch.Tensor  # final (dequantized) teacher feature map
    layers: Dict[str, torch.Tensor]  # per-layer features keyed by name, e.g. "layer_17"
    image_paths: List[str]  # RGB paths the teacher features were computed from
    preprocess_mode: str  # image preprocessing mode used by the teacher (default "crop")

def load_teacher_features(metadata_path: str) -> TeacherFeatures:
    """Load one cached teacher sample described by a ``metadata.json``.

    The metadata names a scene-level HDF5 file plus the sample/iteration groups
    inside it. Features may be stored int8-quantized (``scene_hdf5_int8``) or
    as plain arrays; both branches dequantize to float32 using per-dataset
    scales.

    Args:
        metadata_path: Path to the sample's ``metadata.json``.

    Returns:
        TeacherFeatures bundling the final feature map, per-layer features,
        source image paths, and the preprocess mode used by the teacher.

    Raises:
        RuntimeError: If h5py is unavailable.
        KeyError: If required metadata keys or HDF5 groups are missing.
        FileNotFoundError: If the referenced scene HDF5 file does not exist.
    """
    if h5py is None:
        raise RuntimeError("h5py is required to read HDF5 feature caches. Please install h5py to continue.")
    with open(metadata_path, "r", encoding="utf-8") as f:
        metadata = json.load(f)

    # Resolve the scene file relative to the metadata location when not absolute.
    scene_ref = metadata.get("scene_file")
    if not scene_ref:
        raise KeyError(f"Metadata missing 'scene_file': {metadata_path}")
    scene_file = scene_ref if os.path.isabs(scene_ref) else os.path.normpath(os.path.join(os.path.dirname(metadata_path), scene_ref))
    if not os.path.exists(scene_file):
        raise FileNotFoundError(f"Scene HDF5 file not found: {scene_file}")

    sample_group = metadata.get("sample_group")
    iteration_group = metadata.get("iteration_group")
    if not sample_group or not iteration_group:
        raise KeyError(f"Metadata missing group identifiers: {metadata_path}")

    layers: Dict[str, torch.Tensor] = {}
    storage_format = metadata.get("storage_format")

    # locking=False avoids HDF5 file-lock contention across DataLoader workers
    # (paired with HDF5_USE_FILE_LOCKING=FALSE set at import time above).
    with h5py.File(scene_file, "r", locking=False) as scene_h5:
        if sample_group not in scene_h5:
            raise KeyError(f"Sample group '{sample_group}' not present in {scene_file}")
        sample_grp = scene_h5[sample_group]
        if iteration_group not in sample_grp:
            raise KeyError(f"Iteration group '{iteration_group}' not present in scene HDF5")
        iter_grp = sample_grp[iteration_group]

        if storage_format == "scene_hdf5_int8":
            # Int8-quantized layout: dequantize with the stored scale
            # (dataset attribute takes precedence over the metadata value).
            quant_features = iter_grp["features"][...]
            feature_scale = float(iter_grp.attrs.get("feature_scale", metadata.get("feature_scale", 1.0)))
            final_arr = quant_features.astype("float32") * feature_scale
            layer_grp = iter_grp.get("layer_features")
            if layer_grp is not None:
                layer_scales_meta = metadata.get("layer_scales", {})
                for layer_name in metadata.get("saved_layers", []):
                    if layer_name not in layer_grp:
                        continue
                    layer_ds = layer_grp[layer_name]
                    # Per-layer scale: dataset attr -> metadata -> global feature scale.
                    layer_scale = float(layer_ds.attrs.get("scale", layer_scales_meta.get(layer_name, feature_scale)))
                    layer_arr = layer_ds[...].astype("float32") * layer_scale
                    # Drop a leading singleton batch dim if present.
                    layers[layer_name] = torch.from_numpy(layer_arr.squeeze(axis=0) if layer_arr.shape[0] == 1 else layer_arr)
        else:
            # Plain (non-quantized) layout; a scale may still be recorded.
            teacher_scale = float(iter_grp.attrs.get("feature_scale", metadata.get("feature_scale", 1.0)))
            final_arr = iter_grp["features"][...].astype("float32") * teacher_scale
            layer_grp = iter_grp.get("layer_features")
            layer_scales_meta = metadata.get("layer_scales", {})
            for layer_name in metadata.get("saved_layers", []):
                if layer_grp is None or layer_name not in layer_grp:
                    continue
                layer_ds = layer_grp[layer_name]
                layer_scale = float(layer_ds.attrs.get("scale", layer_scales_meta.get(layer_name, teacher_scale)))
                layer_arr = layer_ds[...].astype("float32") * layer_scale
                layers[layer_name] = torch.from_numpy(layer_arr.squeeze(axis=0) if layer_arr.shape[0] == 1 else layer_arr)

        final_arr = final_arr.squeeze(axis=0) if final_arr.shape[0] == 1 else final_arr
        # Recover image paths stored inside the HDF5 when metadata lacks them.
        if "image_paths" in iter_grp and not metadata.get("image_paths"):
            metadata["image_paths"] = _decode_h5_strings(iter_grp["image_paths"])

    image_paths = metadata.get("image_paths", [])
    if not image_paths:
        # Last resort: a sibling image_paths.json next to the metadata file.
        image_paths_file = os.path.join(os.path.dirname(metadata_path), "image_paths.json")
        if os.path.exists(image_paths_file):
            with open(image_paths_file, "r", encoding="utf-8") as f:
                image_paths = json.load(f)

    preprocess_mode = metadata.get("preprocess_mode", "crop")
    final_tensor = torch.from_numpy(final_arr)

    return TeacherFeatures(
        metadata=metadata,
        final=final_tensor,
        layers=layers,
        image_paths=image_paths,
        preprocess_mode=preprocess_mode,
    )


# -----------------------------
# Dataset that yields images + teacher features + matching depth maps
# -----------------------------
class CombinedFeatureDepthDataset(Dataset):
    """Dataset pairing preprocessed RGB stacks with cached teacher features and GT depth.

    One sample per ``metadata.json`` discovered under ``feature_root``. Each item
    returns ALL images of the scene (no subsampling), the teacher's final and
    per-layer features, and a ground-truth depth stack resized to the
    preprocessed image resolution.
    """

    def __init__(
        self,
        feature_root: str,
        image_root: str | None = None,
        max_samples: int | None = None,
        min_images: int = 3,
        max_images: int = 8,
        dataset_bucket: str | None = None,
        is_main_process: bool = True,
    ) -> None:
        self.feature_root = feature_root
        self.image_root = image_root
        # NOTE(review): min_images/max_images are stored but never used to filter
        # samples anywhere in this class — kept for CLI/backward compatibility.
        self.min_images = min_images
        self.max_images = max_images
        self.dataset_bucket = dataset_bucket

        # Index every sample by recursively finding its metadata.json.
        metadata_paths: List[str] = []
        for root, _dirs, files in os.walk(feature_root):
            if "metadata.json" in files:
                metadata_paths.append(os.path.join(root, "metadata.json"))
        metadata_paths.sort()
        if not metadata_paths:
            raise FileNotFoundError(f"No metadata.json found under {feature_root}")
        if max_samples is not None:
            metadata_paths = metadata_paths[:max_samples]
        self.samples = metadata_paths

        # Probe first sample to set dims and saved layers
        first_teacher = load_teacher_features(self.samples[0])
        self.teacher_dim = int(first_teacher.final.shape[-1])
        self.saved_layers = list(first_teacher.layers.keys())
        self.preprocess_mode = first_teacher.preprocess_mode
        if is_main_process:
            logger.info(f"Combined dataset initialized with {len(self.samples)} samples")
            logger.info(f"Teacher feature dim: {self.teacher_dim}; saved layers: {self.saved_layers}")

    def __len__(self) -> int:
        return len(self.samples)

    @staticmethod
    def _depth_path_from_image(img_path: str) -> str:
        """Map an RGB image path to its sibling depth map path.

        ``cam_XXX_rgb.<ext>`` becomes ``cam_XXX_depth.npy``; otherwise the
        image extension is simply replaced with ``.npy``.
        """
        # Try direct replacement: cam_XXX_rgb.{ext} -> cam_XXX_depth.npy
        base, ext = os.path.splitext(img_path)
        if base.endswith("_rgb"):
            return base[:-4] + "_depth.npy"
        # Fallback: replace '_rgb.' pattern anywhere in name
        for e in (".png", ".jpg", ".jpeg", ".PNG", ".JPG", ".JPEG"):
            needle = f"_rgb{e}"
            if img_path.endswith(needle):
                return img_path[: -len(needle)] + "_depth.npy"
        # Default: just append .npy
        return base + ".npy"

    def _replace_dataset_bucket_in_path(self, path: str) -> str:
        """Replace '/datasets/<bucket>/' to '/datasets/{self.dataset_bucket}/' for the first occurrence.
        If dataset_bucket is None or pattern not found, return original.
        """
        if not self.dataset_bucket:
            return path
        # Only replace the immediate child after '/datasets/'
        return re.sub(r"(/datasets/)[^/]+(/)", rf"\1{self.dataset_bucket}\2", path, count=1)

    def _remap_to_image_root(self, path: str) -> str:
        """Rebase ``path`` under ``self.image_root`` (no-op when image_root is None)."""
        if self.image_root is None:
            return path
        # Heuristic: preserve last 3-4 directories under image_root
        parts = Path(path).parts
        # Try to detect a split directory marker
        markers = {"train", "val", "test"}
        cut = 0
        for i, p in enumerate(parts):
            if p in markers:
                cut = i
                break
        subpath = Path(*parts[cut:]) if cut > 0 else Path(*parts[-4:])
        return str(Path(self.image_root) / subpath)

    def _load_depth_stack(self, selected_rgb_paths: List[str], target_hw: Tuple[int, int]) -> torch.Tensor:
        """Load depth .npy files matching the RGB paths and resize to ``target_hw``.

        Raises FileNotFoundError if a depth map cannot be located (directly or
        after remapping under image_root). Returns a (S, H, W) float tensor.
        """
        th, tw = target_hw
        depth_maps: List[torch.Tensor] = []
        for rgb in selected_rgb_paths:
            candidate = self._depth_path_from_image(rgb)
            if not os.path.exists(candidate):
                # Try remapping under image_root
                remapped_rgb = self._remap_to_image_root(rgb)
                candidate = self._depth_path_from_image(remapped_rgb)
            if not os.path.exists(candidate):
                raise FileNotFoundError(f"Depth map not found for image: {rgb} -> tried {candidate}")
            depth_np = np.load(candidate)
            # Nearest-neighbor resize: avoids interpolating across depth edges.
            depth_tensor = torch.from_numpy(depth_np).float().unsqueeze(0).unsqueeze(0)
            depth_resized = F.interpolate(depth_tensor, size=(th, tw), mode="nearest")
            depth_maps.append(depth_resized.squeeze(0).squeeze(0))
        return torch.stack(depth_maps, dim=0)  # (S, H, W)

    def _load_sample(self, idx: int) -> Dict:
        """Fully load sample ``idx`` (images + teacher features + depth); raises on any failure."""
        metadata_path = self.samples[idx]
        teacher = load_teacher_features(metadata_path)
        if not teacher.image_paths:
            raise ValueError(f"No image paths in teacher metadata: {metadata_path}")
        # Use ALL images for alignment and depth (strictly match original alignment)
        all_rgb_paths = [self._remap_to_image_root(self._replace_dataset_bucket_in_path(p)) for p in teacher.image_paths]

        # Load and preprocess images using same mode as teacher features
        images = load_and_preprocess_images(all_rgb_paths, mode=teacher.preprocess_mode)
        th, tw = images.shape[-2:]
        depth_stack = self._load_depth_stack(all_rgb_paths, (th, tw))

        # Squeeze teacher features if batched (do NOT slice; keep full alignment)
        final = teacher.final
        if final.dim() == 4 and final.shape[0] == 1:
            final = final.squeeze(0)
        layers = {
            name: (t.squeeze(0) if t.dim() == 4 and t.shape[0] == 1 else t)
            for name, t in teacher.layers.items()
        }
        return {
            "images": images,                 # (S_all, 3, H, W)
            "teacher_final": final,           # full set from cache
            "teacher_layers": layers,         # dict[name] -> (S_all, N, D)
            "depth": depth_stack,             # (S_all, H, W)
            "metadata": teacher.metadata,
            "image_paths": all_rgb_paths,
        }

    def __getitem__(self, idx: int) -> Dict:
        # Robust loader: retry transient failures, then fall back to alternate
        # indices so a bad sample never kills the DataLoader worker.
        # BUGFIX: the previous version re-raised on the final retry attempt,
        # which made the alternate-index fallback below unreachable; exhausted
        # retries now fall through to the fallback as the comments intended.
        max_retries = 5
        last_exc: Exception | None = None
        for attempt in range(max_retries):
            try:
                return self._load_sample(idx)
            except OSError as e:
                transient = "filter returned failure" in str(e) or "Can't synchronously read" in str(e)
                if not transient:
                    raise  # non-transient I/O errors should surface immediately
                last_exc = e
                logger.warning(f"Transient HDF5 error on {idx}, retrying ({attempt+1}/{max_retries})...")
            except Exception as e:
                last_exc = e
                logger.warning(f"Error loading idx {idx}, retrying...", exc_info=True)
        # All retries failed for this index; try a few alternate indices instead
        # of crashing the worker.
        fallback_trials = 5
        tried: set[int] = {idx}
        for _ in range(fallback_trials):
            alt_idx = random.randint(0, len(self.samples) - 1)
            if alt_idx in tried:
                continue
            tried.add(alt_idx)
            try:
                result = self._load_sample(alt_idx)
            except Exception:
                continue
            logger.warning(f"Index {idx} failed after retries; falling back to alternate index {alt_idx}")
            return result
        raise RuntimeError(
            f"Failed to load sample {idx} and {fallback_trials} alternate samples due to HDF5/IO errors"
        ) from last_exc


# -----------------------------
# Losses and heads
# -----------------------------
class FeatureDistillationCriterion(nn.Module):
    """Per-layer MSE alignment between linearly projected student tokens and teacher tokens.

    Layers listed in ``frozen_layers`` keep their projection modules but are
    skipped when computing the loss.
    """

    def __init__(
        self,
        student_dim: int,
        teacher_dim: int,
        layer_pairs: Sequence[Tuple[str, int]],
        layer_weight: float = 1.0,
        frozen_layers: List[str] | None = None,
    ) -> None:
        super().__init__()
        self.layer_pairs = list(layer_pairs)
        self.layer_weight = layer_weight
        self.frozen_layers = frozen_layers or []
        # One learned projection per aligned layer (student_dim -> teacher_dim).
        self.layer_projs = nn.ModuleDict({name: nn.Linear(student_dim, teacher_dim) for name, _ in self.layer_pairs})

    def freeze_layers(self):
        """Disable gradient updates for the projections named in ``frozen_layers``."""
        for name in self.frozen_layers:
            if name not in self.layer_projs:
                continue
            for param in self.layer_projs[name].parameters():
                param.requires_grad = False
            logger.info(f"  Frozen alignment projection layer: {name}")

    @torch.no_grad()
    def _squeeze_if_batched(self, t: torch.Tensor) -> torch.Tensor:
        """Drop a leading singleton batch dim from a 4-D teacher tensor."""
        return t.squeeze(0) if t.dim() == 4 and t.shape[0] == 1 else t

    def compute_losses(
        self,
        student_tokens: Sequence[torch.Tensor],
        teacher_layers: Dict[str, torch.Tensor],
        teacher_final: torch.Tensor,
    ) -> Tuple[torch.Tensor, Dict[str, object]]:
        """Return (weighted-mean MSE over trainable layers, per-layer detail dict)."""
        device = student_tokens[-1].device
        accumulated = torch.tensor(0.0, device=device)
        weight_sum = 0.0
        per_layer: Dict[str, float] = {}
        details: Dict[str, object] = {"layers": per_layer, "total": None}
        for name, s_idx in self.layer_pairs:
            # Skip frozen layers and pairs without matching data on either side.
            if name in self.frozen_layers:
                continue
            if name not in teacher_layers or s_idx >= len(student_tokens):
                continue
            stu = student_tokens[s_idx].squeeze(0)
            tea = self._squeeze_if_batched(teacher_layers[name])
            stu_flat = stu.reshape(-1, stu.shape[-1])
            tea_flat = tea.reshape(-1, tea.shape[-1])
            # Provide clearer diagnostics if any size mismatch occurs
            if stu_flat.shape[0] != tea_flat.shape[0]:
                raise RuntimeError(
                    f"Alignment token count mismatch at {name}: "
                    f"student_tokens={tuple(stu.shape)} -> flat={tuple(stu_flat.shape)}, "
                    f"teacher_tokens={tuple(tea.shape)} -> flat={tuple(tea_flat.shape)}"
                )
            mse = F.mse_loss(self.layer_projs[name](stu_flat), tea_flat)
            per_layer[name] = float(mse.detach().item())
            accumulated = accumulated + self.layer_weight * mse
            weight_sum += self.layer_weight
        total = accumulated / max(weight_sum, 1e-6)
        details["total"] = float(total.detach().item())
        return total, details


class FeatureProjector(nn.Module):
    """Projects 4 student layers (1536) to teacher 2048-d at indices [4, 11, 17, 23]."""

    def __init__(self, student_dim: int = 1536, teacher_dim: int = 2048):
        super().__init__()
        # Teacher layer indices the 4 student blocks correspond to.
        self.layer_mapping = [4, 11, 17, 23]
        self.projections = nn.ModuleList([nn.Linear(student_dim, teacher_dim) for _ in range(4)])

    def forward(self, student_tokens: List[torch.Tensor]) -> List[torch.Tensor]:
        # Place each projected student layer at its teacher index in a 24-slot list.
        slots: List[torch.Tensor | None] = [None] * 24
        for proj, tokens, t_idx in zip(self.projections, student_tokens, self.layer_mapping):
            slots[t_idx] = proj(tokens)
        # Unused teacher slots share a single zero tensor shaped like a real projection.
        filler = torch.zeros_like(slots[4])  # type: ignore[index]
        return [filler if s is None else s for s in slots]


class DepthLoss(nn.Module):
    """Masked L1 depth loss; pixels with non-positive ground-truth depth are ignored."""

    def __init__(self, loss_weight: float = 1.0):
        super().__init__()
        self.loss_weight = loss_weight

    def forward(self, pred_depth: torch.Tensor, gt_depth: torch.Tensor) -> torch.Tensor:
        # Drop a trailing singleton channel: (B, S, H, W, 1) -> (B, S, H, W).
        if pred_depth.dim() == 5 and pred_depth.shape[-1] == 1:
            pred_depth = pred_depth.squeeze(-1)
        if pred_depth.shape[-2:] != gt_depth.shape[-2:]:
            raise ValueError(f"Size mismatch: pred {pred_depth.shape[-2:]} vs gt {gt_depth.shape[-2:]}")
        mask = gt_depth > 0.0
        if not mask.any():
            # No supervised pixels: return a zero that still participates in autograd.
            return torch.tensor(0.0, device=pred_depth.device, requires_grad=True)
        return self.loss_weight * F.l1_loss(pred_depth[mask], gt_depth[mask])


# -----------------------------
# CLI and training
# -----------------------------

def parse_args() -> argparse.Namespace:
    """Build and parse the CLI for combined (alignment + depth) fine-tuning."""
    ap = argparse.ArgumentParser(description="Combined fine-tune: alignment + depth")
    # Data and checkpoint locations
    ap.add_argument("--feature-root", required=True, help="Root of teacher feature caches (with metadata.json)")
    ap.add_argument("--image-root", required=False, default=None, help="Root of RGB/depth dataset for depth maps")
    ap.add_argument("--dataset-bucket", required=False, default="distill_libero_10_goal", help="Replace the third-level bucket under /datasets to this value (e.g., distill_libero_10_goal)")
    ap.add_argument("--pretrained-path", required=True, help="Path to pretrained student checkpoint")
    ap.add_argument("--teacher-checkpoint", required=True, help="Path to teacher VGGT checkpoint (for depth head)")
    # Optimization schedule
    ap.add_argument("--epochs", type=int, default=50)
    ap.add_argument("--lr", type=float, default=1e-5)
    ap.add_argument("--batch-size", type=int, default=1)
    ap.add_argument("--accumulation-steps", type=int, default=50)
    ap.add_argument("--num-workers", type=int, default=8)
    ap.add_argument("--device", default="cuda:0")
    # Dataset limits
    ap.add_argument("--max-samples", type=int, default=None)
    ap.add_argument("--min-images", type=int, default=3)
    ap.add_argument("--max-images", type=int, default=8)
    # Logging and output
    ap.add_argument("--log-every", type=int, default=10)
    ap.add_argument("--save-path", required=True)
    ap.add_argument("--log-dir", default=None)
    ap.add_argument("--save-epoch-plot", action="store_true")
    # LR schedule and regularization
    ap.add_argument("--lr-scheduler", type=str, default=None, choices=["cosine", "step", "exponential", "constant"])
    ap.add_argument("--warmup-epochs", type=int, default=0)
    ap.add_argument("--min-lr", type=float, default=1e-6)
    ap.add_argument("--grad-clip", type=float, default=1.0)
    ap.add_argument("--weight-decay", type=float, default=0.01)
    # Loss weighting
    ap.add_argument("--depth-weight", type=float, default=1.0)
    ap.add_argument("--alignment-weight", type=float, default=1.0)
    ap.add_argument("--seed", type=int, default=2025)
    ap.add_argument("--local_rank", type=int, default=-1)
    # Accept but ignore to be compatible with provided cmd
    ap.add_argument("--num-samples-per-scene", type=int, default=5)
    return ap.parse_args()


def configure_logging() -> None:
    """Configure root logging with a timestamped INFO-level format."""
    fmt = "%(asctime)s | %(levelname)s | %(message)s"
    logging.basicConfig(level=logging.INFO, format=fmt)


# Global flag for graceful shutdown; the training code polls this to stop cleanly.
_SHUTDOWN_FLAG = False

def signal_handler(signum, frame):
    """Handler for SIGINT/SIGTERM: set the shutdown flag and log the request."""
    global _SHUTDOWN_FLAG
    _SHUTDOWN_FLAG = True
    logger.info(f"Received signal {signum}, initiating graceful shutdown...")


def load_teacher_depth_head(teacher_checkpoint: str, device: torch.device):
    """Load the DPT depth head from a full teacher VGGT checkpoint, frozen and in eval mode."""
    logger.info(f"Loading teacher DPT head from {teacher_checkpoint}")
    teacher = VGGT()
    # NOTE: torch.load unpickles arbitrary objects — only load trusted checkpoints.
    ckpt = torch.load(teacher_checkpoint, map_location="cpu")
    # Checkpoints may wrap the weights under a "model" key.
    teacher.load_state_dict(ckpt.get("model", ckpt), strict=False)
    head = teacher.depth_head
    if head is None:
        raise ValueError("Teacher model does not have a depth head!")
    if hasattr(head, "layer_indices"):
        logger.info(f"DPT head using layer indices: {head.layer_indices}")
    # Freeze: depth supervision flows through the head, but it never updates.
    for param in head.parameters():
        param.requires_grad = False
    head.eval()
    return head.to(device)


def train() -> None:
    args = parse_args()

    # Register signal handlers
    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)

    # Seeds
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)

    # DDP setup
    if args.local_rank == -1:
        local_rank = int(os.environ.get("LOCAL_RANK", -1))
    else:
        local_rank = args.local_rank

    if local_rank != -1:
        torch.cuda.set_device(local_rank)
        dist.init_process_group(backend="nccl")
        device = torch.device(f"cuda:{local_rank}")
        world_size = dist.get_world_size()
        is_main = dist.get_rank() == 0
        if is_main:
            configure_logging()
            logger.info(f"Distributed combined fine-tuning initialized: world_size={world_size}, rank={dist.get_rank()}")
    else:
        device = torch.device(args.device if torch.cuda.is_available() else "cpu")
        is_main = True
        configure_logging()

    if args.batch_size != 1:
        raise ValueError("This script currently supports only batch_size=1")

    # Dataset & Loader
    dataset = CombinedFeatureDepthDataset(
        feature_root=args.feature_root,
        image_root=args.image_root,
        dataset_bucket=args.dataset_bucket,
        max_samples=args.max_samples,
        min_images=args.min_images,
        max_images=args.max_images,
        is_main_process=is_main,
    )

    if local_rank != -1:
        sampler = DistributedSampler(dataset, shuffle=True)
        dataloader = DataLoader(dataset, batch_size=args.batch_size, sampler=sampler, num_workers=args.num_workers, pin_memory=True)
    else:
        dataloader = DataLoader(dataset, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers, pin_memory=True)

    # Load pretrained student
    if is_main:
        logger.info(f"Loading pretrained student from {args.pretrained_path}")
    checkpoint = torch.load(args.pretrained_path, map_location="cpu")
    model_state = checkpoint.get("model", checkpoint)

    if any("query_proj" in k for k in model_state.keys()):
        if is_main:
            logger.info("Detected MiniVGGTGQA model")
        student = MiniVGGTGQA(MiniVGGTGQAConfig())
    else:
        if is_main:
            logger.info("Detected MiniVGGT model")
        student = MiniVGGT(MiniVGGTConfig())

    student.load_state_dict(model_state, strict=True)
    student = student.to(device)

    # Freeze/keep trainable backbone
    if is_main:
        logger.info(f"Freezing student backbone blocks: {_FROZEN_STUDENT_LAYERS}; trainable: {_TRAINABLE_STUDENT_LAYERS}")
    for idx in _FROZEN_STUDENT_LAYERS:
        if hasattr(student, "blocks") and idx < len(student.blocks):
            for p in student.blocks[idx].parameters():
                p.requires_grad = False
            if is_main:
                logger.info(f"  Frozen backbone block {idx}")
    for idx in _TRAINABLE_STUDENT_LAYERS:
        if hasattr(student, "blocks") and idx < len(student.blocks):
            for p in student.blocks[idx].parameters():
                p.requires_grad = True
            if is_main:
                logger.info(f"  Trainable backbone block {idx}")

    # Alignment criterion (1536 -> teacher_dim from dataset)
    student_dim = student.config.embed_dim * 2  # 768*2=1536
    teacher_dim = dataset.teacher_dim
    criterion = FeatureDistillationCriterion(
        student_dim=student_dim,
        teacher_dim=teacher_dim,
        layer_pairs=[pair for pair in _LAYER_PAIRS if pair[0] in dataset.saved_layers],
        frozen_layers=_FROZEN_ALIGN_LAYERS,
    ).to(device)

    # Load pretrained alignment projections if available
    if "criterion" in checkpoint:
        crit_state = checkpoint["criterion"]
        filtered_state = {k: v for k, v in crit_state.items() if not k.startswith("final_proj")}
        missing, unexpected = criterion.load_state_dict(filtered_state, strict=False)
        if is_main:
            logger.info(f"Loaded alignment projections (filtered). missing={list(missing)}, unexpected={list(unexpected)}")

    # Projector for depth head (1536 -> 2048)
    projector = FeatureProjector(student_dim=student_dim, teacher_dim=2048).to(device)
    # Optionally init projector from alignment criterion's layer_projs
    if "projector" in checkpoint:
        projector.load_state_dict(checkpoint["projector"], strict=False)
        if is_main:
            logger.info("Loaded projector weights from checkpoint")
    elif "criterion" in checkpoint:
        crit_state = checkpoint["criterion"]
        layer_names = ["layer_4", "layer_11", "layer_17", "layer_23"]
        loaded_count = 0
        for i, lname in enumerate(layer_names):
            w_key = f"layer_projs.{lname}.weight"
            b_key = f"layer_projs.{lname}.bias"
            if w_key in crit_state and b_key in crit_state:
                # Important: copy into existing GPU params instead of reassigning .data (which can move to CPU)
                with torch.no_grad():
                    projector.projections[i].weight.copy_(crit_state[w_key].to(projector.projections[i].weight.device))
                    projector.projections[i].bias.copy_(crit_state[b_key].to(projector.projections[i].bias.device))
                loaded_count += 1
        if is_main:
            logger.info(f"Initialized {loaded_count}/4 projector layers from alignment weights")

    # Freeze projector first two layers to mirror alignment freeze
    for i, proj in enumerate(projector.projections):
        for p in proj.parameters():
            p.requires_grad = (i >= 2)  # train only last two projections
        if is_main:
            logger.info(f"  Projector layer {i}: {'trainable' if i>=2 else 'frozen'}")

    # Freeze specified alignment layers
    if is_main:
        logger.info(f"Freezing alignment projection layers: {_FROZEN_ALIGN_LAYERS}")
    criterion.freeze_layers()

    # Teacher depth head (frozen)
    depth_head = load_teacher_depth_head(args.teacher_checkpoint, device)

    # Wrap DDP
    if local_rank != -1:
        student = DDP(student, device_ids=[local_rank], output_device=local_rank, find_unused_parameters=False)
        criterion = DDP(criterion, device_ids=[local_rank], output_device=local_rank, find_unused_parameters=False)
        projector = DDP(projector, device_ids=[local_rank], output_device=local_rank, find_unused_parameters=False)

    # Optimizer
    # Collect only requires_grad=True params from all three trainable modules
    # (frozen backbone/projection layers are excluded by the filter).
    trainable_params: List[nn.Parameter] = []
    trainable_params += [p for p in (student.module if isinstance(student, DDP) else student).parameters() if p.requires_grad]
    trainable_params += [p for p in (criterion.module if isinstance(criterion, DDP) else criterion).parameters() if p.requires_grad]
    trainable_params += [p for p in (projector.module if isinstance(projector, DDP) else projector).parameters() if p.requires_grad]

    if is_main:
        n_student = sum(p.numel() for p in (student.module if isinstance(student, DDP) else student).parameters() if p.requires_grad)
        n_crit = sum(p.numel() for p in (criterion.module if isinstance(criterion, DDP) else criterion).parameters() if p.requires_grad)
        n_proj = sum(p.numel() for p in (projector.module if isinstance(projector, DDP) else projector).parameters() if p.requires_grad)
        logger.info(f"Trainable params - student: {n_student:,}, criterion: {n_crit:,}, projector: {n_proj:,}; total: {n_student+n_crit+n_proj:,}")

    optimizer = torch.optim.AdamW(trainable_params, lr=args.lr, weight_decay=args.weight_decay)

    # LR scheduler
    # Cosine anneals over the post-warmup epochs only; warmup itself is applied
    # manually at the top of each epoch by overwriting param_group lr.
    scheduler = None
    if args.lr_scheduler == "cosine":
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=max(1, args.epochs - args.warmup_epochs), eta_min=args.min_lr)
        if is_main:
            logger.info("Using Cosine Annealing LR scheduler")
    elif args.lr_scheduler == "step":
        scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=15, gamma=0.5)
        if is_main:
            logger.info("Using Step LR scheduler")
    elif args.lr_scheduler == "exponential":
        scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.95)
        if is_main:
            logger.info("Using Exponential LR scheduler")
    elif args.lr_scheduler == "constant":
        if is_main:
            logger.info(f"Using Constant LR: {args.lr}")

    # TensorBoard
    # Only the main rank writes scalars; SummaryWriter may be None if the
    # tensorboard import failed earlier in the file.
    writer = None
    if is_main and args.log_dir:
        if SummaryWriter is None:
            logger.warning("torch.utils.tensorboard is unavailable")
        else:
            writer = SummaryWriter(log_dir=args.log_dir)

    start_epoch = 0
    global_step = 0          # counts optimizer steps, not micro-batches
    epoch_losses: List[float] = []

    # Training loop
    for epoch in range(start_epoch, args.epochs):
        # Switch trainable modules to train mode; the frozen teacher depth
        # head stays in eval mode throughout.
        (student.module if isinstance(student, DDP) else student).train()
        (criterion.module if isinstance(criterion, DDP) else criterion).train()
        (projector.module if isinstance(projector, DDP) else projector).train()
        depth_head.eval()

        optimizer.zero_grad()

        # Warmup: linear ramp of lr over the first warmup_epochs epochs,
        # applied by directly overwriting every param group's lr.
        if args.warmup_epochs > 0 and epoch < args.warmup_epochs:
            warmup_lr = args.lr * (epoch + 1) / max(1, args.warmup_epochs)
            for pg in optimizer.param_groups:
                pg["lr"] = warmup_lr

        # Reshuffle the distributed sampler each epoch (sampler is created
        # earlier in this function, outside the visible chunk).
        if local_rank != -1:
            sampler.set_epoch(epoch)  # type: ignore[name-defined]

        running_loss = 0.0
        epoch_loss_total = 0.0
        accumulation_counter = 0
        skipped_batches = 0

        progress = tqdm(dataloader, desc=f"Epoch {epoch+1}/{args.epochs} [Combined]") if is_main else dataloader

        for batch_idx, batch in enumerate(progress):
            # Graceful shutdown: a signal handler elsewhere sets this flag;
            # break so the per-epoch checkpoint below still runs.
            if _SHUTDOWN_FLAG:
                if is_main:
                    logger.info("Shutdown flag detected, saving checkpoint and exiting...")
                break

            images = batch["images"].to(device, non_blocking=True)
            teacher_final = batch["teacher_final"].to(device, non_blocking=True)
            teacher_layers = {k: v.to(device, non_blocking=True) for k, v in batch["teacher_layers"].items()}
            gt_depth = batch["depth"].to(device, non_blocking=True)

            # Ensure a leading batch dimension for single-scene samples.
            if images.dim() == 4:
                images = images.unsqueeze(0)  # (1, S, 3, H, W)
            if gt_depth.dim() == 3:
                gt_depth = gt_depth.unsqueeze(0)  # (1, S, H, W)

            # Forward student
            # NOTE(review): calling forward_features on the unwrapped .module
            # bypasses the DDP wrapper, so DDP gradient synchronization is not
            # engaged for this forward/backward — confirm this is intentional.
            stu = student.module if isinstance(student, DDP) else student
            student_tokens, patch_start_idx = stu.forward_features(images)

            # Alignment loss (student layer tokens vs. cached teacher features)
            crit = criterion.module if isinstance(criterion, DDP) else criterion
            align_loss, align_details = crit.compute_losses(student_tokens, teacher_layers, teacher_final)

            # Depth loss via projector + depth head
            proj = projector.module if isinstance(projector, DDP) else projector
            teacher_tokens_for_depth = proj(student_tokens)
            pred_depth_trainable, _ = depth_head(teacher_tokens_for_depth, images=images, patch_start_idx=patch_start_idx)
            # NOTE(review): DepthLoss is re-instantiated (and moved to device)
            # on every batch; hoisting it above the loop would avoid that cost.
            depth_criterion = DepthLoss(loss_weight=1.0).to(device)
            depth_loss = depth_criterion(pred_depth_trainable, gt_depth)

            # Combine loss
            loss = args.alignment_weight * align_loss + args.depth_weight * depth_loss

            # NaN/Inf checks: skip the batch on invalid loss. Note that
            # zero_grad here also discards gradients already accumulated from
            # earlier micro-batches in the current accumulation window.
            if torch.isnan(loss) or torch.isinf(loss):
                if is_main:
                    logger.warning(f"Invalid loss at step {global_step} (epoch {epoch+1}, batch {batch_idx}): {loss.item()} | align={align_details.get('total')} depth={depth_loss.item() if torch.isfinite(depth_loss) else float('nan')}")
                optimizer.zero_grad()
                skipped_batches += 1
                continue

            # Scale for gradient accumulation so summed grads average out.
            loss = loss / args.accumulation_steps
            loss.backward()
            accumulation_counter += 1

            # Optimizer step every accumulation_steps micro-batches; the
            # len(dataloader)-1 clause flushes a partial window at epoch end.
            if accumulation_counter % args.accumulation_steps == 0 or batch_idx == len(dataloader) - 1:
                if args.grad_clip is not None and args.grad_clip > 0:
                    total_norm = torch.nn.utils.clip_grad_norm_(trainable_params, args.grad_clip)
                    if is_main and total_norm > args.grad_clip * 2:
                        logger.warning(f"Large gradient norm: {float(total_norm):.4f} (clipped to {args.grad_clip})")
                    if writer is not None and is_main:
                        writer.add_scalar("train/grad_norm", float(total_norm), global_step)
                optimizer.step()
                optimizer.zero_grad()

                # NOTE(review): the logged combined_loss is the *last*
                # micro-batch's loss re-scaled, not the mean over the whole
                # accumulation window — verify that is the intended metric.
                if writer is not None and is_main:
                    writer.add_scalar("train/combined_loss", float(loss.item() * args.accumulation_steps), global_step)
                    writer.add_scalar("train/alignment_loss", float(align_loss.item()), global_step)
                    writer.add_scalar("train/depth_loss", float(depth_loss.item()), global_step)
                    writer.add_scalar("train/lr", float(optimizer.param_groups[0].get("lr", 0.0)), global_step)

                accumulation_counter = 0
                global_step += 1

            # Undo the accumulation scaling when tracking human-readable loss.
            running_loss += loss.item() * args.accumulation_steps
            epoch_loss_total += loss.item() * args.accumulation_steps

            # NOTE(review): running_loss accrues once per micro-batch but is
            # divided by log_every (optimizer steps); with accumulation_steps
            # > 1 this overstates the average by that factor — confirm.
            if is_main and accumulation_counter == 0 and global_step % args.log_every == 0:
                avg_loss = running_loss / args.log_every
                progress.set_postfix({"loss": f"{avg_loss:.4f}", "align": f"{align_details.get('total', 0):.4f}", "depth": f"{depth_loss.item():.4f}"})
                running_loss = 0.0

        # Mean loss per batch for this epoch (max(...) guards an empty loader).
        epoch_loss = epoch_loss_total / max(len(dataloader), 1)
        epoch_losses.append(float(epoch_loss))
        if writer is not None and is_main:
            writer.add_scalar("train/epoch_loss", float(epoch_loss), epoch + 1)

        if is_main and skipped_batches > 0:
            logger.warning(f"Skipped {skipped_batches} batches due to invalid loss in epoch {epoch+1}")

        # Step the scheduler only after warmup has finished.
        # NOTE(review): the epoch summary log line lives inside this branch,
        # so constant-LR runs (scheduler is None) never log it — confirm.
        if scheduler is not None and epoch >= args.warmup_epochs:
            scheduler.step()
            if is_main:
                current_lr = optimizer.param_groups[0]["lr"]
                logger.info(f"Epoch {epoch+1}/{args.epochs} - Loss: {epoch_loss:.6f} - LR: {current_lr:.6f}")

        # Save checkpoint each epoch (overwrites the same save_path; always
        # unwraps .module so the checkpoint is DDP-independent).
        if is_main:
            m = student.module if isinstance(student, DDP) else student
            c = criterion.module if isinstance(criterion, DDP) else criterion
            p = projector.module if isinstance(projector, DDP) else projector
            ckpt = {
                "model": m.state_dict(),
                "criterion": c.state_dict(),
                "projector": p.state_dict(),
                "optimizer": optimizer.state_dict(),
                "epoch": epoch,
                "global_step": global_step,
                "epoch_losses": epoch_losses,
                "args": vars(args),
                "frozen_backbone": _FROZEN_STUDENT_LAYERS,
                "trainable_backbone": _TRAINABLE_STUDENT_LAYERS,
                "frozen_align_layers": _FROZEN_ALIGN_LAYERS,
            }
            if scheduler is not None:
                ckpt["scheduler"] = scheduler.state_dict()
            torch.save(ckpt, args.save_path)
            logger.info(f"Saved combined fine-tuned checkpoint to {args.save_path}")

        if _SHUTDOWN_FLAG:
            if is_main:
                logger.info("Shutdown signal received, exiting training loop...")
            break

    # Optionally plot epoch loss (best-effort: matplotlib may be missing,
    # hence the broad try/except around the whole plotting step).
    if is_main and args.log_dir and args.save_epoch_plot and len(epoch_losses) > 0:
        try:
            import matplotlib.pyplot as plt
            os.makedirs(args.log_dir, exist_ok=True)
            plt.figure(figsize=(5, 3))
            plt.plot(range(1, len(epoch_losses) + 1), epoch_losses, marker="o")
            plt.xlabel("Epoch"); plt.ylabel("Loss"); plt.title("MiniVGGT Combined Fine-tuning - Epoch Loss")
            plt.grid(True, alpha=0.3)
            out_path = os.path.join(args.log_dir, "loss_epoch_combined_finetune.png")
            plt.tight_layout(); plt.savefig(out_path, dpi=150); plt.close()
            logger.info("Saved epoch loss plot to %s", out_path)
        except Exception as e:
            logger.warning("Could not save loss plot: %s", e)

    if writer is not None and is_main:
        writer.close()

    # Tear down the process group only when we actually initialized DDP.
    if local_rank != -1:
        dist.destroy_process_group()


# Script entry point: run the combined fine-tuning loop defined above.
if __name__ == "__main__":
    train()
