"""Training script to distil MiniVGGT from cached teacher aggregator features.



torchrun \
    --nproc_per_node=6 \
    --master_port=29502 \
    /data0/liqifeng/ZYC/vggt/mini_vggt/Distill/train_mini.py \
    --feature-root /data1/datasets/VGGT_features \
    --epochs 100 \
    --lr 5e-4 \
    --batch-size 1 \
    --num-workers 4 \
    --accumulation-steps 8 \
    --save-path /data0/liqifeng/ZYC/vggt/mini_vggt/Distill/mini_vggt_distilled.pt \
    --log-dir /data0/liqifeng/ZYC/vggt/mini_vggt/Distill/runs/mini_vggt \
    --lr-scheduler cosine \
    --warmup-epochs 5 \
    --min-lr 1e-6


tensorboard --logdir /data0/liqifeng/ZYC/vggt/mini_vggt/Distill/runs/mini_vggt --port 6009



"""


from __future__ import annotations

import argparse
import json
import logging
import os
from dataclasses import dataclass
from typing import Dict, Iterable, List, Sequence

os.environ.setdefault("HDF5_USE_FILE_LOCKING", "FALSE")

try:
    import h5py
except ImportError:  # pragma: no cover - h5py is optional but required for this script
    h5py = None  # type: ignore

try:
    from torch.utils.tensorboard import SummaryWriter
except ImportError:  # pragma: no cover - TensorBoard is optional
    SummaryWriter = None  # type: ignore
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data import DataLoader, Dataset
from torch.utils.data.distributed import DistributedSampler
from tqdm.auto import tqdm
import sys
import os
from pathlib import Path

REPO_ROOT = Path(__file__).resolve().parents[2]  # Go up to ZYC/vggt
if str(REPO_ROOT) not in sys.path:
    sys.path.insert(0, str(REPO_ROOT))

from mini_vggt.Distill.vggt_mini import MiniVGGT, MiniVGGTConfig
from mini_vggt.Distill.vggt_mini_gqa import MiniVGGTGQA, MiniVGGTGQAConfig
from vggt.utils.load_fn import load_and_preprocess_images

logger = logging.getLogger(__name__)


# Mapping from teacher layer names (as stored in the feature cache) to the
# index of the student token tensor that should be matched against them
# during distillation.
_LAYER_PAIRS: Sequence[tuple[str, int]] = (
    ("layer_4", 0),
    ("layer_11", 1),
    ("layer_17", 2),
    ("layer_23", 3),
)


def _decode_h5_strings(dataset) -> List[str]:
    values = dataset[()]
    if isinstance(values, (bytes, str, torch.Tensor)):
        values = [values]
    elif hasattr(values, "tolist"):
        values = values.tolist()

    decoded: List[str] = []
    for item in values:
        if isinstance(item, (bytes, bytearray)):
            decoded.append(item.decode("utf-8"))
        else:
            decoded.append(str(item))
    return decoded


@dataclass
class TeacherFeatures:
    """Bundle of teacher outputs loaded from one cached sample."""

    # Parsed metadata.json contents (possibly augmented with image paths
    # recovered from the HDF5 file).
    metadata: Dict
    # Final aggregator features; a singleton batch dim is squeezed on load.
    final: torch.Tensor
    # Intermediate layer features keyed by layer name (e.g. "layer_4").
    layers: Dict[str, torch.Tensor]
    # Paths of the source images, from metadata, HDF5, or image_paths.json.
    image_paths: List[str]
    # Preprocessing mode recorded at cache time (defaults to "crop").
    preprocess_mode: str


def load_teacher_features(metadata_path: str) -> TeacherFeatures:
    """Load one cached teacher sample described by a ``metadata.json`` file.

    Resolves the scene HDF5 file referenced by the metadata, reads the final
    aggregator features and any saved intermediate layers (de-quantizing when
    the cache uses the ``scene_hdf5_int8`` storage format), and collects the
    image paths needed to reproduce the model inputs.

    Args:
        metadata_path: Path to the sample's ``metadata.json``.

    Returns:
        TeacherFeatures with CPU tensors; singleton leading batch dims are
        squeezed away.

    Raises:
        RuntimeError: If ``h5py`` is unavailable.
        KeyError: If required metadata fields or HDF5 groups are missing.
        FileNotFoundError: If the referenced scene HDF5 file does not exist.
    """
    if h5py is None:
        raise RuntimeError("h5py is required to read HDF5 feature caches. Please install h5py to continue.")
    
    with open(metadata_path, "r", encoding="utf-8") as f:
        metadata = json.load(f)

    scene_ref = metadata.get("scene_file")
    if not scene_ref:
        raise KeyError(f"Metadata missing 'scene_file': {metadata_path}")

    # Relative scene paths are resolved against the metadata file's directory.
    if os.path.isabs(scene_ref):
        scene_file = scene_ref
    else:
        scene_file = os.path.normpath(os.path.join(os.path.dirname(metadata_path), scene_ref))

    if not os.path.exists(scene_file):
        raise FileNotFoundError(f"Scene HDF5 file not found: {scene_file}")

    sample_group = metadata.get("sample_group")
    iteration_group = metadata.get("iteration_group")
    if not sample_group or not iteration_group:
        raise KeyError(f"Metadata missing group identifiers: {metadata_path}")

    layers: Dict[str, torch.Tensor] = {}
    storage_format = metadata.get("storage_format")

    # locking=False pairs with HDF5_USE_FILE_LOCKING=FALSE (set at import
    # time) to avoid file-lock contention across dataloader workers/ranks.
    with h5py.File(scene_file, "r", locking=False) as scene_h5:
        if sample_group not in scene_h5:
            raise KeyError(f"Sample group '{sample_group}' not present in {scene_file}")
        sample_grp = scene_h5[sample_group]
        
        if iteration_group not in sample_grp:
            raise KeyError(f"Iteration group '{iteration_group}' not present in scene HDF5")
        iter_grp = sample_grp[iteration_group]

        # Handle different storage formats (scene_hdf5_int8 for quantized storage)
        if storage_format == "scene_hdf5_int8":
            # Quantized int8 storage: de-quantize by the stored scale factor.
            # Per-dataset attrs take precedence over metadata-level scales.
            quant_features = iter_grp["features"][...]
            feature_scale = float(iter_grp.attrs.get("feature_scale", metadata.get("feature_scale", 1.0)))
            final_arr = quant_features.astype("float32") * feature_scale
            
            # Load layer features if present
            layer_grp = iter_grp.get("layer_features")
            if layer_grp is not None:
                layer_scales_meta = metadata.get("layer_scales", {})
                for layer_name in metadata.get("saved_layers", []):
                    if layer_name not in layer_grp:
                        continue
                    layer_ds = layer_grp[layer_name]
                    # Fallback chain: dataset attr -> metadata layer scale -> feature scale.
                    layer_scale = float(layer_ds.attrs.get("scale", layer_scales_meta.get(layer_name, feature_scale)))
                    layer_arr = layer_ds[...].astype("float32") * layer_scale
                    layers[layer_name] = torch.from_numpy(layer_arr.squeeze(axis=0) if layer_arr.shape[0] == 1 else layer_arr)
        else:
            # Default float32 storage format
            teacher_scale = float(iter_grp.attrs.get("feature_scale", metadata.get("feature_scale", 1.0)))
            final_arr = iter_grp["features"][...].astype("float32") * teacher_scale
            
            layer_grp = iter_grp.get("layer_features")
            layer_scales_meta = metadata.get("layer_scales", {})
            for layer_name in metadata.get("saved_layers", []):
                if layer_grp is None or layer_name not in layer_grp:
                    continue
                layer_ds = layer_grp[layer_name]
                layer_scale = float(layer_ds.attrs.get("scale", layer_scales_meta.get(layer_name, teacher_scale)))
                layer_arr = layer_ds[...].astype("float32") * layer_scale
                layers[layer_name] = torch.from_numpy(layer_arr.squeeze(axis=0) if layer_arr.shape[0] == 1 else layer_arr)

        # Squeeze batch dimension if present
        final_arr = final_arr.squeeze(axis=0) if final_arr.shape[0] == 1 else final_arr

        # Recover image paths from HDF5 if metadata does not carry them.
        if "image_paths" in iter_grp and not metadata.get("image_paths"):
            metadata["image_paths"] = _decode_h5_strings(iter_grp["image_paths"])

    image_paths = metadata.get("image_paths", [])
    if not image_paths:
        # Fallback: try to load from image_paths.json next to the metadata.
        image_paths_file = os.path.join(os.path.dirname(metadata_path), "image_paths.json")
        if os.path.exists(image_paths_file):
            with open(image_paths_file, "r", encoding="utf-8") as f:
                image_paths = json.load(f)
    
    preprocess_mode = metadata.get("preprocess_mode", "crop")

    final_tensor = torch.from_numpy(final_arr)

    return TeacherFeatures(
        metadata=metadata,
        final=final_tensor,
        layers=layers,
        image_paths=image_paths,
        preprocess_mode=preprocess_mode,
    )


class SceneFeatureDataset(Dataset):
    """Dataset of cached teacher samples discovered under a root directory.

    Every directory containing a ``metadata.json`` is treated as one sample;
    ``__getitem__`` returns the preprocessed input images together with the
    cached teacher features for distillation.
    """

    def __init__(self, feature_root: str, max_samples: int | None = None, is_main_process: bool = True) -> None:
        """Index all ``metadata.json`` files found under *feature_root*.

        Args:
            feature_root: Root directory scanned recursively for samples.
            max_samples: Optional cap on the number of samples (applied after
                sorting, so the selection is deterministic).
            is_main_process: When False, suppresses informational logging
                (avoids duplicate logs under DDP).

        Raises:
            FileNotFoundError: If no metadata.json exists under the root.
        """
        metadata_paths = []
        for root, _dirs, files in os.walk(feature_root):
            if "metadata.json" in files:
                metadata_paths.append(os.path.join(root, "metadata.json"))
        metadata_paths.sort()

        if not metadata_paths:
            raise FileNotFoundError(f"No metadata.json found under {feature_root}")

        if max_samples is not None:
            metadata_paths = metadata_paths[:max_samples]

        self.samples = metadata_paths
        
        # Load first sample to infer dimensions and check data validity
        if is_main_process:
            logger.info(f"Loading first sample to infer dimensions: {self.samples[0]}")
        first_teacher = load_teacher_features(self.samples[0])
        # Channel width of the teacher's final features.
        self.teacher_dim = int(first_teacher.final.shape[-1])
        # Names of the intermediate teacher layers present in the cache.
        self.saved_layers = list(first_teacher.layers.keys())
        
        if is_main_process:
            logger.info(f"Dataset initialized with {len(self.samples)} samples")
            logger.info(f"Teacher feature dimension: {self.teacher_dim}")
            logger.info(f"Saved layers: {self.saved_layers}")

    def __len__(self) -> int:
        """Return the number of indexed samples."""
        return len(self.samples)

    def __getitem__(self, idx: int) -> Dict:
        """Load sample *idx*, retrying transient HDF5 read failures.

        Returns:
            Dict with keys 'images' (S, 3, H, W), 'teacher_final',
            'teacher_layers', and 'metadata'.
        """
        metadata_path = self.samples[idx]
        max_retries = 3
        
        for attempt in range(max_retries):
            try:
                teacher = load_teacher_features(metadata_path)
                
                # Load and preprocess images following demo.py logic
                if not teacher.image_paths:
                    raise ValueError(f"No image paths found for sample: {metadata_path}")
                
                images = load_and_preprocess_images(teacher.image_paths, mode=teacher.preprocess_mode)
                
                return {
                    "images": images,  # (S, 3, H, W)
                    "teacher_final": teacher.final,
                    "teacher_layers": teacher.layers,
                    "metadata": teacher.metadata,
                }
            except OSError as e:
                # HDF5 file reading errors (compression/filter issues)
                if "filter returned failure" in str(e) or "Can't synchronously read" in str(e):
                    if attempt < max_retries - 1:
                        logger.warning(f"HDF5 read error on attempt {attempt+1}/{max_retries} for sample {idx}: {e}. Retrying...")
                        import time
                        time.sleep(0.1 * (attempt + 1))  # Linear backoff: 0.1s, 0.2s, ...
                        continue
                    else:
                        # After max retries, fall back to a neighbouring sample.
                        # NOTE(review): this can recurse deeply if neighbours
                        # also fail to load -- confirm acceptable.
                        logger.error(f"Failed to load sample {idx} after {max_retries} attempts. Using next sample.")
                        if idx + 1 < len(self.samples):
                            return self.__getitem__(idx + 1)
                        else:
                            return self.__getitem__(max(0, idx - 1))
                else:
                    raise
            except Exception as e:
                logger.error(f"Error loading sample {idx} from {metadata_path}: {e}")
                raise
        
        # Should not reach here, but just in case
        raise RuntimeError(f"Failed to load sample {idx} after all retries")


class FeatureDistillationCriterion(nn.Module):
    """Distillation loss matching student block outputs to cached teacher layers.

    Each (teacher_layer_name, student_block_index) pair gets its own trainable
    linear projection from the student width to the teacher width; the total
    loss is the weight-normalized sum of per-layer MSEs between projected
    student tokens and the corresponding teacher tokens.
    """

    def __init__(
        self,
        student_dim: int,
        teacher_dim: int,
        layer_pairs: Sequence[tuple[str, int]],
        layer_weight: float = 1.0,
    ) -> None:
        """
        Args:
            student_dim: Channel width of the student token tensors.
            teacher_dim: Channel width of the cached teacher features.
            layer_pairs: (teacher layer name, student token index) pairs.
            layer_weight: Weight applied to every per-layer MSE term.
        """
        super().__init__()
        self.layer_pairs = list(layer_pairs)
        self.layer_weight = layer_weight

        # One projection per matched layer, trained jointly with the student.
        self.layer_projs = nn.ModuleDict(
            {name: nn.Linear(student_dim, teacher_dim) for name, _ in self.layer_pairs}
        )

    @staticmethod
    def _squeeze_if_batched(t: torch.Tensor) -> torch.Tensor:
        """Drop a leading singleton batch dim if present.

        Fix: the previous @torch.no_grad() decorator was removed. It would
        silently detach the result from autograd if this helper were ever
        applied to a tensor requiring grad. It is currently only used on
        teacher tensors (which carry no grad), so behavior is unchanged.
        """
        if t.dim() == 4 and t.shape[0] == 1:
            return t.squeeze(0)
        return t

    def compute_losses(
        self,
        student_tokens: Sequence[torch.Tensor],
        teacher_layers: Dict[str, torch.Tensor],
        teacher_final: torch.Tensor,
    ) -> tuple[torch.Tensor, Dict[str, object]]:
        """Compute total loss and return component-wise breakdown for logging.

        Args:
            student_tokens: Student token tensors, indexed by the student
                index in ``layer_pairs``; a leading batch dim is squeezed.
            teacher_layers: Cached teacher tensors keyed by layer name.
            teacher_final: Final teacher features (currently unused; kept for
                interface compatibility).

        Returns:
            (total_loss, details) where details is a dict with keys:
                - 'layers': Dict[str, float] per-layer MSE (unweighted)
                - 'total': float normalized weighted total loss
        """
        device = student_tokens[-1].device
        total_loss = torch.tensor(0.0, device=device)
        total_weight = 0.0

        # Hoisted out of the loop: per-layer floats for monitoring.
        layer_details: Dict[str, float] = {}
        details: Dict[str, object] = {"layers": layer_details, "total": None}

        for layer_name, student_idx in self.layer_pairs:
            # Skip pairs missing on either side (teacher cache or student depth).
            if layer_name not in teacher_layers or student_idx >= len(student_tokens):
                continue
            student = student_tokens[student_idx].squeeze(0)
            teacher = self._squeeze_if_batched(teacher_layers[layer_name])

            # Flatten all token positions so MSE is averaged over every element.
            student_flat = student.reshape(-1, student.shape[-1])
            teacher_flat = teacher.reshape(-1, teacher.shape[-1])

            projected = self.layer_projs[layer_name](student_flat)
            layer_mse = F.mse_loss(projected, teacher_flat)
            total_loss = total_loss + self.layer_weight * layer_mse
            total_weight += self.layer_weight
            # Record the unweighted component for clearer monitoring.
            layer_details[layer_name] = float(layer_mse.detach().item())

        # Guard against division by zero when no pairs matched.
        norm = max(total_weight, 1e-6)
        total_loss = total_loss / norm
        details["total"] = float(total_loss.detach().item())
        return total_loss, details

    def forward(
        self,
        student_tokens: Sequence[torch.Tensor],
        teacher_layers: Dict[str, torch.Tensor],
        teacher_final: torch.Tensor,
    ) -> torch.Tensor:
        """Return only the total loss (see :meth:`compute_losses`)."""
        total, _details = self.compute_losses(student_tokens, teacher_layers, teacher_final)
        return total


def parse_args() -> argparse.Namespace:
    """Parse command-line options for the distillation run."""
    ap = argparse.ArgumentParser(description="Distil MiniVGGT from cached teacher features")

    # Data and optimisation.
    ap.add_argument("--feature-root", default="/data1/datasets/VGGT_features")
    ap.add_argument("--epochs", type=int, default=100)
    ap.add_argument("--lr", type=float, default=5e-4)
    ap.add_argument("--batch-size", type=int, default=1)
    ap.add_argument("--accumulation-steps", type=int, default=8, help="Gradient accumulation steps (effective batch size = batch_size * accumulation_steps * num_gpus)")
    ap.add_argument("--num-workers", type=int, default=8)
    ap.add_argument("--device", default="cuda:5")
    ap.add_argument("--max-samples", type=int, default=None)

    # Logging and checkpoints.
    ap.add_argument("--log-every", type=int, default=10)
    ap.add_argument("--save-path", default="/data0/liqifeng/ZYC/vggt/Distill/mini_vggt_distilled.pt")
    ap.add_argument("--log-dir", default="/data0/liqifeng/ZYC/vggt/Distill/logs/mini_vggt")
    ap.add_argument("--save-epoch-plot", action="store_true", help="Save a PNG plot of epoch losses to log_dir")
    ap.add_argument("--resume", type=str, default=None, help="Path to checkpoint to resume training from")

    # Learning rate scheduler arguments.
    ap.add_argument("--lr-scheduler", type=str, default=None, choices=["cosine", "step", "exponential"],
                    help="Learning rate scheduler type")
    ap.add_argument("--warmup-epochs", type=int, default=0, help="Number of warmup epochs")
    ap.add_argument("--min-lr", type=float, default=1e-6, help="Minimum learning rate for cosine scheduler")

    # Multi-GPU training arguments.
    ap.add_argument("--local_rank", type=int, default=-1, help="Local rank for distributed training")
    ap.add_argument("--world-size", type=int, default=1, help="Number of GPUs to use")

    return ap.parse_args()


def configure_logging() -> None:
    """Configure root logging: INFO level with a timestamped message format."""
    logging.basicConfig(level=logging.INFO, format="%(asctime)s | %(levelname)s | %(message)s")


def train() -> None:
    """Run MiniVGGT distillation against cached teacher features.

    Supports single-GPU execution and multi-GPU DDP launched via torchrun,
    with gradient accumulation, optional warmup + LR scheduling, checkpoint
    resume, and TensorBoard logging from the main process only.
    """
    args = parse_args()

    # torchrun communicates the rank through environment variables rather
    # than the legacy --local_rank argument; honour both.
    if args.local_rank == -1:
        local_rank = int(os.environ.get("LOCAL_RANK", -1))
    else:
        local_rank = args.local_rank

    # Set up distributed training FIRST, before creating any CUDA tensors.
    if local_rank != -1:
        # Important: select the device before any CUDA operations.
        torch.cuda.set_device(local_rank)
        dist.init_process_group(backend='nccl')
        device = torch.device(f'cuda:{local_rank}')
        world_size = dist.get_world_size()
        is_main_process = dist.get_rank() == 0

        if is_main_process:
            configure_logging()
            logger.info(f"Distributed training initialized: world_size={world_size}, rank={dist.get_rank()}, local_rank={local_rank}")
            logger.info(f"CUDA device count: {torch.cuda.device_count()}")
            logger.info(f"Using device: {device} (actual GPU: {torch.cuda.current_device()})")
    else:
        device = torch.device(args.device if torch.cuda.is_available() else "cpu")
        world_size = 1
        is_main_process = True
        configure_logging()

    if args.batch_size != 1:
        raise ValueError("This distillation script currently supports only batch_size=1 because samples have variable frame counts.")

    # Load dataset (CPU operations only).
    dataset = SceneFeatureDataset(args.feature_root, max_samples=args.max_samples, is_main_process=is_main_process)

    # Use a DistributedSampler so each rank sees a disjoint shard per epoch.
    if local_rank != -1:
        sampler = DistributedSampler(dataset, shuffle=True)
        dataloader = DataLoader(
            dataset,
            batch_size=args.batch_size,
            sampler=sampler,
            num_workers=args.num_workers,
            pin_memory=True,
        )
    else:
        dataloader = DataLoader(
            dataset,
            batch_size=args.batch_size,
            shuffle=True,
            num_workers=args.num_workers,
            pin_memory=True,
        )

    # Create the model on CPU first, then move it to the target device.
    student = MiniVGGTGQA(MiniVGGTGQAConfig())
    student = student.to(device)

    # Width of the student tokens fed to the criterion; presumably
    # forward_features emits embed_dim * 2 channels -- TODO confirm
    # against MiniVGGTGQA.
    student_dim = student.config.embed_dim * 2
    teacher_dim = dataset.teacher_dim

    criterion = FeatureDistillationCriterion(
        student_dim=student_dim,
        teacher_dim=teacher_dim,
        layer_pairs=[pair for pair in _LAYER_PAIRS if pair[0] in dataset.saved_layers],
    )
    criterion = criterion.to(device)

    # Wrap with DDP AFTER moving to device.
    # NOTE(review): the loop below calls .module directly, which bypasses
    # the DDP wrappers' forward and therefore DDP's gradient-sync
    # bookkeeping -- confirm gradients are actually averaged across ranks.
    if local_rank != -1:
        student = DDP(student, device_ids=[local_rank], output_device=local_rank)
        criterion = DDP(criterion, device_ids=[local_rank], output_device=local_rank)

    # The projection heads in the criterion are trained jointly.
    optimizer = torch.optim.AdamW(list(student.parameters()) + list(criterion.parameters()), lr=args.lr)

    # Create learning rate scheduler.
    scheduler = None
    if args.lr_scheduler == "cosine":
        # T_max excludes warmup epochs: cosine decay starts after warmup.
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
            optimizer,
            T_max=args.epochs - args.warmup_epochs,
            eta_min=args.min_lr
        )
        if is_main_process:
            logger.info(f"Using Cosine Annealing LR scheduler: T_max={args.epochs - args.warmup_epochs}, eta_min={args.min_lr}")
    elif args.lr_scheduler == "step":
        scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=30, gamma=0.1)
        if is_main_process:
            logger.info("Using Step LR scheduler: step_size=30, gamma=0.1")
    elif args.lr_scheduler == "exponential":
        scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.95)
        if is_main_process:
            logger.info("Using Exponential LR scheduler: gamma=0.95")

    # Resume from checkpoint if specified.
    start_epoch = 0
    if args.resume and os.path.exists(args.resume):
        if is_main_process:
            logger.info(f"Resuming training from checkpoint: {args.resume}")
        checkpoint = torch.load(args.resume, map_location=device)

        # Load model states into the unwrapped modules.
        model = student.module if isinstance(student, DDP) else student
        model.load_state_dict(checkpoint["model"])

        crit = criterion.module if isinstance(criterion, DDP) else criterion
        crit.load_state_dict(checkpoint["criterion"])

        optimizer.load_state_dict(checkpoint["optimizer"])

        # Load scheduler state if present in the checkpoint.
        if scheduler is not None and "scheduler" in checkpoint:
            scheduler.load_state_dict(checkpoint["scheduler"])

        # Restore training progress; resume at the epoch after the saved one.
        start_epoch = checkpoint.get("epoch", 0) + 1
        global_step = checkpoint.get("global_step", 0)
        epoch_losses = checkpoint.get("epoch_losses", [])

        if is_main_process:
            logger.info(f"Resumed from epoch {start_epoch}, global_step {global_step}")
    else:
        global_step = 0
        epoch_losses: List[float] = []

    # TensorBoard writer lives only on the main process.
    writer = None
    if is_main_process:
        if args.log_dir and SummaryWriter is None:
            logger.warning("torch.utils.tensorboard is unavailable; loss curves will not be logged.")
        writer = SummaryWriter(log_dir=args.log_dir) if args.log_dir and SummaryWriter is not None else None

    for epoch in range(start_epoch, args.epochs):
        student.train()

        # Start the epoch with clean gradients.
        optimizer.zero_grad()

        # Linear warmup: ramp the LR up to args.lr over the warmup epochs.
        if args.warmup_epochs > 0 and epoch < args.warmup_epochs:
            warmup_lr = args.lr * (epoch + 1) / args.warmup_epochs
            for param_group in optimizer.param_groups:
                param_group['lr'] = warmup_lr
            if is_main_process and epoch == 0:
                logger.info(f"Warmup phase: epochs 0-{args.warmup_epochs-1}")

        # Reshuffle each rank's shard per epoch under DDP.
        if local_rank != -1:
            sampler.set_epoch(epoch)

        running_loss = 0.0
        epoch_loss_total = 0.0
        accumulation_counter = 0  # Micro-batches since the last optimizer step.

        if is_main_process:
            progress = tqdm(dataloader, desc=f"Epoch {epoch+1}/{args.epochs}")
        else:
            progress = dataloader

        for batch_idx, batch in enumerate(progress):
            images = batch["images"].to(device, non_blocking=True)
            teacher_final = batch["teacher_final"].to(device, non_blocking=True)
            teacher_layers = {k: v.to(device, non_blocking=True) for k, v in batch["teacher_layers"].items()}

            # MiniVGGT expects [B, S, 3, H, W]; add the batch dim if missing.
            if images.dim() == 4:
                images = images.unsqueeze(0)  # [S, 3, H, W] -> [1, S, 3, H, W]

            # Teacher features don't need a batch dimension.
            if teacher_final.dim() == 4 and teacher_final.shape[0] == 1:
                teacher_final = teacher_final.squeeze(0)
            teacher_layers = {
                name: value.squeeze(0) if value.dim() == 4 and value.shape[0] == 1 else value
                for name, value in teacher_layers.items()
            }

            # Unwrap DDP; see the NOTE(review) above about gradient sync.
            model = student.module if isinstance(student, DDP) else student
            student_tokens, _ = model.forward_features(images)

            crit = criterion.module if isinstance(criterion, DDP) else criterion
            loss, details = crit.compute_losses(student_tokens, teacher_layers, teacher_final)

            # Scale so accumulated gradients average over the micro-batches.
            loss = loss / args.accumulation_steps
            loss.backward()

            accumulation_counter += 1

            # Update weights every accumulation_steps micro-batches, and at
            # the end of the epoch so leftover gradients are not dropped.
            if accumulation_counter % args.accumulation_steps == 0 or batch_idx == len(dataloader) - 1:
                optimizer.step()
                optimizer.zero_grad()
                accumulation_counter = 0

            running_loss += loss.item() * args.accumulation_steps  # Unscale for logging
            epoch_loss_total += loss.item() * args.accumulation_steps

            # Advance global_step only when weights were actually updated.
            # Fix: this increment used to appear twice back-to-back, so every
            # optimizer step advanced global_step by 2.
            if accumulation_counter == 0:
                global_step += 1

            if writer is not None and is_main_process and accumulation_counter == 0:
                writer.add_scalar("train/loss", loss.item() * args.accumulation_steps, global_step)
                writer.add_scalar("train/lr", float(optimizer.param_groups[0].get("lr", 0.0)), global_step)
                # Component-wise losses every log interval to keep event size modest.
                if global_step % args.log_every == 0:
                    layers = details.get("layers", {})
                    if isinstance(layers, dict):
                        for name, val in layers.items():
                            writer.add_scalar(f"train/loss_layer/{name}", float(val), global_step)

            if is_main_process and accumulation_counter == 0 and global_step % args.log_every == 0:
                avg_loss = running_loss / args.log_every
                progress.set_postfix({"loss": f"{avg_loss:.4f}"})
                running_loss = 0.0

        epoch_loss = epoch_loss_total / max(len(dataloader), 1)
        epoch_losses.append(float(epoch_loss))
        if writer is not None and is_main_process:
            writer.add_scalar("train/epoch_loss", epoch_loss, epoch + 1)

        # Step the LR scheduler only after warmup has finished.
        if scheduler is not None and epoch >= args.warmup_epochs:
            scheduler.step()
            if is_main_process:
                current_lr = optimizer.param_groups[0]['lr']
                logger.info(f"Epoch {epoch+1}/{args.epochs} - LR: {current_lr:.6f}")

        # Only save on the main process.
        if is_main_process:
            # Save unwrapped model state so checkpoints load without DDP.
            model = student.module if isinstance(student, DDP) else student
            crit = criterion.module if isinstance(criterion, DDP) else criterion

            # Full checkpoint with all training state for seamless resume.
            checkpoint = {
                "model": model.state_dict(),
                "criterion": crit.state_dict(),
                "optimizer": optimizer.state_dict(),
                "epoch": epoch,
                "global_step": global_step,
                "epoch_losses": epoch_losses,
                "args": vars(args),
            }
            if scheduler is not None:
                checkpoint["scheduler"] = scheduler.state_dict()

            torch.save(checkpoint, args.save_path)
            logger.info(f"Saved checkpoint to {args.save_path} (epoch {epoch+1}/{args.epochs})")

    # Optionally save a simple epoch-loss plot to the log directory.
    if is_main_process and args.log_dir and args.save_epoch_plot and len(epoch_losses) > 0:
        try:
            import matplotlib.pyplot as plt  # type: ignore

            os.makedirs(args.log_dir, exist_ok=True)
            plt.figure(figsize=(5, 3))
            plt.plot(range(1, len(epoch_losses) + 1), epoch_losses, marker="o")
            plt.xlabel("Epoch")
            plt.ylabel("Loss")
            plt.title("MiniVGGT Distillation - Epoch Loss")
            plt.grid(True, alpha=0.3)
            out_path = os.path.join(args.log_dir, "loss_epoch.png")
            plt.tight_layout()
            plt.savefig(out_path, dpi=150)
            plt.close()
            logger.info("Saved epoch loss plot to %s", out_path)
        except Exception as e:  # pragma: no cover - plotting is optional
            logger.warning("Could not save loss plot: %s", e)

    if writer is not None and is_main_process:
        writer.close()

    # Clean up distributed training.
    if local_rank != -1:
        dist.destroy_process_group()


if __name__ == "__main__":
    train()
