"""Train MiniVGGT-GQA student with teacher DPT depth supervision plus camera and point cloud distillation.

Usage (single GPU example):
CUDA_VISIBLE_DEVICES=0 python /data0/liqifeng/ZYC/vggt/mini_vggt/Distill/train_mini_depth_pcd.py \
  --outputs-root /data1/datasets/VGGT_outputs/libero_10 \
  --teacher-checkpoint /data0/liqifeng/ZYC/model.pt \
  --student-pretrained /data0/liqifeng/ZYC/mini_vggt_distilled.pt \
  --epochs 20 --lr 1e-5 --batch-size 1 --loss-weight-depth 1.0 --loss-weight-camera 0.1 --loss-weight-pcd 0.5


  CUDA_VISIBLE_DEVICES=0 conda activate vggt
python /data0/liqifeng/ZYC/vggt/mini_vggt/Distill/train_mini_depth_pcd.py \
  --outputs-root /data1/datasets/VGGT_outputs/libero_10 \
  --teacher-checkpoint /data0/liqifeng/ZYC/model.pt \
  --smoke-test

CUDA_VISIBLE_DEVICES=0 conda activate vggt
python /data0/liqifeng/ZYC/vggt/mini_vggt/Distill/train_mini_depth_pcd.py \
  --outputs-root /data1/datasets/VGGT_outputs/libero_10 \
  --teacher-checkpoint /data0/liqifeng/ZYC/model.pt \
  --student-pretrained /data0/liqifeng/ZYC/mini_vggt_distilled.pt \
  --epochs 20 --lr 1e-5 --batch-size 1 \
  --loss-weight-depth 1.0 --loss-weight-camera 0.1 --loss-weight-pcd 0.5 \
  --save-path /data0/liqifeng/ZYC/vggt/mini_vggt/Distill/mini_vggt_depth_pcd.pt

The dataset directory is produced by `save_outputs.py` and contains structure:
  outputs_root/suite/scene/sample_xxxx/iter_0000/outputs.npz & metadata.json
We re-load the original image paths from metadata for student forward pass.

Losses:
    - DepthLoss: L1 on valid pixels between student depth predicted by teacher DPT depth_head and teacher stored depth.
    - PointCloudLoss: MSE between student world_points predicted by teacher DPT point_head and teacher stored world_points.
    - CameraLoss: MSE between student camera prediction from teacher camera_head and teacher stored pose_enc (last iteration).

Notes & Assumptions:
  - Teacher NPZ must contain keys: depth, world_points, pose_enc (or pose_enc_list fallback).
  - world_points shape expected: (S, H, W, 3) or (1, S, H, W, 3).
  - depth shape expected: (S, H, W, 1) or (S, H, W) or with leading batch dim.
  - pose_enc shape expected: (S, 9) (absT_quaR_FoV) or any other compatible length (6/7/8/9); the script aligns it directly with the camera_head's final-iteration output.
  - This script focuses on feature / geometry distillation; no ground-truth labels required.

"""
from __future__ import annotations

import argparse
import json
import logging
import os
import random
import sys
from dataclasses import dataclass
from pathlib import Path
from typing import List, Dict, Tuple

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data.distributed import DistributedSampler
from tqdm.auto import tqdm

# Repo root inference: make the repository root importable regardless of the
# working directory this script is launched from. Use `sys` directly instead
# of the accidental `os.sys` attribute (not a documented public API).
REPO_ROOT = Path(__file__).resolve().parents[2]
if str(REPO_ROOT) not in sys.path:
    sys.path.insert(0, str(REPO_ROOT))

from mini_vggt.Distill.vggt_mini_gqa import MiniVGGTGQA, MiniVGGTGQAConfig
from vggt.models.vggt import VGGT
from vggt.utils.load_fn import load_and_preprocess_images

logger = logging.getLogger(__name__)

# ----------------------------------------------------------------------------
# Dataset
# ----------------------------------------------------------------------------
@dataclass
class OutputSample:
    """One saved teacher-output iteration discovered under outputs_root."""
    npz_path: str       # path to outputs.npz holding teacher tensors
    metadata_path: str  # path to the matching metadata.json (has image_paths)
    suite: str          # suite name inferred from the directory layout ("" if unknown)
    scene: str          # scene name inferred from the directory layout ("" if unknown)
    sample: str         # sample folder name inferred from the path ("" if unknown)
    iteration: int      # index parsed from the "iter_XXXX" folder name; -1 if unparsable

class SavedOutputsDataset(Dataset):
    """Dataset that loads teacher outputs (NPZ) and associated images.

    Each item returns a dict with: images(Tensor S,3,H,W), teacher_depth, teacher_world_points,
    teacher_pose_enc (S,7), plus meta info.
    """
    def __init__(self, outputs_root: str, preprocess_mode: str = "crop", min_images: int = 3, max_images: int = 8):
        self.outputs_root = outputs_root
        self.preprocess_mode = preprocess_mode
        self.min_images = min_images
        self.max_images = max_images
        self.samples: List[OutputSample] = self._collect()
        if len(self.samples) == 0:
            raise RuntimeError(f"No NPZ outputs found under {outputs_root}")
        logger.info(f"Collected {len(self.samples)} NPZ iterations for training")

    def _collect(self) -> List[OutputSample]:
        items: List[OutputSample] = []
        root = Path(self.outputs_root)
        if not root.is_dir():
            return items

        # Recursively find all outputs.npz files to be robust to different folder layouts
        for npz_path in sorted(root.rglob("outputs.npz")):
            try:
                iter_dir = npz_path.parent
                # metadata might be in same folder
                meta_path = iter_dir / "metadata.json"
                if not meta_path.exists():
                    # try parent folder
                    meta_path = iter_dir.parent / "metadata.json"
                if not meta_path.exists():
                    # skip if metadata missing
                    continue

                # infer suite/scene/sample from path parts if possible
                parts = iter_dir.parts
                # default values
                suite = parts[-4] if len(parts) >= 4 else ""
                scene = parts[-3] if len(parts) >= 3 else ""
                sample = parts[-2] if len(parts) >= 2 else ""
                # iteration index from iter_dir name
                try:
                    iteration = int(iter_dir.name.split("_")[-1])
                except Exception:
                    iteration = -1

                items.append(OutputSample(str(npz_path), str(meta_path), suite, scene, sample, iteration))
            except Exception:
                continue

        return items

    def __len__(self) -> int:
        return len(self.samples)

    def __getitem__(self, idx: int) -> Dict:
        item = self.samples[idx]
        # Load metadata
        with open(item.metadata_path, "r", encoding="utf-8") as f:
            meta = json.load(f)
        image_paths: List[str] = meta.get("image_paths", [])
        if not image_paths:
            raise RuntimeError(f"Metadata missing image_paths: {item.metadata_path}")
        # Reload images (student needs original RGB)
        images = load_and_preprocess_images(image_paths, mode=self.preprocess_mode)
        # Load teacher outputs
        npz = np.load(item.npz_path)
        # Depth
        depth = None
        for k in ["depth", "pred_depth", "teacher_depth"]:
            if k in npz:
                depth = npz[k]
                break
        if depth is None:
            raise RuntimeError(f"Depth not found in {item.npz_path}")
        # world points
        wp = None
        for k in ["world_points", "points", "world" ]:
            if k in npz:
                wp = npz[k]
                break
        if wp is None:
            raise RuntimeError(f"world_points not found in {item.npz_path}")
        # pose enc
        pose_enc = None
        for k in ["pose_enc", "camera_pose", "poses"]:
            if k in npz:
                pose_enc = npz[k]
                break
        if pose_enc is None:
            # fallback try list items
            pose_list = [v for key, v in npz.items() if key.startswith("pose_enc_list_item_")]
            if pose_list:
                # take last or first
                pose_enc = pose_list[-1]
        if pose_enc is None:
            raise RuntimeError(f"pose_enc not found in {item.npz_path}")

        # Normalize shapes: Expect (S,H,W,1) depth or (S,H,W)
        if depth.ndim == 5 and depth.shape[0] == 1:
            depth = depth[0]
        if depth.ndim == 4 and depth.shape[-1] == 1:
            depth = depth[...,0]
        # Depth now (S,H,W)

        # World points shape (S,H,W,3)
        # World points shape: accept several variants produced by teacher
        # Common valid shapes we handle:
        #  - (S, H, W, 3)
        #  - (1, S, H, W, 3)
        #  - (S, 1, H, W, 3) -> squeeze
        #  - (1, S, 1, H, W, 3) -> reduce
        if wp.ndim == 6 and wp.shape[0] == 1:
            # (1, S, 1, H, W, 3) or similar -> remove leading batch
            wp = wp[0]

        if wp.ndim == 5:
            # If leading batch dim present (1, S, H, W, 3) -> remove
            if wp.shape[0] == 1:
                wp = wp[0]

            # If an extra singleton channel dim exists before the last (S,1,H,W,3) -> squeeze it
            if wp.ndim == 5 and wp.shape[-2] == 1:
                wp = wp.squeeze(-2)

        if wp.ndim != 4 or wp.shape[-1] != 3:
            raise RuntimeError(f"Unexpected world_points shape {wp.shape} in {item.npz_path}")

        # Pose enc shape (S,7)
        if pose_enc.ndim == 3 and pose_enc.shape[0] == 1:
            pose_enc = pose_enc[0]
        if pose_enc.ndim == 2 and pose_enc.shape[-1] in (6,7,8,9):
            pass
        else:
            raise RuntimeError(f"Unexpected pose_enc shape {pose_enc.shape} in {item.npz_path}")

        return {
            "images": images,  # Tensor (S,3,H,W)
            "teacher_depth": torch.from_numpy(depth).float(),  # (S,H,W)
            "teacher_world_points": torch.from_numpy(wp).float(),  # (S,H,W,3)
            "teacher_pose_enc": torch.from_numpy(pose_enc).float(),  # (S,7)
            "meta": meta,
            "npz_path": item.npz_path,
        }

# ----------------------------------------------------------------------------
# Losses
# ----------------------------------------------------------------------------
class DepthLoss(nn.Module):
    """Weighted L1 distillation loss over valid (positive) teacher depth pixels."""

    def __init__(self, weight: float = 1.0):
        super().__init__()
        self.weight = weight

    def forward(self, pred: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
        """Masked L1 between predicted and teacher depth.

        pred: (B,S,H,W,1) or (B,S,H,W); target: (B,S,H,W) or (S,H,W).
        """
        # Drop a trailing singleton channel so both tensors are (B,S,H,W).
        if pred.dim() == 5 and pred.shape[-1] == 1:
            pred = pred.squeeze(-1)
        # Promote an un-batched (S,H,W) target to (1,S,H,W).
        if target.dim() == 3:
            target = target.unsqueeze(0)
        if pred.shape != target.shape:
            raise ValueError(f"Depth shape mismatch pred {pred.shape} vs target {target.shape}")
        mask = target > 0
        if not mask.any():
            # No supervised pixels: contribute zero loss.
            return pred.new_tensor(0.0)
        return self.weight * F.l1_loss(pred[mask], target[mask])

class PointCloudLoss(nn.Module):
    """Weighted MSE between predicted 3D points and teacher world points.

    pred_pts3d must be (B,S,H,W,3); teacher_world may be (S,H,W,3) or
    (B,S,H,W,3). Pixels whose teacher point has zero norm are ignored.
    """

    def __init__(self, weight: float = 1.0):
        super().__init__()
        self.weight = weight

    def forward(self, pred_pts3d: torch.Tensor, teacher_world: torch.Tensor) -> torch.Tensor:
        if pred_pts3d.dim() != 5 or pred_pts3d.shape[-1] != 3:
            raise ValueError(f"pred_pts3d shape expected (B,S,H,W,3), got {pred_pts3d.shape}")
        # Add a batch axis to an un-batched (S,H,W,3) teacher tensor.
        if teacher_world.dim() == 4 and teacher_world.shape[-1] == 3:
            teacher_world = teacher_world.unsqueeze(0)
        if teacher_world.shape != pred_pts3d.shape:
            raise ValueError(f"Teacher world shape {teacher_world.shape} mismatch pred {pred_pts3d.shape}")
        # Valid pixels are those where the teacher stored a nonzero point.
        mask = teacher_world.norm(dim=-1) > 0
        if not mask.any():
            return pred_pts3d.new_tensor(0.0)
        return self.weight * F.mse_loss(pred_pts3d[mask], teacher_world[mask])

class CameraLoss(nn.Module):
    """Weighted MSE between predicted and teacher camera pose encodings."""

    def __init__(self, weight: float = 1.0):
        super().__init__()
        self.weight = weight

    def forward(self, pred_pose_enc: torch.Tensor, teacher_pose_enc: torch.Tensor) -> torch.Tensor:
        # Accept an un-batched (S,D) teacher encoding by adding a batch axis.
        if teacher_pose_enc.ndim == 2:
            teacher_pose_enc = teacher_pose_enc.unsqueeze(0)
        if teacher_pose_enc.shape != pred_pose_enc.shape:
            raise ValueError(f"Camera pose shape mismatch: pred {pred_pose_enc.shape} vs teacher {teacher_pose_enc.shape}")
        return self.weight * F.mse_loss(pred_pose_enc, teacher_pose_enc)

# ----------------------------------------------------------------------------
# Teacher depth head loading
# ----------------------------------------------------------------------------

def load_teacher_heads(checkpoint: str, device: torch.device):
    """Load frozen depth/camera/point heads from a teacher VGGT checkpoint.

    Returns the tuple (depth_head, camera_head, point_head), each moved to
    `device`, switched to eval mode, with all parameters frozen.
    Raises ValueError when any required head is missing from the model.
    """
    teacher = VGGT()
    state = torch.load(checkpoint, map_location="cpu")
    # Checkpoints may wrap weights under a "model" key or be a raw state dict.
    teacher.load_state_dict(state.get("model", state), strict=False)
    heads = (teacher.depth_head, teacher.camera_head, teacher.point_head)
    if any(head is None for head in heads):
        raise ValueError("Teacher checkpoint missing required heads (depth/camera/point)")
    # Freeze and move each head: distillation never updates teacher weights.
    for head in heads:
        for param in head.parameters():
            param.requires_grad_(False)
        head.eval().to(device)
    return heads

# ----------------------------------------------------------------------------
# Arg parsing
# ----------------------------------------------------------------------------

def parse_args():
    ap = argparse.ArgumentParser(description="Train MiniVGGT-GQA with camera+depth+pcd distillation from saved teacher outputs")
    ap.add_argument("--outputs-root", required=True, help="Root of saved teacher outputs (VGGT_outputs/...)")
    ap.add_argument("--teacher-checkpoint", required=True, help="Teacher VGGT checkpoint for depth head")
    ap.add_argument("--student-pretrained", required=False, help="Optional pretrained student checkpoint")
    ap.add_argument("--epochs", type=int, default=20)
    ap.add_argument("--lr", type=float, default=1e-5)
    ap.add_argument("--batch-size", type=int, default=1)
    ap.add_argument("--num-workers", type=int, default=4)
    ap.add_argument("--device", default="cuda:0")
    ap.add_argument("--loss-weight-depth", type=float, default=1.0)
    ap.add_argument("--loss-weight-camera", type=float, default=0.1)
    ap.add_argument("--loss-weight-pcd", type=float, default=0.5)
    ap.add_argument("--grad-clip", type=float, default=1.0)
    ap.add_argument("--log-every", type=int, default=20)
    ap.add_argument("--save-path", default="/data0/liqifeng/ZYC/vggt/mini_vggt/Distill/mini_vggt_depth_pcd.pt")
    ap.add_argument("--seed", type=int, default=2025)
    ap.add_argument("--local_rank", type=int, default=-1)
    ap.add_argument("--preprocess-mode", choices=["crop","pad"], default="crop")
    ap.add_argument("--smoke-test", action="store_true", help="Run one forward pass and exit")
    ap.add_argument("--freeze-projector", action="store_true", help="Freeze projector weights (if using pretrained mapping)")
    return ap.parse_args()

# ----------------------------------------------------------------------------
# Training loop
# ----------------------------------------------------------------------------

def setup_logging():
    """Configure root logging: INFO level with a timestamped message format."""
    log_format = "%(asctime)s | %(levelname)s | %(message)s"
    logging.basicConfig(format=log_format, level=logging.INFO)


def train():
    """Run the depth + camera + point-cloud distillation training loop.

    Per-batch data flow (batch_size is fixed to 1):
      student.forward_features -> 4 token maps -> FeatureProjector (lifts them
      into the teacher's 2048-dim token space) -> frozen teacher depth/camera/
      point heads -> regression losses against the teacher outputs stored in
      the NPZ files. Only the student and the projector receive gradients.
    """
    args = parse_args()
    setup_logging()

    # Seed all RNGs for reproducibility.
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)

    # Distributed setup: local_rank >= 0 means an external launcher set it;
    # otherwise run single-process on args.device (CPU fallback if no CUDA).
    if args.local_rank != -1:
        torch.cuda.set_device(args.local_rank)
        torch.distributed.init_process_group(backend="nccl")
        device = torch.device(f"cuda:{args.local_rank}")
        world_size = torch.distributed.get_world_size()  # not read again below
        is_main = torch.distributed.get_rank() == 0
    else:
        device = torch.device(args.device if torch.cuda.is_available() else "cpu")
        world_size = 1
        is_main = True

    # The shape handling below assumes a leading batch dim of exactly 1.
    if args.batch_size != 1:
        raise ValueError("Current implementation supports batch_size=1")

    dataset = SavedOutputsDataset(outputs_root=args.outputs_root, preprocess_mode=args.preprocess_mode)

    if args.local_rank != -1:
        sampler = DistributedSampler(dataset, shuffle=True)
        dataloader = DataLoader(dataset, batch_size=args.batch_size, sampler=sampler, num_workers=args.num_workers, pin_memory=True)
    else:
        dataloader = DataLoader(dataset, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers, pin_memory=True)

    # Build student, optionally warm-started from a previous distillation run.
    student = MiniVGGTGQA(MiniVGGTGQAConfig())
    if args.student_pretrained:
        ckpt = torch.load(args.student_pretrained, map_location="cpu")
        sd = ckpt.get("model", ckpt)
        missing, unexpected = student.load_state_dict(sd, strict=False)
        if is_main:
            logger.info(f"Loaded student pretrained: missing={len(missing)}, unexpected={len(unexpected)}")
    student.to(device)

    # Frozen teacher heads act as decoders for the projected student tokens.
    depth_head, camera_head, point_head = load_teacher_heads(args.teacher_checkpoint, device)

    # Maps the student's 4 token maps (channel dim 2*embed_dim) into a 24-slot
    # teacher token list at layer indices [4, 11, 17, 23]; the remaining slots
    # are zero-filled so the DPT-style heads receive a full-length list.
    class FeatureProjector(nn.Module):
        def __init__(self, student_dim: int, teacher_dim: int):
            super().__init__()
            # Teacher transformer layer slots the 4 student maps stand in for.
            self.layer_mapping = [4,11,17,23]
            self.projections = nn.ModuleList([nn.Linear(student_dim, teacher_dim) for _ in range(4)])
        def forward(self, student_tokens: List[torch.Tensor]) -> List[torch.Tensor]:
            # student_tokens: list of 4 tensors, each (B,S,P,student_dim).
            B,S,P,Cs = student_tokens[0].shape
            teacher_tokens: List[torch.Tensor] = [None]*24
            for i,(tok,tid) in enumerate(zip(student_tokens,self.layer_mapping)):
                # Linear projection over the channel dim: (B,S,P,Cs)->(B,S,P,Ct).
                proj_tok = self.projections[i](tok)
                teacher_tokens[tid] = proj_tok
            # Zero-fill unmapped slots so downstream heads can index any layer.
            template = teacher_tokens[self.layer_mapping[0]]
            for i in range(24):
                if teacher_tokens[i] is None:
                    teacher_tokens[i] = torch.zeros_like(template)
            return teacher_tokens

    projector = FeatureProjector(student_dim=student.config.embed_dim*2, teacher_dim=2048).to(device)

    if args.freeze_projector:
        for p in projector.parameters():
            p.requires_grad_(False)
        if is_main:
            logger.info("Projector frozen")

    # Loss modules; each folds its CLI weight into the returned scalar.
    depth_loss_fn = DepthLoss(args.loss_weight_depth).to(device)
    camera_loss_fn = CameraLoss(weight=args.loss_weight_camera).to(device)
    pcd_loss_fn = PointCloudLoss(args.loss_weight_pcd).to(device)

    # Optimizer over student params + whichever projector params are trainable.
    opt_params = list(student.parameters()) + [p for p in projector.parameters() if p.requires_grad]
    optimizer = torch.optim.AdamW(opt_params, lr=args.lr, weight_decay=0.01)

    if args.local_rank != -1:
        student = DDP(student, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=False)

    # Smoke test: one forward + loss computation without backprop, then exit.
    if args.smoke_test:
        batch = next(iter(dataloader))
        images = batch["images"].to(device)  # (B,S,3,H,W) after DataLoader collate
        assert images.dim() == 5, f"Expected 5D images (B,S,3,H,W), got {images.shape}"
        model_ref = student.module if isinstance(student, DDP) else student
        tokens_list, patch_start_idx = model_ref.forward_features(images)
        teacher_pose_enc = batch["teacher_pose_enc"].to(device)  # (B,S,D)
        with torch.no_grad():
            # Full 24-slot token list for the depth/point heads.
            teacher_tokens = projector(tokens_list)
            pred_depth, _ = depth_head(teacher_tokens, images=images, patch_start_idx=patch_start_idx)
            d_loss = depth_loss_fn(pred_depth, batch["teacher_depth"].to(device))
            # Camera head consumes the 4 projected maps directly (no zero-fill).
            proj_tokens_list = [proj(tokens_list[i]) for i, proj in enumerate(projector.projections)]
            cam_pred_list = camera_head(proj_tokens_list)
            cam_pred = cam_pred_list[-1]  # last element of the head's output list
            c_loss = camera_loss_fn(cam_pred, teacher_pose_enc)
            # Point head shares the 24-slot token list with the depth head.
            pts3d, _ = point_head(teacher_tokens, images=images, patch_start_idx=patch_start_idx)
            p_loss = pcd_loss_fn(pts3d, batch["teacher_world_points"].to(device))
        if is_main:
            logger.info(f"Smoke test losses: depth={d_loss.item():.4f} camera={c_loss.item():.4f} pcd={p_loss.item():.4f}")
        return

    global_step = 0
    for epoch in range(args.epochs):
        if args.local_rank != -1:
            # Reshuffle the per-rank shards each epoch under DDP.
            dataloader.sampler.set_epoch(epoch)
        (student.module if isinstance(student, DDP) else student).train()
        # Teacher heads always stay in eval mode.
        depth_head.eval(); camera_head.eval(); point_head.eval()
        running = 0.0
        progress = tqdm(dataloader, desc=f"Epoch {epoch+1}/{args.epochs}", disable=not is_main)
        for batch_idx, batch in enumerate(progress):
            images = batch["images"].to(device)  # (B,S,3,H,W) after DataLoader collate
            teacher_depth = batch["teacher_depth"].to(device)  # (B,S,H,W)
            teacher_world = batch["teacher_world_points"].to(device)  # (B,S,H,W,3)
            teacher_pose_enc = batch["teacher_pose_enc"].to(device)  # (B,S,D) where D in {6,7,8,9}

            assert images.dim() == 5, f"Expected 5D images (B,S,3,H,W), got {images.shape}"
            model_ref = student.module if isinstance(student, DDP) else student
            tokens_list, patch_start_idx = model_ref.forward_features(images)
            # Depth / point heads take the full 24-slot projected token list.
            teacher_tokens = projector(tokens_list)
            pred_depth, _ = depth_head(teacher_tokens, images=images, patch_start_idx=patch_start_idx)
            # Camera head takes the 4 projected maps; keep the final prediction.
            proj_tokens_list = [proj(tokens_list[i]) for i, proj in enumerate(projector.projections)]
            cam_pred_list = camera_head(proj_tokens_list)
            cam_pred = cam_pred_list[-1]
            # Point cloud prediction from the shared projected tokens.
            pts3d, _ = point_head(teacher_tokens, images=images, patch_start_idx=patch_start_idx)

            # Weighted distillation losses (weights live inside the modules).
            d_loss = depth_loss_fn(pred_depth, teacher_depth)
            c_loss = camera_loss_fn(cam_pred, teacher_pose_enc)
            p_loss = pcd_loss_fn(pts3d, teacher_world)
            total_loss = d_loss + c_loss + p_loss

            # Skip the optimizer step (and clear stale grads) on NaN/Inf loss.
            if torch.isnan(total_loss) or torch.isinf(total_loss):
                logger.warning(f"Invalid loss at step {global_step}")
                optimizer.zero_grad()
                continue

            total_loss.backward()
            if args.grad_clip and args.grad_clip > 0:
                # Clip gradients for all trainable params (student + projector).
                torch.nn.utils.clip_grad_norm_(opt_params, args.grad_clip)
            optimizer.step()
            optimizer.zero_grad()

            running += total_loss.item()
            global_step += 1
            if is_main and global_step % args.log_every == 0:
                # Average over the last log window, then reset the accumulator.
                avg = running / args.log_every
                progress.set_postfix({"loss": f"{avg:.4f}"})
                running = 0.0

        # Checkpoint at epoch end (rank 0 only); overwrites the same file.
        if is_main:
            model_sd = (student.module if isinstance(student, DDP) else student).state_dict()
            torch.save({
                "model": model_sd,
                "epoch": epoch,
                "global_step": global_step,
                "args": vars(args),
            }, args.save_path)
            logger.info(f"Saved checkpoint to {args.save_path} (epoch {epoch+1})")

    logger.info("Training complete")

if __name__ == "__main__":
    # Script entry point: run the distillation training loop.
    train()
