import argparse

import sys
import os

# Directory containing this script (distill_train.py), i.e. train/
train_dir = os.path.dirname(os.path.abspath(__file__))
# Go up two levels: train/ -> Distill/ -> vggt/ (the project root)
project_root = os.path.dirname(os.path.dirname(train_dir))
# Put the project root on the module search path so `Distill.*` imports resolve
sys.path.append(project_root)
from typing import Any, Dict

import torch
from torch.utils.data import DataLoader

from Distill.dataset.distill_dataset import DistillCacheDataset
from Distill.loss.distill_loss import DistillationLoss


def collate_fn(batch):
    """Identity collate: return the sampled items as a plain list.

    No stacking is done here; the training loop iterates the samples
    one by one and handles tensor batching itself.
    """
    return batch


def build_dataloader(cache_dir: str, frame_layers: list, global_layers: list, batch_size: int = 4, num_workers: int = 2, shuffle: bool = False):
    """Create a DataLoader over cached distillation samples.

    Args:
        cache_dir: Directory holding the cached samples (teacher outputs included).
        frame_layers: Indices of frame-attention layers to load from the cache.
        global_layers: Indices of global-attention layers to load from the cache.
        batch_size: Samples per batch; batches are plain lists (see collate_fn).
        num_workers: Number of DataLoader worker processes.
        shuffle: Reshuffle samples every epoch. Defaults to False, preserving
            the previous (unshuffled) behavior; pass True for training runs.

    Returns:
        A torch DataLoader yielding lists of sample dicts.
    """
    ds = DistillCacheDataset(cache_dir, frame_layers=frame_layers, global_layers=global_layers)
    return DataLoader(ds, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers, collate_fn=collate_fn)


def run_one_epoch(dataloader, student_model, teacher_store_dir: str, loss_fn: "DistillationLoss", optimizer=None, device="cuda"):
    """Run one epoch of distillation training and return the summed loss.

    Args:
        dataloader: Iterable of batches, where each batch is a list of sample
            dicts (see collate_fn). Each sample carries the cached teacher
            outputs and optionally an "image" tensor of shape (C, H, W).
        student_model: Model called as ``student_model(image, return_intermediate=True)``.
        teacher_store_dir: Currently unused — teacher outputs are read from the
            sample dicts themselves, not reloaded from disk.
        loss_fn: Callable returning a dict with an "objective" loss tensor.
        optimizer: Optional optimizer; when None the epoch runs forward/backward
            only (e.g. for loss inspection) without updating weights.
        device: Device the per-sample image batch is moved to.

    Returns:
        float: Sum of per-sample loss values over the whole epoch.
    """
    student_model.train()
    total_loss = 0.0
    for batch in dataloader:
        # Gradients are accumulated across all samples of the batch and a
        # single optimizer step is taken afterwards.
        if optimizer is not None:
            # Bug fix: zero_grad() was previously called unconditionally and
            # crashed when optimizer=None despite the guarded step() below.
            optimizer.zero_grad()
        batch_loss = 0.0
        for sample in batch:
            # Teacher outputs are already present in the cached sample dict.
            teacher_outputs = {
                "frame_attn": sample.get("frame_attn", {}),
                "global_attn": sample.get("global_attn", {}),
                "depth": sample.get("depth"),
                "points": sample.get("points"),
            }

            image = sample.get("image")
            if image is None:
                # Cannot run the student model without an image; skip sample.
                continue
            # Promote to a batch of one: (C, H, W) -> (1, C, H, W).
            image = image.unsqueeze(0).to(device)

            student_outputs = student_model(image, return_intermediate=True)

            loss_dict = loss_fn(student_outputs, teacher_outputs)
            loss = loss_dict["objective"]
            loss.backward()
            batch_loss += loss.item()

        if optimizer is not None:
            optimizer.step()
        total_loss += batch_loss
    return total_loss


def main():
    """CLI entry point: build the data pipeline, student, loss and optimizer,
    then train for a single epoch over the cached teacher outputs."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--cache_dir", required=True)
    parser.add_argument("--layers", nargs="+", type=int, default=[0, 1, 2, 3])
    parser.add_argument("--batch", type=int, default=2)
    args = parser.parse_args()

    # Imported lazily so argument parsing works without the model package.
    from Distill.vggt_distilled.model import DistilledVGGT

    device = "cuda" if torch.cuda.is_available() else "cpu"
    student = DistilledVGGT().to(device)

    loader = build_dataloader(args.cache_dir, args.layers, args.layers, batch_size=args.batch)
    criterion = DistillationLoss(frame_layers=args.layers, global_layers=args.layers)
    opt = torch.optim.AdamW(student.parameters(), lr=5e-5)

    run_one_epoch(loader, student, args.cache_dir, criterion, optimizer=opt, device=device)


if __name__ == "__main__":
    # Run training only when executed as a script, not when imported.
    main()
