from models.surgformer import LitSurgFormer
import pytorch_lightning as pl
from pytorch_lightning.callbacks import ModelCheckpoint
from dataset.autolaparo import AutoLaparo
from torch.utils.data import Subset, DataLoader
import pathlib
from typing import List
import torch
import os
import time
import glob


class TimeBasedCheckpoint(pl.Callback):
    """Periodically save training checkpoints on a wall-clock schedule.

    A checkpoint is written whenever ``save_interval`` seconds have elapsed
    since the last save, or every 1000 training batches, whichever comes
    first. Only the newest ``max_checkpoints`` files are retained; older
    ones are deleted after each save.
    """

    def __init__(self, save_dir: pathlib.Path, save_interval: int = 1800, max_checkpoints: int = 5):
        """
        Args:
            save_dir: Directory where checkpoint files are written.
            save_interval: Minimum number of seconds between time-triggered saves.
            max_checkpoints: Maximum number of checkpoint files to keep on disk.
        """
        super().__init__()
        self.save_interval = save_interval
        self.max_checkpoints = max_checkpoints
        self.last_save_time = time.time()
        self.checkpoint_dir = save_dir

        # Make sure the save directory exists.
        os.makedirs(self.checkpoint_dir, exist_ok=True)

    def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx):
        current_time = time.time()
        time_due = current_time - self.last_save_time >= self.save_interval
        # Fix: require batch_idx > 0 — batch_idx == 0 satisfies `% 1000 == 0`,
        # so the original wrote a spurious checkpoint (and reset the timer)
        # on the first batch of every epoch.
        batch_due = batch_idx > 0 and batch_idx % 1000 == 0
        if time_due or batch_due:
            checkpoint_path = os.path.join(
                self.checkpoint_dir,
                f"checkpoint_epoch_{trainer.current_epoch}_batch_{batch_idx}.ckpt",
            )
            trainer.save_checkpoint(checkpoint_path)
            self.last_save_time = current_time

            # Prune old files so at most `max_checkpoints` remain.
            self.manage_checkpoints()

    def manage_checkpoints(self):
        """Delete the oldest checkpoints, keeping at most ``max_checkpoints``."""
        # Collect all checkpoint files in the save directory.
        checkpoints = glob.glob(os.path.join(self.checkpoint_dir, "*.ckpt"))
        # Sort oldest-first by filesystem creation/metadata-change time.
        checkpoints.sort(key=os.path.getctime)

        # Remove from the front (oldest) until within the retention limit.
        while len(checkpoints) > self.max_checkpoints:
            os.remove(checkpoints[0])
            checkpoints.pop(0)


def convert_to_channel_first(batch: List[tuple]):
    """Collate (clip, label) pairs into channel-first batch tensors.

    Each sample's clip has its first two dimensions swapped (time and
    channel), then clips and labels are stacked along a new batch axis.

    Returns:
        imgs: stacked clips of shape (B, C, T, H, W).
        labels: stacked labels of shape (B,).
    """
    clips, phases = zip(*batch)

    # Swap dim 0 and dim 1 of every clip, then stack -> (B, C, T, H, W).
    imgs = torch.stack([clip.transpose(0, 1) for clip in clips])
    # Stack scalar labels -> (B,).
    labels = torch.stack(list(phases))

    return imgs, labels


# Single output directory for all "best model" checkpoints.
OUTPUT_DIR = pathlib.Path("/opt/data/private/code/george-reimplement/medical/SurgFormer")


def main():
    """Build the AutoLaparo data pipeline and train SurgFormer."""
    dataset_root = pathlib.Path("/path/to/autolaparo")
    dataset = AutoLaparo(dataset_root, resize=(540, 960))

    # Videos are split 10 / 4 / 7 into train / val / test; frame counts per
    # video come from AutoLaparo.NUM_FRAMES_VIDEO.
    train_size = sum(AutoLaparo.NUM_FRAMES_VIDEO[:10])
    val_size = sum(AutoLaparo.NUM_FRAMES_VIDEO[10:14])
    test_size = len(dataset) - train_size - val_size

    train_dataset = Subset(dataset, range(0, train_size))
    val_dataset = Subset(dataset, range(train_size, train_size + val_size))
    test_dataset = Subset(dataset, range(train_size + val_size, len(dataset)))

    train_loader = DataLoader(train_dataset, batch_size=2, num_workers=8, collate_fn=convert_to_channel_first)
    val_loader = DataLoader(val_dataset, batch_size=2, num_workers=8, collate_fn=convert_to_channel_first)

    # Fix: the two "best" callbacks previously shared one filename template in
    # the same directory, so the best-accuracy and best-loss checkpoints could
    # collide/overwrite each other. Give each a distinct prefix.
    best_acc_callback = ModelCheckpoint(
        dirpath=OUTPUT_DIR,
        filename="best_acc_epoch{epoch}_acc{val_avg_acc:.2f}_loss{val_avg_loss:.2f}",
        monitor="val_avg_acc",
        mode="max",
        verbose=True,
        save_top_k=1
    )
    best_loss_callback = ModelCheckpoint(
        dirpath=OUTPUT_DIR,
        filename="best_loss_epoch{epoch}_acc{val_avg_acc:.2f}_loss{val_avg_loss:.2f}",
        monitor="val_avg_loss",
        mode="min",
        verbose=True,
        save_top_k=1
    )
    # Time-based backup checkpoints go to a subdirectory, every 15 minutes.
    backup_callback = TimeBasedCheckpoint(
        save_dir=OUTPUT_DIR / "checkpoints",
        save_interval=900
    )

    trainer = pl.Trainer(accelerator="gpu", max_epochs=50, callbacks=[best_acc_callback, best_loss_callback, backup_callback])
    model = LitSurgFormer(
        img_size=(540, 960), num_heads=8
    )
    trainer.fit(model, train_loader, val_loader)


# Fix: guard the entry point. DataLoader(num_workers=8) under spawn-based
# worker start (Windows/macOS) re-imports this module; without the guard that
# re-import would re-build the dataset and recursively start training.
if __name__ == "__main__":
    main()
