from dataset.cholec50 import NormalCholec50
from model.mtfist import MTFiST
from torch.utils.data import DataLoader
from torchvision.transforms import transforms
import pathlib
import pytorch_lightning as pl
from pytorch_lightning.callbacks import ModelCheckpoint
import time
import glob
import os


class TimeBasedCheckpoint(pl.Callback):
    """Lightning callback that saves a checkpoint on a wall-clock interval
    and every 500 training batches, keeping only the newest files.

    Args:
        save_interval: minimum seconds between time-triggered saves.
        max_checkpoints: number of most-recent checkpoint files to keep.
    """

    def __init__(self, save_interval=1800, max_checkpoints=5):
        super().__init__()
        self.save_interval = save_interval
        self.max_checkpoints = max_checkpoints
        self.last_save_time = time.time()
        self.checkpoint_dir = "/opt/data/private/code/george-reimplement/medical/MTFiST/checkpoints"

        # Make sure the save directory exists.
        os.makedirs(self.checkpoint_dir, exist_ok=True)

    def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx):
        current_time = time.time()
        # Save when save_interval has elapsed, or every 500 batches.
        # The batch_idx > 0 guard fixes a bug in the original condition:
        # 0 % 500 == 0 triggered a redundant save (plus a pruning pass)
        # on the very first batch of every epoch.
        if (current_time - self.last_save_time >= self.save_interval) or (
            batch_idx > 0 and batch_idx % 500 == 0
        ):
            checkpoint_path = os.path.join(
                self.checkpoint_dir,
                f"checkpoint_epoch_{trainer.current_epoch}_batch_{batch_idx}.ckpt",
            )
            trainer.save_checkpoint(checkpoint_path)
            self.last_save_time = current_time

            # Prune old checkpoint files.
            self.manage_checkpoints()

    def manage_checkpoints(self):
        """Delete the oldest checkpoints so at most max_checkpoints remain."""
        # Collect every checkpoint file in the save directory.
        checkpoints = glob.glob(os.path.join(self.checkpoint_dir, "*.ckpt"))
        # Sort oldest-first by creation time so deletion starts at the front.
        checkpoints.sort(key=os.path.getctime)

        # Remove the oldest files until only max_checkpoints are left.
        while len(checkpoints) > self.max_checkpoints:
            os.remove(checkpoints[0])
            checkpoints.pop(0)

# Keep only the single checkpoint with the highest ivt mAP seen so far.
map_checkpoint_callback = ModelCheckpoint(
    dirpath="/opt/data/private/code/george-reimplement/medical/MTFiST",
    filename="best-model-{epoch:02d}-{mAP_ivt:.2f}",
    monitor="mAP_ivt",
    mode="max",
    save_top_k=1,
)

# CholecT50 cross-validation folds: fold id -> list of video ids.
folds_video_ids = {
    1: [79, 2, 51, 6, 25, 14, 66, 23, 50, 111],
    2: [80, 32, 5, 15, 40, 47, 26, 48, 70, 96],
    3: [31, 57, 36, 18, 52, 68, 10, 8, 73, 103],
    4: [42, 29, 60, 27, 65, 75, 22, 49, 12, 110],
    5: [78, 43, 62, 35, 74, 1, 56, 4, 13, 92]
}
test_fold = 1

# The held-out fold becomes the test set; all remaining folds are pooled
# (in fold order) and then split into train / validation below.
test_videos = folds_video_ids[test_fold]
train_val = [
    vid
    for fold, videos in folds_video_ids.items()
    if fold != test_fold
    for vid in videos
]

# Last 5 pooled videos are held out for validation; the rest train.
train_videos = train_val[:-5]
val_videos = train_val[-5:]


# Training split: resize + random-crop augmentation, then per-channel
# normalization (mean/std presumably computed on CholecT50 — confirm).
train_dataset = NormalCholec50(
    pathlib.Path("/opt/data/private/dataset/CholecT50"),
    transforms.Compose([
        transforms.Resize((250, 250)),
        transforms.RandomCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.44893518,0.3226702,0.34424525],[0.22357443,0.18503027,0.1900281])
    ]),
    train_videos
)
# shuffle=True fixes a training defect: without it, every epoch iterates
# frames in fixed video order, which degrades SGD-style optimization.
train_loader = DataLoader(train_dataset, batch_size=320, num_workers=8, shuffle=True, drop_last=True)

# Validation split: deterministic transforms only. CenterCrop replaces the
# original RandomCrop — random augmentation on the validation set makes the
# monitored metric (mAP_ivt) noisy and non-reproducible across runs.
val_dataset = NormalCholec50(
    pathlib.Path("/opt/data/private/dataset/CholecT50"),
    transforms.Compose([
        transforms.Resize((250, 250)),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.44893518,0.3226702,0.34424525],[0.22357443,0.18503027,0.1900281])
    ]),
    val_videos
)
# drop_last=False so the metric covers the whole validation set — the
# original drop_last=True silently discarded the trailing partial batch.
val_loader = DataLoader(val_dataset, batch_size=320, num_workers=8, drop_last=False)

# Train for up to 200 epochs on a single GPU with both the best-mAP
# checkpoint and the periodic time-based checkpoint callbacks attached.
callbacks = [map_checkpoint_callback, TimeBasedCheckpoint()]
trainer = pl.Trainer(
    accelerator="gpu",
    devices=1,
    max_epochs=200,
    callbacks=callbacks,
)
model = MTFiST()
trainer.fit(model, train_loader, val_loader)
