from dataset import cholec80
import pathlib
from torch.utils.data import Subset, DataLoader
import pytorch_lightning as pl
from pytorch_lightning.callbacks import ModelCheckpoint
import torch
from models.dacat import TemporalCNN, CNNBackbone, LitDACAT
from typing import List, Tuple
import glob, time, os

# Paths for the best-performing ckpts and intermediate backup ckpts.
# NOTE(review): these two paths are defined but never used below — the
# callbacks use a hard-coded dirpath instead; confirm which location is intended.
best_checkpoint_savepath = pathlib.Path.home() / "DACAT_best_ckpts"
cache_checkpoint_savepath = pathlib.Path.home() / "DACAT_cache_ckpts"

# Keeps only the checkpoint with the lowest validation loss.
checkpoint_callback = ModelCheckpoint(
    monitor='val_avg_loss',
    mode='min',
    save_top_k=1,
    # Distinct "best-loss" prefix: both callbacks share one dirpath, and with
    # an identical filename template they could emit the same filename and
    # clobber / mis-track each other's saved file.
    filename='best-loss-{epoch:02d}-{val_avg_loss:.2f}-{val_avg_acc:.2f}',
    dirpath="/opt/data/private/code/george-reimplement/medical/DACAT"
)
# Keeps only the checkpoint with the highest validation accuracy.
checkpoint_callback2 = ModelCheckpoint(
    monitor='val_avg_acc',
    mode='max',
    save_top_k=1,
    filename='best-acc-{epoch:02d}-{val_avg_loss:.2f}-{val_avg_acc:.2f}',
    dirpath="/opt/data/private/code/george-reimplement/medical/DACAT"
)

# 用以定时备份保存ckpt的Callback
# Callback that periodically writes backup checkpoints during training.
class TimeBasedCheckpoint(pl.Callback):
    """Save a backup checkpoint every ``save_interval`` seconds or every 1000
    batches, keeping at most ``max_checkpoints`` backup files on disk.

    Args:
        save_interval: Minimum number of seconds between time-triggered saves.
        max_checkpoints: Number of newest backup files to retain.
    """

    def __init__(self, save_interval=1800, max_checkpoints=5):
        super().__init__()
        self.save_interval = save_interval
        self.max_checkpoints = max_checkpoints
        # Use a monotonic clock for interval measurement: wall-clock time
        # (time.time) can jump when the system clock is adjusted (e.g. NTP),
        # which would make the interval check fire too early or never.
        self.last_save_time = time.monotonic()
        self.checkpoint_dir = "/opt/data/private/code/george-reimplement/medical/DACAT/checkpoints"

        # Make sure the backup directory exists.
        os.makedirs(self.checkpoint_dir, exist_ok=True)

    def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx):
        now = time.monotonic()
        due_by_time = now - self.last_save_time >= self.save_interval
        # Require batch_idx > 0: `0 % 1000 == 0` is always true, which forced
        # a redundant checkpoint at the first batch of every epoch.
        due_by_count = batch_idx > 0 and batch_idx % 1000 == 0
        if due_by_time or due_by_count:
            checkpoint_path = os.path.join(
                self.checkpoint_dir,
                f"checkpoint_epoch_{trainer.current_epoch}_batch_{batch_idx}.ckpt",
            )
            trainer.save_checkpoint(checkpoint_path)
            self.last_save_time = now

            # Enforce the retention limit.
            self.manage_checkpoints()

    def manage_checkpoints(self):
        """Delete the oldest backups, keeping the newest ``max_checkpoints``."""
        # All backup files, oldest first (by filesystem creation time).
        checkpoints = sorted(
            glob.glob(os.path.join(self.checkpoint_dir, "*.ckpt")),
            key=os.path.getctime,
        )
        # Remove everything beyond the newest `max_checkpoints` files.
        excess = len(checkpoints) - self.max_checkpoints
        for stale in checkpoints[:max(excess, 0)]:
            os.remove(stale)

def convert_to_single_batch(batch: List[torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]:
    """Collate a list of samples into a single batch_size=1 sequence.

    All samples are stacked along the sequence dimension, producing a
    batch_size=1 tensor. With B = len(batch) samples of length seq_len,
    the output sequence length is T = seq_len * B, ordered time-major
    (all samples' step 0, then all samples' step 1, ...).

    Each sample must provide at least two elements: video frames of shape
    (seq_len, C, H, W) and ground truth of shape (seq_len, 1).

    Args:
        batch: Raw samples read from the Dataset; each element is one sample.

    Returns:
        Tuple[torch.Tensor, torch.Tensor]: video data of shape
        (1, T, C, H, W) and ground truth of shape (1, T, 1).
    """
    # (B, seq_len, C, H, W)
    data = torch.stack([b[0] for b in batch])
    _, _, C, H, W = data.shape
    # (T, C, H, W). Must be reshape(), not view(): transpose() returns a
    # non-contiguous tensor, so view() raises a RuntimeError for seq_len > 1.
    data = data.transpose(0, 1).reshape(-1, C, H, W).contiguous()

    # (B, seq_len, 1)
    target = torch.stack([b[1] for b in batch])
    # (T, 1) — same non-contiguity concern as above.
    target = target.transpose(0, 1).reshape(-1, 1).contiguous()

    # (1, T, C, H, W), (1, T, 1)
    return data.unsqueeze(0), target.unsqueeze(0)

# Root directory where the dataset archive was extracted.
dataset_root = pathlib.Path.home() / "dataset/cholec80"
dataset = cholec80.Cholec80(dataset_root, seq_len=1)

# Index-ordered split: 50% train, 10% val, remainder test.
train_size = int(0.5 * len(dataset))
val_size = int(0.1 * len(dataset))
test_size = len(dataset) - train_size - val_size

val_end = train_size + val_size
train_dataset = Subset(dataset, range(train_size))
val_dataset = Subset(dataset, range(train_size, val_end))
test_dataset = Subset(dataset, range(val_end, len(dataset)))


# -------  Stage 1: train the feature-extractor part  -------
# train_loader = DataLoader(train_dataset, batch_size=256, num_workers=8, collate_fn=convert_to_single_batch)
# val_loader = DataLoader(val_dataset, batch_size=256, num_workers=8, collate_fn=convert_to_single_batch)
# trainer = pl.Trainer(accelerator="gpu", max_epochs=200, callbacks=[checkpoint_callback, TimeBasedCheckpoint(), checkpoint_callback2])
# model = LitDACAT(LitDACAT.Mode.EXTRACTOR, CNNBackbone.BackboneType.ConvNeXtv2, 7, TemporalCNN.HeadType.LSTM, 7, 64, freeze_cnn=True, lr=1e-4)
# trainer.fit(model, train_loader, val_loader)

# -------  Stage 2: train the long-short prediction part from the original implementation  -------
# NOTE(review): with seq_len=1 and the collate above, each loader step yields a
# (1, 48, C, H, W) clip built from 48 consecutive dataset indices — confirm
# this is the intended sequence construction for the predictor stage.
train_loader = DataLoader(train_dataset, batch_size=48, num_workers=8, collate_fn=convert_to_single_batch)
val_loader = DataLoader(val_dataset, batch_size=48, num_workers=8, collate_fn=convert_to_single_batch)
trainer = pl.Trainer(accelerator="gpu", max_epochs=200, callbacks=[checkpoint_callback, TimeBasedCheckpoint(), checkpoint_callback2])
# model = LitDACAT(LitDACAT.Mode.PREDICTOR, CNNBackbone.BackboneType.ConvNeXtv2, 7, TemporalCNN.HeadType.LSTM, 7, 64, freeze_cnn=True)
# Path to the ckpt holding the trained feature-extractor weights.
weight_savepath = pathlib.Path.home() / "extractor_best.ckpt"
model = LitDACAT.load_from_checkpoint(weight_savepath.absolute().as_posix(), mode=LitDACAT.Mode.PREDICTOR, lr=1e-5)
trainer.fit(model, train_loader, val_loader)
