# import argparse
# import random
# import torch
#
# from pytorch_lightning import Trainer, seed_everything
# from pytorch_lightning.callbacks import ModelCheckpoint
# from pytorch_lightning.loggers import WandbLogger
# from torch.utils.data import DataLoader
# from datasets import Custom_Collator, load_dataset
# from MemeCLIP import create_model, MemeCLIP
# from configs import cfg
# import os
# import torchmetrics
# from tqdm import tqdm
#
# torch.use_deterministic_algorithms(False)
#
# def main(cfg):
#
#     seed_everything(cfg.seed, workers=True)
#
#     dataset_train = load_dataset(cfg=cfg, split='train')
#     dataset_val = load_dataset(cfg=cfg, split='val')
#     dataset_test = load_dataset(cfg=cfg, split='test')
#
#     print("Number of training examples:", len(dataset_train))
#     print("Number of validation examples:", len(dataset_val))
#     print("Number of test examples:", len(dataset_test))
#
#
#     collator = Custom_Collator(cfg)
#
#     train_loader = DataLoader(dataset_train, batch_size=cfg.batch_size, shuffle=True,
#                                   collate_fn=collator, num_workers=0)
#     val_loader = DataLoader(dataset_test, batch_size=cfg.batch_size, collate_fn=collator, num_workers=0)
#     test_loader = DataLoader(dataset_test, batch_size=cfg.batch_size,
#                                  collate_fn=collator, num_workers=0)
#
#     model = create_model(cfg)
#
#     num_params = {f'param_{n}': p.numel() for n, p in model.named_parameters() if p.requires_grad}
#
#     monitor = "val/auroc"
#     checkpoint_callback = ModelCheckpoint(dirpath=cfg.checkpoint_path, filename='model',
#                                           monitor=monitor, mode='max', verbose=True, save_weights_only=True,
#                                           save_top_k=1, save_last=False)
#
#
#     trainer = Trainer(accelerator='gpu', devices=cfg.gpus, max_epochs=cfg.max_epochs, callbacks=[checkpoint_callback], deterministic=False)
#
#     if cfg.reproduce == False:
#
#         trainer.fit(model, train_dataloaders=train_loader, val_dataloaders=val_loader)
#
#     model = MemeCLIP.load_from_checkpoint(checkpoint_path = cfg.checkpoint_file, cfg = cfg)
#     trainer.test(model, dataloaders=test_loader)
#
# if __name__ == '__main__':
#       main(cfg)

import argparse
import random
import torch
import gc
import os

from pytorch_lightning import Trainer, seed_everything
from pytorch_lightning.callbacks import ModelCheckpoint, EarlyStopping
from pytorch_lightning.loggers import TensorBoardLogger
from torch.utils.data import DataLoader
from datasets import Custom_Collator, load_dataset
from MemeCLIP import create_model, MemeCLIP
from configs import cfg
import torchmetrics
from tqdm import tqdm

# Performance/memory tuning for the target GPU (NVIDIA T1000).
torch.use_deterministic_algorithms(False)
torch.backends.cudnn.benchmark = True  # let cuDNN autotune conv kernels (best with fixed input sizes)
os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'max_split_size_mb:128'  # reduce CUDA allocator fragmentation; NOTE(review): presumably takes effect because no CUDA allocation has happened yet — confirm


def print_gpu_memory():
    """Print current CUDA memory usage as 'allocated GB / reserved GB'.

    Silently does nothing when no CUDA device is available.
    """
    if not torch.cuda.is_available():
        return
    gib = 1024 ** 3
    allocated = torch.cuda.memory_allocated() / gib
    reserved = torch.cuda.memory_reserved() / gib
    print(
        f"GPU Memory: {allocated:.2f}GB / {reserved:.2f}GB")


def main(cfg):
    """Train (unless cfg.test_only) and test MemeCLIP on a single GPU.

    Expects cfg to provide at least: seed, batch_size, clip_variant,
    image_size, gpus, max_epochs, checkpoint_path, checkpoint_file,
    test_only.  Side effects: writes checkpoints to cfg.checkpoint_path
    and TensorBoard logs to ./tb_logs.
    """
    print("=== MemeCLIP Training on T1000 GPU ===")
    print(f"Batch size: {cfg.batch_size}")
    print(f"CLIP variant: {cfg.clip_variant}")
    print(f"Image size: {cfg.image_size}")

    seed_everything(cfg.seed, workers=True)

    # Free any cached GPU memory before loading data and model.
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
        gc.collect()

    print("Loading datasets...")
    dataset_train = load_dataset(cfg=cfg, split='train')
    dataset_val = load_dataset(cfg=cfg, split='val')
    dataset_test = load_dataset(cfg=cfg, split='test')

    print("Number of training examples:", len(dataset_train))
    print("Number of validation examples:", len(dataset_val))
    print("Number of test examples:", len(dataset_test))

    collator = Custom_Collator(cfg)

    # DataLoader settings shared by all splits, tuned for a low-memory GPU.
    common_loader_kwargs = {
        'batch_size': cfg.batch_size,
        'collate_fn': collator,
        'num_workers': 0,             # single-process loading for this GPU
        'pin_memory': True,           # faster host->device transfers
        'persistent_workers': False,  # must be False when num_workers == 0
    }

    # drop_last only for TRAINING: evaluation loaders must see every sample,
    # otherwise val/test metrics silently ignore the final partial batch.
    train_loader = DataLoader(dataset_train, shuffle=True, drop_last=True,
                              **common_loader_kwargs)
    val_loader = DataLoader(dataset_val, shuffle=False, drop_last=False,
                            **common_loader_kwargs)
    test_loader = DataLoader(dataset_test, shuffle=False, drop_last=False,
                             **common_loader_kwargs)

    print("Creating model...")
    model = create_model(cfg)

    # Report parameter counts so the frozen/trainable split is visible.
    total_params = sum(p.numel() for p in model.parameters())
    trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print(f"Total parameters: {total_params:,}")
    print(f"Trainable parameters: {trainable_params:,}")
    print(f"Trainable ratio: {trainable_params / total_params:.2%}")

    # Keep the single best checkpoint by validation AUROC, plus the last epoch.
    monitor = "val/auroc"
    checkpoint_callback = ModelCheckpoint(
        dirpath=cfg.checkpoint_path,
        filename='model_t1000_best',
        monitor=monitor,
        mode='max',
        verbose=True,
        save_weights_only=True,
        save_top_k=1,
        save_last=True  # also keep the most recent checkpoint
    )

    # Early stopping guards against overfitting; generous patience because
    # training on this GPU is slow.
    early_stop_callback = EarlyStopping(
        monitor=monitor,
        min_delta=0.001,
        patience=5,
        verbose=True,
        mode='max'
    )

    logger = TensorBoardLogger("tb_logs", name="memeclip_t1000")

    # Trainer tuned for limited GPU memory: mixed precision, gradient
    # clipping, and gradient accumulation to emulate a larger batch size.
    trainer = Trainer(
        accelerator='gpu',
        devices=cfg.gpus,
        max_epochs=cfg.max_epochs,
        callbacks=[checkpoint_callback, early_stop_callback],
        logger=logger,
        deterministic=False,
        precision='16-mixed',        # mixed precision saves GPU memory
        gradient_clip_val=1.0,       # guard against exploding gradients
        accumulate_grad_batches=2,   # effective batch = 2 * cfg.batch_size
        enable_checkpointing=True,
        enable_progress_bar=True,
        enable_model_summary=True,
        check_val_every_n_epoch=1,
        val_check_interval=0.5,      # validate twice per epoch
    )

    print_gpu_memory()

    if not cfg.test_only:
        print("Starting training...")
        try:
            trainer.fit(model, train_dataloaders=train_loader, val_dataloaders=val_loader)
            print("Training completed successfully!")
        except RuntimeError as e:
            if "out of memory" in str(e):
                print("GPU Out of Memory! Try reducing batch_size or image_size")
                print("Current settings:")
                print(f"  - batch_size: {cfg.batch_size}")
                print(f"  - image_size: {cfg.image_size}")
                print("Suggested settings:")
                print(f"  - batch_size: {cfg.batch_size // 2}")
                print(f"  - image_size: {cfg.image_size - 32}")
            raise  # bare raise preserves the original traceback

    # Reload the best checkpoint for testing; fall back to the in-memory
    # model when no checkpoint file exists.
    print("Loading best model for testing...")
    if os.path.exists(cfg.checkpoint_file):
        model = MemeCLIP.load_from_checkpoint(checkpoint_path=cfg.checkpoint_file, cfg=cfg)
    else:
        print("No checkpoint found, using current model")

    print("Starting testing...")
    test_results = trainer.test(model, dataloaders=test_loader)

    print("=== Test Results ===")
    if test_results:  # guard: trainer.test can return an empty list
        for key, value in test_results[0].items():
            try:
                print(f"{key}: {value:.4f}")
            except (TypeError, ValueError):
                # Not every logged value is numeric; print it verbatim.
                print(f"{key}: {value}")

    print_gpu_memory()

    # Release GPU memory held by the allocator before the process exits.
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
    gc.collect()


if __name__ == '__main__':
    # This script hard-requires CUDA: the Trainer is configured with
    # accelerator='gpu', so bail out early with a clear message otherwise.
    if not torch.cuda.is_available():
        print("CUDA not available! Please check your PyTorch installation.")
        # Use SystemExit rather than exit(): the exit() builtin is injected
        # by the site module and is not guaranteed to exist (e.g. python -S).
        raise SystemExit(1)

    print(f"Using GPU: {torch.cuda.get_device_name()}")
    print(f"GPU Memory: {torch.cuda.get_device_properties(0).total_memory / 1024 ** 3:.1f}GB")

    main(cfg)
