#!/usr/bin/env python
"""
CN-CLIP PyTorch Lightning 快速训练脚本
使用现有的WenwuDataset，轻松快速做实验

Usage:
    # 基础训练
    python train_lightning.py --model ViT-B-16 --epochs 5
    
    # 新的冻结策略示例
    python train_lightning.py --model ViT-L-14 --freeze visual --epochs 8
    python train_lightning.py --model ViT-B-16 --freeze backbones --epochs 5
    python train_lightning.py --model ViT-L-14 --freeze visual+projections --epochs 10
    
    # 快速实验
    python train_lightning.py --model ViT-B-16 --data-scale 0.1 --epochs 3 --experiment test_run
"""

import argparse
import os
import shlex
import sys
from datetime import datetime
from pathlib import Path

import torch
import pytorch_lightning as pl
from pytorch_lightning.callbacks import ModelCheckpoint, LearningRateMonitor, EarlyStopping, RichProgressBar
from pytorch_lightning.loggers import TensorBoardLogger
sys.path.append(str(Path(__file__).parent.parent))
from dataset.dataset import get_train_set, get_val_set, get_test_set

# 添加项目路径以支持从根目录执行
project_root = Path(__file__).parent.parent.parent
sys.path.insert(0, str(project_root / "codebase" / "cnclip_finetune"))

from model import CNClipLightning, create_data_module


def setup_callbacks(experiment_name: str, monitor: str = "val_mean_r1"):
    """Build the list of Lightning callbacks used during training.

    Args:
        experiment_name: Name used to namespace the checkpoint directory.
        monitor: Metric watched by the checkpoint callback (maximized).

    Returns:
        List of configured callback instances: checkpointing, LR
        monitoring, and a rich progress bar.
    """
    # Model checkpoints — saved under the cnclip_finetune directory.
    checkpointing = ModelCheckpoint(
        dirpath=f"./codebase/cnclip_finetune/checkpoints/{experiment_name}",
        filename="{epoch:02d}-{val_mean_r1:.4f}",
        monitor=monitor,
        mode="max",
        save_top_k=3,
        save_last=True,
        every_n_epochs=1,
        save_on_train_epoch_end=False,  # only checkpoint after validation
    )

    # Early stopping is intentionally disabled; re-enable if needed:
    # EarlyStopping(monitor=monitor, patience=5, mode="max", verbose=True)

    return [
        checkpointing,
        LearningRateMonitor(logging_interval='epoch'),  # log LR once per epoch
        RichProgressBar(),  # nicer console progress display
    ]


def setup_logger(experiment_name: str):
    """Create the experiment's TensorBoard logger.

    Logs are written under the cnclip_finetune directory; the run version
    is a month-day_hour-minute timestamp so repeated runs don't collide.

    Args:
        experiment_name: TensorBoard run name.

    Returns:
        A configured ``TensorBoardLogger``.
    """
    run_version = datetime.now().strftime("%m%d_%H%M")
    return TensorBoardLogger(
        save_dir="./codebase/cnclip_finetune/logs",
        name=experiment_name,
        version=run_version,
    )


def create_experiment_command_record(args, experiment_name: str):
    """Write a reproducibility record (train.sh + README.md) for an experiment.

    The record is only written when ``research/data/<experiment_name>``
    already exists; otherwise this function is a no-op.

    Args:
        args: Parsed argparse namespace for this run.
        experiment_name: Directory name under ``research/data`` (used
            verbatim, no date prefix is added).
    """
    exp_dir = Path("research/data") / experiment_name
    if not exp_dir.exists():
        return

    commands_dir = exp_dir / "commands"
    commands_dir.mkdir(exist_ok=True)

    # Reconstruct the full training command from the parsed arguments.
    cmd_parts = ["python codebase/cnclip_finetune/train_lightning.py"]
    for arg, value in vars(args).items():
        # Skip unset options and disabled boolean flags.
        if value is None or value is False:
            continue
        flag = f"--{arg.replace('_', '-')}"
        if value is True:
            # store_true flags take no value.
            cmd_parts.append(flag)
        else:
            # BUG FIX: quote values so experiment names / paths containing
            # spaces or shell metacharacters still produce a valid script.
            cmd_parts.append(f"{flag} {shlex.quote(str(value))}")

    command_text = " \\\n  ".join(cmd_parts) + "\n"

    # Executable shell script to re-run this experiment.
    train_script = commands_dir / "train.sh"
    with open(train_script, 'w', encoding='utf-8') as f:
        f.write("#!/bin/bash\n")
        f.write("# 自动生成的训练命令\n")
        f.write("set -e\n\n")
        f.write("# 环境准备\n")
        f.write("conda activate wenwuclip\n")
        f.write("cd /root/WenwuClip\n\n")
        f.write("# 训练命令\n")
        f.write(command_text)

    # Make the script executable.
    os.chmod(train_script, 0o755)

    # Human-readable experiment summary.
    readme_file = exp_dir / "README.md"
    with open(readme_file, 'w', encoding='utf-8') as f:
        f.write(f"# {experiment_name} 实验\n\n")
        f.write(f"**创建时间**: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n\n")
        f.write("## 实验配置\n\n")
        f.write(f"- 模型: {args.model}\n")
        f.write(f"- 数据规模: {args.data_scale}\n")
        f.write(f"- 训练轮数: {args.epochs}\n")
        f.write(f"- 批次大小: {args.batch_size}\n")
        f.write(f"- 学习率: {args.learning_rate}\n")

        if args.freeze != "none":
            f.write(f"- 冻结策略: {args.freeze}\n")

        f.write("\n## 完整执行命令\n\n")
        f.write("```bash\n")
        f.write("# 环境准备\n")
        f.write("conda activate wenwuclip\n")
        f.write("cd /root/WenwuClip\n\n")
        f.write("# 训练命令\n")
        f.write(command_text)
        f.write("```\n\n")
        f.write("## 实验文件结构\n\n")
        f.write("```\n")
        f.write(f"{experiment_name}/\n")
        f.write("├── README.md          # 本文件\n")
        f.write("├── config/           # 配置文件\n")
        f.write("├── results/          # 实验结果\n")
        f.write("├── logs/            # 系统和训练日志\n")
        f.write("├── commands/        # 可重现执行脚本\n")
        f.write("└── raw_outputs/     # 原始输出\n")
        f.write("```\n")

    print(f"实验记录已创建: {exp_dir}")
    print(f"训练脚本: {train_script}")


def main():
    """Parse CLI arguments, build the model and data modules, and run training."""
    parser = argparse.ArgumentParser(description="CN-CLIP Lightning训练")

    # Model arguments
    parser.add_argument("--model", type=str, default="ViT-B-16",
                       choices=["ViT-B-16", "ViT-L-14", "ViT-H-14"],
                       help="CLIP模型规模")
    parser.add_argument("--freeze", type=str, default="none",
                       help="冻结策略 | 选项: none, visual, text, both-encoders, backbones, visual-early-layers, text-early-layers, projections | 支持组合: visual+projections")

    # Training arguments
    parser.add_argument("--epochs", type=int, default=10, help="训练轮数")
    parser.add_argument("--batch-size", type=int, default=32, help="批次大小")
    parser.add_argument("--learning-rate", type=float, default=5e-7, help="学习率")
    parser.add_argument("--weight-decay", type=float, default=0.01, help="权重衰减")

    # Data arguments
    parser.add_argument("--data-scale", type=float, default=1.0, help="数据集比例")
    parser.add_argument("--num-workers", type=int, default=4, help="数据加载器进程数")

    # Experiment arguments
    parser.add_argument("--experiment", type=str, help="实验名称")
    parser.add_argument("--resume", type=str, help="恢复训练的检查点路径")
    parser.add_argument("--fast-dev-run", action="store_true", help="快速开发模式")
    parser.add_argument("--gpus", type=int, default=1, help="GPU数量")
    parser.add_argument("--precision", type=str, default="32",
                       choices=["16", "32", "bf16"],
                       help="训练精度")
    # BUG FIX: the default must be a float. argparse does not apply `type`
    # to default values, so `default=1` stayed an int — and Lightning
    # interprets an int val_check_interval as "validate every N training
    # batches", not as a fraction of an epoch.
    parser.add_argument("--val-check-interval", type=float, default=1.0,
                       help="验证检查间隔 (1.0=每个epoch, 0.5=每半个epoch, 0.25=每1/4个epoch)")

    # Logging arguments
    # NOTE(review): this flag is accepted but never read in this script.
    parser.add_argument("--no-wandb", action="store_true", help="禁用wandb")

    args = parser.parse_args()

    # Auto-generate an experiment name when none was supplied, e.g.
    # "cnclip_B16_freeze_visual_0131_1205".
    if not args.experiment:
        timestamp = datetime.now().strftime("%m%d_%H%M")
        model_short = args.model.replace("ViT-", "").replace("-", "")
        strategy_suffix = ""
        if args.freeze != "none":
            strategy_suffix = f"_freeze_{args.freeze}"

        args.experiment = f"cnclip_{model_short}{strategy_suffix}_{timestamp}"

    # Build the LightningModule.
    model = CNClipLightning(
        model_name=args.model,
        learning_rate=args.learning_rate,
        weight_decay=args.weight_decay,
        freeze_strategy=args.freeze,
        experiment_name=args.experiment,
        data_scale=args.data_scale
    )

    train_set = get_train_set(args.data_scale)
    val_set = get_val_set(args.data_scale)

    # Wrap the datasets in a LightningDataModule.
    data_module = create_data_module(
        train_set,
        val_set,
        batch_size=args.batch_size,
        num_workers=args.num_workers
    )

    # Callbacks, logger, and the reproducibility record.
    callbacks = setup_callbacks(args.experiment)
    logger = setup_logger(args.experiment)
    create_experiment_command_record(args, args.experiment)

    # Trainer configuration. ROBUSTNESS FIX: clamp devices to >= 1 so that
    # `--gpus 0` cannot produce devices=0, which Lightning rejects.
    trainer_kwargs = {
        "max_epochs": args.epochs,
        "callbacks": callbacks,
        "logger": logger,
        "precision": args.precision,
        "accelerator": "gpu" if torch.cuda.is_available() else "cpu",
        "devices": max(1, min(args.gpus, torch.cuda.device_count())) if torch.cuda.is_available() else 1,
        "fast_dev_run": args.fast_dev_run,
        "enable_progress_bar": True,
        "enable_model_summary": True,
        "log_every_n_steps": 10,
        "val_check_interval": args.val_check_interval
    }

    # Print a run summary banner before training starts.
    print(f"\n{'='*60}")
    print(f"🚀 开始CN-CLIP微调实验")
    print(f"{'='*60}")
    print(f"📦 模型规模: {args.model}")

    print(f"📊 数据规模: {args.data_scale} (训练:{ len(train_set):,}, 验证:{len(val_set):,})")
    print(f"🎯 训练策略: {args.freeze if args.freeze != 'none' else '全量微调'}")
    print(f"⚡ 训练轮数: {args.epochs}")
    print(f"📈 学习率: {args.learning_rate}")
    print(f"📦 批次大小: {args.batch_size}")
    print(f"🔄 验证间隔: {args.val_check_interval}x epoch")
    print(f"💾 实验名称: {args.experiment}")
    print(f"{'='*60}\n")

    trainer = pl.Trainer(**trainer_kwargs)

    # Train, optionally resuming from a checkpoint.
    if args.resume:
        print(f"📂 从检查点恢复: {args.resume}")
        trainer.fit(model, data_module, ckpt_path=args.resume)
    else:
        trainer.fit(model, data_module)

    # Persist final results via the model's own helper.
    model.save_final_results()

    # Report the best monitored metric and output locations.
    best_metric = trainer.callback_metrics.get("val_mean_r1", 0.0)
    print(f"\n🎉 训练完成!")
    print(f"📈 最佳 Mean R@1: {best_metric:.4f}")
    print(f"📁 检查点位置: ./codebase/cnclip_finetune/checkpoints/{args.experiment}/")
    print(f"📊 日志位置: ./codebase/cnclip_finetune/logs/{args.experiment}/")

    if model.experiment_dir:
        print(f"🗂️ 实验数据: {model.experiment_dir}/")

# Script entry point: run training only when executed directly, not on import.
if __name__ == "__main__":
    main()