#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
Qwen模型微调脚本 - 增强版
支持继续训练、版本管理、防遗忘训练
"""

import os
import torch
import json
import logging
import glob
from datetime import datetime
from pathlib import Path

from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers import TrainingArguments, Trainer
from transformers.trainer_callback import TrainerCallback
from datasets import Dataset
from peft import get_peft_model, LoraConfig, TaskType, PeftModel

# 设置日志
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
os.environ["TOKENIZERS_PARALLELISM"] = "false"

# 导入模型查找工具函数
PROJECT_ROOT = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import sys
sys.path.append(PROJECT_ROOT)
from utils.model_utils import find_local_model_path

# 导入参数解析模块
from training.scripts.argument_parser import get_finetune_args

# 导入数据处理模块
from training.scripts.data_processor import prepare_dataset

# 导入训练回调模块
from training.scripts.training_callback import ProgressCallback


def setup_output_directory(base_output_dir, version=None, resume_from_checkpoint=None):
    """
    Resolve the training output directory and detect continue-training mode.

    Checkpoints already provide versioning on their own, so a versioned
    subdirectory is created only when the caller explicitly requests one.

    Args:
        base_output_dir: Root directory for training outputs.
        version: Optional version label; when given, outputs go under "v{version}".
        resume_from_checkpoint: Optional checkpoint path; continue-training
            mode is active only when this points at an existing path.

    Returns:
        Tuple of (output_dir, is_continue_training).
    """
    # Continue training only when a checkpoint path was given AND it exists.
    is_continue_training = bool(
        resume_from_checkpoint and os.path.exists(resume_from_checkpoint)
    )

    # Only an explicit version label gets its own subdirectory.
    output_dir = (
        os.path.join(base_output_dir, f"v{version}") if version else base_output_dir
    )
    os.makedirs(output_dir, exist_ok=True)

    if version:
        print(f"🆕 创建指定版本目录: {output_dir}")
    else:
        print(f"📂 使用输出目录: {output_dir}")

    return output_dir, is_continue_training


def load_model_and_tokenizer(model_path, use_lora=False, lora_rank=8, use_cpu=False, 
                           adapter_path=None, resume_from_checkpoint=None):
    """
    Load the base model and tokenizer, with support for resuming LoRA training.

    Args:
        model_path: Base model path (local directory or hub id such as "Qwen/...").
        use_lora: Whether to attach a LoRA adapter.
        lora_rank: LoRA rank (r).
        use_cpu: Force CPU loading (disables fp16 and auto device mapping).
        adapter_path: Existing adapter path (used for continue training).
        resume_from_checkpoint: Checkpoint path (deprecated; kept for backward
            compatibility — adapter_path takes precedence).

    Returns:
        Tuple of (model, tokenizer).
    """
    print(f"正在加载模型: {model_path}")
    
    # Continue-training mode: prefer adapter_path, fall back to the deprecated
    # resume_from_checkpoint argument for backward compatibility.
    checkpoint_path = adapter_path or resume_from_checkpoint
    is_continue_training = checkpoint_path and os.path.exists(checkpoint_path)
    
    # Device placement: pin everything to CPU, or let accelerate auto-map layers.
    device_map = "cpu" if use_cpu else "auto"
    
    # Heuristic: an existing path that is not a "Qwen/" hub id is a local model.
    is_local_path = os.path.exists(model_path) and not model_path.startswith("Qwen/")
    
    # Load the tokenizer.
    tokenizer = AutoTokenizer.from_pretrained(
        model_path, 
        local_files_only=is_local_path,
        trust_remote_code=True
    )
    
    # Ensure the tokenizer has a pad token (needed for batch padding).
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token
        print("设置pad_token为eos_token")
    
    # Base-model loading options.
    # NOTE(review): "dtype" is accepted by recent transformers releases; older
    # versions expect "torch_dtype" — confirm against the pinned version.
    model_kwargs = {
        "device_map": device_map,
        "dtype": torch.float16 if not use_cpu else None,
        "low_cpu_mem_usage": True,
        "local_files_only": is_local_path,
        "load_in_8bit": False,
        "trust_remote_code": True,
        "pad_token_id": tokenizer.pad_token_id,
        "use_cache": False,  # disable KV cache for gradient-checkpointing compatibility
    }
    
    if not use_cpu:
        model_kwargs["offload_folder"] = "./offload"
    
    # Continue training with an existing checkpoint: load the base model first,
    # then attach the LoRA weights stored in the checkpoint.
    if is_continue_training and use_lora:
        # For LoRA continue training, load base weights then the adapter.
        model = AutoModelForCausalLM.from_pretrained(model_path, **model_kwargs)
        print(f"🔁 继续训练模式: 加载检查点LoRA权重 {checkpoint_path}")
        model = PeftModel.from_pretrained(model, checkpoint_path)
        
        # PeftModel.from_pretrained may load adapters frozen; force LoRA
        # parameters back to trainable so the optimizer updates them.
        for name, param in model.named_parameters():
            if "lora" in name.lower():
                param.requires_grad = True
                
        # Report the trainable-parameter count for sanity checking.
        if hasattr(model, 'print_trainable_parameters'):
            model.print_trainable_parameters()
    elif use_lora:
        # Fresh LoRA adapter on top of the base model.
        model = AutoModelForCausalLM.from_pretrained(model_path, **model_kwargs)
        peft_config = LoraConfig(
            task_type=TaskType.CAUSAL_LM,
            inference_mode=False,
            r=lora_rank,
            lora_alpha=32,
            lora_dropout=0.1,
            target_modules=["q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj"]
        )
        model = get_peft_model(model, peft_config)
        print("🆕 创建新的LoRA适配器")
        model.print_trainable_parameters()
    else:
        # Full-parameter fine-tuning: no adapter wrapping.
        model = AutoModelForCausalLM.from_pretrained(model_path, **model_kwargs)
    
    return model, tokenizer




def fine_tune_model(
    model_path,
    data_path,
    output_dir,
    use_lora=False,
    lora_rank=8,
    max_length=512,
    num_train_epochs=3,
    per_device_train_batch_size=4,
    learning_rate=2e-5,
    save_steps=100,
    use_cpu=False,
    gradient_accumulation_steps=8,
    resume_from_checkpoint=None,
    old_data_path=None,
    replay_ratio=0.0,
):
    """
    Fine-tune the model (enhanced version).

    Supports resuming from a checkpoint and anti-forgetting training by
    replaying a fraction of old data alongside the new dataset.

    Args:
        model_path: Base model path or hub id.
        data_path: Path to the (new) training data.
        output_dir: Base output directory for checkpoints and the final model.
        use_lora: Whether to train a LoRA adapter instead of all parameters.
        lora_rank: LoRA rank (r).
        max_length: Maximum tokenized sequence length.
        num_train_epochs: Number of training epochs.
        per_device_train_batch_size: Per-device training batch size.
        learning_rate: Base learning rate (scaled by 0.1 when resuming).
        save_steps: Checkpoint save interval in steps.
        use_cpu: Force CPU training.
        gradient_accumulation_steps: Gradient accumulation steps.
        resume_from_checkpoint: Checkpoint path to resume training from.
        old_data_path: Old data path (experience replay source).
        replay_ratio: Fraction of old data mixed into training.
    """
    # Debug info: show whether the requested checkpoint actually exists.
    print(f"  - resume_from_checkpoint exists: {os.path.exists(resume_from_checkpoint) if resume_from_checkpoint else False}")
    
    # Resolve the output directory and detect continue-training mode.
    output_dir, is_continue_training = setup_output_directory(
        output_dir, 
        resume_from_checkpoint=resume_from_checkpoint
    )
    
    print(f"  - output_dir: {output_dir}")
    print(f"  - is_continue_training: {is_continue_training}")
    
    # Load model and tokenizer (loads adapter weights when resuming with LoRA).
    model, tokenizer = load_model_and_tokenizer(
        model_path, 
        use_lora, 
        lora_rank, 
        use_cpu,
        adapter_path=resume_from_checkpoint,
        resume_from_checkpoint=resume_from_checkpoint  # kept for backward compatibility
    )
    
    # Build the training dataset (optionally mixing in replayed old data).
    # NOTE(review): assumes prepare_dataset returns tokenized features including
    # labels, since no data collator is passed to Trainer below — confirm.
    train_dataset = prepare_dataset(
        data_path, 
        tokenizer, 
        max_length, 
        old_data_path, 
        replay_ratio
    )
    
    # Adjust hyperparameters for the training mode.
    if is_continue_training:
        # Use a smaller learning rate when continuing from a checkpoint.
        learning_rate = learning_rate * 0.1
        print(f"🔁 继续训练模式，调整学习率为: {learning_rate}")
    
    # NOTE(review): "eval_strategy" and "no_cuda" spellings depend on the
    # installed transformers version — confirm against the pinned version.
    training_args = TrainingArguments(
        output_dir=output_dir,
        num_train_epochs=num_train_epochs,
        per_device_train_batch_size=per_device_train_batch_size,
        learning_rate=learning_rate,
        save_steps=save_steps,
        logging_steps=10,
        save_total_limit=3,  # keep at most 3 checkpoints on disk
        gradient_accumulation_steps=gradient_accumulation_steps,
        fp16=not use_cpu and torch.cuda.is_available(),
        dataloader_num_workers=2,
        report_to="none",
        optim="adamw_torch",
        dataloader_pin_memory=False,
        dataloader_prefetch_factor=2,
        gradient_checkpointing=True,
        gradient_checkpointing_kwargs={"use_reentrant": False},
        logging_dir=f'{output_dir}/logs',
        logging_strategy="steps",
        save_strategy="steps",
        eval_strategy="no",
        no_cuda=use_cpu,
        dataloader_drop_last=True,
        remove_unused_columns=True,
        lr_scheduler_type="cosine",
        warmup_steps=100,
        resume_from_checkpoint=resume_from_checkpoint,  # support resuming from checkpoint
        # avoid optimizer-state conflicts when data skipping is replayed
        ignore_data_skip=False,
    )
    
    # Build the trainer.
    # NOTE(review): the "tokenizer=" argument is deprecated in newer
    # transformers in favor of "processing_class=" — confirm pinned version.
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        tokenizer=tokenizer,
    )
    
    # Attach the progress-reporting callback.
    trainer.add_callback(ProgressCallback(num_train_epochs))
    
    # Start training.
    print("🎯 开始微调...")
    print(f"训练参数:")
    print(f"  - 训练轮数: {num_train_epochs}")
    print(f"  - 批次大小: {per_device_train_batch_size}")
    print(f"  - 学习率: {learning_rate}")
    print(f"  - 数据集大小: {len(train_dataset)}")
    print(f"  - 使用CPU训练: {use_cpu}")
    print(f"  - 使用LoRA: {use_lora}")
    if use_lora:
        print(f"  - LoRA秩: {lora_rank}")
    if replay_ratio > 0:
        print(f"  - 经验回放比例: {replay_ratio}")
    if is_continue_training:
        print(f"  - 训练模式: 继续训练")
    
    # When resuming, checkpoint optimizer state may not match the current
    # configuration (e.g. changed batch size); fall back to weights-only resume.
    try:
        trainer.train(resume_from_checkpoint=resume_from_checkpoint)
    except ValueError as e:
        if "loaded state dict contains a parameter group that doesn't match the size of optimizer's group" in str(e) and resume_from_checkpoint:
            print("⚠️  优化器状态不匹配，跳过优化器状态加载，仅加载模型权重")
            print("💡 这可能是因为训练参数发生了变化（如批次大小、学习率等）")
            trainer.train(resume_from_checkpoint=None)  # model weights were already loaded; skip optimizer state
        else:
            raise e
    
    # Save the final model and tokenizer.
    print("💾 保存模型...")
    trainer.save_model(output_dir)
    tokenizer.save_pretrained(output_dir)
    
    # Persist the training configuration alongside the model for provenance.
    config_info = {
        "training_date": datetime.now().isoformat(),
        "model_path": model_path,
        "data_path": data_path,
        "use_lora": use_lora,
        "lora_rank": lora_rank,
        "num_train_epochs": num_train_epochs,
        "learning_rate": learning_rate,
        "resume_from_checkpoint": resume_from_checkpoint,
    }
    
    with open(os.path.join(output_dir, "training_config.json"), "w") as f:
        json.dump(config_info, f, indent=2)
    
    print(f"✅ 微调完成! 模型保存在: {output_dir}")

def main():
    """CLI entry point: parse arguments and launch fine-tuning."""
    args = get_finetune_args()

    # Make sure the base output directory exists before training starts.
    os.makedirs(args.output_dir, exist_ok=True)

    # Collect CLI options and hand them to the training routine.
    train_kwargs = dict(
        model_path=args.model_path,
        data_path=args.data_path,
        output_dir=args.output_dir,
        use_lora=args.use_lora,
        lora_rank=args.lora_rank,
        max_length=args.max_length,
        num_train_epochs=args.num_train_epochs,
        per_device_train_batch_size=args.per_device_train_batch_size,
        learning_rate=args.learning_rate,
        use_cpu=args.use_cpu,
        gradient_accumulation_steps=args.gradient_accumulation_steps,
        resume_from_checkpoint=args.resume_from_checkpoint,
        old_data_path=args.old_data_path,
        replay_ratio=args.replay_ratio,
    )
    fine_tune_model(**train_kwargs)


if __name__ == "__main__":
    main()