#!/usr/bin/env python3
"""
Qwen3 32B 微调脚本
使用Unsloth + QLoRA进行高效微调
支持自动路径识别和配置驱动
"""

import torch
from unsloth import FastLanguageModel
from datasets import load_dataset, load_from_disk
from trl import SFTTrainer
from transformers import TrainingArguments
import os
import sys
import argparse
from pathlib import Path
from datetime import datetime
import re

# Make the script's own directory importable so the sibling `config` module
# resolves regardless of the caller's current working directory.
script_dir = Path(__file__).parent.resolve()
if str(script_dir) not in sys.path:
    sys.path.insert(0, str(script_dir))

import config

def _sanitize_for_filename(text: str) -> str:
    """将模型/数据集ID等转换为安全的文件名片段"""
    text = text.replace("/", "-")
    text = re.sub(r"[^A-Za-z0-9._-]+", "_", text)
    return text


class _Tee:
    """简单的tee，将stdout/stderr同时写文件与控制台"""
    def __init__(self, stream, logfile):
        self.stream = stream
        self.logfile = logfile

    def write(self, data):
        self.stream.write(data)
        try:
            self.logfile.write(data)
        except Exception:
            pass

    def flush(self):
        self.stream.flush()
        try:
            self.logfile.flush()
        except Exception:
            pass


def init_logging(model_id: str, dataset_id: str, max_steps: int, batch_size: int, learning_rate: float):
    """Tee stdout/stderr into a timestamped log file under logs/.

    The filename encodes run date, model, dataset and the core
    hyper-parameters so individual runs can be told apart at a glance.
    Returns the full path of the log file.
    """
    os.makedirs(config.LOGS_DIR, exist_ok=True)

    stamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    parts = (
        stamp,
        _sanitize_for_filename(model_id),
        _sanitize_for_filename(dataset_id),
        f"steps{max_steps}_bs{batch_size}_lr{learning_rate}",
    )
    log_path = os.path.join(config.LOGS_DIR, "__".join(parts) + ".log")

    # Line-buffered append so log lines hit disk promptly; the handle
    # intentionally stays open for the lifetime of the process.
    handle = open(log_path, "a", buffering=1, encoding="utf-8")
    sys.stdout = _Tee(sys.stdout, handle)
    sys.stderr = _Tee(sys.stderr, handle)

    print(f"📜 日志文件: {log_path}")
    return log_path


def download_model(model_id=None, local_dir=None):
    """Fetch the base model from ModelScope unless a local copy already exists.

    Args:
        model_id: ModelScope model ID; defaults to config.MODEL_ID.
        local_dir: download cache directory; defaults to config.MODEL_DOWNLOAD_DIR.

    Returns:
        The expected local checkpoint directory when it contains config.json,
        otherwise whatever path snapshot_download reports.
    """
    from modelscope import snapshot_download

    model_id = model_id or config.MODEL_ID
    local_dir = local_dir or config.MODEL_DOWNLOAD_DIR
    expected_path = config.get_model_path(model_id, os.path.basename(local_dir))

    def _has_config(path):
        # A usable checkpoint directory must at least carry config.json.
        return os.path.exists(os.path.join(path, "config.json"))

    # Local-first: skip downloading when a usable copy is already in place.
    if _has_config(expected_path):
        print(f"✅ 检测到本地模型，跳过下载: {expected_path}")
        return expected_path

    print(f"开始下载模型: {model_id}")
    print(f"下载目录: {local_dir}")

    os.makedirs(local_dir, exist_ok=True)

    model_path = snapshot_download(
        model_id=model_id,
        cache_dir=local_dir,
        revision='master'
    )
    print(f"模型下载完成: {model_path}")

    # Verify the layout we expect; otherwise fall back to the returned path.
    if _has_config(expected_path):
        print(f"✅ 模型验证成功: {expected_path}")
        return expected_path
    print(f"⚠️  使用下载返回路径: {model_path}")
    return model_path

def download_dataset(dataset_id=None, local_dir=None, num_samples=None):
    """Download a dataset from ModelScope and persist it in `datasets` format.

    Args:
        dataset_id: ModelScope dataset ID; defaults to config.DATASET_ID.
        local_dir: save_to_disk target directory; defaults to config.DATA_PATH.
        num_samples: optional cap on the number of samples kept.

    Returns:
        The local dataset directory on success, otherwise None.
    """
    dataset_id = dataset_id or config.DATASET_ID
    local_dir = local_dir or config.DATA_PATH
    num_samples = num_samples or config.NUM_SAMPLES

    print(f"开始下载数据集: {dataset_id}")
    print(f"保存路径: {local_dir}")

    # os.makedirs("") raises FileNotFoundError, so only create the parent
    # directory when local_dir actually has one (a bare relative name does not).
    parent_dir = os.path.dirname(local_dir)
    if parent_dir:
        os.makedirs(parent_dir, exist_ok=True)

    # Local-first: reuse an existing, loadable on-disk dataset.
    if os.path.exists(local_dir):
        try:
            _ = load_from_disk(local_dir)
            print(f"✅ 检测到本地数据集，跳过下载: {local_dir}")
            return local_dir
        except Exception:
            pass  # unreadable/partial copy: fall through and re-download

    try:
        # ModelScope only (deliberately no HuggingFace fallback).
        from modelscope.msdatasets import MsDataset
        print("使用ModelScope下载数据集...")
        raw_dataset = MsDataset.load(dataset_id)

        # Convert to HuggingFace `datasets` format for save_to_disk/map.
        from datasets import Dataset
        data_list = list(raw_dataset)

        if num_samples and len(data_list) > num_samples:
            data_list = data_list[:num_samples]
            print(f"选取前 {num_samples} 条数据")

        dataset = Dataset.from_list(data_list)
    except Exception as e:
        print(f"ModelScope下载失败: {e}")
        # Best effort: fall back to whatever already exists on disk.
        if os.path.exists(local_dir):
            print(f"⚠️  使用已存在的本地数据集: {local_dir}")
            return local_dir
        return None

    dataset.save_to_disk(local_dir)
    print(f"数据集保存到: {local_dir}")
    print(f"数据集大小: {len(dataset)}")

    # Preview a couple of samples, truncating long string fields.
    print("\n数据样本预览:")
    for i in range(min(2, len(dataset))):
        sample = dataset[i]
        print(f"样本 {i+1}:")
        for key, value in sample.items():
            if isinstance(value, str) and len(value) > 100:
                print(f"  {key}: {value[:100]}...")
            else:
                print(f"  {key}: {value}")

    return local_dir

def format_dataset_for_qwen(examples):
    """Convert a batch of heterogeneous samples into Qwen chat-formatted text.

    Recognizes common field aliases for prompt/context/answer, the `messages`
    conversation format (list of {role, content} dicts or plain strings), and
    falls back to the first two non-empty fields. Rows without both a prompt
    and an answer are dropped. Returns {"text": [...]}.
    """
    PROMPT_KEYS = ("instruction", "question", "query", "input_text")
    CONTEXT_KEYS = ("input", "context", "background")
    ANSWER_KEYS = ("output", "response", "answer", "target")

    columns = list(examples.keys())
    n_rows = len(examples[columns[0]])
    formatted = []

    for row in range(n_rows):
        prompt, context, answer = "", "", ""

        # Field-name based detection; later matching columns overwrite earlier ones.
        for column in columns:
            cell = examples[column][row] if row < len(examples[column]) else ""

            if column in PROMPT_KEYS:
                prompt = cell
            elif column in CONTEXT_KEYS:
                context = cell
            elif column in ANSWER_KEYS:
                answer = cell
            elif column == "messages" and isinstance(cell, list) and cell:
                head = cell[0]
                if isinstance(head, dict):
                    # Conversation as [{role, content}, ...]
                    for turn in cell:
                        if not isinstance(turn, dict):
                            continue
                        role, content = turn.get("role"), turn.get("content")
                        if content is None:
                            continue
                        if role == "user":
                            prompt = content
                        elif role == "assistant":
                            answer = content
                elif isinstance(head, str):
                    # Conversation as plain ["user text", "assistant text", ...]
                    prompt = cell[0]
                    if len(cell) >= 2 and isinstance(cell[1], str):
                        answer = cell[1]

        # Fallback: take the first two non-empty fields as prompt and answer.
        if not prompt and not answer:
            for column in columns:
                cells = examples[column]
                if row < len(cells) and cells[row]:
                    if not prompt:
                        prompt = cells[row]
                    elif not answer:
                        answer = cells[row]
                        break

        # Emit only complete pairs in Qwen's <|im_start|>/<|im_end|> layout.
        if prompt and answer:
            user_part = f"{prompt}\n{context}" if context and context.strip() else prompt
            formatted.append(
                f"<|im_start|>user\n{user_part}<|im_end|>\n"
                f"<|im_start|>assistant\n{answer}<|im_end|>"
            )

    return {"text": formatted}

def main():
    """CLI entry point: parse args, resolve paths, download assets, train, save adapters."""
    parser = argparse.ArgumentParser(description="Qwen3 32B 微调脚本")
    parser.add_argument("--config", action="store_true", help="显示当前配置")
    parser.add_argument("--download_model", action="store_true", help="下载模型")
    parser.add_argument("--download_data", action="store_true", help="下载数据集")
    parser.add_argument("--download_only", action="store_true", help="仅下载并退出")
    parser.add_argument("--skip_download", action="store_true", help="跳过下载，直接开始训练")

    # Optional overrides of values declared in config.py (default None = "not given").
    parser.add_argument("--model_id", help="覆盖配置中的模型ID")
    parser.add_argument("--dataset_id", help="覆盖配置中的数据集ID")
    parser.add_argument("--max_steps", type=int, help="覆盖配置中的训练步数")
    parser.add_argument("--batch_size", type=int, help="覆盖配置中的批次大小")
    parser.add_argument("--learning_rate", type=float, help="覆盖配置中的学习率")
    parser.add_argument("--num_samples", type=int, help="覆盖配置中的样本数量")

    args = parser.parse_args()

    # --config: print the effective configuration and exit.
    if args.config:
        config.print_config()
        return

    # Abort early on an invalid configuration.
    if not config.validate_config():
        print("❌ 配置验证失败，请检查config.py")
        return

    # Apply CLI overrides. Numeric options are compared against None (the
    # argparse default) instead of relying on truthiness, so an explicit
    # 0 / 0.0 override is honored rather than silently replaced by config.
    model_id = args.model_id or config.MODEL_ID
    dataset_id = args.dataset_id or config.DATASET_ID
    max_steps = args.max_steps if args.max_steps is not None else config.MAX_STEPS
    batch_size = args.batch_size if args.batch_size is not None else config.BATCH_SIZE
    learning_rate = args.learning_rate if args.learning_rate is not None else config.LEARNING_RATE
    num_samples = args.num_samples if args.num_samples is not None else config.NUM_SAMPLES

    # Recompute derived paths only when the corresponding ID was overridden.
    model_path = config.get_model_path(model_id, "model") if args.model_id else config.MODEL_PATH
    data_path = config.get_dataset_path(dataset_id, "data") if args.dataset_id else config.DATA_PATH

    # Start file logging before any further output so everything is captured.
    init_logging(model_id, dataset_id, max_steps, batch_size, learning_rate)

    print("🚀 开始微调")
    config.print_config()

    overridden = (args.model_id, args.dataset_id, args.max_steps, args.batch_size, args.learning_rate)
    if any(v is not None for v in overridden):
        print("\n📝 命令行覆盖:")
        if args.model_id is not None: print(f"  模型ID: {model_id}")
        if args.dataset_id is not None: print(f"  数据集ID: {dataset_id}")
        if args.max_steps is not None: print(f"  训练步数: {max_steps}")
        if args.batch_size is not None: print(f"  批次大小: {batch_size}")
        if args.learning_rate is not None: print(f"  学习率: {learning_rate}")
        if args.num_samples is not None: print(f"  样本数量: {num_samples}")

    # Download the model when explicitly requested, or when it is missing
    # locally and downloads were not skipped.
    if args.download_model or (not args.skip_download and not os.path.exists(model_path)):
        print("\n📥 下载模型...")
        actual_model_path = download_model(model_id)
        if actual_model_path:
            model_path = actual_model_path

    # Same policy for the dataset.
    if args.download_data or (not args.skip_download and not os.path.exists(data_path)):
        print("\n📥 下载数据集...")
        actual_data_path = download_dataset(dataset_id, data_path, num_samples)
        if actual_data_path:
            data_path = actual_data_path

    # --download_only: stop before any training happens.
    if args.download_only:
        print("✅ 下载完成，按 --download_only 要求退出")
        return

    # Hard requirements: both model and data must exist on disk by now.
    if not os.path.exists(model_path):
        print(f"❌ 模型路径不存在: {model_path}")
        print("请使用 --download_model 下载模型")
        return

    if not os.path.exists(data_path):
        print(f"❌ 数据集路径不存在: {data_path}")
        print("请使用 --download_data 下载数据集")
        return

    print("\n" + "=" * 60)
    print("🔧 开始加载模型和tokenizer...")

    # Load base model + tokenizer (dtype=None lets Unsloth pick one).
    model, tokenizer = FastLanguageModel.from_pretrained(
        model_name=model_path,
        max_seq_length=config.MAX_SEQ_LENGTH,
        dtype=None,
        device_map="auto",
        trust_remote_code=True,
    )

    print("⚙️  配置LoRA...")
    # Attach LoRA adapters for parameter-efficient fine-tuning.
    model = FastLanguageModel.get_peft_model(
        model,
        r=config.LORA_R,
        target_modules=config.TARGET_MODULES,
        lora_alpha=config.LORA_ALPHA,
        lora_dropout=config.LORA_DROPOUT,
        bias="none",
        use_gradient_checkpointing="unsloth",  # Unsloth's memory-saving checkpointing
        random_state=3407,
        use_rslora=False,
        loftq_config=None,
    )

    # Right padding with eos as pad token — the usual setup for causal-LM SFT.
    tokenizer.pad_token = tokenizer.eos_token
    tokenizer.padding_side = "right"

    print("📊 加载数据集...")
    dataset = load_from_disk(data_path)
    print(f"数据集大小: {len(dataset)}")

    # Convert raw columns into a single "text" column in Qwen chat format.
    if "text" not in dataset.column_names:
        print("🔄 格式化数据集...")
        dataset = dataset.map(format_dataset_for_qwen, batched=True, remove_columns=dataset.column_names)
        print("✅ 数据集格式化完成")

    os.makedirs(config.OUTPUT_DIR, exist_ok=True)

    print("🏃 配置训练器...")
    trainer = SFTTrainer(
        model=model,
        tokenizer=tokenizer,
        train_dataset=dataset,
        dataset_text_field="text",
        max_seq_length=config.MAX_SEQ_LENGTH,
        dataset_num_proc=2,
        packing=False,
        args=TrainingArguments(
            per_device_train_batch_size=batch_size,
            gradient_accumulation_steps=config.GRADIENT_ACCUMULATION_STEPS,
            warmup_steps=config.WARMUP_STEPS,
            max_steps=max_steps,
            learning_rate=learning_rate,
            fp16=not torch.cuda.is_bf16_supported(),  # prefer bf16 when supported
            bf16=torch.cuda.is_bf16_supported(),
            logging_steps=config.LOGGING_STEPS,
            optim=config.OPTIMIZER,
            weight_decay=config.WEIGHT_DECAY,
            lr_scheduler_type=config.LR_SCHEDULER_TYPE,
            seed=3407,
            output_dir=config.OUTPUT_DIR,
            save_steps=config.SAVE_STEPS,
            save_total_limit=config.SAVE_TOTAL_LIMIT,
            dataloader_pin_memory=False,
            report_to="none",  # disable wandb/tensorboard reporters
        ),
    )

    # Snapshot GPU memory before training so the delta can be reported later.
    gpu_stats = torch.cuda.get_device_properties(0)
    start_gpu_memory = round(torch.cuda.max_memory_reserved() / 1024 / 1024 / 1024, 3)
    max_memory = round(gpu_stats.total_memory / 1024 / 1024 / 1024, 3)
    print(f"🖥️  GPU = {gpu_stats.name}. Max memory = {max_memory} GB.")
    print(f"💾 {start_gpu_memory} GB of memory reserved.")

    print("\n" + "=" * 60)
    print("🚀 开始微调...")
    print("=" * 60)

    trainer_stats = trainer.train()

    # Report runtime and peak-memory statistics.
    used_memory = round(torch.cuda.max_memory_reserved() / 1024 / 1024 / 1024, 3)
    used_memory_for_lora = round(used_memory - start_gpu_memory, 3)
    used_percentage = round(used_memory / max_memory * 100, 3)
    lora_percentage = round(used_memory_for_lora / max_memory * 100, 3)

    print("\n" + "=" * 60)
    print("🎉 训练完成!")
    print("=" * 60)
    print(f"⏱️  训练时间: {trainer_stats.metrics['train_runtime']:.2f} 秒")
    print(f"⏱️  训练时间: {round(trainer_stats.metrics['train_runtime']/60, 2)} 分钟")
    print(f"💾 峰值显存使用: {used_memory} GB")
    print(f"💾 训练显存使用: {used_memory_for_lora} GB")
    print(f"📊 显存使用率: {used_percentage}%")
    print(f"📊 训练显存使用率: {lora_percentage}%")

    # Persist only the LoRA adapters (plus tokenizer), not the full base model.
    lora_save_path = os.path.join(config.OUTPUT_DIR, "lora_adapters")
    print(f"\n💾 保存LoRA适配器到: {lora_save_path}")
    model.save_pretrained(lora_save_path)
    tokenizer.save_pretrained(lora_save_path)
    print("✅ 模型保存完成!")

    print("\n🎯 下一步:")
    print(f"  • 测试模型: python {script_dir}/test_model.py --model_path {lora_save_path}")
    print(f"  • 查看输出: ls -la {config.OUTPUT_DIR}")
    print(f"  • 查看日志: ls -la {config.LOGS_DIR} (如果有)")


if __name__ == "__main__":
    main()
