#!/usr/bin/env python3
"""
Qwen3 32B 微调配置文件
支持自动路径推导和项目路径识别
"""

import os
import sys
from pathlib import Path

def get_project_root():
    """Locate the project root directory automatically.

    Walks upward from this file's location and returns the first ancestor
    directory containing one of the known project marker files/dirs.
    Falls back to the current working directory when no marker is found.

    Returns:
        str: absolute path of the detected project root.
    """
    current_path = Path(__file__).resolve()

    # Marker files/directories that identify the project root.
    # NOTE(review): 'README.md' and 'scripts' are generic and could match an
    # unrelated ancestor directory — confirm they are specific enough.
    project_markers = [
        'Qwen3_32B_微调指南.md',
        'README.md',
        'scripts',
        '项目总结.md'
    ]

    # Path.parents already yields parent, grandparent, ... up to the
    # filesystem root, so prepending current_path.parent (as the original
    # did) only made the immediate parent be checked twice.
    for parent in current_path.parents:
        if any((parent / marker).exists() for marker in project_markers):
            return str(parent)

    # Nothing matched: fall back to the current working directory.
    return os.getcwd()

# Project root directory, auto-detected once at import time
PROJECT_ROOT = get_project_root()

def get_model_path(model_id, base_dir="model", root=None):
    """Derive the on-disk storage path for a model ID.

    ModelScope downloads land at ``base_dir/<org>/<name>`` for IDs of the
    form ``org/name`` (e.g. ``model/unsloth/Qwen3-32B-unsloth-bnb-4bit``);
    IDs without a slash map to ``base_dir/<model_id>``.

    Args:
        model_id: model identifier, e.g. "unsloth/Qwen3-32B-unsloth-bnb-4bit".
        base_dir: directory under the project root that holds models.
        root: project root to resolve against; defaults to ``PROJECT_ROOT``
            (new optional parameter — existing call sites are unaffected).

    Returns:
        str: expected local path of the model.
    """
    if root is None:
        root = PROJECT_ROOT
    model_dir = os.path.join(root, base_dir)

    # Only split on the first "/" so any remaining slashes stay in the name.
    if "/" in model_id:
        org, name = model_id.split("/", 1)
        return os.path.join(model_dir, org, name)
    return os.path.join(model_dir, model_id)

def get_dataset_path(dataset_id, base_dir="data", root=None):
    """Derive the on-disk storage path for a dataset ID.

    Well-known datasets get a short, friendly directory name; any other
    dataset uses its last path component lower-cased with ``-`` → ``_``.

    Args:
        dataset_id: dataset identifier, e.g. "BelleGroup/train_2M_CN".
        base_dir: directory under the project root that holds datasets.
        root: project root to resolve against; defaults to ``PROJECT_ROOT``
            (new optional parameter — existing call sites are unaffected).

    Returns:
        str: expected local path of the dataset.
    """
    if root is None:
        root = PROJECT_ROOT
    data_dir = os.path.join(root, base_dir)

    # Friendly directory names for well-known datasets.
    dataset_mapping = {
        "swift/Chinese-Qwen3-235B-2507-Distill-data-110k-SFT": "chinese-qwen-sft",
        "silk-road/alpaca-data-gpt4-chinese": "chinese-alpaca",
        "BelleGroup/train_2M_CN": "belle",
        "YeungNLP/firefly-train-1.1M": "firefly"
    }

    # Unknown datasets: use the last path component, normalized.
    dataset_name = dataset_mapping.get(
        dataset_id,
        dataset_id.split("/")[-1].lower().replace("-", "_"),
    )
    return os.path.join(data_dir, dataset_name)

# ================================
# Main configuration — edit only this section!
# ================================

## Model configuration — change this to switch models
MODEL_ID = "unsloth/Qwen3-32B-unsloth-bnb-4bit"  # ModelScope model ID
# MODEL_ID = "unsloth/Qwen2.5-32B-unsloth-bnb-4bit"  # alternative model choice

## Dataset configuration — change this to switch datasets
DATASET_ID = "swift/Chinese-Qwen3-235B-2507-Distill-data-110k-SFT"  # ModelScope dataset ID
# DATASET_ID = "silk-road/alpaca-data-gpt4-chinese"  # HuggingFace dataset
# DATASET_ID = "BelleGroup/train_2M_CN"  # Belle dataset
NUM_SAMPLES = 5000  # number of training samples; None uses the full dataset

## Training configuration — tune as needed
MAX_STEPS = 100        # training steps
BATCH_SIZE = 1         # batch size
LEARNING_RATE = 2e-4   # learning rate

# ================================
# Auto-derived paths — no need to edit
# ================================

# Derived automatically from the settings above
MODEL_DOWNLOAD_DIR = os.path.join(PROJECT_ROOT, "model")
MODEL_PATH = get_model_path(MODEL_ID)
DATA_DOWNLOAD_DIR = os.path.join(PROJECT_ROOT, "data") 
DATA_PATH = get_dataset_path(DATASET_ID)
OUTPUT_DIR = os.path.join(PROJECT_ROOT, "output")
LOGS_DIR = os.path.join(PROJECT_ROOT, "logs")

# ================================
# Detailed configuration — for advanced users
# ================================

## Model parameters
MAX_SEQ_LENGTH = 2048
LOAD_IN_4BIT = True

## LoRA configuration
LORA_R = 16
LORA_ALPHA = 16
LORA_DROPOUT = 0.05
TARGET_MODULES = [
    "q_proj", "k_proj", "v_proj", "o_proj",
    "gate_proj", "up_proj", "down_proj"
]

## Detailed training settings
GRADIENT_ACCUMULATION_STEPS = 4
WARMUP_STEPS = 10
WEIGHT_DECAY = 0.01
OPTIMIZER = "adamw_8bit"
LR_SCHEDULER_TYPE = "linear"

## Checkpoint / logging configuration
SAVE_STEPS = 50
SAVE_TOTAL_LIMIT = 2
LOGGING_STEPS = 1

## GPU configuration
USE_FP16 = True  # use FP16 when BF16 is not supported
USE_BF16 = False  # NOTE(review): must be set manually — no automatic GPU detection happens in this file
USE_GRADIENT_CHECKPOINTING = True

## Inference configuration
TEMPERATURE = 0.7
MAX_NEW_TOKENS = 512
DO_SAMPLE = True

# Configuration validation and helper functions
def print_config():
    """Print a human-readable summary of the active configuration."""
    divider = "=" * 60
    summary = [
        divider,
        "🔧 当前配置",
        divider,
        f"📁 项目根目录: {PROJECT_ROOT}",
        f"🤖 模型ID: {MODEL_ID}",
        f"📂 模型路径: {MODEL_PATH}",
        f"📊 数据集ID: {DATASET_ID}",
        f"📂 数据集路径: {DATA_PATH}",
        f"📤 输出目录: {OUTPUT_DIR}",
        f"📈 训练步数: {MAX_STEPS}",
        f"📦 批次大小: {BATCH_SIZE}",
        f"🎯 学习率: {LEARNING_RATE}",
        f"📝 样本数量: {NUM_SAMPLES if NUM_SAMPLES else '全部'}",
        divider,
    ]
    # One write with embedded newlines produces the same stdout as
    # the equivalent sequence of individual print() calls.
    print("\n".join(summary))

def validate_config():
    """Validate the module-level configuration constants.

    Prints one line per problem found and returns False when any check
    fails; returns True when the configuration looks usable.

    Returns:
        bool: True when all checks pass.
    """
    issues = []

    if not MODEL_ID:
        issues.append("❌ MODEL_ID 不能为空")

    if not DATASET_ID:
        issues.append("❌ DATASET_ID 不能为空")

    if MAX_STEPS <= 0:
        issues.append("❌ MAX_STEPS 必须大于0")

    if BATCH_SIZE <= 0:
        issues.append("❌ BATCH_SIZE 必须大于0")

    # Previously unchecked knobs: a non-positive learning rate or sample
    # count would silently break training, so validate them here too.
    if LEARNING_RATE <= 0:
        issues.append("❌ LEARNING_RATE 必须大于0")

    if NUM_SAMPLES is not None and NUM_SAMPLES <= 0:
        issues.append("❌ NUM_SAMPLES 必须大于0或为None")

    if issues:
        print("\n".join(issues))
        return False

    return True

if __name__ == "__main__":
    print_config()
    if validate_config():
        print("✅ 配置验证通过")
    else:
        print("❌ 配置验证失败")
        sys.exit(1)
