#!/bin/bash
# Chinese nanochat model training script.
# Full pipeline: data prep -> tokenizer -> pretraining -> finetuning -> evaluation

# Exit on the first unhandled error. pipefail makes a pipeline fail when any
# stage fails (the GPU/disk probes below pipe through wc/tail/awk/sed, whose
# upstream failures would otherwise be silently masked).
set -e
set -o pipefail

# ANSI color codes for log output (constants — never reassigned).
readonly RED='\033[0;31m'
readonly GREEN='\033[0;32m'
readonly YELLOW='\033[1;33m'
readonly BLUE='\033[0;34m'
readonly NC='\033[0m'

# Tunable parameters, overridable via the environment.
DEPTH=${DEPTH:-20}                    # model depth
DEVICE_BATCH_SIZE=${DEVICE_BATCH_SIZE:-32}  # per-device batch size
NUM_GPUS=${NUM_GPUS:-8}               # GPU count (may be lowered by check_environment)
WANDB_RUN=${WANDB_RUN:-chinese-d20}   # wandb run name

# Directory layout under the nanochat cache (constants).
readonly NANOCHAT_BASE_DIR="${HOME}/.cache/nanochat"
readonly CHINESE_DATA_DIR="${NANOCHAT_BASE_DIR}/chinese_data"
readonly TOKENIZER_DIR="${NANOCHAT_BASE_DIR}/tokenizer"

# Printing helpers.
# Emit a banner: blank line, rule, two-space-indented title, rule, blank line.
print_header() {
    local title=$1
    local rule='=========================================='
    printf '\n%s\n  %s\n%s\n\n' "$rule" "$title" "$rule"
}

# Log an informational message with a blue [INFO] tag.
print_info() {
    local msg=$1
    printf '%b\n' "${BLUE}[INFO]${NC} ${msg}"
}

# Log a success message with a green [SUCCESS] tag.
print_success() {
    local msg=$1
    printf '%b\n' "${GREEN}[SUCCESS]${NC} ${msg}"
}

# Log a warning message with a yellow [WARNING] tag.
print_warning() {
    local msg=$1
    printf '%b\n' "${YELLOW}[WARNING]${NC} ${msg}"
}

# Log an error message with a red [ERROR] tag (note: goes to stdout,
# matching the original behavior).
print_error() {
    local msg=$1
    printf '%b\n' "${RED}[ERROR]${NC} ${msg}"
}

# Verify runtime prerequisites: virtualenv, NVIDIA GPUs, and disk space.
# Side effects: may source .venv/bin/activate, may lower the global NUM_GPUS
# to the detected GPU count, sets GPU_COUNT and AVAILABLE_SPACE.
# Exits non-zero on any fatal problem.
check_environment() {
    print_header "检查环境"
    
    # Activate the project's virtualenv if the caller has not already.
    # ${VAR:-} keeps the check safe even if the script later runs under `set -u`.
    if [ -z "${VIRTUAL_ENV:-}" ]; then
        print_warning "未激活虚拟环境，尝试激活..."
        if [ -f ".venv/bin/activate" ]; then
            # shellcheck disable=SC1091
            source .venv/bin/activate
            print_success "虚拟环境已激活"
        else
            print_error "未找到虚拟环境，请先运行: bash docs/zh-CN/配置工具/setup_china.sh"
            exit 1
        fi
    fi
    
    # GPU presence: nvidia-smi must be on PATH.
    if ! command -v nvidia-smi &> /dev/null; then
        print_error "未检测到 NVIDIA GPU"
        exit 1
    fi
    
    GPU_COUNT=$(nvidia-smi --list-gpus | wc -l)
    print_info "检测到 ${GPU_COUNT} 个 GPU"
    
    # Clamp the configured GPU count to what is actually available.
    if [ "$GPU_COUNT" -lt "$NUM_GPUS" ]; then
        print_warning "可用 GPU ($GPU_COUNT) 少于配置的数量 ($NUM_GPUS)"
        print_warning "将使用 $GPU_COUNT 个 GPU"
        NUM_GPUS=$GPU_COUNT
    fi
    
    # Free space in GiB for the current filesystem. -P forces one record per
    # filesystem, so `tail -1 | awk '{print $4}'` reads the right column even
    # for long device names that df would otherwise wrap onto a second line.
    AVAILABLE_SPACE=$(df -P -BG . | tail -1 | awk '{print $4}' | sed 's/G//')
    if [ "$AVAILABLE_SPACE" -lt 50 ]; then
        print_error "磁盘空间不足（需要至少 50GB，当前可用 ${AVAILABLE_SPACE}GB）"
        exit 1
    fi
    print_info "可用磁盘空间: ${AVAILABLE_SPACE}GB"
    
    print_success "环境检查完成"
}

# Download and clean the Chinese pretraining corpus into parquet shards.
# Idempotent: a data_ready.flag file marks a completed previous run.
# NOTE(review): the embedded Python hardcodes ~/.cache/nanochat/chinese_data;
# keep it in sync with $CHINESE_DATA_DIR.
download_chinese_data() {
    print_header "下载中文数据集"
    
    mkdir -p "$CHINESE_DATA_DIR"
    
    # Skip everything if a previous run already finished.
    if [ -f "${CHINESE_DATA_DIR}/data_ready.flag" ]; then
        print_info "中文数据集已存在，跳过下载"
        return
    fi
    
    print_info "开始下载中文数据集..."
    print_info "数据来源："
    print_info "  - 中文维基百科"
    print_info "  - 百度百科"
    print_info "  - 中文新闻"
    print_info "  - 中文书籍"
    
    # Each source is loaded best-effort; the run only aborts if all fail.
    python << 'PYTHON_SCRIPT'
import os
from datasets import load_dataset, concatenate_datasets

print("📥 加载数据集...")

# 1. Chinese Wikipedia
try:
    wiki = load_dataset("wikipedia", "20220301.zh", split="train[:100000]")
    print(f"✅ 维基百科: {len(wiki)} 条")
except Exception as e:
    print(f"⚠️  维基百科加载失败: {e}")
    wiki = None

# 2. Baidu Baike
try:
    baike = load_dataset("xusenlin/baidubaike-563w", split="train[:200000]")
    print(f"✅ 百度百科: {len(baike)} 条")
except Exception as e:
    print(f"⚠️  百度百科加载失败: {e}")
    baike = None

# 3. News
try:
    news = load_dataset("THUDM/LongBench", "news", split="train[:50000]")
    print(f"✅ 新闻数据: {len(news)} 条")
except Exception as e:
    print(f"⚠️  新闻数据加载失败: {e}")
    news = None

# Merge whichever sources actually loaded.
datasets_to_merge = [ds for ds in [wiki, baike, news] if ds is not None]

if not datasets_to_merge:
    print("❌ 所有数据集加载失败！")
    exit(1)

print("\n🔄 合并数据集...")
combined = concatenate_datasets(datasets_to_merge)

# Clean the text.
print("\n🧹 清洗数据...")
import re

def clean_text(example):
    # Sources disagree on the column name ('text' vs 'content'), and either
    # may be None — coalesce to '' so re.sub never sees None.
    text = example.get('text') or example.get('content') or ''
    # Keep CJK, ASCII alphanumerics, common Chinese punctuation, whitespace.
    text = re.sub(r'[^\u4e00-\u9fa5a-zA-Z0-9，。！？、；：""''（）【】《》\s]', '', text)
    return {'text': text}

# BUGFIX: a datasets.map() callback must always return a dict — the previous
# version returned None for short texts, which raises inside map(), and the
# follow-up `filter(x is not None)` never filtered anything. Drop texts
# shorter than 50 chars in an explicit length filter instead.
combined = combined.map(clean_text, remove_columns=combined.column_names)
combined = combined.filter(lambda x: len(x['text']) >= 50)

print(f"\n✅ 清洗后数据量: {len(combined)} 条")

# Save as parquet shards of ~100k rows each.
print("\n💾 保存数据...")
output_dir = os.path.expanduser("~/.cache/nanochat/chinese_data")
os.makedirs(output_dir, exist_ok=True)

shard_size = 100000
num_shards = (len(combined) + shard_size - 1) // shard_size

for i in range(num_shards):
    start = i * shard_size
    end = min((i + 1) * shard_size, len(combined))
    shard = combined.select(range(start, end))
    output_file = f"{output_dir}/shard_{i:05d}.parquet"
    shard.to_parquet(output_file)
    print(f"  保存: {output_file} ({len(shard)} 条)")

# Completion marker checked at the top of this function.
with open(f"{output_dir}/data_ready.flag", "w") as f:
    f.write("ready")

print("\n✅ 数据下载和处理完成！")
PYTHON_SCRIPT
    
    print_success "中文数据集准备完成"
}

# 训练中文分词器
train_chinese_tokenizer() {
    print_header "训练中文分词器"
    
    if [ -f "${TOKENIZER_DIR}/tokenizer.pkl" ]; then
        print_info "分词器已存在，跳过训练"
        return
    fi
    
    print_info "开始训练中文分词器..."
    print_info "词汇表大小: 80000"
    print_info "训练数据: ${CHINESE_DATA_DIR}"
    
    python << 'PYTHON_SCRIPT'
import os
import sys
from pathlib import Path
import pyarrow.parquet as pq
import rustbpe
import pickle
import tiktoken

# 中文分词正则
CHINESE_PATTERN = r"""[\u4e00-\u9fa5]+|[a-zA-Z]+|[0-9]+|[^\s\u4e00-\u9fa5a-zA-Z0-9]+"""

# 特殊 token
SPECIAL_TOKENS = [
    "<|bos|>",
    "<|user_start|>",
    "<|user_end|>",
    "<|assistant_start|>",
    "<|assistant_end|>",
    "<|python_start|>",
    "<|python_end|>",
    "<|output_start|>",
    "<|output_end|>",
]

def text_iterator():
    """读取中文数据"""
    data_dir = Path.home() / ".cache/nanochat/chinese_data"
    parquet_files = sorted(data_dir.glob("shard_*.parquet"))
    
    print(f"找到 {len(parquet_files)} 个数据文件")
    
    for file in parquet_files:
        print(f"读取: {file.name}")
        table = pq.read_table(file)
        texts = table.column('text').to_pylist()
        for text in texts:
            yield text

print("🔤 开始训练中文分词器...")

# 训练
tokenizer = rustbpe.Tokenizer()
vocab_size = 80000 - len(SPECIAL_TOKENS)

tokenizer.train_from_iterator(
    text_iterator(),
    vocab_size=vocab_size,
    pattern=CHINESE_PATTERN
)

print("✅ 训练完成，构建 tiktoken encoding...")

# 构建 tiktoken encoding
pattern = tokenizer.get_pattern()
mergeable_ranks_list = tokenizer.get_mergeable_ranks()
mergeable_ranks = {bytes(k): v for k, v in mergeable_ranks_list}
tokens_offset = len(mergeable_ranks)
special_tokens = {name: tokens_offset + i for i, name in enumerate(SPECIAL_TOKENS)}

enc = tiktoken.Encoding(
    name="rustbpe-chinese",
    pat_str=pattern,
    mergeable_ranks=mergeable_ranks,
    special_tokens=special_tokens,
)

# 保存
output_dir = Path.home() / ".cache/nanochat/tokenizer"
output_dir.mkdir(parents=True, exist_ok=True)
output_file = output_dir / "tokenizer.pkl"

with open(output_file, "wb") as f:
    pickle.dump(enc, f)

print(f"✅ 分词器已保存到: {output_file}")

# 测试
test_text = "今天天气真好，我们去公园玩吧！"
tokens = enc.encode_ordinary(test_text)
decoded = enc.decode(tokens)

print(f"\n测试分词:")
print(f"  原文: {test_text}")
print(f"  Token数: {len(tokens)}")
print(f"  压缩率: {len(test_text) / len(tokens):.2f} 字符/token")
print(f"  解码: {decoded}")
PYTHON_SCRIPT
    
    print_success "中文分词器训练完成"
}

# Base pretraining of the model; uses torchrun for multi-GPU runs and a
# plain python invocation otherwise.
pretrain_model() {
    print_header "预训练中文模型"
    
    print_info "模型配置:"
    print_info "  深度: d${DEPTH}"
    print_info "  批量大小: ${DEVICE_BATCH_SIZE}"
    print_info "  GPU 数量: ${NUM_GPUS}"
    
    # Shared module/arguments for both launch modes.
    local train_args=(
        -m scripts.base_train --
        --depth="$DEPTH"
        --device_batch_size="$DEVICE_BATCH_SIZE"
        --run="$WANDB_RUN"
    )
    
    if [ "$NUM_GPUS" -gt 1 ]; then
        print_info "使用分布式训练"
        torchrun --standalone --nproc_per_node="$NUM_GPUS" "${train_args[@]}"
    else
        print_info "使用单卡训练"
        python "${train_args[@]}"
    fi
    
    print_success "预训练完成"
}

# Mid-training: adapt the pretrained model to the chat/conversation format.
# First fetches Belle Chinese instruction data (best-effort; failures fall
# back to the pretrained model), then runs scripts.mid_train.
midtrain_model() {
    print_header "中期训练（对话格式）"
    
    print_info "下载中文对话数据..."
    
    # Download 100k Belle examples and convert them to nanochat's
    # {"messages": [...]} chat schema.
    python << 'PYTHON_SCRIPT'
from datasets import load_dataset
import os

print("📥 下载 Belle 中文对话数据...")
try:
    belle = load_dataset("BelleGroup/train_1M_CN", split="train[:100000]")
    
    # Map Belle's instruction/input/output triple onto a two-turn chat.
    def convert_format(example):
        instruction = example['instruction']
        input_text = example.get('input', '')
        output = example['output']
        
        # Fold the optional input field into the user turn.
        if input_text:
            user_content = f"{instruction}\n\n{input_text}"
        else:
            user_content = instruction
        
        return {
            "messages": [
                {"role": "user", "content": user_content},
                {"role": "assistant", "content": output}
            ]
        }
    
    # NOTE(review): map() without remove_columns keeps the original
    # instruction/input/output columns alongside "messages" in the saved
    # jsonl — confirm scripts.mid_train tolerates the extra fields.
    converted = belle.map(convert_format)
    
    # Save as jsonl for the mid-training step.
    output_dir = os.path.expanduser("~/.cache/nanochat/chinese_sft")
    os.makedirs(output_dir, exist_ok=True)
    converted.to_json(f"{output_dir}/belle_100k.jsonl")
    
    print(f"✅ 已保存 {len(converted)} 条对话数据")
except Exception as e:
    # Deliberately non-fatal: continue with the pretrained model.
    print(f"⚠️  下载失败: {e}")
    print("将使用预训练模型继续...")
PYTHON_SCRIPT
    
    # Launch mid-training (distributed when more than one GPU is configured).
    if [ "$NUM_GPUS" -gt 1 ]; then
        torchrun --standalone --nproc_per_node=$NUM_GPUS \
            -m scripts.mid_train -- \
            --device_batch_size=$DEVICE_BATCH_SIZE \
            --run=$WANDB_RUN
    else
        python -m scripts.mid_train -- \
            --device_batch_size=$DEVICE_BATCH_SIZE \
            --run=$WANDB_RUN
    fi
    
    print_success "中期训练完成"
}

# Supervised finetuning (SFT) on chat data; uses a small fixed per-device
# batch size of 4 regardless of DEVICE_BATCH_SIZE.
finetune_model() {
    print_header "监督微调"
    
    # Shared module/arguments for both launch modes.
    local sft_args=(-m scripts.chat_sft -- --device_batch_size=4 --run="$WANDB_RUN")
    
    if [ "$NUM_GPUS" -gt 1 ]; then
        torchrun --standalone --nproc_per_node="$NUM_GPUS" "${sft_args[@]}"
    else
        python "${sft_args[@]}"
    fi
    
    print_success "监督微调完成"
}

# Evaluate the SFT checkpoint (-i sft) via scripts.chat_eval.
evaluate_model() {
    print_header "评估模型"
    
    print_info "运行评估任务..."
    
    # Shared module/arguments for both launch modes.
    local eval_args=(-m scripts.chat_eval -- -i sft)
    
    if [ "$NUM_GPUS" -gt 1 ]; then
        torchrun --standalone --nproc_per_node="$NUM_GPUS" "${eval_args[@]}"
    else
        python "${eval_args[@]}"
    fi
    
    print_success "评估完成"
}

# Generate the markdown training report via nanochat's report module and,
# if it landed in the working directory, tell the user where it is.
generate_report() {
    print_header "生成训练报告"
    
    python -m nanochat.report generate
    
    if [[ -f "report.md" ]]; then
        print_success "报告已生成: report.md"
        print_info "查看报告: cat report.md"
    fi
}

# Orchestrate the full training pipeline and report total wall-clock time.
main() {
    local start_ts end_ts elapsed hours minutes
    start_ts=$(date +%s)
    
    print_header "🇨🇳 开始训练中文 nanochat 模型"
    
    print_info "配置信息:"
    print_info "  模型深度: d${DEPTH}"
    print_info "  批量大小: ${DEVICE_BATCH_SIZE}"
    print_info "  GPU 数量: ${NUM_GPUS}"
    print_info "  WandB 运行: ${WANDB_RUN}"
    
    check_environment        # 1. environment sanity checks
    download_chinese_data    # 2. corpus download + cleaning
    train_chinese_tokenizer  # 3. tokenizer training
    pretrain_model           # 4. base pretraining
    midtrain_model           # 5. mid-training on chat format
    finetune_model           # 6. supervised finetuning
    evaluate_model           # 7. evaluation
    generate_report          # 8. report generation
    
    end_ts=$(date +%s)
    elapsed=$((end_ts - start_ts))
    hours=$((elapsed / 3600))
    minutes=$(((elapsed % 3600) / 60))
    
    print_header "🎉 训练完成！"
    print_success "总耗时: ${hours}小时 ${minutes}分钟"
    
    echo ""
    echo "下一步："
    echo "  1. 查看报告: cat report.md"
    echo "  2. 启动 Web 服务: python -m scripts.chat_web"
    echo "  3. 与模型对话: python -m scripts.chat_cli"
    echo ""
}

# Entry point: forward all CLI arguments to main.
main "$@"

