#!/usr/bin/env python3
"""
数据集下载和处理脚本
支持多种常用的中文微调数据集
支持自动路径识别和配置驱动
"""

import os
import argparse
import sys
from pathlib import Path
from datasets import load_dataset, Dataset
import json

# Add this script's directory to sys.path so the local `config` module can be imported.
script_dir = Path(__file__).parent.resolve()
if str(script_dir) not in sys.path:
    sys.path.insert(0, str(script_dir))

import config

def download_chinese_qwen_sft(save_path="./data/chinese-qwen-sft", num_samples=None):
    """Download the Chinese Qwen SFT dataset from ModelScope (recommended).

    Converts the chat-style ``messages`` records into the
    instruction/input/output schema used by the rest of this script.

    Args:
        save_path: Directory the processed dataset is written to.
        num_samples: Optional cap on how many training samples to load.

    Returns:
        The path the dataset was saved to.
    """
    print("下载 Chinese Qwen SFT 数据集...")

    from modelscope.msdatasets import MsDataset

    # Only pull the requested slice instead of the whole split.
    split = f'train[:{num_samples}]' if num_samples else 'train'

    dataset = MsDataset.load('swift/Chinese-Qwen3-235B-2507-Distill-data-110k-SFT', split=split)

    # Flatten each conversation into a single instruction/output pair.
    data_list = []
    for item in dataset:
        if 'messages' in item:
            messages = item['messages']
            if len(messages) >= 2:
                # NOTE(review): assumes messages[0] is the user turn and
                # messages[1] the assistant turn — confirm against the dataset
                # schema (a leading system message would break this mapping).
                data_list.append({
                    'instruction': messages[0]['content'],
                    'output': messages[1]['content'],
                    'input': ''
                })

    dataset = Dataset.from_list(data_list)

    # dirname() is "" for a bare filename and makedirs("") raises, so only
    # create the parent directory when there actually is one.
    parent = os.path.dirname(save_path)
    if parent:
        os.makedirs(parent, exist_ok=True)
    dataset.save_to_disk(save_path)

    print(f"数据集保存到: {save_path}")
    print(f"数据集大小: {len(dataset)}")

    # Preview a few samples, truncating long fields for readability.
    print("\n数据样本:")
    for i in range(min(3, len(dataset))):
        sample = dataset[i]
        print(f"样本 {i+1}:")
        instruction = sample['instruction'][:100] + "..." if len(sample['instruction']) > 100 else sample['instruction']
        output = sample['output'][:100] + "..." if len(sample['output']) > 100 else sample['output']
        print(f"  指令: {instruction}")
        print(f"  输出: {output}")

    return save_path

def download_chinese_alpaca(save_path="./data/chinese-alpaca", num_samples=None):
    """Download the Chinese Alpaca (GPT-4 generated) dataset from the Hub.

    Args:
        save_path: Directory the dataset is written to.
        num_samples: Optional cap on how many samples to keep.

    Returns:
        The path the dataset was saved to.
    """
    print("下载 Chinese Alpaca 数据集...")

    dataset = load_dataset("silk-road/alpaca-data-gpt4-chinese", split="train")

    if num_samples:
        dataset = dataset.select(range(min(num_samples, len(dataset))))

    # dirname() is "" for a bare filename and makedirs("") raises, so only
    # create the parent directory when there actually is one.
    parent = os.path.dirname(save_path)
    if parent:
        os.makedirs(parent, exist_ok=True)
    dataset.save_to_disk(save_path)

    print(f"数据集保存到: {save_path}")
    print(f"数据集大小: {len(dataset)}")

    # Preview the first few samples (fields truncated to 100 chars).
    print("\n数据样本:")
    for i in range(min(3, len(dataset))):
        print(f"样本 {i+1}:")
        print(f"  指令: {dataset[i]['instruction'][:100]}...")
        print(f"  输出: {dataset[i]['output'][:100]}...")

    return save_path

def download_belle(save_path="./data/belle", num_samples=10000):
    """Download (a slice of) the Belle 2M Chinese instruction dataset.

    Args:
        save_path: Directory the dataset is written to.
        num_samples: Cap on how many samples to keep (default 10000; the
            full split is 2M rows, so a cap is strongly advised).

    Returns:
        The path the dataset was saved to.
    """
    print("下载 Belle 数据集...")

    dataset = load_dataset("BelleGroup/train_2M_CN", split="train")

    if num_samples:
        dataset = dataset.select(range(min(num_samples, len(dataset))))

    # dirname() is "" for a bare filename and makedirs("") raises, so only
    # create the parent directory when there actually is one.
    parent = os.path.dirname(save_path)
    if parent:
        os.makedirs(parent, exist_ok=True)
    dataset.save_to_disk(save_path)

    print(f"数据集保存到: {save_path}")
    print(f"数据集大小: {len(dataset)}")

    # Preview the first few samples (fields truncated to 100 chars).
    print("\n数据样本:")
    for i in range(min(3, len(dataset))):
        print(f"样本 {i+1}:")
        print(f"  指令: {dataset[i]['instruction'][:100]}...")
        print(f"  输出: {dataset[i]['output'][:100]}...")

    return save_path

def download_firefly(save_path="./data/firefly", num_samples=10000):
    """Download (a slice of) the Firefly 1.1M Chinese instruction dataset.

    Note that Firefly uses ``input``/``target`` columns rather than the
    ``instruction``/``output`` schema of the other datasets here.

    Args:
        save_path: Directory the dataset is written to.
        num_samples: Cap on how many samples to keep (default 10000).

    Returns:
        The path the dataset was saved to.
    """
    print("下载 Firefly 数据集...")

    dataset = load_dataset("YeungNLP/firefly-train-1.1M", split="train")

    if num_samples:
        dataset = dataset.select(range(min(num_samples, len(dataset))))

    # dirname() is "" for a bare filename and makedirs("") raises, so only
    # create the parent directory when there actually is one.
    parent = os.path.dirname(save_path)
    if parent:
        os.makedirs(parent, exist_ok=True)
    dataset.save_to_disk(save_path)

    print(f"数据集保存到: {save_path}")
    print(f"数据集大小: {len(dataset)}")

    # Preview the first few samples (fields truncated to 100 chars).
    print("\n数据样本:")
    for i in range(min(3, len(dataset))):
        print(f"样本 {i+1}:")
        print(f"  类型: {dataset[i]['kind']}")
        print(f"  输入: {dataset[i]['input'][:100]}...")
        print(f"  目标: {dataset[i]['target'][:100]}...")

    return save_path

def create_custom_dataset(data_file, save_path="./data/custom"):
    """Build a dataset from a user-supplied JSON file.

    The file must contain a JSON array of objects, each with at least
    ``instruction`` and ``output`` keys.

    Args:
        data_file: Path to the JSON file.
        save_path: Directory the dataset is written to.

    Returns:
        The path the dataset was saved to.

    Raises:
        ValueError: If the JSON is not a list, or an item lacks a
            required field.
    """
    print(f"从 {data_file} 创建自定义数据集...")

    with open(data_file, 'r', encoding='utf-8') as f:
        data = json.load(f)

    # A top-level dict would iterate over its string keys and make the
    # field checks below meaningless substring tests — reject it early.
    if not isinstance(data, list):
        raise ValueError(f"数据文件必须是JSON数组, 实际为: {type(data).__name__}")

    # Validate that every record carries the required fields.
    required_keys = ['instruction', 'output']
    for i, item in enumerate(data):
        for key in required_keys:
            if key not in item:
                raise ValueError(f"数据项 {i} 缺少必需字段: {key}")

    # `Dataset` is already imported at module level.
    dataset = Dataset.from_list(data)

    # dirname() is "" for a bare filename and makedirs("") raises, so only
    # create the parent directory when there actually is one.
    parent = os.path.dirname(save_path)
    if parent:
        os.makedirs(parent, exist_ok=True)
    dataset.save_to_disk(save_path)

    print(f"数据集保存到: {save_path}")
    print(f"数据集大小: {len(dataset)}")

    return save_path

def format_dataset_preview(dataset_path):
    """Print the size, column names and first few samples of a saved dataset."""
    from datasets import load_from_disk

    ds = load_from_disk(dataset_path)

    print(f"数据集路径: {dataset_path}")
    print(f"数据集大小: {len(ds)}")
    print(f"数据集字段: {ds.column_names}")

    print("\n前3个样本:")
    for idx in range(min(3, len(ds))):
        print(f"\n样本 {idx+1}:")
        record = ds[idx]
        for field, content in record.items():
            # Truncate long string fields so the preview stays readable.
            truncate = isinstance(content, str) and len(content) > 100
            shown = f"{content[:100]}..." if truncate else content
            print(f"  {field}: {shown}")

def convert_to_qwen_format(dataset_path, output_path=None):
    """Convert an instruction dataset to Qwen chat-template text format.

    Each example becomes a single ``text`` string wrapped in
    ``<|im_start|>``/``<|im_end|>`` chat markers; all original columns
    are dropped.

    Args:
        dataset_path: Path to a dataset saved with ``save_to_disk``.
        output_path: Where to write the formatted dataset; defaults to
            ``<dataset_path>_qwen_format``.

    Returns:
        The output path.
    """
    from datasets import load_from_disk

    if output_path is None:
        output_path = dataset_path + "_qwen_format"

    dataset = load_from_disk(dataset_path)

    def format_for_qwen(examples):
        """Batched map fn: build one chat-formatted `text` per example."""
        texts = []

        for i in range(len(examples["instruction"])):
            instruction = examples["instruction"][i]

            # The optional context column is named differently per dataset.
            input_text = ""
            if "input" in examples and examples["input"][i]:
                input_text = examples["input"][i]
            elif "context" in examples and examples["context"][i]:
                input_text = examples["context"][i]

            # Most datasets here use "output"; Firefly uses "target".
            output = examples["output"][i] if "output" in examples else examples["target"][i]

            if input_text and input_text.strip():
                text = f"<|im_start|>user\n{instruction}\n{input_text}<|im_end|>\n<|im_start|>assistant\n{output}<|im_end|>"
            else:
                text = f"<|im_start|>user\n{instruction}<|im_end|>\n<|im_start|>assistant\n{output}<|im_end|>"

            texts.append(text)

        return {"text": texts}

    formatted_dataset = dataset.map(
        format_for_qwen,
        batched=True,
        remove_columns=dataset.column_names
    )

    # dirname() is "" for a bare filename and makedirs("") raises, so only
    # create the parent directory when there actually is one.
    parent = os.path.dirname(output_path)
    if parent:
        os.makedirs(parent, exist_ok=True)
    formatted_dataset.save_to_disk(output_path)

    print(f"格式化后的数据集保存到: {output_path}")
    print(f"数据集大小: {len(formatted_dataset)}")

    # Preview the formatted text (truncated).
    print("\n格式化后的样本:")
    for i in range(min(2, len(formatted_dataset))):
        print(f"\n样本 {i+1}:")
        print(formatted_dataset[i]['text'][:300] + "...")

    return output_path

def main():
    """CLI entry point: dispatch to the download/preview/convert helpers."""
    parser = argparse.ArgumentParser(description="数据集下载和处理脚本")
    # Not required=True: --preview and --convert_to_qwen are standalone
    # modes, and a mandatory --dataset made argparse reject them before
    # they could run. Presence is validated manually below instead.
    parser.add_argument("--dataset", choices=["qwen-sft", "alpaca", "belle", "firefly", "custom"],
                       help="要下载的数据集")
    parser.add_argument("--save_path", help="保存路径")
    parser.add_argument("--num_samples", type=int, help="采样数量")
    parser.add_argument("--data_file", help="自定义数据集的JSON文件路径")
    parser.add_argument("--preview", help="预览指定路径的数据集")
    parser.add_argument("--convert_to_qwen", help="将数据集转换为Qwen格式")

    args = parser.parse_args()

    # Standalone mode: preview an existing dataset.
    if args.preview:
        format_dataset_preview(args.preview)
        return

    # Standalone mode: convert an existing dataset to Qwen format.
    if args.convert_to_qwen:
        convert_to_qwen_format(args.convert_to_qwen)
        return

    if not args.dataset:
        # Exits with status 2, matching argparse's own error behavior.
        parser.error("必须指定 --dataset (或使用 --preview / --convert_to_qwen)")

    if args.dataset == "custom":
        if not args.data_file:
            print("错误: 使用自定义数据集需要指定 --data_file")
            return
        create_custom_dataset(args.data_file, args.save_path or "./data/custom")
        return

    # Map each downloadable dataset to (downloader, default save path).
    downloaders = {
        "qwen-sft": (download_chinese_qwen_sft, "./data/chinese-qwen-sft"),
        "alpaca": (download_chinese_alpaca, "./data/chinese-alpaca"),
        "belle": (download_belle, "./data/belle"),
        "firefly": (download_firefly, "./data/firefly"),
    }
    download, default_path = downloaders[args.dataset]
    download(args.save_path or default_path, args.num_samples)

if __name__ == "__main__":
    main()
