# scripts/download_data.py
#
# Download and mix English pretraining corpora from Hugging Face into a
# single plain-text file (one cleaned document per line, documents
# separated by blank lines).

import os

# ------------------------------------------------------------------
# Hugging Face environment configuration.
# BUG FIX: these variables are read by `huggingface_hub`/`datasets` at
# *import* time, so they must be set BEFORE the `datasets` import below.
# Setting them after the import (as the original code did) silently
# leaves the mirror endpoint unused.
# ------------------------------------------------------------------
os.environ['HF_ENDPOINT'] = 'https://hf-mirror.com'
os.environ['HF_DATASETS_TRUST_REMOTE_CODE'] = '1'
os.environ['HF_DATASETS_CACHE'] = './hf_cache'

import re

from datasets import load_dataset, interleave_datasets

# ------------------------------------------------------------------
# Output configuration
# ------------------------------------------------------------------
OUTPUT_DIR = "../data"
os.makedirs(OUTPUT_DIR, exist_ok=True)

TEXT_FILE = os.path.join(OUTPUT_DIR, "pretrain_corpus.txt")
MAX_LINES = 500_000  # maximum number of documents to keep
MIN_WORDS = 50       # keep only documents with at least 50 words


def count_words(text):
    """Return the number of whitespace-separated words in *text*."""
    # str.split() with no separator already ignores leading/trailing
    # whitespace, so an explicit strip() would be redundant.
    words = text.split()
    return len(words)

# Matches any run of one or more whitespace characters (spaces, tabs,
# CR/LF, form feeds, vertical tabs, and Unicode whitespace).  Compiled
# once at module import so the per-document sub() call stays cheap.
_WHITESPACE_RE = re.compile(r'\s+')


def clean_text_for_pretraining(text):
    """Normalize intra-document whitespace for pretraining.

    Collapses every run of whitespace into a single space and strips the
    edges, so each document becomes a single line.  Compared to the
    original two-pass version (newlines/tabs, then spaces), this single
    ``\\s+`` pass also normalizes form feeds, vertical tabs, and Unicode
    whitespace.  Document *separation* is not handled here — the caller
    inserts blank lines between documents.

    Args:
        text: Raw document text.

    Returns:
        The cleaned, single-line document string.
    """
    return _WHITESPACE_RE.sub(' ', text).strip()


# ========================
# Dataset configuration:
# (repo id, subset/config, split, name of the text field, mix probability)
# ========================
DATASETS_CONFIG = [
    ("allenai/c4", "en", "train", "text", 0.6),
    ("HuggingFaceTB/cosmopedia", "web_samples_v2", "train", "text", 0.4),
]

print("🚀 正在加载多个高质量英文语料库...")

datasets_list = []
probabilities = []

for ds_name, subset, split, text_key, prob in DATASETS_CONFIG:
    try:
        print(f"🔍 加载 {ds_name} ({subset})...")
        ds = load_dataset(
            ds_name,
            subset,
            split=split,
            streaming=True,
            trust_remote_code=False,
        )

        # Normalize the per-dataset text field to 'text'.
        # BUG FIX: bind text_key as a default argument.  Streaming maps
        # are lazy, so a plain closure over the loop variable would
        # resolve `text_key` only when the stream is consumed — i.e.
        # every dataset would use the *last* key of the loop
        # (classic late-binding-closure pitfall).
        ds = ds.map(lambda x, key=text_key: {"text": x[key]})

        datasets_list.append(ds)
        probabilities.append(prob)

    except Exception as e:
        # Best effort: report and skip datasets that fail to load,
        # keep whatever did load.
        print(f"❌ 无法加载 {ds_name}: {e}")
        continue

if not datasets_list:
    raise ValueError("❌ 没有成功加载任何数据集，请检查网络或镜像。")

# Mix the loaded streams with the configured sampling probabilities;
# 'all_exhausted' keeps drawing until every source runs dry.
print(f"🔀 混合 {len(datasets_list)} 个数据集...")
mixed_dataset = interleave_datasets(
    datasets_list,
    probabilities=probabilities,
    seed=42,
    stopping_strategy="all_exhausted"
)

# ========================
# Write to file: collapse intra-document newlines, keep documents
# separated by a single blank line.
# ========================
print(f"📝 正在写入文本到 {TEXT_FILE} ...")

count = 0  # number of documents written so far
with open(TEXT_FILE, "w", encoding="utf-8") as f:
    for example in mixed_dataset:
        try:
            # Collapse all intra-document whitespace so each document
            # occupies exactly one line in the output file.
            cleaned_text = clean_text_for_pretraining(example["text"])

            # Filter: drop documents shorter than MIN_WORDS words.
            if count_words(cleaned_text) < MIN_WORDS:
                continue

            # Blank-line separator is written *before* every document
            # after the first, so the file never ends with a trailing
            # blank line.
            if count > 0:
                f.write("\n\n")
            f.write(cleaned_text)
            count += 1

            if count % 5_000 == 0:
                print(f"✅ 已保存 {count} 段文本")

            if count >= MAX_LINES:
                print(f"🎉 已达到 {MAX_LINES} 条，停止。")
                break

        except Exception:
            # Deliberate best-effort: silently skip malformed records
            # (missing 'text' key, non-string payloads, etc.).
            # (Removed the unused `as e` binding from the original.)
            continue

print(f"🎉 数据下载完成！共 {count} 段文本")
print(f"📁 保存路径: {TEXT_FILE}")