# scripts/tokenize_data.py
import os
from transformers import GPT2Tokenizer
from datasets import Dataset
import argparse

def line_generator(path):
    """Yield ``{"text": line}`` for every non-empty line of *path*.

    Streams the file one line at a time so arbitrarily large inputs never
    need to fit in memory. Lines are stripped of surrounding whitespace;
    blank lines are skipped.
    """
    with open(path, "r", encoding="utf-8") as handle:
        for raw in handle:
            text = raw.strip()
            if not text:
                continue
            yield {"text": text}

def tokenize_and_save(input_path, output_path, tokenizer_name="gpt2"):
    """Tokenize a plain-text file line-by-line and save it as a HF dataset.

    Args:
        input_path: Path to a UTF-8 text file, one sample per line.
        output_path: Destination directory for the tokenized dataset
            (written via ``datasets.Dataset.save_to_disk``).
        tokenizer_name: Hugging Face tokenizer name or local path.
    """
    print(f"🔍 Loading tokenizer: {tokenizer_name}")
    tokenizer = GPT2Tokenizer.from_pretrained(tokenizer_name)
    # GPT-2 ships without a pad token; reuse EOS so fixed-length padding works.
    tokenizer.pad_token = tokenizer.eos_token

    print(f"📖 Creating streaming dataset from {input_path}")
    dataset = Dataset.from_generator(lambda: line_generator(input_path))

    print("✂️ Tokenizing in batches...")
    tokenized_dataset = dataset.map(
        lambda e: tokenizer(e["text"], truncation=True, padding="max_length", max_length=512),
        batched=True,
        batch_size=512,  # number of text rows per tokenizer call
    )

    # os.path.dirname("") is "" for a bare filename, and makedirs("") raises
    # FileNotFoundError — only create the parent when there actually is one.
    parent_dir = os.path.dirname(output_path)
    if parent_dir:
        os.makedirs(parent_dir, exist_ok=True)
    print(f"💾 Saving tokenized dataset to {output_path}")
    tokenized_dataset.save_to_disk(output_path)

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--input", default="data/raw/train.txt")
    parser.add_argument("--output", default="data/tokenized/tokenized.arrow")
    parser.add_argument("--tokenizer", default="gpt2", help="Tokenizer name or path")
    args = parser.parse_args()

    tokenize_and_save(args.input, args.output, args.tokenizer)
