import os
import argparse
from transformers import (
    GPT2Config,
    GPT2LMHeadModel,
    GPT2Tokenizer,
    Trainer,
    TrainingArguments,
    DataCollatorForLanguageModeling,
)
from datasets import load_dataset, Dataset


def tokenize_batch(examples, tokenizer, max_length=512):
    """Tokenize a batch of raw text examples.

    Args:
        examples: Mapping with a "text" key holding a list of strings.
        tokenizer: Callable HuggingFace tokenizer.
        max_length: Sequences are truncated and padded to this length.

    Returns:
        Whatever the tokenizer returns for the batch (typically a dict of
        input_ids / attention_mask lists).
    """
    texts = examples["text"]
    return tokenizer(texts, truncation=True, padding="max_length", max_length=max_length)


def get_streaming_and_eval(tokenizer, file_path="data/raw/train.txt", max_length=1024, batch_size=100, eval_sample_size=1000):
    """Build a tokenized streaming training dataset and a small in-memory eval set.

    Args:
        tokenizer: HuggingFace tokenizer passed through to ``tokenize_batch``.
        file_path: Path to a plain-text file (one example per line).
        max_length: Truncation/padding length used during tokenization.
        batch_size: Number of lines tokenized per ``.map()`` batch.
        eval_sample_size: Number of examples materialized for evaluation.

    Returns:
        Tuple ``(tokenized_stream, eval_dataset)`` — an iterable (streaming)
        dataset for training and a regular in-memory ``Dataset`` for eval.

    NOTE(review): the eval subset is the FIRST ``eval_sample_size`` examples
    of the very stream the trainer will iterate from the start, so the eval
    examples are also seen during training (leakage). Consider a held-out
    file, or skipping the sampled prefix on the training side.
    """
    # Removed an unused function-scope import of `datasets.IterableDataset`.
    print(f"🔄 Loading streaming dataset from {file_path}")
    ds_stream = load_dataset("text", data_files=file_path, streaming=True, split="train")

    tokenized_stream = ds_stream.map(
        lambda examples: tokenize_batch(examples, tokenizer, max_length=max_length),
        batched=True,
        batch_size=batch_size,
        remove_columns=["text"],
    )

    print("📝 Sampling a small evaluation subset from the streaming dataset...")
    # Materialize only the first eval_sample_size tokenized examples.
    eval_samples = []
    for i, sample in enumerate(tokenized_stream):
        eval_samples.append(sample)
        if i + 1 >= eval_sample_size:
            break
    eval_dataset = Dataset.from_list(eval_samples)

    return tokenized_stream, eval_dataset


def _find_latest_checkpoint(checkpoint_dir):
    """Return the most recently modified ``checkpoint-*`` directory under
    checkpoint_dir, or None if none exists.

    Only directories are considered: a stray file named ``checkpoint-*``
    must not be passed to ``Trainer.train(resume_from_checkpoint=...)``.
    """
    if not os.path.isdir(checkpoint_dir):
        return None
    candidates = [
        os.path.join(checkpoint_dir, entry)
        for entry in os.listdir(checkpoint_dir)
        if entry.startswith("checkpoint-") and os.path.isdir(os.path.join(checkpoint_dir, entry))
    ]
    if not candidates:
        return None
    return max(candidates, key=os.path.getmtime)


def main():
    """Train a GPT-2 model from scratch on a streaming text corpus."""
    # --- Command-line arguments ---
    parser = argparse.ArgumentParser(description="GPT-2 streaming training with multi-GPU support")
    parser.add_argument("--resume", action="store_true", help="Resume training from the last checkpoint if available")
    # Fix: help text previously claimed "default: 5e-4" while the actual default is 5e-5.
    parser.add_argument("--lr", type=float, default=5e-5, help="Learning rate (default: 5e-5)")
    args = parser.parse_args()

    # --- 1. Load tokenizer ---
    # Fix: the log used to print "gpt2" while the tokenizer is actually
    # loaded from the local "gpt2_tokenizer" directory.
    tokenizer_path = "gpt2_tokenizer"
    print(f"🔍 Loading tokenizer: {tokenizer_path}")
    tokenizer = GPT2Tokenizer.from_pretrained(tokenizer_path)
    # GPT-2 tokenizers define no pad token; reuse EOS so padding/collation works.
    tokenizer.pad_token = tokenizer.eos_token

    # --- 2. Custom GPT-2 configuration (GPT-2 small: 124M-ish) ---
    config = GPT2Config(
        vocab_size=len(tokenizer),
        n_positions=1024,
        n_ctx=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        # Fall back to EOS when the tokenizer has no explicit BOS token.
        bos_token_id=tokenizer.bos_token_id if tokenizer.bos_token_id is not None else tokenizer.eos_token_id,
        eos_token_id=tokenizer.eos_token_id,
    )
    model = GPT2LMHeadModel(config)

    # --- 3. Datasets (streaming train + small in-memory eval) ---
    train_dataset, eval_dataset = get_streaming_and_eval(
        tokenizer,
        file_path="data/combined.txt",
        max_length=512,
        batch_size=1000,
        eval_sample_size=1000,
    )

    # --- 4. Collator (causal LM: labels are shifted input_ids, no masking) ---
    data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)

    # --- 5. Training arguments ---
    training_args = TrainingArguments(
        output_dir="outputs/gpt2",
        # NOTE(review): num_train_epochs is ignored when max_steps is set;
        # with a streaming (iterable) dataset max_steps governs duration.
        num_train_epochs=3,
        per_device_train_batch_size=20,
        per_device_eval_batch_size=20,
        eval_strategy="steps",
        eval_steps=1000,
        save_steps=1000,
        logging_steps=100,
        fp16=False,  # keep off until training is stable, then re-enable
        save_total_limit=2,
        remove_unused_columns=False,
        report_to=["tensorboard"],
        logging_dir="logs",
        max_steps=10000000,
        warmup_steps=2000,
        max_grad_norm=1.0,
        # NOTE(review): multiple dataloader workers on a streaming dataset can
        # yield duplicated examples depending on the datasets version — verify.
        dataloader_num_workers=4,
        weight_decay=0.01,
        learning_rate=args.lr,  # override via --lr on the command line
        lr_scheduler_type="cosine",  # alternatives: 'linear', 'cosine'
        ddp_find_unused_parameters=True,
    )

    # --- 6. Trainer ---
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        data_collator=data_collator,
    )

    # --- 7. Optionally resume from the latest checkpoint ---
    print("🚀 Starting training in streaming mode...")
    resume_checkpoint = None
    if args.resume:
        resume_checkpoint = _find_latest_checkpoint(training_args.output_dir)
        if resume_checkpoint:
            print(f"🔁 Resuming from checkpoint: {resume_checkpoint}")

    trainer.train(resume_from_checkpoint=resume_checkpoint)

    # --- 8. Save the final model ---
    # NOTE(review): final model is saved under outputs/gpt2-streaming/final
    # while checkpoints live under outputs/gpt2 — confirm this split is intended.
    trainer.save_model("outputs/gpt2-streaming/final")
    print("✅ Training complete. Final model saved to outputs/gpt2-streaming/final")


# Script entry point: run training only when executed directly, not on import.
if __name__ == "__main__":
    main()

