# pip install torch transformers[torch] datasets

from transformers import GPT2LMHeadModel, GPT2Tokenizer, Trainer, TrainingArguments
from datasets import Dataset

# Load the training text (one example per line).
# NOTE(review): hard-coded absolute path — consider making this a CLI arg
# or environment variable so the script is portable.
with open("/home/lurker/文档/playground/python/pytorch/20241114/LLM_trial/fine-tuning-data.txt", "r", encoding="utf-8") as f:
    data = f.read()

# Load the pretrained GPT-2 model and its tokenizer.
model_name = "gpt2"
model = GPT2LMHeadModel.from_pretrained(model_name)
tokenizer = GPT2Tokenizer.from_pretrained(model_name)

# GPT-2 ships without a padding token: add one, then resize the embedding
# matrix so the new [PAD] id has an embedding row.
tokenizer.add_special_tokens({"pad_token": "[PAD]"})
model.resize_token_embeddings(len(tokenizer))

# Build a Dataset with one example per line.
# Fix: the original `data.split("\n")` kept blank lines (including the empty
# string a trailing newline produces), which tokenize into all-padding rows;
# filter them out.
texts = [line for line in data.split("\n") if line.strip()]
dataset = Dataset.from_dict({"text": texts})


def tokenize_function(examples):
    """Tokenize a batch of texts for causal-LM fine-tuning.

    Args:
        examples: batch dict from ``Dataset.map(batched=True)`` containing
            a ``"text"`` list of strings.

    Returns:
        dict with ``input_ids``, ``attention_mask`` and ``labels`` lists,
        all padded to the tokenizer's ``model_max_length``.
    """
    # Fix 1: drop return_tensors="pt" — Dataset.map stores plain Python
    # lists anyway, and padding=True only pads within each map batch,
    # producing rows of different lengths that the Trainer's default
    # collator cannot stack into one tensor. padding="max_length" gives
    # every row the same length (GPT-2's model_max_length; set max_length=
    # explicitly to use a shorter, cheaper fixed length).
    tokens = tokenizer(
        examples["text"],
        truncation=True,
        padding="max_length",
    )
    # Fix 2: Trainer needs a "labels" column to compute the LM loss;
    # GPT2LMHeadModel shifts labels internally, so they equal input_ids.
    tokens["labels"] = tokens["input_ids"].copy()
    return tokens


# Tokenize the whole dataset in batches.
tokenized_datasets = dataset.map(tokenize_function, batched=True)

# Training configuration.
training_args = TrainingArguments(
    output_dir="./gpt2_finetuned",  # checkpoint / final model directory
    num_train_epochs=3,  # number of passes over the data
    per_device_train_batch_size=4,  # train batch size per device
    per_device_eval_batch_size=8,  # eval batch size per device
    warmup_steps=500,  # linear LR warm-up steps
    weight_decay=0.01,  # L2-style weight decay
    logging_dir="./logs",  # TensorBoard log directory
    logging_steps=10,
    save_steps=500,
    evaluation_strategy="steps",
    # Fix: with evaluation_strategy="steps" and no explicit eval_steps,
    # eval_steps falls back to logging_steps (10), i.e. a full evaluation
    # pass every 10 optimizer steps. Align evaluation with checkpointing.
    eval_steps=500,
    save_total_limit=2,  # keep only the two most recent checkpoints
)

# Build the Trainer and start fine-tuning.
# NOTE(review): eval_dataset is the training set itself, so the reported
# eval loss only tracks training fit — substitute a held-out split if one
# is available.
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=tokenized_datasets,
    eval_dataset=tokenized_datasets,
)

trainer.train()

# Save the fine-tuned model and tokenizer so they can be reloaded later
# with from_pretrained("./gpt2_finetuned").
model.save_pretrained("./gpt2_finetuned")
tokenizer.save_pretrained("./gpt2_finetuned")
