from transformers import AutoTokenizer, AutoModelForQuestionAnswering, Trainer, TrainingArguments

from load_squad_style_dataset import load_squad_style_dataset

# Fine-tune a Chinese BERT model for extractive question answering on a
# SQuAD-style dataset. Relies on `load_squad_style_dataset` (implemented
# alongside this script) to produce pre-tokenized train/eval datasets.

# 1. Load the tokenizer and the QA model head.
model_name = 'hfl/chinese-bert-wwm-ext'
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForQuestionAnswering.from_pretrained(model_name)

# 2. Define the data file paths and load the tokenized datasets.
# NOTE(review): assumes both files are flattened JSONL in SQuAD style —
# confirm against load_squad_style_dataset's expectations.
train_file = './train_flat.jsonl'
eval_file = './eval_flat.jsonl'
train_dataset, eval_dataset = load_squad_style_dataset(tokenizer, train_file, eval_file)

# 3. Configure the training hyperparameters.
training_args = TrainingArguments(
    output_dir='./results',
    num_train_epochs=3,
    per_device_train_batch_size=8,
    per_device_eval_batch_size=8,
    warmup_steps=500,          # linear LR warmup over the first 500 steps
    weight_decay=0.01,
    logging_dir='./logs',
)

# 4. Initialize the Trainer and run fine-tuning.
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=train_dataset,
    eval_dataset=eval_dataset,
)

print("开始训练...")
trainer.train()
print("训练完成！")

# FIX: eval_dataset was passed to the Trainer but never used — the default
# evaluation strategy is "no" and the original script never evaluated.
# Run one explicit evaluation pass so the eval split actually matters.
eval_metrics = trainer.evaluate()
print(eval_metrics)

# 5. Save the fine-tuned model and its tokenizer to one directory so it can
# be reloaded later with from_pretrained().
model.save_pretrained("./my_professional_qa_model")
tokenizer.save_pretrained("./my_professional_qa_model")