Dataset size: 10K < n < 100K examples.
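The script below fine-tunes VietAI/vit5-base for Vietnamese question generation: given an answer and its surrounding context, the model learns to produce the corresponding question from SQuAD-format data (here, 30ktrain.json).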
import json

from datasets import Dataset
from sklearn.model_selection import train_test_split
from transformers import (
    T5Tokenizer,
    T5ForConditionalGeneration,
    TrainingArguments,
    Trainer,
)
def load_squad_data(file_path):
    """Flatten a SQuAD-format JSON file into (input, target) pairs for question generation."""
    with open(file_path, "r", encoding="utf-8") as f:
        squad_data = json.load(f)
    data = []
    for article in squad_data["data"]:
        for paragraph in article["paragraphs"]:
            context = paragraph.get("context", "")
            for qa in paragraph["qas"]:
                # Skip unanswerable (SQuAD v2 "is_impossible") and answerless questions.
                if not qa.get("is_impossible", False) and qa.get("answers"):
                    answer = qa["answers"][0]["text"]
                    question = qa["question"]
                    # The model learns to generate the question from answer + context.
                    input_text = f"answer: {answer} context: {context}"
                    data.append({"input": input_text, "target": question})
    return data
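# For reference, load_squad_data expects the standard SQuAD layout, i.e. only
# the keys it reads above:
# {"data": [{"paragraphs": [{"context": "...",
#     "qas": [{"question": "...", "is_impossible": false,
#              "answers": [{"text": "..."}]}]}]}]}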
def preprocess_function(example, tokenizer, max_input_length=512, max_target_length=64):
    # Tokenize inputs and targets to fixed lengths so the default collator can batch them.
    model_inputs = tokenizer(
        example["input"],
        max_length=max_input_length,
        padding="max_length",
        truncation=True,
    )
    labels = tokenizer(
        text_target=example["target"],
        max_length=max_target_length,
        padding="max_length",
        truncation=True,
    )
    # Replace padding token ids in the labels with -100 so they are ignored by the loss.
    model_inputs["labels"] = [
        [(tok if tok != tokenizer.pad_token_id else -100) for tok in seq]
        for seq in labels["input_ids"]
    ]
    return model_inputs
def main():
    data_path = "30ktrain.json"
    output_dir = "t5-viet-qg-finetuned"
    logs_dir = "logs"
    model_name = "VietAI/vit5-base"

    print("Loading model and tokenizer...")
    tokenizer = T5Tokenizer.from_pretrained(model_name)
    model = T5ForConditionalGeneration.from_pretrained(model_name)

    print("Reading and splitting data...")
    raw_data = load_squad_data(data_path)
    # Hold out 20% of the pairs for evaluation.
    train_data, val_data = train_test_split(raw_data, test_size=0.2, random_state=42)
    train_dataset = Dataset.from_list(train_data)
    val_dataset = Dataset.from_list(val_data)

    tokenized_train = train_dataset.map(
        lambda x: preprocess_function(x, tokenizer),
        batched=True,
        remove_columns=["input", "target"],
    )
    tokenized_val = val_dataset.map(
        lambda x: preprocess_function(x, tokenizer),
        batched=True,
        remove_columns=["input", "target"],
    )
print("Cấu hình huấn luyện...") | |
training_args = TrainingArguments( | |
output_dir=output_dir, | |
overwrite_output_dir=True, | |
per_device_train_batch_size=1, | |
gradient_accumulation_steps=1, | |
num_train_epochs=3, | |
learning_rate=2e-4, | |
weight_decay=0.01, | |
warmup_steps=0, | |
logging_dir=logs_dir, | |
logging_steps=10, | |
fp16=False | |
) | |
print("Huấn luyện mô hình...") | |
trainer = Trainer( | |
model=model, | |
args=training_args, | |
train_dataset=tokenized_train, | |
eval_dataset=tokenized_val, | |
tokenizer=tokenizer, | |
) | |
trainer.train() | |
print("Lưu mô hình...") | |
model.save_pretrained(output_dir) | |
tokenizer.save_pretrained(output_dir) | |
print("Huấn luyện hoàn tất!") | |
if __name__ == "__main__": | |
main() | |
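Once training finishes, the saved checkpoint can be used to generate questions. A minimal sketch, assuming the checkpoint directory and the "answer: ... context: ..." prompt format used above; the example sentence and the generation settings (beam search, length limits) are illustrative, not tuned:

from transformers import T5Tokenizer, T5ForConditionalGeneration

tokenizer = T5Tokenizer.from_pretrained("t5-viet-qg-finetuned")
model = T5ForConditionalGeneration.from_pretrained("t5-viet-qg-finetuned")

# Same prompt format the model was fine-tuned on (illustrative example).
text = "answer: Hà Nội context: Hà Nội là thủ đô của Việt Nam."
inputs = tokenizer(text, return_tensors="pt", truncation=True, max_length=512)
outputs = model.generate(**inputs, max_length=64, num_beams=4, early_stopping=True)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))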