DANGDOCAO committed on
Commit
c7cd490
·
verified ·
1 Parent(s): 145c152

Delete HVU_QA/fine_tune_qg.py

Browse files
Files changed (1) hide show
  1. HVU_QA/fine_tune_qg.py +0 -125
HVU_QA/fine_tune_qg.py DELETED
@@ -1,125 +0,0 @@
1
- import os, json
2
- from datasets import Dataset
3
- from sklearn.model_selection import train_test_split
4
- from transformers import T5Tokenizer, T5ForConditionalGeneration, TrainingArguments, Trainer, DataCollatorForSeq2Seq
5
-
6
-
7
def load_squad(path: str):
    """Read a SQuAD-format JSON file and build (input, target) pairs for QG.

    Every answerable question becomes one example: the input packs the first
    answer together with its paragraph context ("answer: ... context: ...")
    and the target is the question text.  Impossible questions, questions
    without answers, and entries with any empty field are skipped.
    """
    with open(path, "r", encoding="utf-8") as fh:
        squad = json.load(fh)

    examples = []
    for article in squad.get("data", []):
        for paragraph in article.get("paragraphs", []):
            context = paragraph.get("context", "")
            for qa in paragraph.get("qas", []):
                # Skip unanswerable questions and those with no answer list.
                if qa.get("is_impossible") or not qa.get("answers"):
                    continue
                answer = qa["answers"][0].get("text", "")
                question = qa.get("question", "")
                if answer and question and context:
                    examples.append(
                        {
                            "input": f"answer: {answer} context: {context}",
                            "target": question,
                        }
                    )
    return examples
23
-
24
-
25
def tokenize(batch, tok, max_in=512, max_out=64):
    """Tokenize one batch of QG examples for seq2seq training.

    Encodes the "input" texts (truncated to *max_in* tokens) and the
    "target" texts (truncated to *max_out* tokens), then attaches the
    target token ids under ``labels`` as expected by the trainer.
    """
    encoded = tok(batch["input"], max_length=max_in, truncation=True)
    targets = tok(text_target=batch["target"], max_length=max_out, truncation=True)
    encoded["labels"] = targets["input_ids"]
    return encoded
30
-
31
-
32
def latest_ckpt(out_dir: str):
    """Return the path of the newest "checkpoint-<step>" entry in *out_dir*.

    "Newest" means the highest numeric step suffix.  Returns ``None`` when
    the directory does not exist or contains no valid checkpoint entries.
    """
    if not os.path.isdir(out_dir):
        return None

    best = None  # (step, full_path) of the highest step seen so far
    for entry in os.listdir(out_dir):
        if not entry.startswith("checkpoint-"):
            continue
        # Keep the try/except: int() accepts more than .isdigit() would
        # (e.g. underscores), so this preserves the exact accept set.
        try:
            step = int(entry.rsplit("-", 1)[-1])
        except ValueError:
            continue
        if best is None or step > best[0]:
            best = (step, os.path.join(out_dir, entry))

    return None if best is None else best[1]
48
-
49
-
50
def main(
    data_path: str = "39k_train.json",
    out_dir: str = "t5-viet-qg-finetuned",
    logs_dir: str = "logs",
    model_name: str = "VietAI/vit5-base",
):
    """Fine-tune a T5 question-generation model on SQuAD-format data.

    The previously hard-coded configuration is generalized into keyword
    parameters; the defaults reproduce the original behavior exactly, so
    the bare ``main()`` call at the bottom of the file is unaffected.

    Args:
        data_path: SQuAD-format JSON file with the training data.
        out_dir: Directory for training checkpoints and the final model.
        logs_dir: Directory for training logs.
        model_name: Hugging Face model id to load and fine-tune.
    """
    print("Tải mô hình và tokenizer...")
    tok = T5Tokenizer.from_pretrained(model_name)
    model = T5ForConditionalGeneration.from_pretrained(model_name)

    print("Đọc và chia dữ liệu...")
    data = load_squad(data_path)
    # Fixed seed keeps the train/validation split reproducible across runs,
    # which matters when resuming from a checkpoint.
    tr, va = train_test_split(data, test_size=0.2, random_state=42)

    print("Tokenize dữ liệu...")
    # Tokenize up front and drop the raw text columns so the collator only
    # sees model-ready fields (input_ids / attention_mask / labels).
    tr_ds = Dataset.from_list(tr).map(
        lambda b: tokenize(b, tok),
        batched=True,
        remove_columns=["input", "target"],
    )
    va_ds = Dataset.from_list(va).map(
        lambda b: tokenize(b, tok),
        batched=True,
        remove_columns=["input", "target"],
    )

    print("Cấu hình huấn luyện (checkpoint + resume)...")
    args = TrainingArguments(
        output_dir=out_dir,
        # Keep existing checkpoints so training can resume after a crash.
        overwrite_output_dir=False,
        per_device_train_batch_size=1,
        gradient_accumulation_steps=1,
        num_train_epochs=3,
        learning_rate=2e-4,
        weight_decay=0.01,
        warmup_steps=0,
        save_strategy="steps",
        save_steps=500,
        save_total_limit=100,
        eval_strategy="steps",
        eval_steps=500,
        # Track eval_loss so the best (lowest-loss) checkpoint is restored
        # at the end of training.
        load_best_model_at_end=True,
        metric_for_best_model="eval_loss",
        greater_is_better=False,
        logging_dir=logs_dir,
        logging_steps=10,
        fp16=True,
        report_to="none",
    )

    trainer = Trainer(
        model=model,
        args=args,
        train_dataset=tr_ds,
        eval_dataset=va_ds,
        tokenizer=tok,
        # Pads inputs and labels dynamically per batch for seq2seq training.
        data_collator=DataCollatorForSeq2Seq(tokenizer=tok, model=model),
    )

    # Resume from the newest checkpoint when one exists; otherwise start fresh.
    ckpt = latest_ckpt(out_dir)
    if ckpt:
        print(f"Phát hiện checkpoint: {ckpt} → Resume training")
        trainer.train(resume_from_checkpoint=ckpt)
    else:
        print("Không có checkpoint → Train từ đầu")
        trainer.train()

    print("Lưu mô hình cuối cùng...")
    trainer.save_model(out_dir)
    tok.save_pretrained(out_dir)

    print("Huấn luyện hoàn tất!")
122
-
123
-
124
# Script entry point: run the full fine-tuning pipeline when executed directly.
if __name__ == "__main__":
    main()