DANGDOCAO committed
Commit 2870377 · verified · 1 Parent(s): 7311c7c
Files changed (1)
  1. HVU_QA/fine_tune_qg.py +0 -556
HVU_QA/fine_tune_qg.py DELETED
@@ -1,556 +0,0 @@
- from __future__ import annotations
-
- import argparse
- import json
- import os
- import subprocess
- import sys
- from importlib import metadata
- from inspect import signature
- from pathlib import Path
- from typing import Any
-
- os.environ.setdefault("TOKENIZERS_PARALLELISM", "false")
- os.environ.setdefault("PYTORCH_CUDA_ALLOC_CONF", "expandable_segments:True")
-
-
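- # Fail fast with actionable install instructions when a dependency is absent.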
- def raise_missing_dependency_error(exc: ModuleNotFoundError) -> None:
-     root = Path(__file__).resolve().parent
-     script = Path(__file__).resolve()
-     requirements = root / "requirements.txt"
-     venv_python = root / "venv" / ("Scripts/python.exe" if os.name == "nt" else "bin/python")
-     lines = [f"Missing Python library: {exc.name}", f"Current interpreter: {sys.executable}"]
-     if venv_python.exists():
-         lines.extend([f"Project venv: {venv_python}", f"Re-run with: {venv_python} {script}"])
-     if requirements.exists():
-         lines.extend(
-             [
-                 "Or install dependencies for the current interpreter with:",
-                 f"{sys.executable} -m pip install -r {requirements}",
-             ]
-         )
-     raise SystemExit("\n".join(lines)) from exc
-
-
- try:
-     import torch
-     from datasets import Dataset
-     from transformers import (
-         AutoModelForSeq2SeqLM,
-         AutoTokenizer,
-         DataCollatorForSeq2Seq,
-         EarlyStoppingCallback,
-         Seq2SeqTrainer,
-         Seq2SeqTrainingArguments,
-         set_seed,
-     )
-     from transformers.trainer_utils import get_last_checkpoint
- except ModuleNotFoundError as exc:
-     raise_missing_dependency_error(exc)
-
-
- def normalize_text(text: Any) -> str:
-     return " ".join(str(text or "").split())
-
-
- def dedupe(items) -> list[str]:
-     seen, output = set(), []
-     for item in items:
-         if item and item not in seen:
-             seen.add(item)
-             output.append(item)
-     return output
-
-
- def save_json(data: dict[str, Any], path: Path) -> None:
-     path.write_text(json.dumps(data, ensure_ascii=False, indent=2), encoding="utf-8")
-
-
- def get_installed_version(package_name: str) -> tuple[int, ...]:
-     try:
-         version = metadata.version(package_name)
-     except metadata.PackageNotFoundError:
-         return ()
-
-     parts = []
-     for chunk in version.replace("-", ".").split("."):
-         digits = "".join(ch for ch in chunk if ch.isdigit())
-         if not digits:
-             break
-         parts.append(int(digits))
-     return tuple(parts)
-
-
- def supports_data_seed() -> bool:
-     return get_installed_version("accelerate") >= (1, 1, 0)
-
-
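- # GPU preflight helpers: shell out to nvidia-smi so the memory check works
- # even before torch initializes CUDA.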
- def run_nvidia_smi(query: str) -> list[list[str]]:
-     try:
-         result = subprocess.run(
-             ["nvidia-smi", f"--query-{query}", "--format=csv,noheader,nounits"],
-             check=True,
-             capture_output=True,
-             text=True,
-         )
-     except (FileNotFoundError, subprocess.CalledProcessError):
-         return []
-
-     return [
-         [part.strip() for part in line.split(",")]
-         for line in result.stdout.strip().splitlines()
-         if line.strip()
-     ]
-
-
- def query_gpu_memory():
-     rows = run_nvidia_smi("gpu=memory.total,memory.used,memory.free")
-     if not rows or len(rows[0]) < 3:
-         return None
-     try:
-         total_mb, used_mb, free_mb = (int(value) for value in rows[0][:3])
-     except ValueError:
-         return None
-     return {"total_mb": total_mb, "used_mb": used_mb, "free_mb": free_mb}
-
-
- def query_gpu_processes() -> list[dict[str, Any]]:
-     processes = []
-     for row in run_nvidia_smi("compute-apps=pid,process_name,used_memory"):
-         if len(row) != 3:
-             continue
-         try:
-             pid = int(row[0])
-             used_memory_mb = int(row[2])
-         except ValueError:
-             continue
-         processes.append({"pid": pid, "process_name": row[1], "used_memory_mb": used_memory_mb})
-     return processes
-
-
- def format_memory_mb(memory_mb: int) -> str:
-     return f"{memory_mb} MiB ({memory_mb / 1024:.2f} GiB)"
-
-
- def active_gpu_processes() -> list[dict[str, Any]]:
-     current_pid = os.getpid()
-     return sorted(
-         [proc for proc in query_gpu_processes() if proc["pid"] != current_pid and proc["used_memory_mb"] > 0],
-         key=lambda item: item["used_memory_mb"],
-         reverse=True,
-     )
-
-
- def append_process_lines(lines: list[str], header: str, processes: list[dict[str, Any]]) -> None:
-     if not processes:
-         return
-     lines.append(header)
-     lines.extend(
-         f"- PID {proc['pid']} | {proc['process_name']} | {format_memory_mb(proc['used_memory_mb'])}"
-         for proc in processes
-     )
-
-
- def ensure_device_ready(args) -> None:
-     if args.device == "cpu":
-         return
-     if not torch.cuda.is_available():
-         if args.device == "cuda":
-             raise SystemExit("You selected --device cuda, but the current environment has no CUDA.")
-         return
-     if args.skip_gpu_preflight:
-         return
-
-     gpu_memory = query_gpu_memory()
-     if gpu_memory is None or gpu_memory["free_mb"] >= args.min_free_gpu_mb:
-         return
-
-     lines = [
-         "The GPU does not have enough free memory to start training reliably.",
-         f"GPU free: {format_memory_mb(gpu_memory['free_mb'])} / total: {format_memory_mb(gpu_memory['total_mb'])}.",
-         f"Current minimum threshold: {format_memory_mb(args.min_free_gpu_mb)}.",
-     ]
-     append_process_lines(lines, "CUDA processes currently occupying the GPU:", active_gpu_processes())
-     lines.extend(
-         [
-             "How to fix:",
-             "- Free the processes occupying the GPU, then run again.",
-             "- Or train on CPU with `python fine_tune_qg.py --device cpu`.",
-             "- If the GPU is idle but VRAM is still insufficient, try `--per_device_train_batch_size 1 --per_device_eval_batch_size 1 --gradient_accumulation_steps 16 --gradient_checkpointing`.",
-             "- If you still want to try the current GPU, add `--skip_gpu_preflight` to bypass this check.",
-         ]
-     )
-     raise SystemExit("\n".join(lines))
-
-
- def raise_cuda_oom(args) -> None:
-     gpu_memory = query_gpu_memory()
-     lines = ["Training failed due to CUDA out of memory."]
-     if gpu_memory is not None:
-         lines.append(
-             f"Current VRAM: free {format_memory_mb(gpu_memory['free_mb'])}, used {format_memory_mb(gpu_memory['used_mb'])}, total {format_memory_mb(gpu_memory['total_mb'])}."
-         )
-     append_process_lines(lines, "Other processes using the GPU:", active_gpu_processes())
-     lines.extend(
-         [
-             "Suggestions:",
-             "- Stop the other CUDA processes, then run again.",
-             f"- Or run on CPU: python fine_tune_qg.py --device cpu --output_dir {args.output_dir}-cpu",
-             "- When the GPU is free, if VRAM is still insufficient, reduce the batch size: `--per_device_train_batch_size 1 --per_device_eval_batch_size 1 --gradient_accumulation_steps 16 --gradient_checkpointing`.",
-         ]
-     )
-     raise SystemExit("\n".join(lines))
-
-
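- # Prompt construction. The Vietnamese field labels ("sinh câu hỏi" = generate
- # a question, "tiêu đề" = title, "ngữ cảnh" = context, "đáp án" = answer) are
- # part of the model's input format and must match at inference time, so they
- # are kept as-is.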
- def build_source(title: str, context: str, answer: str, task_prefix: str) -> str:
-     parts = [f"{task_prefix}:"]
-     if title:
-         parts.append(f"tiêu đề: {title}")
-     parts.extend((f"ngữ cảnh: {context}", f"đáp án: {answer}"))
-     return "\n".join(parts)
-
-
- def load_squad_qg_examples(
-     file_path: str,
-     use_all_answers: bool = True,
-     task_prefix: str = "sinh câu hỏi",
-     require_answer_in_context: bool = False,
- ) -> tuple[list[dict[str, str]], dict[str, int]]:
-     data = json.loads(Path(file_path).read_text(encoding="utf-8"))
-     examples = []
-     stats = {
-         "articles": 0,
-         "paragraphs": 0,
-         "qas": 0,
-         "examples": 0,
-         "skipped_impossible": 0,
-         "skipped_no_context": 0,
-         "skipped_no_question": 0,
-         "skipped_no_answers": 0,
-         "skipped_answer_not_in_context": 0,
-         "answers_not_in_context_but_kept": 0,
-     }
-
-     for article in data.get("data", []):
-         stats["articles"] += 1
-         title = normalize_text(article.get("title"))
-         for paragraph in article.get("paragraphs", []):
-             stats["paragraphs"] += 1
-             context = normalize_text(paragraph.get("context"))
-             if not context:
-                 stats["skipped_no_context"] += 1
-                 continue
-
-             for qa in paragraph.get("qas", []):
-                 stats["qas"] += 1
-                 question = normalize_text(qa.get("question"))
-                 if qa.get("is_impossible", False):
-                     stats["skipped_impossible"] += 1
-                     continue
-                 if not question:
-                     stats["skipped_no_question"] += 1
-                     continue
-
-                 answers = dedupe(normalize_text(answer.get("text")) for answer in qa.get("answers", []))
-                 if not answers:
-                     stats["skipped_no_answers"] += 1
-                     continue
-                 if not use_all_answers:
-                     answers = answers[:1]
-
-                 for answer in answers:
-                     in_context = answer in context
-                     if require_answer_in_context and not in_context:
-                         stats["skipped_answer_not_in_context"] += 1
-                         continue
-                     if not in_context:
-                         stats["answers_not_in_context_but_kept"] += 1
-
-                     examples.append(
-                         {
-                             "source": build_source(title, context, answer, task_prefix),
-                             "target": question,
-                         }
-                     )
-                     stats["examples"] += 1
-
-     return examples, stats
-
-
- def preprocess_function(batch, tokenizer, max_source_length: int, max_target_length: int) -> dict[str, Any]:
-     model_inputs = tokenizer(batch["source"], max_length=max_source_length, truncation=True)
-     model_inputs["labels"] = tokenizer(
-         text_target=batch["target"],
-         max_length=max_target_length,
-         truncation=True,
-     )["input_ids"]
-     return model_inputs
-
-
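- # Keep only the kwargs that the installed transformers version actually
- # accepts, so the script runs across library versions that renamed arguments.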
- def build_supported_kwargs(cls, kwargs: dict[str, Any], aliases=None) -> dict[str, Any]:
-     params = set(signature(cls.__init__).parameters)
-     aliases = aliases or {}
-     resolved = {}
-     for key, value in kwargs.items():
-         if value is None:
-             continue
-         target = key if key in params else aliases.get(key)
-         if target in params:
-             resolved[target] = value
-     return resolved
-
-
- def build_training_args(args, has_eval: bool):
-     kwargs = {
-         "output_dir": args.output_dir,
-         "overwrite_output_dir": False,
-         "learning_rate": args.learning_rate,
-         "per_device_train_batch_size": args.per_device_train_batch_size,
-         "per_device_eval_batch_size": args.per_device_eval_batch_size,
-         "gradient_accumulation_steps": args.gradient_accumulation_steps,
-         "weight_decay": args.weight_decay,
-         "num_train_epochs": args.num_train_epochs,
-         "warmup_ratio": args.warmup_ratio,
-         "logging_strategy": "steps",
-         "logging_steps": args.logging_steps,
-         "save_strategy": args.save_strategy_type,
-         "save_steps": args.save_steps if args.save_strategy_type == "steps" else None,
-         "save_total_limit": args.save_total_limit,
-         "report_to": "none",
-         "fp16": args.fp16,
-         "bf16": args.bf16,
-         "predict_with_generate": False,
-         "generation_max_length": args.max_target_length,
-         "dataloader_num_workers": args.dataloader_num_workers,
-         "dataloader_pin_memory": not args.no_pin_memory,
-         "save_only_model": args.save_only_model,
-         "restore_callback_states_from_checkpoint": args.restore_callback_states_from_checkpoint,
-         "torch_empty_cache_steps": args.torch_empty_cache_steps or None,
-         "seed": args.seed,
-         "data_seed": args.seed if supports_data_seed() else None,
-         "use_cpu": True if args.device == "cpu" else None,
-         "gradient_checkpointing": True if args.gradient_checkpointing else None,
-         "load_best_model_at_end": has_eval,
-         "metric_for_best_model": "eval_loss" if has_eval else None,
-         "greater_is_better": False if has_eval else None,
-         "eval_strategy": args.save_strategy_type if has_eval else None,
-         "eval_steps": args.eval_steps if has_eval and args.save_strategy_type == "steps" else None,
-     }
-     return Seq2SeqTrainingArguments(
-         **build_supported_kwargs(
-             Seq2SeqTrainingArguments,
-             kwargs,
-             aliases={"eval_strategy": "evaluation_strategy", "use_cpu": "no_cuda"},
-         )
-     )
-
-
- def resolve_resume_checkpoint(args):
-     if args.resume_checkpoint:
-         if not Path(args.resume_checkpoint).is_dir():
-             raise FileNotFoundError(f"resume_checkpoint not found: {args.resume_checkpoint}")
-         return args.resume_checkpoint
-     if args.resume_from_latest and Path(args.output_dir).is_dir():
-         return get_last_checkpoint(args.output_dir)
-     return None
-
-
- def validate_args(args, has_eval: bool) -> None:
-     if has_eval and args.save_strategy_type == "steps":
-         if args.eval_steps <= 0 or args.save_steps <= 0:
-             raise ValueError("save_steps and eval_steps must be > 0")
-         if args.save_steps % args.eval_steps != 0:
-             raise ValueError("save_steps must be a multiple of eval_steps")
-     if args.save_only_model and (args.resume_from_latest or args.resume_checkpoint):
-         print("Warning: with save_only_model, training cannot be fully resumed.")
-
-
- def build_parser() -> argparse.ArgumentParser:
-     parser = argparse.ArgumentParser()
-     add = parser.add_argument
-
-     add("--train_file", default="40k_train.json")
-     add("--validation_file", default=None)
-     add("--output_dir", default="t5-viet-qg-finetuned")
-     add("--model_name", default="VietAI/vit5-base")
-     add("--task_prefix", default="sinh câu hỏi")
-
-     add("--max_source_length", type=int, default=512)
-     add("--max_target_length", type=int, default=64)
-     add("--val_ratio", type=float, default=0.1)
-
-     add("--per_device_train_batch_size", type=int, default=4)
-     add("--per_device_eval_batch_size", type=int, default=4)
-     add("--gradient_accumulation_steps", type=int, default=4)
-     add("--learning_rate", type=float, default=1e-4)
-     add("--weight_decay", type=float, default=0.01)
-     add("--warmup_ratio", type=float, default=0.05)
-     add("--num_train_epochs", type=int, default=3)
-     add("--logging_steps", type=int, default=50)
-     add("--seed", type=int, default=42)
-     add("--early_stopping_patience", type=int, default=2)
-
-     add("--save_strategy_type", default="steps", choices=["steps", "epoch"])
-     add("--save_steps", type=int, default=500)
-     add("--eval_steps", type=int, default=500)
-     add("--save_total_limit", type=int, default=1)
-
-     parser.set_defaults(resume_from_latest=True)
-     add("--resume_from_latest", dest="resume_from_latest", action="store_true")
-     add("--no_resume_from_latest", dest="resume_from_latest", action="store_false")
-     add("--resume_checkpoint", default=None)
-     add("--save_only_model", action="store_true")
-     add("--restore_callback_states_from_checkpoint", action="store_true")
-
-     add("--fp16", action="store_true")
-     add("--bf16", action="store_true")
-     add("--gradient_checkpointing", action="store_true")
-     add("--dataloader_num_workers", type=int, default=0)
-     add("--no_pin_memory", action="store_true")
-     add("--torch_empty_cache_steps", type=int, default=0)
-     add("--device", default="auto", choices=["auto", "cuda", "cpu"])
-     add("--min_free_gpu_mb", type=int, default=4096)
-     add("--skip_gpu_preflight", action="store_true")
-
-     add("--use_first_answer_only", action="store_true")
-     add("--require_answer_in_context", action="store_true")
-     return parser
-
-
- def load_datasets(args):
-     load_kwargs = {
-         "use_all_answers": not args.use_first_answer_only,
-         "task_prefix": args.task_prefix,
-         "require_answer_in_context": args.require_answer_in_context,
-     }
-     train_examples, train_stats = load_squad_qg_examples(args.train_file, **load_kwargs)
-     if not train_examples:
-         raise ValueError("No valid training data left after preprocessing.")
-
-     train_dataset = Dataset.from_list(train_examples)
-     val_dataset = None
-     val_stats = None
-
-     if args.validation_file:
-         val_examples, val_stats = load_squad_qg_examples(args.validation_file, **load_kwargs)
-         if not val_examples:
-             raise ValueError("No valid validation data left after preprocessing.")
-         val_dataset = Dataset.from_list(val_examples)
-     elif args.val_ratio > 0 and len(train_dataset) > 10:
-         split = train_dataset.train_test_split(test_size=args.val_ratio, seed=args.seed)
-         train_dataset, val_dataset = split["train"], split["test"]
-
-     return train_dataset, val_dataset, train_stats, val_stats
-
-
- def tokenize_dataset(dataset, tokenizer, args):
-     return dataset.map(
-         lambda batch: preprocess_function(batch, tokenizer, args.max_source_length, args.max_target_length),
-         batched=True,
-         remove_columns=dataset.column_names,
-     )
-
-
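- # Newer transformers versions take processing_class; build_supported_kwargs
- # falls back to the legacy tokenizer argument on older versions.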
- def build_trainer(model, tokenizer, training_args, train_dataset, eval_dataset, args):
-     kwargs = {
-         "model": model,
-         "args": training_args,
-         "data_collator": DataCollatorForSeq2Seq(tokenizer=tokenizer, model=model),
-         "train_dataset": train_dataset,
-         "eval_dataset": eval_dataset,
-         "callbacks": [EarlyStoppingCallback(early_stopping_patience=args.early_stopping_patience)]
-         if eval_dataset is not None
-         else None,
-         "processing_class": tokenizer,
-     }
-     return Seq2SeqTrainer(
-         **build_supported_kwargs(Seq2SeqTrainer, kwargs, aliases={"processing_class": "tokenizer"})
-     )
-
-
- def main() -> None:
-     args = build_parser().parse_args()
-     output_dir = Path(args.output_dir)
-     output_dir.mkdir(parents=True, exist_ok=True)
-
-     set_seed(args.seed)
-     ensure_device_ready(args)
-
-     raw_train_dataset, raw_val_dataset, train_stats, val_stats = load_datasets(args)
-     has_eval = raw_val_dataset is not None
-     validate_args(args, has_eval)
-
-     tokenizer = AutoTokenizer.from_pretrained(args.model_name)
-     model = AutoModelForSeq2SeqLM.from_pretrained(args.model_name)
-
-     if args.gradient_checkpointing:
-         model.gradient_checkpointing_enable()
-         if hasattr(model.config, "use_cache"):
-             model.config.use_cache = False
-
-     tokenized_train = tokenize_dataset(raw_train_dataset, tokenizer, args)
-     tokenized_val = tokenize_dataset(raw_val_dataset, tokenizer, args) if has_eval else None
-     trainer = build_trainer(
-         model,
-         tokenizer,
-         build_training_args(args, has_eval),
-         tokenized_train,
-         tokenized_val,
-         args,
-     )
-
-     resume_checkpoint = resolve_resume_checkpoint(args)
-     try:
-         train_result = trainer.train(resume_from_checkpoint=resume_checkpoint)
-     except torch.OutOfMemoryError:
-         raise_cuda_oom(args)
-     except RuntimeError as exc:
-         if "CUDA out of memory" in str(exc):
-             raise_cuda_oom(args)
-         raise
-
-     trainer.save_state()
-
-     export_dir = output_dir / ("best-model" if has_eval else "final-model")
-     export_dir.mkdir(parents=True, exist_ok=True)
-     for path in (export_dir, output_dir):
-         trainer.save_model(str(path))
-         tokenizer.save_pretrained(str(path))
-
-     train_metrics = train_result.metrics
-     trainer.log_metrics("train", train_metrics)
-     trainer.save_metrics("train", train_metrics)
-
-     eval_metrics = None
-     if has_eval:
-         eval_metrics = trainer.evaluate(
-             max_length=args.max_target_length,
-             num_beams=4,
-             metric_key_prefix="eval",
-         )
-         trainer.log_metrics("eval", eval_metrics)
-         trainer.save_metrics("eval", eval_metrics)
-
-     save_json(
-         {
-             "base_model": args.model_name,
-             "task_prefix": args.task_prefix,
-             "output_dir": str(output_dir),
-             "export_dir": str(export_dir),
-             "train_size": len(raw_train_dataset),
-             "val_size": len(raw_val_dataset) if raw_val_dataset is not None else 0,
-             "train_stats": train_stats,
-             "val_stats": val_stats,
-             "best_model_checkpoint": trainer.state.best_model_checkpoint,
-             "best_metric": trainer.state.best_metric,
-             "resumed_from_checkpoint": resume_checkpoint,
-             "args": vars(args),
-             "train_metrics": train_metrics,
-             "eval_metrics": eval_metrics,
-         },
-         output_dir / "training_summary.json",
-     )
-
-
- if __name__ == "__main__":
-     main()
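For reference, a minimal inference sketch for a model produced by this script, assuming the default export layout (`t5-viet-qg-finetuned/best-model`) and a hypothetical title/context/answer; the prompt must mirror `build_source` exactly:

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

# Default export path written by main(); adjust if --output_dir was changed.
model_dir = "t5-viet-qg-finetuned/best-model"
tokenizer = AutoTokenizer.from_pretrained(model_dir)
model = AutoModelForSeq2SeqLM.from_pretrained(model_dir)

# Rebuild the training prompt the same way build_source does
# (hypothetical example data).
source = "\n".join([
    "sinh câu hỏi:",
    "tiêu đề: Hà Nội",
    "ngữ cảnh: Hà Nội là thủ đô của Việt Nam.",
    "đáp án: Hà Nội",
])
inputs = tokenizer(source, return_tensors="pt", truncation=True, max_length=512)
# max_length and num_beams match the script's evaluation settings.
output_ids = model.generate(**inputs, max_length=64, num_beams=4)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))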