DANGDOCAO committed
Commit 85af144 · verified · 1 parent: 2870377
Files changed (1)
  1. HVU_QA/generate_question.py +0 -383
HVU_QA/generate_question.py DELETED
@@ -1,383 +0,0 @@
-from __future__ import annotations
-
-import argparse
-import json
-import os
-import re
-import sys
-import threading
-from pathlib import Path
-from typing import Any
-
-os.environ.setdefault("TOKENIZERS_PARALLELISM", "false")
-os.environ.setdefault("PYTORCH_CUDA_ALLOC_CONF", "expandable_segments:True")
-
-
-def raise_missing_dependency_error(exc: ModuleNotFoundError) -> None:
-    root = Path(__file__).resolve().parent
-    requirements = root / "requirements.txt"
-    message = [
-        f"Thiếu thư viện Python: {exc.name}",
-        f"Interpreter hiện tại: {sys.executable}",
-    ]
-    if requirements.exists():
-        message.extend(
-            [
-                "Cài đặt dependencies bằng lệnh:",
-                f"{sys.executable} -m pip install -r {requirements}",
-            ]
-        )
-    raise SystemExit("\n".join(message)) from exc
-
-
-try:
-    import torch
-    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
-except ModuleNotFoundError as exc:
-    raise_missing_dependency_error(exc)
-
-
-APP_TITLE = "Mô hình sinh câu hỏi thường gặp"
-TASK_PREFIX = "sinh câu hỏi"
-QUESTION_LIMIT = 100
-GENERATION_PASSES = (
-    (0.9, 0.95, None, 1, 4),
-    (1.0, 0.97, 16, 1, 5),
-    (1.08, 0.99, 8, 2, 6),
-)
-
-
-def normalize_text(text: Any) -> str:
-    return " ".join(str(text or "").split())
-
-
-def unique_text(items: list[str]) -> list[str]:
-    seen: set[str] = set()
-    output: list[str] = []
-    for item in items:
-        value = normalize_text(item)
-        key = value.lower()
-        if key and key not in seen:
-            seen.add(key)
-            output.append(value)
-    return output
-
-
-def parse_question_count(value: Any, default: int = 5) -> int:
-    try:
-        parsed = int(value)
-    except (TypeError, ValueError):
-        parsed = default
-    return max(1, min(parsed, QUESTION_LIMIT))
-
-
-def format_questions(items: list[str]) -> str:
-    if not items:
-        return "Không sinh được câu hỏi phù hợp."
-    return "\n".join(f"{index}. {item}" for index, item in enumerate(items, 1))
-
-
-def resolve_model_dir(model_dir: str | Path, prefer_nested_model: bool = True) -> Path:
-    model_root = Path(model_dir).expanduser().resolve()
-    nested_candidates = [model_root / "best-model", model_root / "final-model"]
-    candidates = [*nested_candidates, model_root] if prefer_nested_model else [model_root, *nested_candidates]
-    for candidate in candidates:
-        if candidate.is_dir() and (candidate / "config.json").exists():
-            return candidate
-    raise FileNotFoundError(f"Không tìm thấy thư mục mô hình hợp lệ: {model_root}")
-
-
-def parse_dtype(value: str) -> torch.dtype:
-    normalized = value.strip().lower()
-    mapping = {
-        "float16": torch.float16,
-        "fp16": torch.float16,
-        "float32": torch.float32,
-        "fp32": torch.float32,
-        "bfloat16": torch.bfloat16,
-        "bf16": torch.bfloat16,
-    }
-    if normalized not in mapping:
-        raise ValueError(f"Không hỗ trợ gpu_dtype={value}")
-    return mapping[normalized]
-
-
-class QuestionGenerator:
-    def __init__(
-        self,
-        model_dir: str | Path = "t5-viet-qg-finetuned",
-        task_prefix: str = TASK_PREFIX,
-        max_source_length: int = 512,
-        max_new_tokens: int = 64,
-        device: str = "auto",
-        cpu_threads: int | None = None,
-        gpu_dtype: str = "auto",
-        prefer_nested_model: bool = True,
-    ) -> None:
-        self.model_root = Path(model_dir).expanduser().resolve()
-        self.model_dir = resolve_model_dir(model_dir, prefer_nested_model=prefer_nested_model)
-        self.task_prefix = task_prefix
-        self.max_source_length = max_source_length
-        self.max_new_tokens = max_new_tokens
-        self.requested_device = device
-        self.cpu_threads = cpu_threads
-        self.gpu_dtype = gpu_dtype
-        self.prefer_nested_model = prefer_nested_model
-        self.device: torch.device | None = None
-        self.dtype: torch.dtype | None = None
-        self.tokenizer = None
-        self.model = None
-        self._load_lock = threading.Lock()
-
-    def _resolve_device(self) -> torch.device:
-        requested = self.requested_device.lower()
-        if requested == "cpu":
-            return torch.device("cpu")
-        if requested == "cuda":
-            if not torch.cuda.is_available():
-                raise RuntimeError("Bạn đã chọn device=cuda nhưng máy hiện tại không có CUDA.")
-            return torch.device("cuda")
-        return torch.device("cuda" if torch.cuda.is_available() else "cpu")
-
-    def _resolve_dtype(self) -> torch.dtype:
-        if self.device is None or self.device.type != "cuda":
-            return torch.float32
-        if self.gpu_dtype == "auto":
-            if hasattr(torch.cuda, "is_bf16_supported") and torch.cuda.is_bf16_supported():
-                return torch.bfloat16
-            return torch.float16
-        return parse_dtype(self.gpu_dtype)
-
-    def _configure_runtime(self) -> None:
-        if self.device is None:
-            return
-        if self.device.type == "cpu":
-            if self.cpu_threads:
-                torch.set_num_threads(max(1, int(self.cpu_threads)))
-                if hasattr(torch, "set_num_interop_threads"):
-                    torch.set_num_interop_threads(max(1, min(int(self.cpu_threads), 4)))
-            return
-
-        if hasattr(torch.backends, "cuda") and hasattr(torch.backends.cuda, "matmul"):
-            torch.backends.cuda.matmul.allow_tf32 = True
-        if hasattr(torch.backends, "cudnn"):
-            torch.backends.cudnn.allow_tf32 = True
-            torch.backends.cudnn.benchmark = True
-
-    def load(self) -> None:
-        if self.model is not None and self.tokenizer is not None:
-            return
-
-        with self._load_lock:
-            if self.model is not None and self.tokenizer is not None:
-                return
-
-            self.device = self._resolve_device()
-            self.dtype = self._resolve_dtype()
-            self._configure_runtime()
-
-            model_kwargs: dict[str, Any] = {}
-            if self.device.type == "cuda":
-                model_kwargs["torch_dtype"] = self.dtype
-                model_kwargs["low_cpu_mem_usage"] = True
-
-            self.tokenizer = AutoTokenizer.from_pretrained(str(self.model_dir), use_fast=True)
-            self.model = AutoModelForSeq2SeqLM.from_pretrained(str(self.model_dir), **model_kwargs)
-            self.model.to(self.device)
-            self.model.eval()
-
-    def metadata(self) -> dict[str, Any]:
-        active_device = self.device.type if self.device is not None else None
-        predicted_device = "cuda" if torch.cuda.is_available() and self.requested_device != "cpu" else "cpu"
-        return {
-            "title": APP_TITLE,
-            "model_root": str(self.model_root),
-            "model_dir": str(self.model_dir),
-            "requested_device": self.requested_device,
-            "active_device": active_device,
-            "predicted_device": predicted_device,
-            "loaded": self.model is not None,
-            "gpu_available": torch.cuda.is_available(),
-            "gpu_dtype": None if self.dtype is None else str(self.dtype).replace("torch.", ""),
-            "cpu_threads": torch.get_num_threads(),
-        }
-
-    def _candidate_answers(self, text: str, limit: int) -> list[str]:
-        text = normalize_text(text)
-        if not text:
-            return []
-
-        candidates: list[str] = []
-        split_pattern = r"(?<=[.!?])\s+|\n+"
-        for sentence in [normalize_text(part) for part in re.split(split_pattern, text) if normalize_text(part)]:
-            if 3 <= len(sentence.split()) <= 30:
-                candidates.append(sentence)
-            for clause in (normalize_text(part) for part in re.split(r"\s*[,;:]\s*", sentence)):
-                if 3 <= len(clause.split()) <= 20:
-                    candidates.append(clause)
-
-        if not candidates:
-            words = text.split()
-            candidates = [" ".join(words[: min(12, len(words))])] if words else [text]
-
-        ranked = sorted(unique_text(candidates), key=lambda item: (abs(len(item.split()) - 10), len(item)))
-        return ranked[:limit]
-
-    def _build_prompt(self, context: str, answer: str) -> str:
-        return f"{self.task_prefix}:\nngữ cảnh: {context}\nđáp án: {answer}"
-
-    @torch.inference_mode()
-    def _sample(self, context: str, answer: str, count: int, temperature: float, top_p: float) -> list[str]:
-        if self.tokenizer is None or self.model is None or self.device is None:
-            raise RuntimeError("Model chưa được load.")
-
-        inputs = self.tokenizer(
-            self._build_prompt(context, answer),
-            return_tensors="pt",
-            truncation=True,
-            max_length=self.max_source_length,
-        ).to(self.device)
-        outputs = self.model.generate(
-            **inputs,
-            max_new_tokens=self.max_new_tokens,
-            do_sample=True,
-            temperature=temperature,
-            top_p=top_p,
-            num_return_sequences=count,
-            no_repeat_ngram_size=3,
-            repetition_penalty=1.1,
-        )
-        questions: list[str] = []
-        for token_ids in outputs:
-            question = normalize_text(self.tokenizer.decode(token_ids, skip_special_tokens=True))
-            if question:
-                questions.append(question if question.endswith("?") else f"{question}?")
-        return [question for question in unique_text(questions) if len(question.split()) >= 3]
-
-    @torch.inference_mode()
-    def _beam_search(self, context: str, answer: str, count: int) -> list[str]:
-        if self.tokenizer is None or self.model is None or self.device is None:
-            raise RuntimeError("Model chưa được load.")
-
-        inputs = self.tokenizer(
-            self._build_prompt(context, answer),
-            return_tensors="pt",
-            truncation=True,
-            max_length=self.max_source_length,
-        ).to(self.device)
-        outputs = self.model.generate(
-            **inputs,
-            max_new_tokens=self.max_new_tokens,
-            num_beams=max(4, count),
-            num_return_sequences=min(count, 4),
-            early_stopping=True,
-            no_repeat_ngram_size=3,
-            repetition_penalty=1.1,
-        )
-        questions: list[str] = []
-        for token_ids in outputs:
-            question = normalize_text(self.tokenizer.decode(token_ids, skip_special_tokens=True))
-            if question:
-                questions.append(question if question.endswith("?") else f"{question}?")
-        return [question for question in unique_text(questions) if len(question.split()) >= 3]
-
-    def generate(self, text: str, count: int = 5) -> list[str]:
-        self.load()
-        context = normalize_text(text)
-        if not context:
-            raise ValueError("Vui lòng nhập đoạn văn.")
-
-        count = parse_question_count(count)
-        pool = unique_text(
-            self._candidate_answers(context, max(32, count * 5)) + [context[:180], context[:280], context]
-        )
-        output: list[str] = []
-        seen: set[str] = set()
-
-        for temperature, top_p, limit, rounds, floor in GENERATION_PASSES:
-            answers = pool[:limit] if limit else pool
-            for _ in range(rounds):
-                for answer in answers:
-                    remaining = count - len(output)
-                    if remaining <= 0:
-                        return output[:count]
-                    sample_count = min(8, max(floor, remaining * 2))
-                    for question in self._sample(context, answer, sample_count, temperature, top_p):
-                        key = question.lower()
-                        if key not in seen:
-                            seen.add(key)
-                            output.append(question)
-                            if len(output) >= count:
-                                return output[:count]
-
-        for answer in pool[: min(8, len(pool))]:
-            remaining = count - len(output)
-            if remaining <= 0:
-                break
-            for question in self._beam_search(context, answer, remaining):
-                key = question.lower()
-                if key not in seen:
-                    seen.add(key)
-                    output.append(question)
-                    if len(output) >= count:
-                        break
-
-        return output[:count]
-
-
-def read_input_text(args: argparse.Namespace) -> str:
-    if args.text:
-        return args.text
-    if args.input_file:
-        return Path(args.input_file).read_text(encoding="utf-8")
-    if sys.stdin.isatty():
-        return input("Nhập đoạn văn cần sinh câu hỏi:\n").strip()
-    return sys.stdin.read().strip()
-
-
-def build_parser() -> argparse.ArgumentParser:
-    parser = argparse.ArgumentParser(description="Sinh câu hỏi từ đoạn văn bằng model T5 fine-tuned.")
-    parser.add_argument("--model_dir", default="t5-viet-qg-finetuned")
-    parser.add_argument("--task_prefix", default=TASK_PREFIX)
-    parser.add_argument("--max_source_length", type=int, default=512)
-    parser.add_argument("--max_new_tokens", type=int, default=64)
-    parser.add_argument("--num_questions", type=int, default=100)
-    parser.add_argument("--device", choices=["auto", "cpu", "cuda"], default="auto")
-    parser.add_argument("--cpu_threads", type=int, default=None)
-    parser.add_argument("--gpu_dtype", default="auto")
-    parser.add_argument("--text", default=None)
-    parser.add_argument("--input_file", default=None)
-    parser.add_argument("--output_format", choices=["text", "json"], default="text")
-    return parser
-
-
-def main() -> None:
-    args = build_parser().parse_args()
-    if hasattr(sys.stdout, "reconfigure"):
-        sys.stdout.reconfigure(encoding="utf-8")
-    generator = QuestionGenerator(
-        model_dir=args.model_dir,
-        task_prefix=args.task_prefix,
-        max_source_length=args.max_source_length,
-        max_new_tokens=args.max_new_tokens,
-        device=args.device,
-        cpu_threads=args.cpu_threads,
-        gpu_dtype=args.gpu_dtype,
-        prefer_nested_model=True,
-    )
-    text = read_input_text(args)
-    questions = generator.generate(text, parse_question_count(args.num_questions))
-    payload = {
-        "text": normalize_text(text),
-        "questions": questions,
-        "formatted": format_questions(questions),
-        "meta": generator.metadata(),
-    }
-    if args.output_format == "json":
-        print(json.dumps(payload, ensure_ascii=False, indent=2))
-        return
-    print(payload["formatted"])
-
-
-if __name__ == "__main__":
-    main()
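
For reference, a minimal sketch of how the deleted script was driven, reconstructed from its argparse flags and the QuestionGenerator signature above. The checkpoint directory t5-viet-qg-finetuned is the script's own default (resolve_model_dir also accepts a nested best-model/ or final-model/ checkpoint inside it); the import path and the Vietnamese input passage are illustrative assumptions, not part of the commit.

# Sketch only: programmatic use of the removed HVU_QA/generate_question.py,
# assuming the module is restored somewhere importable and a fine-tuned
# checkpoint exists at ./t5-viet-qg-finetuned (the script's default).
from generate_question import QuestionGenerator, format_questions

generator = QuestionGenerator(model_dir="t5-viet-qg-finetuned", device="auto")
# Placeholder context; any Vietnamese passage works here.
context = "Đoạn văn tiếng Việt cần sinh câu hỏi tự động."
questions = generator.generate(context, count=5)  # clamped to QUESTION_LIMIT
print(format_questions(questions))

The equivalent CLI invocation, using only flags defined in build_parser:

python HVU_QA/generate_question.py --text "Đoạn văn tiếng Việt cần sinh câu hỏi tự động." --num_questions 5 --output_format json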