DANGDOCAO committed on
Commit 20a473f · verified · 1 Parent(s): f3578a0

Delete HVU_QA/HVU_QA_tool.py

Files changed (1)
  1. HVU_QA/HVU_QA_tool.py +0 -2003
HVU_QA/HVU_QA_tool.py DELETED
@@ -1,2003 +0,0 @@
- from __future__ import annotations
-
- import argparse
- import fnmatch
- import importlib.util
- import os
- import shutil
- import subprocess
- import sys
- import textwrap
- from dataclasses import dataclass
- from pathlib import Path
-
- SCRIPT_ROOT = Path(__file__).resolve().parent
- IS_WINDOWS = os.name == "nt"
- TOOL_VENV_DIR = SCRIPT_ROOT / ".hvu_qa_tool_venv"
- TOOL_VENV_PYTHON = TOOL_VENV_DIR / ("Scripts/python.exe" if IS_WINDOWS else "bin/python")
-
- HF_DATASET_REPO_ID = "DANGDOCAO/GeneratingQuestions"
- HF_DATASET_REVISION = "main"
- HF_PROJECT_SUBDIR = "HVU_QA"
- HF_MODEL_SUBDIR = f"{HF_PROJECT_SUBDIR}/t5-viet-qg-finetuned"
- HF_BEST_MODEL_SUBDIR = f"{HF_MODEL_SUBDIR}/best-model"
-
- HF_HUB_REQUIREMENT = "huggingface_hub>=0.23.0,<1.0.0"
- RUNTIME_REQUIREMENTS = [
-     "Flask>=3.0.0,<4.0.0",
-     HF_HUB_REQUIREMENT,
-     "numpy>=1.26.0,<3.0.0",
-     "safetensors>=0.4.3,<1.0.0",
-     "sentencepiece>=0.2.0,<1.0.0",
-     "torch>=2.2.0,<3.0.0",
-     "transformers>=4.41.0,<5.0.0",
- ]
- LOCAL_PROJECT_MARKERS = [
-     "main.py",
-     "backend/app.py",
-     "frontend/index.html",
-     "generate_question.py",
- ]
- DEPENDENCY_IMPORTS = {
-     "Flask": "flask",
-     "numpy": "numpy",
-     "torch": "torch",
-     "transformers": "transformers",
-     "sentencepiece": "sentencepiece",
-     "safetensors": "safetensors",
-     "huggingface_hub": "huggingface_hub",
- }
- MODEL_IGNORE_PATTERNS = [
-     f"{HF_MODEL_SUBDIR}/checkpoint-*/**",
-     f"{HF_MODEL_SUBDIR}/all_results.json",
-     f"{HF_MODEL_SUBDIR}/eval_results.json",
-     f"{HF_MODEL_SUBDIR}/train_results.json",
-     f"{HF_MODEL_SUBDIR}/trainer_state.json",
-     f"{HF_MODEL_SUBDIR}/training_summary.json",
-     f"{HF_MODEL_SUBDIR}/training_args.bin",
-     f"{HF_BEST_MODEL_SUBDIR}/training_args.bin",
- ]
-
-
- @dataclass(frozen=True)
- class RuntimeContext:
-     root: Path
-     main_file: Path
-     requirements_file: Path
-     local_model_dir: Path
-     local_best_model_dir: Path
-     standalone_mode: bool
-
-
- def print_step(message: str) -> None:
-     print(f"[HVU_QA_tool] {message}")
-
-
- def module_exists(module_name: str) -> bool:
-     return importlib.util.find_spec(module_name) is not None
-
-
- def run_command(
-     command: list[str],
-     *,
-     cwd: Path | None = None,
-     env: dict[str, str] | None = None,
- ) -> None:
-     subprocess.check_call(command, cwd=str(cwd) if cwd else None, env=env)
-
-
- def is_running_in_virtualenv() -> bool:
-     return sys.prefix != getattr(sys, "base_prefix", sys.prefix) or bool(os.getenv("VIRTUAL_ENV"))
-
-
- def format_bytes(size: int) -> str:
-     units = ["B", "KB", "MB", "GB", "TB"]
-     value = float(size)
-     for unit in units:
-         if value < 1024 or unit == units[-1]:
-             if unit == "B":
-                 return f"{int(value)} {unit}"
-             return f"{value:.1f} {unit}"
-         value /= 1024
-     return f"{size} B"
-
-
- def render_progress_bar(current: int, total: int, width: int = 28) -> str:
-     if total <= 0:
-         return "[----------------------------] 0.0%"
-
-     ratio = max(0.0, min(1.0, current / total))
-     filled = int(ratio * width)
-     bar = "#" * filled + "-" * (width - filled)
-     percent = ratio * 100
-     return f"[{bar}] {percent:5.1f}%"
-
-
- def matches_any_pattern(path: str, patterns: list[str]) -> bool:
-     normalized = path.replace("\\", "/")
-     return any(fnmatch.fnmatch(normalized, pattern) for pattern in patterns)
-
-
- def build_allow_patterns(best_model_only: bool) -> list[str]:
-     if best_model_only:
-         return [f"{HF_BEST_MODEL_SUBDIR}/**"]
-     return [f"{HF_MODEL_SUBDIR}/**"]
-
-
- def has_local_project(root: Path) -> bool:
-     return all((root / marker).exists() for marker in LOCAL_PROJECT_MARKERS)
-
-
- def build_runtime_requirements_text() -> str:
-     lines = [
-         "# Runtime dependencies for standalone HVU_QA launcher.",
-         "# Nếu dùng GPU NVIDIA, hãy cài đúng bản torch theo CUDA của máy nếu cần.",
-         *RUNTIME_REQUIREMENTS,
-         "",
-     ]
-     return "\n".join(lines)
-
-
- def build_runtime_file_map() -> dict[str, str]:
-     requirements_text = build_runtime_requirements_text()
-     return {
-         "requirements.txt": requirements_text,
-         "main.py": textwrap.dedent(
-             """
-             from __future__ import annotations
-
-             import os
-             import threading
-             import webbrowser
-
-             from backend import create_app
-
-             app = create_app()
-
-
-             def _as_bool(value: str | None, default: bool) -> bool:
-                 if value is None:
-                     return default
-                 return value.strip().lower() not in {"0", "false", "no", "off"}
-
-
-             def _open_browser_later(host: str, port: int) -> None:
-                 if not _as_bool(os.getenv("HVU_OPEN_BROWSER"), True):
-                     return
-                 target_host = "127.0.0.1" if host in {"0.0.0.0", "::"} else host
-                 url = f"http://{target_host}:{port}"
-                 threading.Timer(1.2, lambda: webbrowser.open(url)).start()
-
-
-             if __name__ == "__main__":
-                 host = os.getenv("HVU_HOST", "127.0.0.1")
-                 port = int(os.getenv("HVU_PORT", "5000"))
-                 debug = _as_bool(os.getenv("HVU_DEBUG"), False)
-                 _open_browser_later(host, port)
-                 app.run(host=host, port=port, debug=debug, use_reloader=False)
-             """
-         ).strip()
-         + "\n",
-         "backend/__init__.py": 'from .app import create_app\n\n__all__ = ["create_app"]\n',
-         "backend/app.py": textwrap.dedent(
-             """
-             from __future__ import annotations
-
-             import os
-             import time
-             from pathlib import Path
-
-             from flask import Flask, jsonify, request, send_from_directory
-
-             from generate_question import (
-                 APP_TITLE,
-                 QUESTION_LIMIT,
-                 QuestionGenerator,
-                 format_questions,
-                 normalize_text,
-                 parse_question_count,
-                 resolve_model_dir,
-             )
-
-             IGNORED_MODEL_DIR_NAMES = {
-                 ".git",
-                 ".vscode",
-                 "__pycache__",
-                 "backend",
-                 "frontend",
-                 "venv",
-                 ".hvu_qa_tool_venv",
-                 "HVU_QA_runtime",
-             }
-
-
-             def project_root() -> Path:
-                 return Path(__file__).resolve().parents[1]
-
-
-             def _read_optional_int(value: str | None) -> int | None:
-                 if value in (None, ""):
-                     return None
-                 return int(value)
-
-
-             def build_generator(
-                 model_dir: str | Path | None = None,
-                 prefer_nested_model: bool = True,
-             ) -> QuestionGenerator:
-                 root = project_root()
-                 selected_model_dir = (
-                     Path(model_dir).expanduser()
-                     if model_dir is not None
-                     else Path(os.getenv("HVU_MODEL_DIR", str(root / "t5-viet-qg-finetuned"))).expanduser()
-                 )
-                 if not selected_model_dir.is_absolute():
-                     selected_model_dir = root / selected_model_dir
-
-                 return QuestionGenerator(
-                     model_dir=str(selected_model_dir),
-                     task_prefix=os.getenv("HVU_TASK_PREFIX", "sinh câu hỏi"),
-                     max_source_length=int(os.getenv("HVU_MAX_SOURCE_LENGTH", "512")),
-                     max_new_tokens=int(os.getenv("HVU_MAX_NEW_TOKENS", "64")),
-                     device=os.getenv("HVU_DEVICE", "auto"),
-                     cpu_threads=_read_optional_int(os.getenv("HVU_CPU_THREADS")),
-                     gpu_dtype=os.getenv("HVU_GPU_DTYPE", "auto"),
-                     prefer_nested_model=prefer_nested_model,
-                 )
-
-
-             def _model_label(relative_path: str | Path) -> str:
-                 path = Path(relative_path)
-                 return path.name or "model"
-
-
-             def _iter_model_candidates(root: Path):
-                 for child in sorted(root.iterdir(), key=lambda path: path.name.lower()):
-                     if not child.is_dir() or child.name.startswith(".") or child.name in IGNORED_MODEL_DIR_NAMES:
-                         continue
-
-                     if (child / "config.json").exists():
-                         yield {"path": child, "prefer_nested_model": False}
-
-                     for nested_name in ("best-model", "final-model"):
-                         nested = child / nested_name
-                         if nested.is_dir() and (nested / "config.json").exists():
-                             yield {"path": nested, "prefer_nested_model": False}
-
-
-             def _discover_available_models(
-                 root: Path,
-                 active_generator: QuestionGenerator | None = None,
-             ) -> list[dict[str, str]]:
-                 models: list[dict[str, str]] = []
-                 seen_roots: set[str] = set()
-                 root = root.resolve()
-
-                 for candidate_info in _iter_model_candidates(root):
-                     candidate = candidate_info["path"]
-                     model_key = str(candidate.resolve())
-                     if model_key in seen_roots:
-                         continue
-
-                     try:
-                         relative_candidate = candidate.resolve().relative_to(root)
-                     except ValueError:
-                         continue
-
-                     seen_roots.add(model_key)
-                     models.append(
-                         {
-                             "id": relative_candidate.as_posix(),
-                             "label": _model_label(relative_candidate),
-                             "model_root": str(candidate.resolve()),
-                             "model_dir": str(resolve_model_dir(candidate, prefer_nested_model=False).resolve()),
-                             "prefer_nested_model": bool(candidate_info["prefer_nested_model"]),
-                         }
-                     )
-
-                 if active_generator is not None:
-                     current_root = active_generator.model_root.resolve()
-                     current_dir = active_generator.model_dir.resolve()
-                     exists = any(
-                         Path(item["model_root"]).resolve() == current_root
-                         or Path(item["model_dir"]).resolve() == current_dir
-                         for item in models
-                     )
-                     if not exists:
-                         models.append(
-                             {
-                                 "id": current_root.as_posix(),
-                                 "label": current_root.name,
-                                 "model_root": str(current_root),
-                                 "model_dir": str(current_dir),
-                                 "prefer_nested_model": False,
-                             }
-                         )
-
-                 return models
-
-
-             def _selected_model_id(
-                 app: Flask,
-                 models: list[dict[str, str]],
-                 active_generator: QuestionGenerator | None = None,
-             ) -> str:
-                 explicit_selection = str(app.config.get("SELECTED_MODEL_ID") or "").strip()
-                 if explicit_selection and any(item["id"] == explicit_selection for item in models):
-                     return explicit_selection
-
-                 active_generator = active_generator or _generator(app)
-                 current_root = active_generator.model_root.resolve()
-                 current_dir = active_generator.model_dir.resolve()
-
-                 for item in models:
-                     if Path(item["model_dir"]).resolve() == current_dir:
-                         return item["id"]
-
-                 for item in models:
-                     if Path(item["model_root"]).resolve() == current_root:
-                         return item["id"]
-
-                 return models[0]["id"] if models else ""
-
-
-             def _switch_generator(app: Flask, model_id: str) -> QuestionGenerator:
-                 available_models = _discover_available_models(app.config["PROJECT_ROOT"], _generator(app))
-                 selected_model = next((item for item in available_models if item["id"] == model_id), None)
-                 if selected_model is None:
-                     raise ValueError("Model được chọn không hợp lệ hoặc chưa tồn tại trong thư mục runtime.")
-
-                 current_model_id = _selected_model_id(app, available_models)
-                 if current_model_id != model_id:
-                     app.config["GENERATOR"] = build_generator(
-                         selected_model["model_root"],
-                         prefer_nested_model=bool(selected_model.get("prefer_nested_model")),
-                     )
-
-                 app.config["SELECTED_MODEL_ID"] = model_id
-                 return _generator(app)
-
-
-             def _info_payload(app: Flask, active_generator: QuestionGenerator | None = None) -> dict[str, object]:
-                 active_generator = active_generator or _generator(app)
-                 available_models = _discover_available_models(app.config["PROJECT_ROOT"], active_generator)
-                 selected_model_id = _selected_model_id(app, available_models, active_generator)
-                 model_name = next(
-                     (item["label"] for item in available_models if item["id"] == selected_model_id),
-                     Path(active_generator.model_dir).name,
-                 )
-                 return {
-                     "ok": True,
-                     "title": APP_TITLE,
-                     "model_name": model_name,
-                     "selected_model_id": selected_model_id,
-                     "available_models": [{"id": item["id"], "label": item["label"]} for item in available_models],
-                     "meta": active_generator.metadata(),
-                 }
-
-
-             def create_app(generator: QuestionGenerator | None = None) -> Flask:
-                 root = project_root()
-                 frontend_root = root / "frontend"
-
-                 app = Flask(__name__, static_folder=None)
-                 app.json.ensure_ascii = False
-                 app.config["GENERATOR"] = generator or build_generator()
-                 app.config["PROJECT_ROOT"] = root
-                 app.config["FRONTEND_ROOT"] = frontend_root
-                 app.config["SELECTED_MODEL_ID"] = ""
-
-                 @app.get("/")
-                 def index():
-                     return send_from_directory(app.config["FRONTEND_ROOT"], "index.html")
-
-                 @app.get("/frontend/<path:filename>")
-                 def frontend_file(filename: str):
-                     return send_from_directory(app.config["FRONTEND_ROOT"], filename)
-
-                 @app.get("/api/info")
-                 def info():
-                     return jsonify(_info_payload(app))
-
-                 @app.post("/api/model")
-                 def set_model():
-                     payload = request.get_json(silent=True) or {}
-                     model_id = str(payload.get("model_id") or "").strip()
-                     if not model_id:
-                         return jsonify({"ok": False, "error": "Vui lòng chọn model trước khi chuyển."}), 400
-
-                     try:
-                         active_generator = _switch_generator(app, model_id)
-                     except ValueError as exc:
-                         return jsonify({"ok": False, "error": str(exc)}), 404
-
-                     return jsonify(_info_payload(app, active_generator))
-
-                 @app.post("/api/generate")
-                 def generate():
-                     payload = request.get_json(silent=True) or {}
-                     requested_model_id = str(payload.get("model_id") or "").strip()
-
-                     if requested_model_id:
-                         try:
-                             active_generator = _switch_generator(app, requested_model_id)
-                         except ValueError as exc:
-                             return jsonify({"ok": False, "error": str(exc)}), 400
-                     else:
-                         active_generator = _generator(app)
-
-                     text = normalize_text(payload.get("text"))
-                     if not text:
-                         return jsonify({"ok": False, "error": "Vui lòng nhập đoạn văn bản trước khi sinh câu hỏi."}), 400
-
-                     raw_count = payload.get("num_questions")
-                     if raw_count in (None, ""):
-                         count = 5
-                     else:
-                         try:
-                             count = int(raw_count)
-                         except (TypeError, ValueError):
-                             return jsonify({"ok": False, "error": "Số câu hỏi phải là số nguyên trong khoảng 1 đến 100."}), 400
-
-                     if count < 1 or count > QUESTION_LIMIT:
-                         return jsonify({"ok": False, "error": f"Số câu hỏi phải nằm trong khoảng 1 đến {QUESTION_LIMIT}."}), 400
-
-                     started = time.perf_counter()
-                     try:
-                         questions = active_generator.generate(text, parse_question_count(count))
-                     except Exception as exc:  # noqa: BLE001
-                         return jsonify({"ok": False, "error": str(exc)}), 500
-
-                     elapsed_ms = round((time.perf_counter() - started) * 1000, 2)
-                     info_payload = _info_payload(app, active_generator)
-                     return jsonify(
-                         {
-                             "ok": True,
-                             "text": text,
-                             "num_questions": count,
-                             "questions": questions,
-                             "formatted": format_questions(questions),
-                             "elapsed_ms": elapsed_ms,
-                             "model_name": info_payload["model_name"],
-                             "selected_model_id": info_payload["selected_model_id"],
-                             "meta": active_generator.metadata(),
-                         }
-                     )
-
-                 return app
-
-
-             def _generator(app: Flask) -> QuestionGenerator:
-                 generator: QuestionGenerator = app.config["GENERATOR"]
-                 return generator
-             """
-         ).strip()
-         + "\n",
-         "generate_question.py": textwrap.dedent(
-             """
-             from __future__ import annotations
-
-             import argparse
-             import json
-             import os
-             import re
-             import sys
-             import threading
-             from pathlib import Path
-             from typing import Any
-
-             os.environ.setdefault("TOKENIZERS_PARALLELISM", "false")
-             os.environ.setdefault("PYTORCH_CUDA_ALLOC_CONF", "expandable_segments:True")
-
-
-             def raise_missing_dependency_error(exc: ModuleNotFoundError) -> None:
-                 root = Path(__file__).resolve().parent
-                 requirements = root / "requirements.txt"
-                 message = [
-                     f"Thiếu thư viện Python: {exc.name}",
-                     f"Interpreter hiện tại: {sys.executable}",
-                 ]
-                 if requirements.exists():
-                     message.extend(
-                         [
-                             "Cài đặt dependencies bằng lệnh:",
-                             f"{sys.executable} -m pip install -r {requirements}",
-                         ]
-                     )
-                 raise SystemExit("\\n".join(message)) from exc
-
-
-             try:
-                 import torch
-                 from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
-             except ModuleNotFoundError as exc:
-                 raise_missing_dependency_error(exc)
-
-
-             APP_TITLE = "HVU_QA Tool - Sinh câu hỏi"
-             TASK_PREFIX = "sinh câu hỏi"
-             QUESTION_LIMIT = 100
-             GENERATION_PASSES = (
-                 (0.9, 0.95, 1, 4),
-                 (1.0, 0.97, 1, 5),
-                 (1.08, 0.99, 2, 6),
-             )
-
-
-             def normalize_text(text: Any) -> str:
-                 return " ".join(str(text or "").split())
-
-
-             def unique_text(items: list[str]) -> list[str]:
-                 seen: set[str] = set()
-                 output: list[str] = []
-                 for item in items:
-                     value = normalize_text(item)
-                     key = value.lower()
-                     if key and key not in seen:
-                         seen.add(key)
-                         output.append(value)
-                 return output
-
-
-             def parse_question_count(value: Any, default: int = 5) -> int:
-                 try:
-                     parsed = int(value)
-                 except (TypeError, ValueError):
-                     parsed = default
-                 return max(1, min(parsed, QUESTION_LIMIT))
-
-
-             def format_questions(items: list[str]) -> str:
-                 if not items:
-                     return "Không sinh được câu hỏi phù hợp."
-                 return "\\n".join(f"{index}. {item}" for index, item in enumerate(items, 1))
-
-
-             def resolve_model_dir(model_dir: str | Path, prefer_nested_model: bool = True) -> Path:
-                 model_root = Path(model_dir).expanduser().resolve()
-                 nested_candidates = [model_root / "best-model", model_root / "final-model"]
-                 candidates = [*nested_candidates, model_root] if prefer_nested_model else [model_root, *nested_candidates]
-                 for candidate in candidates:
-                     if candidate.is_dir() and (candidate / "config.json").exists():
-                         return candidate
-                 raise FileNotFoundError(f"Không tìm thấy thư mục mô hình hợp lệ: {model_root}")
-
-
-             def parse_dtype(value: str) -> torch.dtype:
-                 normalized = value.strip().lower()
-                 mapping = {
-                     "float16": torch.float16,
-                     "fp16": torch.float16,
-                     "float32": torch.float32,
-                     "fp32": torch.float32,
-                     "bfloat16": torch.bfloat16,
-                     "bf16": torch.bfloat16,
-                 }
-                 if normalized not in mapping:
-                     raise ValueError(f"Không hỗ trợ gpu_dtype={value}")
-                 return mapping[normalized]
-
-
-             class QuestionGenerator:
-                 def __init__(
-                     self,
-                     model_dir: str | Path = "t5-viet-qg-finetuned",
-                     task_prefix: str = TASK_PREFIX,
-                     max_source_length: int = 512,
-                     max_new_tokens: int = 64,
-                     device: str = "auto",
-                     cpu_threads: int | None = None,
-                     gpu_dtype: str = "auto",
-                     prefer_nested_model: bool = True,
-                 ) -> None:
-                     self.model_root = Path(model_dir).expanduser().resolve()
-                     self.model_dir = resolve_model_dir(model_dir, prefer_nested_model=prefer_nested_model)
-                     self.task_prefix = task_prefix
-                     self.max_source_length = max_source_length
-                     self.max_new_tokens = max_new_tokens
-                     self.requested_device = device
-                     self.cpu_threads = cpu_threads
-                     self.gpu_dtype = gpu_dtype
-                     self.device: torch.device | None = None
-                     self.dtype: torch.dtype | None = None
-                     self.tokenizer = None
-                     self.model = None
-                     self._load_lock = threading.Lock()
-
-                 def _resolve_device(self) -> torch.device:
-                     requested = self.requested_device.lower()
-                     if requested == "cpu":
-                         return torch.device("cpu")
-                     if requested == "cuda":
-                         if not torch.cuda.is_available():
-                             raise RuntimeError("Bạn đã chọn device=cuda nhưng máy hiện tại không có CUDA.")
-                         return torch.device("cuda")
-                     return torch.device("cuda" if torch.cuda.is_available() else "cpu")
-
-                 def _resolve_dtype(self) -> torch.dtype:
-                     if self.device is None or self.device.type != "cuda":
-                         return torch.float32
-                     if self.gpu_dtype == "auto":
-                         if hasattr(torch.cuda, "is_bf16_supported") and torch.cuda.is_bf16_supported():
-                             return torch.bfloat16
-                         return torch.float16
-                     return parse_dtype(self.gpu_dtype)
-
-                 def _configure_runtime(self) -> None:
-                     if self.device is None:
-                         return
-                     if self.device.type == "cpu":
-                         if self.cpu_threads:
-                             torch.set_num_threads(max(1, int(self.cpu_threads)))
-                             if hasattr(torch, "set_num_interop_threads"):
-                                 torch.set_num_interop_threads(max(1, min(int(self.cpu_threads), 4)))
-                         return
-
-                     if hasattr(torch.backends, "cuda") and hasattr(torch.backends.cuda, "matmul"):
-                         torch.backends.cuda.matmul.allow_tf32 = True
-                     if hasattr(torch.backends, "cudnn"):
-                         torch.backends.cudnn.allow_tf32 = True
-                         torch.backends.cudnn.benchmark = True
-
-                 def load(self) -> None:
-                     if self.model is not None and self.tokenizer is not None:
-                         return
-
-                     with self._load_lock:
-                         if self.model is not None and self.tokenizer is not None:
-                             return
-
-                         self.device = self._resolve_device()
-                         self.dtype = self._resolve_dtype()
-                         self._configure_runtime()
-
-                         model_kwargs: dict[str, Any] = {}
-                         if self.device.type == "cuda":
-                             model_kwargs["torch_dtype"] = self.dtype
-                             model_kwargs["low_cpu_mem_usage"] = True
-
-                         self.tokenizer = AutoTokenizer.from_pretrained(str(self.model_dir), use_fast=True)
-                         self.model = AutoModelForSeq2SeqLM.from_pretrained(str(self.model_dir), **model_kwargs)
-                         self.model.to(self.device)
-                         self.model.eval()
-
-                 def metadata(self) -> dict[str, Any]:
-                     active_device = self.device.type if self.device is not None else None
-                     predicted_device = "cuda" if torch.cuda.is_available() and self.requested_device != "cpu" else "cpu"
-                     return {
-                         "title": APP_TITLE,
-                         "model_root": str(self.model_root),
-                         "model_dir": str(self.model_dir),
-                         "requested_device": self.requested_device,
-                         "active_device": active_device,
-                         "predicted_device": predicted_device,
-                         "loaded": self.model is not None,
-                         "gpu_available": torch.cuda.is_available(),
-                         "gpu_dtype": None if self.dtype is None else str(self.dtype).replace("torch.", ""),
-                         "cpu_threads": torch.get_num_threads(),
-                     }
-
-                 def _candidate_answers(self, text: str, limit: int) -> list[str]:
-                     text = normalize_text(text)
-                     if not text:
-                         return []
-
-                     candidates: list[str] = []
-                     split_pattern = r"(?<=[.!?])\\s+|\\n+"
-                     for sentence in [normalize_text(part) for part in re.split(split_pattern, text) if normalize_text(part)]:
-                         if 3 <= len(sentence.split()) <= 30:
-                             candidates.append(sentence)
-                         for clause in (normalize_text(part) for part in re.split(r"\\s*[,;:]\\s*", sentence)):
-                             if 3 <= len(clause.split()) <= 20:
-                                 candidates.append(clause)
-
-                     if not candidates:
-                         words = text.split()
-                         candidates = [" ".join(words[: min(12, len(words))])] if words else [text]
-
-                     ranked = sorted(unique_text(candidates), key=lambda item: (abs(len(item.split()) - 10), len(item)))
-                     return ranked[:limit]
-
-                 def _build_prompt(self, context: str, answer: str) -> str:
-                     return f"{self.task_prefix}:\\nngữ cảnh: {context}\\nđáp án: {answer}"
-
-                 @torch.inference_mode()
-                 def _sample(self, context: str, answer: str, count: int, temperature: float, top_p: float) -> list[str]:
-                     if self.tokenizer is None or self.model is None or self.device is None:
-                         raise RuntimeError("Model chưa được load.")
-
-                     inputs = self.tokenizer(
-                         self._build_prompt(context, answer),
-                         return_tensors="pt",
-                         truncation=True,
-                         max_length=self.max_source_length,
-                     ).to(self.device)
-                     outputs = self.model.generate(
-                         **inputs,
-                         max_new_tokens=self.max_new_tokens,
-                         do_sample=True,
-                         temperature=temperature,
-                         top_p=top_p,
-                         num_return_sequences=max(1, min(count, 6)),
-                         no_repeat_ngram_size=3,
-                         repetition_penalty=1.1,
-                     )
-                     questions: list[str] = []
-                     for token_ids in outputs:
-                         question = normalize_text(self.tokenizer.decode(token_ids, skip_special_tokens=True))
-                         if question:
-                             questions.append(question if question.endswith("?") else f"{question}?")
-                     return [question for question in unique_text(questions) if len(question.split()) >= 3]
-
-                 def generate(self, text: str, num_questions: int = 5) -> list[str]:
-                     clean_text = normalize_text(text)
-                     requested_count = parse_question_count(num_questions)
-                     if not clean_text:
-                         return []
-
-                     self.load()
-                     answers = self._candidate_answers(clean_text, limit=max(requested_count * 3, 8))
-                     questions: list[str] = []
-
-                     for temperature, top_p, candidate_step, sample_count in GENERATION_PASSES:
-                         for index, answer in enumerate(answers):
-                             generated = self._sample(
-                                 clean_text,
-                                 answer,
-                                 count=min(sample_count + requested_count, requested_count + 2),
-                                 temperature=temperature,
-                                 top_p=top_p,
-                             )
-                             questions.extend(generated)
-                             questions = unique_text(questions)
-                             if len(questions) >= requested_count:
-                                 return questions[:requested_count]
-                             if candidate_step and (index + 1) % candidate_step == 0 and len(questions) >= requested_count:
-                                 return questions[:requested_count]
-
-                     return questions[:requested_count]
-
-
-             def _read_text_from_args(args: argparse.Namespace) -> str:
-                 if args.text:
-                     return normalize_text(args.text)
-                 if args.input_file:
-                     return normalize_text(Path(args.input_file).read_text(encoding="utf-8"))
-                 raise SystemExit("Vui lòng truyền --text hoặc --input_file.")
-
-
-             def build_parser() -> argparse.ArgumentParser:
-                 parser = argparse.ArgumentParser(description="Sinh câu hỏi từ một đoạn văn bản bằng model T5 tiếng Việt.")
-                 parser.add_argument("--text", help="Đoạn văn bản đầu vào.")
-                 parser.add_argument("--input_file", help="Đọc đoạn văn bản từ file UTF-8.")
-                 parser.add_argument("--num_questions", type=int, default=5, help="Số câu hỏi cần sinh.")
-                 parser.add_argument("--model_dir", default=os.getenv("HVU_MODEL_DIR", "t5-viet-qg-finetuned"))
-                 parser.add_argument("--task_prefix", default=os.getenv("HVU_TASK_PREFIX", TASK_PREFIX))
-                 parser.add_argument("--device", default=os.getenv("HVU_DEVICE", "auto"), choices=["auto", "cpu", "cuda"])
-                 parser.add_argument("--cpu_threads", type=int, default=None)
-                 parser.add_argument("--gpu_dtype", default=os.getenv("HVU_GPU_DTYPE", "auto"))
-                 parser.add_argument("--max_source_length", type=int, default=int(os.getenv("HVU_MAX_SOURCE_LENGTH", "512")))
-                 parser.add_argument("--max_new_tokens", type=int, default=int(os.getenv("HVU_MAX_NEW_TOKENS", "64")))
-                 parser.add_argument("--output_format", choices=["text", "json"], default="text")
-                 return parser
-
-
-             def main() -> int:
-                 if hasattr(sys.stdout, "reconfigure"):
-                     sys.stdout.reconfigure(encoding="utf-8")
-                 if hasattr(sys.stderr, "reconfigure"):
-                     sys.stderr.reconfigure(encoding="utf-8")
-
-                 args = build_parser().parse_args()
-                 text = _read_text_from_args(args)
-                 generator = QuestionGenerator(
-                     model_dir=args.model_dir,
-                     task_prefix=args.task_prefix,
-                     max_source_length=args.max_source_length,
-                     max_new_tokens=args.max_new_tokens,
-                     device=args.device,
-                     cpu_threads=args.cpu_threads,
-                     gpu_dtype=args.gpu_dtype,
-                 )
-                 questions = generator.generate(text, args.num_questions)
-                 payload = {
-                     "ok": True,
-                     "text": text,
-                     "num_questions": parse_question_count(args.num_questions),
-                     "questions": questions,
-                     "formatted": format_questions(questions),
-                     "meta": generator.metadata(),
-                 }
-
-                 if args.output_format == "json":
-                     print(json.dumps(payload, ensure_ascii=False, indent=2))
-                 else:
-                     print(payload["formatted"])
-                 return 0
-
-
-             if __name__ == "__main__":
-                 raise SystemExit(main())
-             """
-         ).strip()
-         + "\n",
-         "frontend/index.html": textwrap.dedent(
-             """
-             <!doctype html>
-             <html lang="vi">
-             <head>
-               <meta charset="utf-8">
-               <meta name="viewport" content="width=device-width, initial-scale=1">
-               <title>HVU_QA Tool</title>
-               <link rel="stylesheet" href="/frontend/style.css">
-             </head>
-             <body>
-               <div class="page-shell">
-                 <header class="hero">
-                   <span class="hero-badge">HVU_QA Tool</span>
-                   <h1>Sinh câu hỏi từ văn bản</h1>
-                   <p>Launcher nhẹ dành cho người dùng cuối. Chỉ cần một file tool để dựng runtime, tải model và chạy ứng dụng.</p>
-                 </header>
-
-                 <div class="layout">
-                   <aside class="sidebar">
-                     <section class="panel">
-                       <div class="panel-heading">
-                         <h2>Trạng thái model</h2>
-                         <span id="readyBadge" class="badge badge-soft">Đang tải</span>
-                       </div>
-
-                       <label class="field-label" for="modelSelect">Model đang dùng</label>
-                       <select id="modelSelect" class="select-field"></select>
-
-                       <dl class="status-list">
-                         <div>
-                           <dt>Tên hiển thị</dt>
-                           <dd id="modelName">-</dd>
-                         </div>
-                         <div>
-                           <dt>Thiết bị</dt>
-                           <dd id="deviceStatus">-</dd>
-                         </div>
-                         <div>
-                           <dt>Trạng thái nạp</dt>
-                           <dd id="loadedStatus">-</dd>
-                         </div>
-                       </dl>
-                     </section>
-
-                     <section class="panel">
-                       <div class="panel-heading">
-                         <h2>Ví dụ mẫu</h2>
-                       </div>
-                       <p class="panel-hint">Bấm vào một văn bản luật mẫu để chèn nhanh nội dung thử nghiệm.</p>
-                       <div id="sampleList" class="sample-list"></div>
-                     </section>
-                   </aside>
-
-                   <main class="main-panel">
-                     <section class="composer panel">
-                       <label class="field-label" for="sourceText">Đoạn văn bản đầu vào</label>
-                       <textarea id="sourceText" class="text-input" placeholder="Nhập đoạn văn bản ..."></textarea>
-
-                       <div class="composer-footer">
-                         <div class="count-field">
-                           <span class="field-label">Số câu hỏi</span>
-                           <div class="count-controls">
-                             <button id="decreaseCount" type="button" class="count-button">-</button>
-                             <input id="questionCount" class="count-input" type="number" min="1" max="100" value="5">
-                             <button id="increaseCount" type="button" class="count-button">+</button>
-                           </div>
-                         </div>
-
-                         <button id="generateButton" type="button" class="primary-button">
-                           <span id="generateButtonText">Sinh câu hỏi</span>
-                         </button>
-                       </div>
-
-                       <p id="formMessage" class="form-message"></p>
-                     </section>
-
-                     <section id="resultPanel" class="result-panel panel">
-                       <div id="resultPlaceholder" class="result-placeholder">
-                         Nhập văn bản và nhấn <strong>Sinh câu hỏi</strong> để xem kết quả.
-                       </div>
-
-                       <div id="resultContent" class="result-content hidden">
-                         <div class="result-header">
-                           <div>
-                             <h2>Kết quả sinh câu hỏi</h2>
-                             <p id="resultStats" class="result-stats"></p>
-                           </div>
-                           <button id="copyButton" type="button" class="secondary-button">Sao chép</button>
-                         </div>
-
-                         <ol id="resultList" class="result-list"></ol>
-                         <pre id="formattedOutput" class="formatted-output"></pre>
-                       </div>
-                     </section>
-                   </main>
-                 </div>
-               </div>
-
-               <script src="/frontend/app.js"></script>
-             </body>
-             </html>
-             """
-         ).strip()
-         + "\n",
-         "frontend/app.js": textwrap.dedent(
-             """
-             const sampleTexts = [
-               {
-                 title: 'Luật Giáo dục đại học',
-                 text: 'Cơ sở giáo dục đại học có nhiệm vụ tổ chức đào tạo, nghiên cứu khoa học, chuyển giao công nghệ và phục vụ cộng đồng theo quy định của pháp luật.'
-               },
-               {
-                 title: 'Bộ luật Lao động',
-                 text: 'Người lao động là người làm việc cho người sử dụng lao động theo thỏa thuận, được trả lương và chịu sự quản lý, điều hành, giám sát của người sử dụng lao động.'
-               },
-               {
-                 title: 'Luật An toàn thông tin mạng',
-                 text: 'An toàn thông tin mạng là sự bảo vệ thông tin, hệ thống thông tin trên mạng khỏi bị truy nhập, sử dụng, tiết lộ, gián đoạn, sửa đổi hoặc phá hoại trái phép.'
-               }
-             ];
-
-             const state = {
-               info: null,
-               loading: false,
-               count: 5,
-               lastFormatted: ''
-             };
-
-             const elements = {
-               modelSelect: document.getElementById('modelSelect'),
-               readyBadge: document.getElementById('readyBadge'),
-               modelName: document.getElementById('modelName'),
-               deviceStatus: document.getElementById('deviceStatus'),
-               loadedStatus: document.getElementById('loadedStatus'),
-               sampleList: document.getElementById('sampleList'),
-               sourceText: document.getElementById('sourceText'),
-               decreaseCount: document.getElementById('decreaseCount'),
-               increaseCount: document.getElementById('increaseCount'),
-               questionCount: document.getElementById('questionCount'),
-               generateButton: document.getElementById('generateButton'),
-               generateButtonText: document.getElementById('generateButtonText'),
-               formMessage: document.getElementById('formMessage'),
-               resultPanel: document.getElementById('resultPanel'),
-               resultPlaceholder: document.getElementById('resultPlaceholder'),
-               resultContent: document.getElementById('resultContent'),
-               resultStats: document.getElementById('resultStats'),
-               resultList: document.getElementById('resultList'),
-               formattedOutput: document.getElementById('formattedOutput'),
-               copyButton: document.getElementById('copyButton')
-             };
-
-             function normalizeCount(value) {
-               const parsed = Number.parseInt(value, 10);
-               if (Number.isNaN(parsed)) {
-                 return 1;
-               }
-               return Math.max(1, Math.min(100, parsed));
-             }
-
-             function setCount(value) {
-               state.count = normalizeCount(value);
-               elements.questionCount.value = String(state.count);
-             }
-
-             function setMessage(text, tone = 'muted') {
-               elements.formMessage.textContent = text || '';
-               elements.formMessage.dataset.tone = tone;
-             }
-
-             function setLoading(loading) {
-               state.loading = loading;
-               elements.generateButton.disabled = loading;
-               elements.modelSelect.disabled = loading;
-               elements.generateButtonText.textContent = loading ? 'Đang xử lý...' : 'Sinh câu hỏi';
-               elements.readyBadge.textContent = loading ? 'Đang chạy' : 'Sẵn sàng';
-               elements.readyBadge.classList.toggle('badge-busy', loading);
-             }
-
-             async function fetchJson(url, options = {}) {
-               const response = await fetch(url, options);
-               const payload = await response.json().catch(() => ({}));
-               if (!response.ok || payload.ok === false) {
-                 throw new Error(payload.error || `Yêu cầu thất bại (${response.status})`);
-               }
-               return payload;
-             }
-
-             function renderSamples() {
-               elements.sampleList.innerHTML = '';
-               sampleTexts.forEach((sample) => {
-                 const button = document.createElement('button');
-                 button.type = 'button';
-                 button.className = 'sample-card';
-                 button.innerHTML = `<strong>${sample.title}</strong><span>${sample.text}</span>`;
-                 button.addEventListener('click', () => {
-                   elements.sourceText.value = sample.text;
-                   setMessage(`Đã chèn mẫu: ${sample.title}`, 'muted');
-                   elements.sourceText.focus();
-                 });
-                 elements.sampleList.appendChild(button);
-               });
-             }
-
-             function renderInfo(info) {
-               state.info = info;
-               const models = Array.isArray(info.available_models) ? info.available_models : [];
-               const selectedId = info.selected_model_id || models[0]?.id || '';
-
-               elements.modelSelect.innerHTML = '';
-               if (!models.length) {
-                 const option = document.createElement('option');
-                 option.value = '';
-                 option.textContent = 'Không có model khả dụng';
-                 elements.modelSelect.appendChild(option);
-               } else {
-                 models.forEach((model) => {
-                   const option = document.createElement('option');
-                   option.value = model.id;
-                   option.textContent = model.label;
-                   elements.modelSelect.appendChild(option);
-                 });
-                 elements.modelSelect.value = selectedId;
-               }
-
-               const meta = info.meta || {};
-               elements.modelName.textContent = info.model_name || '-';
-               elements.deviceStatus.textContent = meta.active_device
-                 ? meta.active_device.toUpperCase()
-                 : (meta.predicted_device ? `Dự đoán: ${String(meta.predicted_device).toUpperCase()}` : '-');
-               elements.loadedStatus.textContent = meta.loaded ? 'Đã nạp' : 'Chưa nạp';
-               elements.readyBadge.textContent = 'Sẵn sàng';
-               elements.readyBadge.classList.remove('badge-busy');
-             }
-
-             function renderResult(result) {
-               const questions = Array.isArray(result.questions) ? result.questions : [];
-               elements.resultPlaceholder.classList.add('hidden');
-               elements.resultContent.classList.remove('hidden');
-               elements.resultList.innerHTML = '';
-
-               questions.forEach((question) => {
-                 const item = document.createElement('li');
-                 item.textContent = question;
-                 elements.resultList.appendChild(item);
-               });
-
-               state.lastFormatted = result.formatted || '';
-               elements.formattedOutput.textContent = state.lastFormatted;
-               elements.resultStats.textContent = `${questions.length} câu hỏi • ${result.model_name || 'Không rõ model'} • ${result.elapsed_ms || 0} ms`;
-             }
-
-             async function loadInfo() {
-               const info = await fetchJson('/api/info');
-               renderInfo(info);
-               setMessage('Sẵn sàng để sinh câu hỏi.', 'muted');
-             }
-
-             async function changeModel() {
-               const modelId = elements.modelSelect.value;
-               if (!modelId) {
-                 return;
-               }
-               setLoading(true);
-               setMessage('Đang chuyển model...', 'muted');
-               try {
-                 const info = await fetchJson('/api/model', {
-                   method: 'POST',
-                   headers: { 'Content-Type': 'application/json' },
-                   body: JSON.stringify({ model_id: modelId })
-                 });
-                 renderInfo(info);
-                 setMessage(`Đã chuyển sang model: ${info.model_name}`, 'muted');
-               } catch (error) {
-                 setMessage(error.message, 'error');
-               } finally {
-                 setLoading(false);
-               }
-             }
-
-             async function generateQuestions() {
-               const text = elements.sourceText.value.trim();
-               if (!text) {
-                 setMessage('Vui lòng nhập đoạn văn bản trước khi sinh câu hỏi.', 'error');
-                 elements.sourceText.focus();
-                 return;
-               }
-
-               setLoading(true);
-               setMessage('Đang sinh câu hỏi từ nội dung đã nhập...', 'muted');
-
-               try {
-                 const payload = await fetchJson('/api/generate', {
-                   method: 'POST',
-                   headers: { 'Content-Type': 'application/json' },
-                   body: JSON.stringify({
-                     text,
-                     num_questions: state.count,
-                     model_id: elements.modelSelect.value || undefined
-                   })
-                 });
-                 renderResult(payload);
-                 setMessage(`Đã sinh xong ${payload.questions.length} câu hỏi.`, 'muted');
-               } catch (error) {
-                 setMessage(error.message, 'error');
-               } finally {
-                 setLoading(false);
-               }
-             }
-
-             async function copyOutput() {
-               if (!state.lastFormatted) {
-                 setMessage('Chưa có nội dung để sao chép.', 'error');
-                 return;
-               }
-
-               try {
-                 await navigator.clipboard.writeText(state.lastFormatted);
-                 setMessage('Đã sao chép kết quả vào clipboard.', 'muted');
-               } catch (error) {
-                 setMessage('Không thể sao chép tự động. Hãy sao chép thủ công.', 'error');
-               }
-             }
-
-             function bindEvents() {
-               elements.decreaseCount.addEventListener('click', () => setCount(state.count - 1));
-               elements.increaseCount.addEventListener('click', () => setCount(state.count + 1));
-               elements.questionCount.addEventListener('change', (event) => setCount(event.target.value));
-               elements.modelSelect.addEventListener('change', changeModel);
-               elements.generateButton.addEventListener('click', generateQuestions);
-               elements.copyButton.addEventListener('click', copyOutput);
-             }
-
-             async function init() {
-               renderSamples();
-               setCount(5);
-               bindEvents();
-               try {
-                 await loadInfo();
-               } catch (error) {
-                 setMessage(error.message || 'Không thể kết nối backend.', 'error');
-                 elements.readyBadge.textContent = 'Lỗi';
-               }
-             }
-
-             document.addEventListener('DOMContentLoaded', init);
-             """
-         ).strip()
-         + "\n",
-         "frontend/style.css": textwrap.dedent(
-             """
-             :root {
-               --bg-start: #f8f5ff;
-               --bg-end: #eef4ff;
-               --panel: rgba(255, 255, 255, 0.82);
-               --border: rgba(103, 102, 181, 0.18);
-               --text: #23244d;
-               --muted: #6c6d9a;
-               --primary-start: #6b73ff;
-               --primary-end: #d96ba2;
-               --shadow: 0 22px 60px rgba(52, 56, 121, 0.14);
-             }
-
-             * {
-               box-sizing: border-box;
-             }
-
-             body {
-               margin: 0;
-               min-height: 100vh;
-               font-family: "Be Vietnam Pro", "Segoe UI", sans-serif;
-               color: var(--text);
-               background:
-                 radial-gradient(circle at top left, rgba(123, 135, 255, 0.14), transparent 28%),
-                 radial-gradient(circle at bottom right, rgba(217, 107, 162, 0.18), transparent 25%),
-                 linear-gradient(135deg, var(--bg-start), var(--bg-end));
-             }
-
-             button,
-             input,
-             textarea,
-             select {
-               font: inherit;
-             }
-
-             .page-shell {
-               width: min(1200px, calc(100% - 32px));
-               margin: 24px auto;
-             }
-
-             .hero {
-               padding: 32px;
-               border: 1px solid var(--border);
-               border-radius: 28px;
-               background: var(--panel);
-               box-shadow: var(--shadow);
-               backdrop-filter: blur(18px);
-             }
-
-             .hero-badge {
-               display: inline-flex;
-               padding: 8px 14px;
-               border-radius: 999px;
-               background: rgba(107, 115, 255, 0.12);
-               color: #5058d9;
-               font-size: 13px;
-               font-weight: 700;
-               letter-spacing: 0.04em;
-               text-transform: uppercase;
-             }
-
-             .hero h1 {
-               margin: 18px 0 10px;
-               font-size: clamp(34px, 5vw, 56px);
-               line-height: 1.04;
-             }
-
-             .hero p {
-               margin: 0;
-               max-width: 760px;
-               color: var(--muted);
-               font-size: 18px;
-               line-height: 1.65;
-             }
-
-             .layout {
-               display: grid;
-               grid-template-columns: 320px minmax(0, 1fr);
-               gap: 20px;
-               margin-top: 20px;
-             }
-
-             .panel {
-               border: 1px solid var(--border);
-               border-radius: 24px;
-               background: var(--panel);
-               box-shadow: var(--shadow);
-               backdrop-filter: blur(18px);
-             }
-
-             .sidebar,
-             .main-panel {
-               display: grid;
-               gap: 20px;
-               align-content: start;
-             }
-
-             .panel-heading {
-               display: flex;
-               align-items: center;
-               justify-content: space-between;
-               gap: 12px;
-               margin-bottom: 16px;
-             }
-
-             .panel h2 {
-               margin: 0;
-               font-size: 18px;
-             }
-
-             .sidebar .panel,
-             .composer,
-             .result-panel {
-               padding: 22px;
-             }
-
-             .badge {
-               display: inline-flex;
-               align-items: center;
-               justify-content: center;
-               min-width: 92px;
-               padding: 8px 12px;
-               border-radius: 999px;
-               font-size: 13px;
-               font-weight: 700;
-             }
-
-             .badge-soft {
-               background: rgba(39, 179, 112, 0.14);
-               color: #218b59;
-             }
-
-             .badge-busy {
-               background: rgba(238, 160, 59, 0.16);
-               color: #b86a00;
-             }
-
-             .field-label {
-               display: inline-block;
-               margin-bottom: 10px;
-               color: var(--muted);
-               font-size: 13px;
-               font-weight: 700;
-               letter-spacing: 0.02em;
-             }
-
-             .select-field,
-             .text-input,
-             .count-input {
-               width: 100%;
-               border: 1px solid rgba(103, 102, 181, 0.14);
-               border-radius: 18px;
-               background: rgba(255, 255, 255, 0.92);
-               color: var(--text);
-             }
-
-             .select-field {
-               min-height: 52px;
-               padding: 0 16px;
-             }
-
-             .status-list {
-               display: grid;
-               gap: 14px;
-               margin: 18px 0 0;
-             }
-
-             .status-list div {
-               padding: 14px 16px;
-               border-radius: 18px;
-               background: rgba(104, 109, 208, 0.07);
-             }
-
-             .status-list dt {
-               margin: 0 0 6px;
-               color: var(--muted);
-               font-size: 12px;
-               font-weight: 700;
-               text-transform: uppercase;
-               letter-spacing: 0.04em;
-             }
-
-             .status-list dd {
-               margin: 0;
-               font-size: 15px;
-               font-weight: 600;
-               word-break: break-word;
-             }
-
-             .panel-hint {
-               margin: 0 0 14px;
-               color: var(--muted);
-               line-height: 1.6;
-             }
-
-             .sample-list {
-               display: grid;
-               gap: 12px;
-             }
-
-             .sample-card {
-               display: grid;
-               gap: 8px;
-               width: 100%;
-               padding: 16px;
-               border: 1px solid rgba(103, 102, 181, 0.14);
-               border-radius: 18px;
-               background: rgba(255, 255, 255, 0.92);
-               text-align: left;
-               color: var(--text);
-               cursor: pointer;
-               transition: transform 0.18s ease, border-color 0.18s ease, box-shadow 0.18s ease;
-             }
-
-             .sample-card:hover {
-               transform: translateY(-2px);
-               border-color: rgba(86, 98, 218, 0.32);
-               box-shadow: 0 16px 30px rgba(61, 70, 154, 0.12);
-             }
-
-             .sample-card span {
-               color: var(--muted);
-               line-height: 1.55;
-             }
-
-             .text-input {
-               min-height: 250px;
-               padding: 18px 20px;
-               resize: vertical;
-               line-height: 1.7;
-             }
-
-             .composer-footer {
-               display: flex;
-               align-items: end;
-               justify-content: space-between;
-               gap: 18px;
-               margin-top: 18px;
-             }
-
-             .count-field {
-               min-width: 230px;
-             }
-
-             .count-controls {
-               display: grid;
-               grid-template-columns: 48px 92px 48px;
-               gap: 10px;
-               align-items: center;
-             }
-
-             .count-button,
-             .secondary-button {
-               min-height: 48px;
-               border: 1px solid rgba(103, 102, 181, 0.16);
-               border-radius: 16px;
-               background: rgba(255, 255, 255, 0.92);
-               color: var(--text);
-               cursor: pointer;
-             }
-
-             .count-button {
-               font-size: 22px;
-               font-weight: 700;
-             }
-
-             .count-input {
-               min-height: 48px;
-               padding: 0 12px;
-               text-align: center;
-               font-weight: 700;
-             }
-
-             .primary-button {
-               min-width: 220px;
-               min-height: 56px;
-               padding: 0 24px;
-               border: none;
-               border-radius: 18px;
-               background: linear-gradient(135deg, var(--primary-start), var(--primary-end));
-               color: white;
-               font-size: 16px;
-               font-weight: 800;
-               cursor: pointer;
-               box-shadow: 0 18px 34px rgba(95, 105, 220, 0.24);
-             }
-
-             .primary-button:disabled,
-             .secondary-button:disabled {
-               cursor: not-allowed;
-               opacity: 0.7;
-             }
-
-             .form-message {
-               min-height: 22px;
-               margin: 14px 0 0;
-               color: var(--muted);
-             }
-
-             .form-message[data-tone="error"] {
-               color: #c33b5f;
-             }
-
-             .result-panel {
-               min-height: 320px;
-             }
-
-             .result-placeholder {
-               display: grid;
-               place-items: center;
-               min-height: 260px;
-               padding: 24px;
-               border: 1px dashed rgba(103, 102, 181, 0.24);
-               border-radius: 20px;
-               color: var(--muted);
-               text-align: center;
-               line-height: 1.7;
-             }
-
-             .result-content.hidden,
-             .result-placeholder.hidden {
-               display: none;
-             }
-
-             .result-header {
-               display: flex;
-               align-items: start;
-               justify-content: space-between;
-               gap: 16px;
-               margin-bottom: 18px;
-             }
-
-             .result-header h2 {
-               margin: 0 0 8px;
-             }
-
-             .result-stats {
-               margin: 0;
-               color: var(--muted);
-             }
-
-             .result-list {
-               margin: 0;
-               padding-left: 20px;
-               display: grid;
-               gap: 12px;
-               line-height: 1.65;
-             }
-
-             .formatted-output {
-               margin: 20px 0 0;
-               padding: 18px;
-               border-radius: 18px;
-               background: rgba(104, 109, 208, 0.07);
-               white-space: pre-wrap;
-               word-break: break-word;
-               line-height: 1.65;
-             }
-
-             @media (max-width: 980px) {
-               .layout {
-                 grid-template-columns: 1fr;
-               }
-             }
-
-             @media (max-width: 640px) {
-               .page-shell {
-                 width: min(100% - 16px, 1000px);
-                 margin: 16px auto;
-               }
-
-               .hero,
-               .sidebar .panel,
-               .composer,
-               .result-panel {
-                 padding: 18px;
-               }
-
-               .composer-footer,
-               .result-header {
-                 flex-direction: column;
-                 align-items: stretch;
-               }
-
-               .count-field,
-               .primary-button,
-               .secondary-button {
-                 width: 100%;
-               }
-             }
-             """
-         ).strip()
-         + "\n",
-     }
1569
-
1570
-
1571
- def sync_text_file(destination_file: Path, content: str, force_write: bool) -> bool:
1572
- destination_file.parent.mkdir(parents=True, exist_ok=True)
1573
- if destination_file.exists() and not force_write:
1574
- current = destination_file.read_text(encoding="utf-8")
1575
- if current == content:
1576
- return False
1577
- destination_file.write_text(content, encoding="utf-8")
1578
- return True
1579
-
1580
-
1581
- def materialize_standalone_runtime(runtime_root: Path, force_refresh: bool) -> None:
1582
- runtime_files = build_runtime_file_map()
1583
- created = 0
1584
- reused = 0
1585
-
1586
- for relative_path, content in runtime_files.items():
1587
- destination = runtime_root / relative_path
1588
- if sync_text_file(destination, content, force_write=force_refresh):
1589
- created += 1
1590
- else:
1591
- reused += 1
1592
-
1593
- print_step(
1594
- f"Đã chuẩn bị runtime standalone tại {runtime_root}. "
1595
- f"File mới/cập nhật: {created}, file giữ nguyên: {reused}."
1596
- )
1597
-
1598
-
1599
- def resolve_runtime_context(args: argparse.Namespace) -> RuntimeContext:
1600
- use_local_project = has_local_project(SCRIPT_ROOT) and not args.force_standalone_runtime
1601
- if use_local_project:
1602
- runtime_root = SCRIPT_ROOT
1603
- standalone_mode = False
1604
- else:
1605
- requested_runtime_dir = Path(args.runtime_dir).expanduser()
1606
- if not requested_runtime_dir.is_absolute():
1607
- requested_runtime_dir = SCRIPT_ROOT / requested_runtime_dir
1608
- runtime_root = requested_runtime_dir.resolve()
1609
- standalone_mode = True
1610
- materialize_standalone_runtime(runtime_root, force_refresh=args.force_runtime_refresh)
1611
-
1612
- context = RuntimeContext(
1613
- root=runtime_root,
1614
- main_file=runtime_root / "main.py",
1615
- requirements_file=runtime_root / "requirements.txt",
1616
- local_model_dir=runtime_root / "t5-viet-qg-finetuned",
1617
- local_best_model_dir=runtime_root / "t5-viet-qg-finetuned" / "best-model",
1618
- standalone_mode=standalone_mode,
1619
- )
1620
- mode_label = "standalone" if standalone_mode else "full project"
1621
- print_step(f"Runtime mode: {mode_label}")
1622
- print_step(f"Runtime root: {context.root}")
1623
- return context
1624
-
1625
-
1626
- def maybe_bootstrap_tool_venv(args: argparse.Namespace) -> int | None:
1627
- if args.no_venv or is_running_in_virtualenv():
1628
- return None
1629
-
1630
- if not TOOL_VENV_PYTHON.exists():
1631
- print_step("Không phát hiện virtualenv hiện tại. Đang tạo môi trường riêng cho launcher...")
1632
- run_command([sys.executable, "-m", "venv", str(TOOL_VENV_DIR)], cwd=SCRIPT_ROOT)
1633
- run_command([str(TOOL_VENV_PYTHON), "-m", "pip", "install", "--upgrade", "pip"], cwd=SCRIPT_ROOT)
1634
-
1635
- relaunch_env = os.environ.copy()
1636
- relaunch_env["HVU_QA_TOOL_BOOTSTRAPPED"] = "1"
1637
- relaunch_command = [str(TOOL_VENV_PYTHON), str(Path(__file__).resolve()), *sys.argv[1:]]
1638
-
1639
- print_step("Đang chuyển sang môi trường Python riêng của launcher...")
1640
- return subprocess.call(relaunch_command, cwd=str(SCRIPT_ROOT), env=relaunch_env)
1641
-
1642
-
1643
- def ensure_huggingface_hub(skip_install: bool, context: RuntimeContext) -> None:
1644
- if module_exists("huggingface_hub"):
1645
- return
1646
-
1647
- if skip_install:
1648
- install_hint = (
1649
- f"{sys.executable} -m pip install {HF_HUB_REQUIREMENT}"
1650
- if not context.requirements_file.exists()
1651
- else f"{sys.executable} -m pip install -r {context.requirements_file}"
1652
- )
1653
- raise RuntimeError(
1654
- "Thiếu huggingface_hub. Hãy chạy "
1655
- f"`{install_hint}` hoặc bỏ `--skip-install`."
1656
- )
1657
-
1658
- print_step("Thiếu huggingface_hub. Đang cài tự động...")
1659
- if context.requirements_file.exists():
1660
- run_command([sys.executable, "-m", "pip", "install", "-r", str(context.requirements_file)], cwd=context.root)
1661
- else:
1662
- run_command([sys.executable, "-m", "pip", "install", HF_HUB_REQUIREMENT], cwd=context.root)
1663
-
1664
-
1665
- def find_missing_dependencies() -> list[str]:
1666
- missing: list[str] = []
1667
- for package_name, module_name in DEPENDENCY_IMPORTS.items():
1668
- if not module_exists(module_name):
1669
- missing.append(package_name)
1670
- return missing
1671
-
1672
-
1673
- def ensure_runtime_dependencies(skip_install: bool, context: RuntimeContext) -> None:
1674
- missing = find_missing_dependencies()
1675
- if not missing:
1676
- print_step("Môi trường Python đã có đủ dependency cần thiết.")
1677
- return
1678
-
1679
- if skip_install:
1680
- missing_text = ", ".join(missing)
1681
- install_hint = (
1682
- f"{sys.executable} -m pip install -r {context.requirements_file}"
1683
- if context.requirements_file.exists()
1684
- else f"{sys.executable} -m pip install {' '.join(RUNTIME_REQUIREMENTS)}"
1685
- )
1686
- raise RuntimeError(
1687
- f"Thiếu dependency: {missing_text}. "
1688
- f"Hãy chạy `{install_hint}` hoặc bỏ `--skip-install`."
1689
- )
1690
-
1691
- if context.requirements_file.exists():
1692
- print_step(f"Đang cài dependency còn thiếu: {', '.join(missing)}")
1693
- run_command([sys.executable, "-m", "pip", "install", "-r", str(context.requirements_file)], cwd=context.root)
1694
- return
1695
-
1696
- print_step(f"Đang cài dependency runtime còn thiếu: {', '.join(missing)}")
1697
- run_command([sys.executable, "-m", "pip", "install", *RUNTIME_REQUIREMENTS], cwd=context.root)
1698
-
1699
-
1700
- def select_repo_files(repo_files: list[str], best_model_only: bool) -> list[str]:
1701
- allow_patterns = build_allow_patterns(best_model_only)
1702
- selected: list[str] = []
1703
-
1704
- for repo_file in repo_files:
1705
- normalized = repo_file.replace("\\", "/")
1706
- if not matches_any_pattern(normalized, allow_patterns):
1707
- continue
1708
- if matches_any_pattern(normalized, MODEL_IGNORE_PATTERNS):
1709
- continue
1710
- selected.append(normalized)
1711
-
1712
- return sorted(selected)
1713
-
1714
-
1715
- def get_target_destination(context: RuntimeContext, repo_file: str) -> Path:
1716
- relative_path = Path(repo_file).relative_to(HF_MODEL_SUBDIR)
1717
- return context.local_model_dir / relative_path
-
-
- def resolve_repo_files(repo_id: str, revision: str, best_model_only: bool) -> list[dict[str, int | str | None]]:
-     from huggingface_hub import HfApi
-
-     api = HfApi()
-     repo_files = api.list_repo_tree(repo_id=repo_id, repo_type="dataset", revision=revision, recursive=True)
-
-     file_entries: list[str] = []
-     size_map: dict[str, int | None] = {}
-     for entry in repo_files:
-         entry_path = str(getattr(entry, "path", "")).replace("\\", "/")
-         if not entry_path or entry_path.endswith("/"):
-             continue
-         file_entries.append(entry_path)
-         size_map[entry_path] = getattr(entry, "size", None)
-
-     selected_paths = select_repo_files(file_entries, best_model_only=best_model_only)
-     if not selected_paths:
-         scope = "best-model" if best_model_only else "model"
-         raise FileNotFoundError(
-             f"No valid {scope} files were found in repo {repo_id}@{revision}. "
-             "Check the repo layout on Hugging Face."
-         )
-
-     return [{"path": path, "size": size_map.get(path)} for path in selected_paths]
-
-
- def sync_single_file(source_file: Path, destination_file: Path, force_copy: bool) -> tuple[bool, int]:
-     destination_file.parent.mkdir(parents=True, exist_ok=True)
-     size = source_file.stat().st_size
-
-     if (
-         destination_file.exists()
-         and not force_copy
-         and destination_file.stat().st_size == size
-     ):
-         return False, size
-
-     shutil.copy2(source_file, destination_file)
-     return True, size
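- # Note: the skip check above compares sizes only, not content hashes, which is
- # cheap and usually sufficient for model artifacts. A stricter variant (a
- # sketch, not used here) could also require the destination to be newer:
- #
- #     up_to_date = (destination_file.exists()
- #                   and destination_file.stat().st_size == size
- #                   and destination_file.stat().st_mtime >= source_file.stat().st_mtime)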
-
-
- def download_and_sync_model(
-     context: RuntimeContext,
-     repo_id: str,
-     revision: str,
-     force_download: bool,
-     best_model_only: bool,
- ) -> tuple[int, int, int, int]:
-     from huggingface_hub import hf_hub_download
-
-     repo_files = resolve_repo_files(repo_id=repo_id, revision=revision, best_model_only=best_model_only)
-     total_files = len(repo_files)
-     total_bytes = sum(int(item["size"] or 0) for item in repo_files)
-
-     copied_files = 0
-     skipped_files = 0
-     copied_bytes = 0
-     skipped_bytes = 0
-     processed_bytes = 0
-     download_scope = "best-model" if best_model_only else "the full model"
-
-     print_step(f"Found {total_files} files to sync for {download_scope}.")
-
-     for index, repo_item in enumerate(repo_files, start=1):
-         repo_file = str(repo_item["path"])
-         destination_path = get_target_destination(context, repo_file)
-         relative_label = destination_path.relative_to(context.root).as_posix()
-         print_step(f"[{index}/{total_files}] Downloading {relative_label}")
-
-         cached_file = hf_hub_download(
-             repo_id=repo_id,
-             repo_type="dataset",
-             revision=revision,
-             filename=repo_file,
-             force_download=force_download,
-             local_files_only=False,
-         )
-
-         copied, size = sync_single_file(Path(cached_file), destination_path, force_copy=force_download)
-         if copied:
-             copied_files += 1
-             copied_bytes += size
-             print_step(f" Synced {relative_label} ({format_bytes(size)})")
-         else:
-             skipped_files += 1
-             skipped_bytes += size
-             print_step(f" Kept {relative_label} ({format_bytes(size)})")
-
-         processed_bytes += size
-         # Listing sizes can be missing or stale; never let progress exceed 100%.
-         if processed_bytes > total_bytes:
-             total_bytes = processed_bytes
-
-         print_step(
-             " Overall progress "
-             f"{render_progress_bar(processed_bytes, total_bytes)} "
-             f"({format_bytes(processed_bytes)}/{format_bytes(total_bytes)})"
-         )
-
-     return copied_files, skipped_files, copied_bytes, skipped_bytes
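- # Illustrative console output for a two-file best-model sync (file names and
- # sizes are made up; each file is also followed by an overall-progress line):
- #
- #     [HVU_QA_tool] Found 2 files to sync for best-model.
- #     [HVU_QA_tool] [1/2] Downloading t5-viet-qg-finetuned/best-model/config.json
- #     [HVU_QA_tool]  Synced t5-viet-qg-finetuned/best-model/config.json (1.5 KB)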
-
-
- def required_model_files(context: RuntimeContext, best_model_only: bool) -> list[Path]:
-     model_dir = context.local_best_model_dir if best_model_only else context.local_model_dir
-
-     return [
-         model_dir / "config.json",
-         model_dir / "generation_config.json",
-         model_dir / "model.safetensors",
-         model_dir / "tokenizer_config.json",
-         model_dir / "special_tokens_map.json",
-         model_dir / "spiece.model",
-     ]
-
-
- def validate_local_model_dir(context: RuntimeContext, best_model_only: bool) -> None:
-     missing_files = [
-         str(path.relative_to(context.root))
-         for path in required_model_files(context, best_model_only)
-         if not path.exists()
-     ]
-     if missing_files:
-         raise FileNotFoundError(
-             "The model is incomplete after download. Missing files: " + ", ".join(missing_files)
-         )
-
-
- def prepare_model(
-     context: RuntimeContext,
-     repo_id: str,
-     revision: str,
-     force_download: bool,
-     skip_download: bool,
-     best_model_only: bool,
- ) -> None:
-     if skip_download:
-         print_step("Skipping the model download as requested by `--skip-download`.")
-         validate_local_model_dir(context, best_model_only=best_model_only)
-         return
-
-     copied_files, skipped_files, copied_bytes, skipped_bytes = download_and_sync_model(
-         context=context,
-         repo_id=repo_id,
-         revision=revision,
-         force_download=force_download,
-         best_model_only=best_model_only,
-     )
-     validate_local_model_dir(context, best_model_only=best_model_only)
-
-     scope = "best-model" if best_model_only else "the full model"
-     print_step(
-         f"Finished syncing {scope}. "
-         f"New/updated files: {copied_files} ({format_bytes(copied_bytes)}), "
-         f"unchanged files: {skipped_files} ({format_bytes(skipped_bytes)})."
-     )
-
-
- def build_runtime_env(context: RuntimeContext, args: argparse.Namespace) -> dict[str, str]:
-     env = os.environ.copy()
-
-     if args.host:
-         env["HVU_HOST"] = args.host
-     if args.port is not None:
-         env["HVU_PORT"] = str(args.port)
-     if args.device:
-         env["HVU_DEVICE"] = args.device
-     if args.debug:
-         env["HVU_DEBUG"] = "1"
-     if args.no_browser:
-         env["HVU_OPEN_BROWSER"] = "0"
-
-     env["HVU_MODEL_DIR"] = str(context.local_model_dir)
-     return env
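- # The resulting flag-to-environment contract (the reading side lives in the
- # app entry point, not shown here):
- #
- #     --host X     -> HVU_HOST=X
- #     --port N     -> HVU_PORT=N
- #     --device D   -> HVU_DEVICE=D
- #     --debug      -> HVU_DEBUG=1
- #     --no-browser -> HVU_OPEN_BROWSER=0
- #     (always)     -> HVU_MODEL_DIR=<local model dir>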
-
-
- def launch_app(context: RuntimeContext, args: argparse.Namespace) -> int:
-     if not context.main_file.exists():
-         raise FileNotFoundError(f"Application entry file not found: {context.main_file}")
-
-     env = build_runtime_env(context, args)
-     command = [sys.executable, str(context.main_file)]
-
-     print_step("Starting the web application...")
-     print_step(
-         "Open your browser at "
-         f"http://{env.get('HVU_HOST', '127.0.0.1')}:{env.get('HVU_PORT', '5000')}"
-     )
-     return subprocess.call(command, cwd=str(context.root), env=env)
-
-
- def build_parser() -> argparse.ArgumentParser:
-     parser = argparse.ArgumentParser(
-         description=(
-             "Launcher for HVU_QA: runs the full project when started inside the repo, "
-             "or builds a standalone runtime when only HVU_QA_tool.py is available."
-         ),
-     )
-     parser.add_argument("--repo-id", default=HF_DATASET_REPO_ID, help="Dataset repo on Hugging Face.")
-     parser.add_argument("--revision", default=HF_DATASET_REVISION, help="Revision on Hugging Face.")
-     parser.add_argument("--host", default=None, help="Host for the Flask server. Defaults to HVU_HOST or 127.0.0.1.")
-     parser.add_argument("--port", type=int, default=None, help="Port for the Flask server. Defaults to HVU_PORT or 5000.")
-     parser.add_argument(
-         "--device",
-         choices=["auto", "cpu", "cuda"],
-         default=None,
-         help="Device to run the model on. Defaults to HVU_DEVICE or auto.",
-     )
-     parser.add_argument("--debug", action="store_true", help="Enable Flask debug mode.")
-     parser.add_argument("--no-browser", action="store_true", help="Do not open the browser automatically.")
-     parser.add_argument("--no-venv", action="store_true", help="Do not create a dedicated virtualenv for the launcher.")
-     parser.add_argument("--force-download", action="store_true", help="Re-download the model and overwrite local files.")
-     parser.add_argument(
-         "--best-model-only",
-         action="store_true",
-         help="Download only the best-model directory. Works only when the repo actually contains a best-model.",
-     )
-     parser.add_argument("--skip-download", action="store_true", help="Skip downloading the model from Hugging Face.")
-     parser.add_argument("--skip-install", action="store_true", help="Do not auto-install missing dependencies.")
-     parser.add_argument("--skip-run", action="store_true", help="Only prepare the environment and model; do not run the app.")
-     parser.add_argument(
-         "--runtime-dir",
-         default="HVU_QA_runtime",
-         help="Standalone runtime directory, created when the full project is absent or standalone mode is forced.",
-     )
-     parser.add_argument(
-         "--force-standalone-runtime",
-         action="store_true",
-         help="Always build the standalone runtime, even when running inside the full project.",
-     )
-     parser.add_argument(
-         "--force-runtime-refresh",
-         action="store_true",
-         help="Overwrite the standalone runtime files embedded in the launcher.",
-     )
-     parser.add_argument(
-         "--prepare-runtime-only",
-         action="store_true",
-         help="Only build the standalone runtime or verify the current full project; do not install dependencies or download the model.",
-     )
-     return parser
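- # Typical invocations (an illustrative shell session):
- #
- #     python HVU_QA_tool.py                                  # full bootstrap, then run the app
- #     python HVU_QA_tool.py --best-model-only --skip-run     # fetch only best-model, do not launch
- #     python HVU_QA_tool.py --skip-download --skip-install   # offline re-run with an existing setup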
-
-
- def main() -> int:
-     # Force UTF-8 output so step messages render correctly on Windows consoles.
-     if hasattr(sys.stdout, "reconfigure"):
-         sys.stdout.reconfigure(encoding="utf-8")
-     if hasattr(sys.stderr, "reconfigure"):
-         sys.stderr.reconfigure(encoding="utf-8")
-
-     parser = build_parser()
-     args = parser.parse_args()
-
-     bootstrap_exit_code = maybe_bootstrap_tool_venv(args)
-     if bootstrap_exit_code is not None:
-         return bootstrap_exit_code
-
-     print_step("Starting preparation of the HVU_QA project...")
-     context = resolve_runtime_context(args)
-
-     if args.prepare_runtime_only:
-         print_step("Runtime prepared. Skipping the remaining steps per `--prepare-runtime-only`.")
-         return 0
-
-     ensure_huggingface_hub(skip_install=args.skip_install, context=context)
-     prepare_model(
-         context=context,
-         repo_id=args.repo_id,
-         revision=args.revision,
-         force_download=args.force_download,
-         skip_download=args.skip_download,
-         best_model_only=args.best_model_only,
-     )
-     ensure_runtime_dependencies(skip_install=args.skip_install, context=context)
-
-     if args.skip_run:
-         print_step("Model and dependencies are ready. Skipping the app launch per `--skip-run`.")
-         return 0
-
-     return launch_app(context, args)
-
-
- if __name__ == "__main__":
-     raise SystemExit(main())