DANGDOCAO committed on
Commit
6684e94
·
verified ·
1 Parent(s): 8bd6459
Files changed (2) hide show
  1. HVU_QA/backend/__init__.py +0 -3
  2. HVU_QA/backend/app.py +0 -319
HVU_QA/backend/__init__.py DELETED
@@ -1,3 +0,0 @@
1
- from .app import create_app
2
-
3
- __all__ = ["create_app"]
 
 
 
 
HVU_QA/backend/app.py DELETED
@@ -1,319 +0,0 @@
1
- from __future__ import annotations
2
-
3
- import os
4
- import time
5
- from pathlib import Path
6
-
7
- from flask import Flask, jsonify, request, send_from_directory
8
-
9
- from generate_question import (
10
- APP_TITLE,
11
- QUESTION_LIMIT,
12
- QuestionGenerator,
13
- format_questions,
14
- normalize_text,
15
- parse_question_count,
16
- resolve_model_dir,
17
- )
18
-
19
# Directory names under the project root that can never hold a model
# checkpoint, so model discovery skips them outright.
IGNORED_MODEL_DIR_NAMES = {
    ".git", ".vscode", "__pycache__",
    "backend", "frontend", "venv",
}
27
-
28
-
29
def project_root() -> Path:
    """Return the absolute project root: two levels above this file."""
    here = Path(__file__).resolve()
    return here.parents[1]
31
-
32
-
33
def build_generator(
    model_dir: str | Path | None = None,
    prefer_nested_model: bool = True,
) -> QuestionGenerator:
    """Construct a QuestionGenerator configured from ``HVU_*`` env variables.

    *model_dir*, when given, overrides the ``HVU_MODEL_DIR`` environment
    variable; a relative path is anchored at the project root.
    """
    root = project_root()
    if model_dir is not None:
        chosen_dir = Path(model_dir).expanduser()
    else:
        chosen_dir = Path(os.getenv("HVU_MODEL_DIR", str(root / "t5-viet-qg-finetuned"))).expanduser()
    if not chosen_dir.is_absolute():
        chosen_dir = root / chosen_dir

    return QuestionGenerator(
        model_dir=str(chosen_dir),
        task_prefix=os.getenv("HVU_TASK_PREFIX", "sinh câu hỏi"),
        max_source_length=int(os.getenv("HVU_MAX_SOURCE_LENGTH", "512")),
        max_new_tokens=int(os.getenv("HVU_MAX_NEW_TOKENS", "64")),
        device=os.getenv("HVU_DEVICE", "auto"),
        cpu_threads=_read_optional_int(os.getenv("HVU_CPU_THREADS")),
        gpu_dtype=os.getenv("HVU_GPU_DTYPE", "auto"),
        prefer_nested_model=prefer_nested_model,
    )
56
-
57
-
58
- def _read_optional_int(value: str | None) -> int | None:
59
- if value in (None, ""):
60
- return None
61
- return int(value)
62
-
63
-
64
- def _humanize_model_segment(value: str) -> str:
65
- normalized = value.replace("_", "-")
66
- parts: list[str] = []
67
- for part in normalized.split("-"):
68
- lowered = part.lower()
69
- if not lowered:
70
- continue
71
- if lowered in {"t5", "qg", "qa", "hvu"}:
72
- parts.append(lowered.upper())
73
- elif lowered == "seq2seq":
74
- parts.append("Seq2Seq")
75
- elif lowered == "checkpoint":
76
- parts.append("Checkpoint")
77
- elif part.isdigit():
78
- parts.append(part)
79
- else:
80
- parts.append(part.capitalize())
81
- return "-".join(parts) or "Model"
82
-
83
-
84
def _display_model_name(meta: dict[str, object]) -> str:
    """Derive a human-readable model label from generator metadata paths."""
    source = meta.get("model_root") or meta.get("model_dir") or "model"
    return _humanize_model_segment(Path(str(source)).name)
87
-
88
-
89
- def _model_label(relative_path: str | Path) -> str:
90
- path = Path(relative_path)
91
- return path.name or "model"
92
-
93
-
94
def _iter_model_candidates(root: Path):
    """Yield candidate model directories under *root*.

    A child directory is offered when it holds a ``config.json`` directly;
    its ``best-model`` / ``final-model`` subdirectories are offered as well
    when they hold one. Hidden directories and known non-model directories
    are skipped. Each item is ``{"path": Path, "prefer_nested_model": bool}``.
    """

    def _skipped(entry: Path) -> bool:
        return (
            not entry.is_dir()
            or entry.name.startswith(".")
            or entry.name in IGNORED_MODEL_DIR_NAMES
        )

    for child in sorted(root.iterdir(), key=lambda p: p.name.lower()):
        if _skipped(child):
            continue
        if (child / "config.json").exists():
            yield {"path": child, "prefer_nested_model": False}
        for nested in (child / "best-model", child / "final-model"):
            if nested.is_dir() and (nested / "config.json").exists():
                yield {"path": nested, "prefer_nested_model": False}
106
-
107
-
108
def _discover_available_models(
    root: Path,
    active_generator: QuestionGenerator | None = None,
) -> list[dict[str, str]]:
    """List model directories under *root* as UI-ready descriptor dicts.

    Each descriptor has ``id`` (root-relative POSIX path), ``label``,
    ``model_root``, ``model_dir`` and ``prefer_nested_model``. Duplicates
    (same resolved path) and candidates outside *root* are dropped. If the
    currently active generator's model is not among the discovered entries,
    it is appended so the UI can still show it as selected.
    """
    discovered: list[dict[str, str]] = []
    seen: set[str] = set()
    root = root.resolve()

    for info in _iter_model_candidates(root):
        candidate = info["path"]
        resolved = candidate.resolve()
        key = str(resolved)
        if key in seen:
            continue

        try:
            relative = resolved.relative_to(root)
        except ValueError:
            # Candidate escaped the project root (e.g. via a symlink) — skip it.
            continue

        seen.add(key)
        discovered.append(
            {
                "id": relative.as_posix(),
                "label": _model_label(relative),
                "model_root": str(resolved),
                "model_dir": str(resolve_model_dir(candidate, prefer_nested_model=False).resolve()),
                "prefer_nested_model": bool(info["prefer_nested_model"]),
            }
        )

    if active_generator is not None:
        active_root = active_generator.model_root.resolve()
        active_dir = active_generator.model_dir.resolve()
        already_listed = any(
            Path(entry["model_root"]).resolve() == active_root
            or Path(entry["model_dir"]).resolve() == active_dir
            for entry in discovered
        )
        if not already_listed:
            discovered.append(
                {
                    "id": active_root.as_posix(),
                    "label": _display_model_name(active_generator.metadata()),
                    "model_root": str(active_root),
                    "model_dir": str(active_dir),
                    "prefer_nested_model": False,
                }
            )

    return discovered
159
-
160
-
161
def _selected_model_id(
    app: Flask,
    models: list[dict[str, str]],
    active_generator: QuestionGenerator | None = None,
) -> str:
    """Return the id of the model to present as selected.

    Precedence: an explicit, still-valid ``SELECTED_MODEL_ID`` from app
    config; then the entry matching the active generator's model_dir; then
    the one matching its model_root; finally the first available model
    (empty string when there are none).
    """
    explicit = str(app.config.get("SELECTED_MODEL_ID") or "").strip()
    if explicit and any(entry["id"] == explicit for entry in models):
        return explicit

    active_generator = active_generator or _generator(app)
    active_root = active_generator.model_root.resolve()
    active_dir = active_generator.model_dir.resolve()

    # Prefer the more specific model_dir match over the model_root match.
    by_dir = next(
        (entry["id"] for entry in models if Path(entry["model_dir"]).resolve() == active_dir),
        None,
    )
    if by_dir is not None:
        return by_dir

    by_root = next(
        (entry["id"] for entry in models if Path(entry["model_root"]).resolve() == active_root),
        None,
    )
    if by_root is not None:
        return by_root

    return models[0]["id"] if models else ""
183
-
184
-
185
def _switch_generator(app: Flask, model_id: str) -> QuestionGenerator:
    """Activate the model identified by *model_id* and return its generator.

    Raises ValueError when the id does not match any discovered model.
    """
    candidates = _discover_available_models(app.config["PROJECT_ROOT"], _generator(app))
    chosen = next((entry for entry in candidates if entry["id"] == model_id), None)
    if chosen is None:
        raise ValueError("Model được chọn không hợp lệ hoặc chưa tồn tại trong thư mục dự án.")

    # Only pay for a model reload when the selection actually changes.
    if _selected_model_id(app, candidates) != model_id:
        app.config["GENERATOR"] = build_generator(
            chosen["model_root"],
            prefer_nested_model=bool(chosen.get("prefer_nested_model")),
        )

    app.config["SELECTED_MODEL_ID"] = model_id
    return _generator(app)
200
-
201
-
202
def _info_payload(app: Flask, active_generator: QuestionGenerator | None = None) -> dict[str, object]:
    """Assemble the JSON body served by ``/api/info`` (reused by ``/api/generate``)."""
    gen = active_generator or _generator(app)
    meta = gen.metadata()
    available = _discover_available_models(app.config["PROJECT_ROOT"], gen)
    selected = _selected_model_id(app, available, gen)
    # Prefer the discovered entry's label; fall back to a name derived from metadata.
    label = next(
        (entry["label"] for entry in available if entry["id"] == selected),
        _display_model_name(meta),
    )
    return {
        "ok": True,
        "title": APP_TITLE,
        "model_name": label,
        "selected_model_id": selected,
        "available_models": [{"id": entry["id"], "label": entry["label"]} for entry in available],
        "meta": meta,
    }
220
-
221
-
222
def create_app(generator: QuestionGenerator | None = None) -> Flask:
    """Application factory: serve the static frontend plus the QA JSON API.

    *generator* lets tests inject a pre-built QuestionGenerator; by default
    one is constructed from the environment via build_generator().
    """
    root = project_root()

    app = Flask(__name__, static_folder=None)
    app.json.ensure_ascii = False  # keep Vietnamese text readable in JSON responses
    app.config["GENERATOR"] = generator or build_generator()
    app.config["PROJECT_ROOT"] = root
    app.config["FRONTEND_ROOT"] = root / "frontend"
    app.config["SELECTED_MODEL_ID"] = ""

    @app.get("/")
    def index():
        # Single-page frontend entry point.
        return send_from_directory(app.config["FRONTEND_ROOT"], "index.html")

    @app.get("/frontend/<path:filename>")
    def frontend_file(filename: str):
        return send_from_directory(app.config["FRONTEND_ROOT"], filename)

    @app.get("/assets/<path:filename>")
    def asset_file(filename: str):
        # Assets are served from the project root itself.
        return send_from_directory(app.config["PROJECT_ROOT"], filename)

    @app.get("/api/info")
    def info():
        return jsonify(_info_payload(app))

    @app.post("/api/model")
    def set_model():
        body = request.get_json(silent=True) or {}
        model_id = str(body.get("model_id") or "").strip()
        if not model_id:
            return jsonify({"ok": False, "error": "Vui lòng chọn model trước khi chuyển."}), 400

        try:
            active = _switch_generator(app, model_id)
        except ValueError as exc:
            return jsonify({"ok": False, "error": str(exc)}), 404

        return jsonify(_info_payload(app, active))

    @app.post("/api/generate")
    def generate():
        body = request.get_json(silent=True) or {}

        # Optional per-request model switch before generating.
        wanted_model = str(body.get("model_id") or "").strip()
        if wanted_model:
            try:
                active = _switch_generator(app, wanted_model)
            except ValueError as exc:
                return jsonify({"ok": False, "error": str(exc)}), 400
        else:
            active = _generator(app)

        text = normalize_text(body.get("text"))
        if not text:
            return jsonify({"ok": False, "error": "Vui lòng nhập đoạn văn bản trước khi sinh câu hỏi."}), 400

        raw_count = body.get("num_questions")
        if raw_count in (None, ""):
            count = 100  # default when the client leaves the field blank
        else:
            try:
                count = int(raw_count)
            except (TypeError, ValueError):
                return jsonify({"ok": False, "error": "Số câu hỏi phải là số nguyên trong khoảng 1 đến 100."}), 400

        if not 1 <= count <= QUESTION_LIMIT:
            return jsonify({"ok": False, "error": f"Số câu hỏi phải nằm trong khoảng 1 đến {QUESTION_LIMIT}."}), 400

        started = time.perf_counter()
        try:
            questions = active.generate(text, parse_question_count(count))
        except Exception as exc:  # noqa: BLE001
            # Generation failures are reported to the client rather than crashing.
            return jsonify({"ok": False, "error": str(exc)}), 500
        elapsed_ms = round((time.perf_counter() - started) * 1000, 2)

        payload = _info_payload(app, active)
        return jsonify(
            {
                "ok": True,
                "text": text,
                "num_questions": count,
                "questions": questions,
                "formatted": format_questions(questions),
                "elapsed_ms": elapsed_ms,
                "model_name": payload["model_name"],
                "selected_model_id": payload["selected_model_id"],
                "meta": active.metadata(),
            }
        )

    return app
315
-
316
-
317
- def _generator(app: Flask) -> QuestionGenerator:
318
- generator: QuestionGenerator = app.config["GENERATOR"]
319
- return generator