Spaces:
Sleeping
Sleeping
k22056537 committed on
Commit ·
f85441f
1
Parent(s): 645ffe4
refactor: single checkpoints/ dir, fix uvicorn cmd and hybrid config path
Browse files- README.md +1 -1
- evaluation/README.md +4 -4
- evaluation/feature_importance.py +0 -3
- evaluation/justify_thresholds.py +1 -1
- notebooks/xgboost.ipynb +2 -2
- ui/README.md +1 -1
- ui/pipeline.py +2 -10
README.md
CHANGED
|
@@ -40,7 +40,7 @@ cp -r dist/* static/
|
|
| 40 |
**Web application (API + frontend):**
|
| 41 |
|
| 42 |
```bash
|
| 43 |
-
uvicorn
|
| 44 |
```
|
| 45 |
|
| 46 |
Open http://localhost:7860 in a browser.
|
|
|
|
| 40 |
**Web application (API + frontend):**
|
| 41 |
|
| 42 |
```bash
|
| 43 |
+
uvicorn main:app --host 0.0.0.0 --port 7860
|
| 44 |
```
|
| 45 |
|
| 46 |
Open http://localhost:7860 in a browser.
|
evaluation/README.md
CHANGED
|
@@ -13,12 +13,12 @@ THRESHOLD_JUSTIFICATION.md # report (auto-generated by justify_thresholds)
|
|
| 13 |
feature_selection_justification.md # report (auto-generated by feature_importance)
|
| 14 |
```
|
| 15 |
|
| 16 |
-
**Logs (when present):**
|
| 17 |
```
|
| 18 |
logs/
|
| 19 |
-
├── face_orientation_training_log.json
|
| 20 |
-
βββ
|
| 21 |
-
βββ
|
| 22 |
```
|
| 23 |
|
| 24 |
## 2. Log Format
|
|
|
|
| 13 |
feature_selection_justification.md # report (auto-generated by feature_importance)
|
| 14 |
```
|
| 15 |
|
| 16 |
+
**Logs (when present):** Scripts write to `evaluation/logs/`. MLP script: `face_orientation_training_log.json`; XGBoost script/notebook: `xgboost_face_orientation_training_log.json`; MLP notebook may write `mlp_face_orientation_training_log.json`.
|
| 17 |
```
|
| 18 |
logs/
|
| 19 |
+
├── face_orientation_training_log.json # from models/mlp/train.py
|
| 20 |
+
├── xgboost_face_orientation_training_log.json
|
| 21 |
+
└── (optional) mlp_face_orientation_training_log.json # from notebooks/mlp.ipynb
|
| 22 |
```
|
| 23 |
|
| 24 |
## 2. Log Format
|
evaluation/feature_importance.py
CHANGED
|
@@ -27,9 +27,6 @@ FEATURES = SELECTED_FEATURES["face_orientation"]
|
|
| 27 |
|
| 28 |
|
| 29 |
def _resolve_xgb_path():
|
| 30 |
-
p = os.path.join(_PROJECT_ROOT, "models", "xgboost", "checkpoints", "face_orientation_best.json")
|
| 31 |
-
if os.path.isfile(p):
|
| 32 |
-
return p
|
| 33 |
return os.path.join(_PROJECT_ROOT, "checkpoints", "xgboost_face_orientation_best.json")
|
| 34 |
|
| 35 |
|
|
|
|
| 27 |
|
| 28 |
|
| 29 |
def _resolve_xgb_path():
|
|
|
|
|
|
|
|
|
|
| 30 |
return os.path.join(_PROJECT_ROOT, "checkpoints", "xgboost_face_orientation_best.json")
|
| 31 |
|
| 32 |
|
evaluation/justify_thresholds.py
CHANGED
|
@@ -927,7 +927,7 @@ def write_hybrid_config(use_xgb, best_w_mlp, best_w_xgb, config_path,
|
|
| 927 |
}
|
| 928 |
if combiner_path:
|
| 929 |
cfg["combiner"] = "logistic"
|
| 930 |
-
cfg["combiner_path"] = os.path.
|
| 931 |
with open(config_path, "w", encoding="utf-8") as f:
|
| 932 |
json.dump(cfg, f, indent=2)
|
| 933 |
print(f" Written {config_path} (use_xgb={cfg['use_xgb']}, combiner={cfg.get('combiner', 'heuristic')})")
|
|
|
|
| 927 |
}
|
| 928 |
if combiner_path:
|
| 929 |
cfg["combiner"] = "logistic"
|
| 930 |
+
cfg["combiner_path"] = os.path.basename(combiner_path)
|
| 931 |
with open(config_path, "w", encoding="utf-8") as f:
|
| 932 |
json.dump(cfg, f, indent=2)
|
| 933 |
print(f" Written {config_path} (use_xgb={cfg['use_xgb']}, combiner={cfg.get('combiner', 'heuristic')})")
|
notebooks/xgboost.ipynb
CHANGED
|
@@ -64,7 +64,7 @@
|
|
| 64 |
" \"reg_alpha\": 1.1407,\n",
|
| 65 |
" \"reg_lambda\": 2.4181,\n",
|
| 66 |
" \"eval_metric\": \"logloss\",\n",
|
| 67 |
-
" \"checkpoints_dir\": os.path.join(PROJECT_ROOT, \"
|
| 68 |
" \"logs_dir\": os.path.join(PROJECT_ROOT, \"evaluation\", \"logs\"),\n",
|
| 69 |
"}\n",
|
| 70 |
"\n",
|
|
@@ -306,7 +306,7 @@
|
|
| 306 |
"outputs": [],
|
| 307 |
"source": [
|
| 308 |
"os.makedirs(CFG[\"checkpoints_dir\"], exist_ok=True)\n",
|
| 309 |
-
"model_path = os.path.join(CFG[\"checkpoints_dir\"], f\"{CFG['model_name']}_best.json\")\n",
|
| 310 |
"model.save_model(model_path)\n",
|
| 311 |
"\n",
|
| 312 |
"history = {\n",
|
|
|
|
| 64 |
" \"reg_alpha\": 1.1407,\n",
|
| 65 |
" \"reg_lambda\": 2.4181,\n",
|
| 66 |
" \"eval_metric\": \"logloss\",\n",
|
| 67 |
+
" \"checkpoints_dir\": os.path.join(PROJECT_ROOT, \"checkpoints\"),\n",
|
| 68 |
" \"logs_dir\": os.path.join(PROJECT_ROOT, \"evaluation\", \"logs\"),\n",
|
| 69 |
"}\n",
|
| 70 |
"\n",
|
|
|
|
| 306 |
"outputs": [],
|
| 307 |
"source": [
|
| 308 |
"os.makedirs(CFG[\"checkpoints_dir\"], exist_ok=True)\n",
|
| 309 |
+
"model_path = os.path.join(CFG[\"checkpoints_dir\"], f\"xgboost_{CFG['model_name']}_best.json\")\n",
|
| 310 |
"model.save_model(model_path)\n",
|
| 311 |
"\n",
|
| 312 |
"history = {\n",
|
ui/README.md
CHANGED
|
@@ -15,7 +15,7 @@ Live camera demo and real-time inference pipeline.
|
|
| 15 |
|----------|----------|-------|--------|
|
| 16 |
| `FaceMeshPipeline` | Head pose + eye geometry | Rule-based fusion | `models/head_pose.py`, `models/eye_scorer.py` |
|
| 17 |
| `MLPPipeline` | 10 selected features | PyTorch MLP (10→64→32→2) | `checkpoints/mlp_best.pt` + `scaler_mlp.joblib` |
|
| 18 |
-
| `XGBoostPipeline` | 10 selected features | XGBoost | `
|
| 19 |
|
| 20 |
## 3. Running
|
| 21 |
|
|
|
|
| 15 |
|----------|----------|-------|--------|
|
| 16 |
| `FaceMeshPipeline` | Head pose + eye geometry | Rule-based fusion | `models/head_pose.py`, `models/eye_scorer.py` |
|
| 17 |
| `MLPPipeline` | 10 selected features | PyTorch MLP (10→64→32→2) | `checkpoints/mlp_best.pt` + `scaler_mlp.joblib` |
|
| 18 |
+
| `XGBoostPipeline` | 10 selected features | XGBoost | `checkpoints/xgboost_face_orientation_best.json` |
|
| 19 |
|
| 20 |
## 3. Running
|
| 21 |
|
ui/pipeline.py
CHANGED
|
@@ -329,9 +329,7 @@ def _load_hybrid_config(model_dir: str, config_path: str | None = None):
|
|
| 329 |
class MLPPipeline:
|
| 330 |
def __init__(self, model_dir=None, detector=None, threshold=0.23):
|
| 331 |
if model_dir is None:
|
| 332 |
-
model_dir = os.path.join(_PROJECT_ROOT, "
|
| 333 |
-
if not os.path.exists(model_dir):
|
| 334 |
-
model_dir = os.path.join(_PROJECT_ROOT, "checkpoints")
|
| 335 |
|
| 336 |
self._mlp, self._scaler, self._feature_names = _load_mlp_artifacts(model_dir)
|
| 337 |
self._indices = [FEATURE_NAMES.index(n) for n in self._feature_names]
|
|
@@ -404,9 +402,6 @@ class MLPPipeline:
|
|
| 404 |
|
| 405 |
|
| 406 |
def _resolve_xgb_path():
|
| 407 |
-
p = os.path.join(_PROJECT_ROOT, "models", "xgboost", "checkpoints", "face_orientation_best.json")
|
| 408 |
-
if os.path.isfile(p):
|
| 409 |
-
return p
|
| 410 |
return os.path.join(_PROJECT_ROOT, "checkpoints", "xgboost_face_orientation_best.json")
|
| 411 |
|
| 412 |
|
|
@@ -582,10 +577,7 @@ class XGBoostPipeline:
|
|
| 582 |
from xgboost import XGBClassifier
|
| 583 |
|
| 584 |
if model_path is None:
|
| 585 |
-
model_path = os.path.join(_PROJECT_ROOT, "
|
| 586 |
-
if not os.path.isfile(model_path):
|
| 587 |
-
# Fallback to legacy path
|
| 588 |
-
model_path = os.path.join(_PROJECT_ROOT, "checkpoints", "xgboost_face_orientation_best.json")
|
| 589 |
if not os.path.isfile(model_path):
|
| 590 |
raise FileNotFoundError(f"No XGBoost checkpoint at {model_path}")
|
| 591 |
|
|
|
|
| 329 |
class MLPPipeline:
|
| 330 |
def __init__(self, model_dir=None, detector=None, threshold=0.23):
|
| 331 |
if model_dir is None:
|
| 332 |
+
model_dir = os.path.join(_PROJECT_ROOT, "checkpoints")
|
|
|
|
|
|
|
| 333 |
|
| 334 |
self._mlp, self._scaler, self._feature_names = _load_mlp_artifacts(model_dir)
|
| 335 |
self._indices = [FEATURE_NAMES.index(n) for n in self._feature_names]
|
|
|
|
| 402 |
|
| 403 |
|
| 404 |
def _resolve_xgb_path():
|
|
|
|
|
|
|
|
|
|
| 405 |
return os.path.join(_PROJECT_ROOT, "checkpoints", "xgboost_face_orientation_best.json")
|
| 406 |
|
| 407 |
|
|
|
|
| 577 |
from xgboost import XGBClassifier
|
| 578 |
|
| 579 |
if model_path is None:
|
| 580 |
+
model_path = os.path.join(_PROJECT_ROOT, "checkpoints", "xgboost_face_orientation_best.json")
|
|
|
|
|
|
|
|
|
|
| 581 |
if not os.path.isfile(model_path):
|
| 582 |
raise FileNotFoundError(f"No XGBoost checkpoint at {model_path}")
|
| 583 |
|