k22056537 committed
Commit · a75bb5a · Parent(s): e557410

feat: UI nav, onboarding, L2CS weights path + torch.load; trim dev files
- Home nav: drop avatar, add Home; simplify home CTAs
- Focus onboarding + pipeline/L2CS fixes; history clear in Help only
- Ignore coverage; Docker log info; remove dead FocusPage, test_data, fetch_sweep
- .gitignore +4 -0
- Dockerfile +1 -1
- data/README.md +24 -3
- evaluation/run_channel_ablation_only.py +0 -63
- main.py +158 -140
- models/L2CS-Net/l2cs/datasets.py +156 -157
- models/L2CS-Net/l2cs/pipeline.py +59 -17
- models/gaze_calibration.py +27 -1
- models/gaze_eye_fusion.py +30 -13
- models/xgboost/fetch_sweep_results.py +0 -46
- package-lock.json +0 -9
- public/test_data.json +0 -112
- src/App.css +412 -19
- src/App.jsx +24 -30
- src/components/CalibrationOverlay.jsx +55 -53
- src/components/Customise.jsx +1 -79
- src/components/FocusPage.jsx +0 -264
- src/components/FocusPageLocal.jsx +178 -162
- src/components/Help.jsx +40 -2
- src/components/Home.jsx +3 -111
- src/utils/VideoManagerLocal.js +178 -124
- tests/test_gaze_pipeline.py +363 -0
- ui/pipeline.py +92 -15
.gitignore
CHANGED

@@ -35,6 +35,10 @@ build/
 Thumbs.db
 ignore/
 
+# Coverage / caches
+.coverage
+htmlcov/
+
 # Project specific
 focus_guard.db
 test_focus_guard.db
Dockerfile
CHANGED

@@ -31,4 +31,4 @@ RUN mkdir -p /app/data && chown -R user:user /app
 USER user
 EXPOSE 7860
 
-CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860", "--log-level", "… (old level truncated in this view)
+CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860", "--log-level", "info"]
data/README.md
CHANGED

@@ -1,11 +1,32 @@
 # data/
 
-… (old intro truncated in this view)
-
-Webcam + overlay … (old line truncated in this view)
+## Layout
+
+One directory per contributor: `collected_<name>/` with one or more `.npz` files per session.
+`collect_features.py` appends timestamped files when someone records again (e.g. `collected_Kexin/` has two sessions).
+
+Each `.npz` holds:
+
+- `features` — N×17 (training uses **10** of these for the `face_orientation` set; see `data_preparation/`)
+- `labels` — 0 = unfocused, 1 = focused (live key presses while recording)
+- `feature_names` — names for all 17 columns
+
+## What we have (pooled)
+
+Roughly **144.8k** samples from **10** `.npz` sessions across **9** people. Session sizes vary a lot (~8.7k–17.6k samples), so the pool isn’t one uniform block — different setups, days, and recording lengths.
+
+| Aspect | Snapshot |
+|--------|----------|
+| **Labels** | ~55.8k unfocused / ~89.0k focused (~39% / ~61%) |
+| **Temporal mix** | Hundreds of focus ↔ unfocus **transitions** in the pooled timeline (not one long stuck label) |
+| **Signals** | Same 10 inference features as in production: head deviation, face/eye scores, horizontal gaze, pitch, EAR (left/avg/right), gaze offset, PERCLOS — pose + eyes + short-window drowsiness |
+
+Run **`data_preparation/data_exploration.ipynb`** for histograms, label-over-time plots, feature–label correlations, correlation matrix, and the small quality checklist (sample count, class balance band, transition count).
+
+## Collect more
 
 ```bash
 python -m models.collect_features --name yourname
 ```
 
+Webcam + overlay: **1** = focused, **0** = unfocused, **p** = pause, **q** = save and quit.
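As a quick sanity check, here is a minimal sketch of reading one session back; the path is illustrative (real files are timestamped by `collect_features.py`), but the keys match the ones documented above.

```python
import numpy as np

# Illustrative path — real session files are timestamped by collect_features.py
session = np.load("data/collected_Kexin/session_001.npz", allow_pickle=True)

X = session["features"]              # (N, 17) raw feature matrix
y = session["labels"]                # (N,) 0 = unfocused, 1 = focused
names = list(session["feature_names"])

print(f"{len(y)} samples, {y.mean():.1%} focused")
print(names)                         # all 17 column names
```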
evaluation/run_channel_ablation_only.py
DELETED

@@ -1,63 +0,0 @@
"""Run only channel ablation LOPO (no leave-one-out). Quick run for paper data."""
import os
import sys
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import f1_score
from xgboost import XGBClassifier

_PROJECT_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
sys.path.insert(0, _PROJECT_ROOT)
from data_preparation.prepare_dataset import load_per_person, SELECTED_FEATURES

SEED = 42
FEATURES = SELECTED_FEATURES["face_orientation"]
CHANNEL_SUBSETS = {
    "head_pose": ["head_deviation", "s_face", "pitch"],
    "eye_state": ["ear_left", "ear_avg", "ear_right", "perclos"],
    "gaze": ["h_gaze", "gaze_offset", "s_eye"],
}


def main():
    by_person, _, _ = load_per_person("face_orientation")
    persons = sorted(by_person.keys())
    results = {}
    for subset_name, feat_list in CHANNEL_SUBSETS.items():
        idx_keep = [FEATURES.index(f) for f in feat_list]
        f1s = []
        for held_out in persons:
            train_X = np.concatenate([by_person[p][0] for p in persons if p != held_out])
            train_y = np.concatenate([by_person[p][1] for p in persons if p != held_out])
            X_test, y_test = by_person[held_out]
            X_tr = train_X[:, idx_keep]
            X_te = X_test[:, idx_keep]
            scaler = StandardScaler().fit(X_tr)
            xgb = XGBClassifier(n_estimators=600, max_depth=8, learning_rate=0.05,
                                subsample=0.8, colsample_bytree=0.8, reg_alpha=0.1, reg_lambda=1.0,
                                eval_metric="logloss", random_state=SEED, verbosity=0)
            xgb.fit(scaler.transform(X_tr), train_y)
            pred = xgb.predict(scaler.transform(X_te))
            f1s.append(f1_score(y_test, pred, average="weighted"))
        results[subset_name] = np.mean(f1s)
        print(f"{subset_name}: {results[subset_name]:.4f}")
    # baseline
    f1s = []
    for held_out in persons:
        train_X = np.concatenate([by_person[p][0] for p in persons if p != held_out])
        train_y = np.concatenate([by_person[p][1] for p in persons if p != held_out])
        X_test, y_test = by_person[held_out]
        scaler = StandardScaler().fit(train_X)
        xgb = XGBClassifier(n_estimators=600, max_depth=8, learning_rate=0.05,
                            subsample=0.8, colsample_bytree=0.8, reg_alpha=0.1, reg_lambda=1.0,
                            eval_metric="logloss", random_state=SEED, verbosity=0)
        xgb.fit(scaler.transform(train_X), train_y)
        pred = xgb.predict(scaler.transform(X_test))
        f1s.append(f1_score(y_test, pred, average="weighted"))
    results["all_10"] = np.mean(f1s)
    print(f"all_10: {results['all_10']:.4f}")
    return results


if __name__ == "__main__":
    main()
main.py
CHANGED

@@ -20,8 +20,11 @@ from contextlib import asynccontextmanager
 import asyncio
 import concurrent.futures
 import threading
+import logging
 
 from aiortc import RTCPeerConnection, RTCSessionDescription, VideoStreamTrack
+
+logger = logging.getLogger(__name__)
 from av import VideoFrame
 
 from mediapipe.tasks.python.vision import FaceLandmarksConnections

@@ -138,26 +141,33 @@ def _draw_hud(frame, result, model_name):
 
 # Landmark indices used for face mesh drawing on client (union of all groups).
 # Sending only these instead of all 478 saves ~60% of the landmarks payload.
 _MESH_INDICES = sorted(
-    … (old index list truncated in this view)
+    set(
+        [
+            10, 338, 297, 332, 284, 251, 389, 356, 454,
+            323, 361, 288, 397, 365, 379, 378, 400, 377,
+            152, 148, 176, 149, 150, 136, 172, 58, 132,
+            93, 234, 127, 162, 21, 54, 103, 67, 109,
+        ]  # face oval
+        + [33, 7, 163, 144, 145, 153, 154, 155, 133, 173, 157, 158, 159, 160, 161, 246]  # left eye
+        + [362, 382, 381, 380, 374, 373, 390, 249, 263, 466, 388, 387, 386, 385, 384, 398]  # right eye
+        + [468, 469, 470, 471, 472, 473, 474, 475, 476, 477]  # irises
+        + [70, 63, 105, 66, 107, 55, 65, 52, 53, 46]  # left eyebrow
+        + [300, 293, 334, 296, 336, 285, 295, 282, 283, 276]  # right eyebrow
+        + [6, 197, 195, 5, 4, 1, 19, 94, 2]  # nose bridge
+        + [61, 146, 91, 181, 84, 17, 314, 405, 321, 375, 291, 409, 270, 269, 267, 0, 37, 39, 40, 185]  # lips outer
+        + [78, 95, 88, 178, 87, 14, 317, 402, 318, 324, 308, 415, 310, 311, 312, 13, 82, 81, 80, 191]  # lips inner
+        + [33, 160, 158, 133, 153, 145]  # left EAR key points
+        + [362, 385, 387, 263, 373, 380]  # right EAR key points
+    )
 )
 # Build a lookup: original_index -> position in sparse array, so client can reconstruct.
 _MESH_INDEX_SET = set(_MESH_INDICES)
 
 @asynccontextmanager
 async def lifespan(app):
     global _cached_model_name
-    print("… (old startup banner truncated in this view)
+    print("Starting Focus Guard API")
     await init_database()
     async with aiosqlite.connect(db_path) as db:
         cursor = await db.execute("SELECT model_name FROM user_settings WHERE id = 1")

@@ -196,9 +206,13 @@ async def lifespan(app):
         await db.commit()
     if resolved_model is not None:
         print(f"[OK] Active model set to {resolved_model}")
+    if is_l2cs_weights_available():
+        print("[OK] L2CS weights found (lazy-loaded on first use)")
+    else:
+        print("[WARN] L2CS weights not found")
     yield
     _inference_executor.shutdown(wait=False)
-    print("… (old shutdown banner truncated in this view)
+    print("Shutting down Focus Guard API")
 
 app = FastAPI(title="Focus Guard API", lifespan=lifespan)

@@ -265,18 +279,14 @@ async def init_database():
         await db.execute("""
             CREATE TABLE IF NOT EXISTS user_settings (
                 id INTEGER PRIMARY KEY CHECK (id = 1),
-                sensitivity INTEGER DEFAULT 6,
-                notification_enabled BOOLEAN DEFAULT 1,
-                notification_threshold INTEGER DEFAULT 30,
-                frame_rate INTEGER DEFAULT 30,
                 model_name TEXT DEFAULT 'mlp'
             )
         """)
 
         # Insert default settings if not exists
         await db.execute("""
-            INSERT OR IGNORE INTO user_settings (id, … (old column list truncated in this view)
-            VALUES (1, … (old value list truncated in this view)
+            INSERT OR IGNORE INTO user_settings (id, model_name)
+            VALUES (1, 'mlp')
         """)
 
         await db.commit()

@@ -290,10 +300,6 @@ class SessionEnd(BaseModel):
     session_id: int
 
 class SettingsUpdate(BaseModel):
-    sensitivity: Optional[int] = None
-    notification_enabled: Optional[bool] = None
-    notification_threshold: Optional[int] = None
-    frame_rate: Optional[int] = None
     model_name: Optional[str] = None
     l2cs_boost: Optional[bool] = None

@@ -340,7 +346,12 @@ class VideoTransformTrack(VideoStreamTrack):
             )
             is_focused = out["is_focused"]
             confidence = out.get("mlp_prob", out.get("raw_score", 0.0))
-            metadata = { … (old dict body truncated in this view)
+            metadata = {
+                "s_face": out.get("s_face", 0.0),
+                "s_eye": out.get("s_eye", 0.0),
+                "mar": out.get("mar", 0.0),
+                "model": model_name,
+            }
 
             # Draw face mesh + HUD on the video frame
             h_f, w_f = img.shape[:2]

@@ -610,112 +621,52 @@ def _process_frame_with_l2cs_boost(base_pipeline, frame, base_model_name):
 
     return base_out
 
-@app.on_event("startup")
-async def startup_event():
-    global pipelines, _cached_model_name
-    print(" Starting Focus Guard API...")
-    await init_database()
-    # Load cached model name from DB
-    async with aiosqlite.connect(db_path) as db:
-        cursor = await db.execute("SELECT model_name FROM user_settings WHERE id = 1")
-        row = await cursor.fetchone()
-        if row:
-            _cached_model_name = row[0]
-    print("[OK] Database initialized")
-
-    try:
-        pipelines["geometric"] = FaceMeshPipeline()
-        print("[OK] FaceMeshPipeline (geometric) loaded")
-    except Exception as e:
-        print(f"[WARN] FaceMeshPipeline unavailable: {e}")
-
-    try:
-        pipelines["mlp"] = MLPPipeline()
-        print("[OK] MLPPipeline loaded")
-    except Exception as e:
-        print(f"[ERR] Failed to load MLPPipeline: {e}")
-
-    try:
-        pipelines["hybrid"] = HybridFocusPipeline()
-        print("[OK] HybridFocusPipeline loaded")
-    except Exception as e:
-        print(f"[WARN] HybridFocusPipeline unavailable: {e}")
-
-    try:
-        pipelines["xgboost"] = XGBoostPipeline()
-        print("[OK] XGBoostPipeline loaded")
-    except Exception as e:
-        print(f"[ERR] Failed to load XGBoostPipeline: {e}")
-
-    if is_l2cs_weights_available():
-        print("[OK] L2CS weights found — pipeline will be lazy-loaded on first use")
-    else:
-        print("[WARN] L2CS weights not found — l2cs model unavailable")
-
-@app.on_event("shutdown")
-async def shutdown_event():
-    _inference_executor.shutdown(wait=False)
-    print(" Shutting down Focus Guard API...")
-
 # ================ WEBRTC SIGNALING ================
 
 @app.post("/api/webrtc/offer")
 async def webrtc_offer(offer: dict):
     try:
-        print(f"Received WebRTC offer")
-
         pc = RTCPeerConnection()
         pcs.add(pc)
 
         session_id = await create_session()
-        print(f"Created session: {session_id}")
-
         channel_ref = {"channel": None}
 
         @pc.on("datachannel")
         def on_datachannel(channel):
-            print(f"Data channel opened")
             channel_ref["channel"] = channel
 
         @pc.on("track")
         def on_track(track):
-            print(f"Received track: {track.kind}")
             if track.kind == "video":
                 local_track = VideoTransformTrack(track, session_id, lambda: channel_ref["channel"])
                 pc.addTrack(local_track)
-                print(f"Video track added")
 
                 @track.on("ended")
                 async def on_ended():
-                    … (old body truncated in this view)
+                    pass
 
         @pc.on("connectionstatechange")
         async def on_connectionstatechange():
-            print(f"Connection state changed: {pc.connectionState}")
             if pc.connectionState in ("failed", "closed", "disconnected"):
                 try:
                     await end_session(session_id)
                 except Exception as e:
-                    … (old handler truncated in this view)
+                    logger.warning("WebRTC session end failed: %s", e)
                 pcs.discard(pc)
                 await pc.close()
 
         await pc.setRemoteDescription(RTCSessionDescription(sdp=offer["sdp"], type=offer["type"]))
-        print(f"Remote description set")
 
         answer = await pc.createAnswer()
         await pc.setLocalDescription(answer)
-        print(f"Answer created")
 
         await _wait_for_ice_gathering(pc)
-        print(f"ICE gathering complete")
 
         return {"sdp": pc.localDescription.sdp, "type": pc.localDescription.type, "session_id": session_id}
 
     except Exception as e:
-        … (old handler truncated in this view)
-        import traceback
-        traceback.print_exc()
+        logger.exception("WebRTC offer failed")
         raise HTTPException(status_code=500, detail=f"WebRTC error: {str(e)}")
 
 # ================ WEBSOCKET ================

@@ -732,7 +683,9 @@ async def websocket_endpoint(websocket: WebSocket):
     event_buffer = _EventBuffer(flush_interval=2.0)
 
     # Calibration state (per-connection)
-    … (old state dict truncated in this view)
+    # verifying: after fit, show a verification target and check gaze accuracy
+    _cal: dict = {"cal": None, "collecting": False, "fusion": None,
+                  "verifying": False, "verify_target": None, "verify_samples": []}
 
     # Latest frame slot — only the most recent frame is kept, older ones are dropped.
     _slot = {"frame": None}

@@ -791,6 +744,10 @@ async def websocket_endpoint(websocket: WebSocket):
                 _cal["cal"] = GazeCalibration()
                 _cal["collecting"] = True
                 _cal["fusion"] = None
+                # Tell L2CS pipeline to run every frame during calibration
+                l2cs_pipe = pipelines.get("l2cs")
+                if l2cs_pipe is not None and hasattr(l2cs_pipe, '_calibrating'):
+                    l2cs_pipe._calibrating = True
                 cal = _cal["cal"]
                 await websocket.send_json({
                     "type": "calibration_started",

@@ -801,7 +758,32 @@ async def websocket_endpoint(websocket: WebSocket):
 
             elif data["type"] == "calibration_next":
                 cal = _cal.get("cal")
-                if … (old condition truncated in this view)
+                if _cal.get("verifying"):
+                    # Verification phase complete — user clicked next
+                    _cal["verifying"] = False
+                    _cal["collecting"] = False
+                    # Re-enable frame skipping
+                    l2cs_pipe = pipelines.get("l2cs")
+                    if l2cs_pipe is not None and hasattr(l2cs_pipe, '_calibrating'):
+                        l2cs_pipe._calibrating = False
+                    # Check verification samples
+                    v_samples = _cal.get("verify_samples", [])
+                    vt = _cal.get("verify_target", [0.5, 0.5])
+                    if len(v_samples) >= 3:
+                        med_yaw = float(np.median([s[0] for s in v_samples]))
+                        med_pitch = float(np.median([s[1] for s in v_samples]))
+                        px, py, err, passed = cal.verify(med_yaw, med_pitch, vt[0], vt[1])
+                        print(f"[CAL] Verification: target=({vt[0]:.2f},{vt[1]:.2f}) "
+                              f"predicted=({px:.3f},{py:.3f}) error={err:.3f} passed={passed}")
+                    else:
+                        passed = True  # not enough samples, trust the fit
+                    _cal["fusion"] = GazeEyeFusion(cal)
+                    await websocket.send_json({
+                        "type": "calibration_done",
+                        "success": True,
+                        "verified": passed,
+                    })
+                elif cal is not None:
                     more = cal.advance()
                     if more:
                         await websocket.send_json({

@@ -810,18 +792,39 @@ async def websocket_endpoint(websocket: WebSocket):
                             "index": cal.current_index,
                         })
                     else:
+                        # All 9 points collected — try to fit
                         _cal["collecting"] = False
                         ok = cal.fit()
                         if ok:
-                            … (old success branch truncated in this view)
+                            # Enter verification phase: show center target
+                            _cal["verifying"] = True
+                            _cal["verify_target"] = [0.5, 0.5]
+                            _cal["verify_samples"] = []
+                            await websocket.send_json({
+                                "type": "calibration_verify",
+                                "target": [0.5, 0.5],
+                                "message": "Look at the dot to verify calibration",
+                            })
                         else:
-                            … (old failure branch truncated in this view)
+                            # Re-enable frame skipping
+                            l2cs_pipe = pipelines.get("l2cs")
+                            if l2cs_pipe is not None and hasattr(l2cs_pipe, '_calibrating'):
+                                l2cs_pipe._calibrating = False
+                            await websocket.send_json(
+                                {
+                                    "type": "calibration_done",
+                                    "success": False,
+                                    "error": "Not enough samples",
+                                }
+                            )
 
             elif data["type"] == "calibration_cancel":
                 _cal["cal"] = None
                 _cal["collecting"] = False
                 _cal["fusion"] = None
+                l2cs_pipe = pipelines.get("l2cs")
+                if l2cs_pipe is not None and hasattr(l2cs_pipe, '_calibrating'):
+                    l2cs_pipe._calibrating = False
                 await websocket.send_json({"type": "calibration_cancelled"})
 
         except WebSocketDisconnect:

@@ -914,18 +917,27 @@ async def websocket_endpoint(websocket: WebSocket):
                 if pipe_yaw is not None and pipe_pitch is not None:
                     _cal["cal"].collect_sample(pipe_yaw, pipe_pitch)
 
-            # … (old fusion condition truncated in this view)
-                and model_name == "l2cs"
-                and out.get("gaze_yaw") is not None
-            ):
-                fuse = fusion.update(
-                    out["gaze_yaw"], out["gaze_pitch"], lm
-                )
+            # Verification sample collection
+            if _cal.get("verifying") and out.get("gaze_yaw") is not None:
+                _cal["verify_samples"].append(
+                    (out["gaze_yaw"], out["gaze_pitch"])
+                )
+
+            # Gaze fusion (single call — applied before event logging
+            # and response to avoid double-EMA smoothing)
+            fusion = _cal.get("fusion")
+            has_gaze = out.get("gaze_yaw") is not None
+            fuse = None
+            if fusion is not None and has_gaze and (model_name == "l2cs" or use_boost):
+                fuse = fusion.update(out["gaze_yaw"], out["gaze_pitch"], lm)
+                if model_name == "l2cs":
+                    # L2CS standalone: fusion fully controls focus decision
+                    is_focused = fuse["focused"]
+                    confidence = fuse["focus_score"]
+                elif use_boost and not fuse["on_screen"]:
+                    # Boost mode: if gaze is clearly off-screen, override to unfocused
+                    is_focused = False
+                    confidence = min(confidence, 0.1)
 
             if session_id:
                 metadata = {

@@ -958,17 +970,20 @@ async def websocket_endpoint(websocket: WebSocket):
             resp["sf"] = round(out.get("s_face", 0), 3)
             resp["se"] = round(out.get("s_eye", 0), 3)
 
-            # … (old comment truncated in this view)
-            has_gaze = out.get("gaze_yaw") is not None
-            if fusion is not None and has_gaze and (model_name == "l2cs" or use_boost):
-                fuse = fusion.update(out["gaze_yaw"], out["gaze_pitch"], out.get("landmarks"))
+            # Attach gaze fusion fields + raw gaze angles for visualization
+            if fuse is not None:
                 resp["gaze_x"] = fuse["gaze_x"]
                 resp["gaze_y"] = fuse["gaze_y"]
                 resp["on_screen"] = fuse["on_screen"]
                 if model_name == "l2cs":
                     resp["focused"] = fuse["focused"]
                     resp["confidence"] = round(fuse["focus_score"], 3)
+                elif use_boost and not fuse["on_screen"]:
+                    resp["focused"] = False
+                    resp["confidence"] = min(resp["confidence"], 0.1)
+            if has_gaze:
+                resp["gaze_yaw"] = round(out["gaze_yaw"], 4)
+                resp["gaze_pitch"] = round(out["gaze_pitch"], 4)
 
             if out.get("boost_active"):
                 resp["boost"] = True

@@ -1002,7 +1017,8 @@ async def api_start_session():
 @app.post("/api/sessions/end")
 async def api_end_session(data: SessionEnd):
     summary = await end_session(data.session_id)
-    if not summary: … (old handling truncated in this view)
+    if not summary:
+        raise HTTPException(status_code=404, detail="Session not found")
     return summary
 
 @app.get("/api/sessions")

@@ -1010,8 +1026,7 @@ async def get_sessions(filter: str = "all", limit: int = 50, offset: int = 0):
     async with aiosqlite.connect(db_path) as db:
         db.row_factory = aiosqlite.Row
 
-        # … (old comment truncated in this view)
-        # For simplicity: if limit is -1, return all
+        # limit=-1 returns all rows (export); otherwise paginate
        limit_clause = "LIMIT ? OFFSET ?"
        params = []

@@ -1031,14 +1046,11 @@ async def get_sessions(filter: str = "all", limit: int = 50, offset: int = 0):
             where_clause = " WHERE start_time >= ?"
             params.append(date_filter.isoformat())
         elif filter == "all":
-            # Just ensure we only get completed sessions or all sessions
             where_clause = " WHERE end_time IS NOT NULL"
 
         query = f"{base_query}{where_clause} ORDER BY start_time DESC"
 
         if limit == -1:
-            # No limit clause for export
             pass
         else:
             query += f" {limit_clause}"

@@ -1048,7 +1060,6 @@ async def get_sessions(filter: str = "all", limit: int = 50, offset: int = 0):
         rows = await cursor.fetchall()
         return [dict(row) for row in rows]
 
-# --- NEW: Import Endpoint ---
 @app.post("/api/import")
 async def import_sessions(sessions: List[dict]):
     count = 0

@@ -1057,7 +1068,10 @@ async def import_sessions(sessions: List[dict]):
             for session in sessions:
                 # Use .get() to handle potential missing fields from older versions or edits
                 await db.execute("""
-                    INSERT INTO focus_sessions ( … (old column list truncated in this view)
+                    INSERT INTO focus_sessions (
+                        start_time, end_time, duration_seconds, focus_score,
+                        total_frames, focused_frames, created_at
+                    )
                     VALUES (?, ?, ?, ?, ?, ?, ?)
                 """, (
                     session.get('start_time'),

@@ -1075,7 +1089,6 @@ async def import_sessions(sessions: List[dict]):
         print(f"Import Error: {e}")
         return {"status": "error", "message": str(e)}
 
-# --- NEW: Clear History Endpoint ---
 @app.delete("/api/history")
 async def clear_history():
     try:

@@ -1094,7 +1107,8 @@ async def get_session(session_id: int):
         db.row_factory = aiosqlite.Row
         cursor = await db.execute("SELECT * FROM focus_sessions WHERE id = ?", (session_id,))
         row = await cursor.fetchone()
-        if not row: … (old handling truncated in this view)
+        if not row:
+            raise HTTPException(status_code=404, detail="Session not found")
         session = dict(row)
         cursor = await db.execute("SELECT * FROM focus_events WHERE session_id = ? ORDER BY timestamp", (session_id,))
         events = [dict(r) for r in await cursor.fetchall()]

@@ -1107,7 +1121,9 @@ async def get_settings():
         db.row_factory = aiosqlite.Row
         cursor = await db.execute("SELECT * FROM user_settings WHERE id = 1")
         row = await cursor.fetchone()
-        result = dict(row) if row else { … (old defaults truncated in this view)
+        result = dict(row) if row else {
+            "model_name": "mlp",
+        }
         result['l2cs_boost'] = _l2cs_boost_enabled
         return result

@@ -1122,18 +1138,6 @@ async def update_settings(settings: SettingsUpdate):
 
         updates = []
         params = []
-        if settings.sensitivity is not None:
-            updates.append("sensitivity = ?")
-            params.append(max(1, min(10, settings.sensitivity)))
-        if settings.notification_enabled is not None:
-            updates.append("notification_enabled = ?")
-            params.append(settings.notification_enabled)
-        if settings.notification_threshold is not None:
-            updates.append("notification_threshold = ?")
-            params.append(max(5, min(300, settings.notification_threshold)))
-        if settings.frame_rate is not None:
-            updates.append("frame_rate = ?")
-            params.append(max(5, min(60, settings.frame_rate)))
         if settings.model_name is not None and settings.model_name in pipelines:
             if settings.model_name == "l2cs":
                 loop = asyncio.get_event_loop()

@@ -1158,7 +1162,7 @@ async def update_settings(settings: SettingsUpdate):
 
         if updates:
             query = f"UPDATE user_settings SET {', '.join(updates)} WHERE id = 1"
-            await db.execute(query, params)
+            await db.execute(query, tuple(params))
             await db.commit()
     return {"status": "success", "updated": len(updates) > 0}

@@ -1176,7 +1180,12 @@ async def get_system_stats():
             "memory_total_mb": round(mem.total / (1024 * 1024), 0),
         }
     except ImportError:
-        return { … (old fallback truncated in this view)
+        return {
+            "cpu_percent": None,
+            "memory_percent": None,
+            "memory_used_mb": None,
+            "memory_total_mb": None,
+        }
 
 @app.get("/api/stats/summary")
 async def get_stats_summary():

@@ -1187,7 +1196,14 @@ async def get_stats_summary():
         total_focus_time = (await cursor.fetchone())[0] or 0
         cursor = await db.execute("SELECT AVG(focus_score) FROM focus_sessions WHERE end_time IS NOT NULL")
         avg_focus_score = (await cursor.fetchone())[0] or 0.0
-        cursor = await db.execute( … (old query truncated in this view)
+        cursor = await db.execute(
+            """
+            SELECT DISTINCT DATE(start_time) as session_date
+            FROM focus_sessions
+            WHERE end_time IS NOT NULL
+            ORDER BY session_date DESC
+            """
+        )
         dates = [row[0] for row in await cursor.fetchall()]
 
         streak_days = 0

@@ -1196,8 +1212,10 @@ async def get_stats_summary():
         for i, date_str in enumerate(dates):
             session_date = datetime.fromisoformat(date_str).date()
             expected_date = current_date - timedelta(days=i)
-            if session_date == expected_date: … (old body truncated in this view)
+            if session_date == expected_date:
+                streak_days += 1
+            else:
+                break
         return {
             'total_sessions': total_sessions,
             'total_focus_time': int(total_focus_time),
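Note on the `_MESH_INDICES` change: the server sends only this sparse subset of the 478 FaceMesh landmarks, and the client maps original indices to positions in the sparse array via the lookup the comment mentions. A minimal sketch of that reconstruction; the helper name is illustrative, not part of the commit:

```python
# Position of each original FaceMesh index inside the sparse payload
_MESH_POS = {orig: i for i, orig in enumerate(_MESH_INDICES)}

def lookup_landmark(sparse_landmarks, original_index):
    """Return the landmark sent for an original FaceMesh index, or None if pruned."""
    pos = _MESH_POS.get(original_index)
    return sparse_landmarks[pos] if pos is not None else None
```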
models/L2CS-Net/l2cs/datasets.py
CHANGED

@@ -1,157 +1,156 @@
(Whole-file rewrite; the recoverable old-side fragments match the new content, so this appears to be a whitespace-only normalization. New contents:)

import os
import numpy as np
import cv2


import torch
from torch.utils.data.dataset import Dataset
from torchvision import transforms
from PIL import Image, ImageFilter


class Gaze360(Dataset):
    def __init__(self, path, root, transform, angle, binwidth, train=True):
        self.transform = transform
        self.root = root
        self.orig_list_len = 0
        self.angle = angle
        if train==False:
            angle=90
        self.binwidth=binwidth
        self.lines = []
        if isinstance(path, list):
            for i in path:
                with open(i) as f:
                    line = f.readlines()
                    line.pop(0)
                    self.lines.extend(line)
        else:
            with open(path) as f:
                lines = f.readlines()
                lines.pop(0)
                self.orig_list_len = len(lines)
                for line in lines:
                    gaze2d = line.strip().split(" ")[5]
                    label = np.array(gaze2d.split(",")).astype("float")
                    if abs((label[0]*180/np.pi)) <= angle and abs((label[1]*180/np.pi)) <= angle:
                        self.lines.append(line)

        print("{} items removed from dataset that have an angle > {}".format(self.orig_list_len-len(self.lines), angle))

    def __len__(self):
        return len(self.lines)

    def __getitem__(self, idx):
        line = self.lines[idx]
        line = line.strip().split(" ")

        face = line[0]
        lefteye = line[1]
        righteye = line[2]
        name = line[3]
        gaze2d = line[5]
        label = np.array(gaze2d.split(",")).astype("float")
        label = torch.from_numpy(label).type(torch.FloatTensor)

        pitch = label[0]* 180 / np.pi
        yaw = label[1]* 180 / np.pi

        img = Image.open(os.path.join(self.root, face))

        # fimg = cv2.imread(os.path.join(self.root, face))
        # fimg = cv2.resize(fimg, (448, 448))/255.0
        # fimg = fimg.transpose(2, 0, 1)
        # img=torch.from_numpy(fimg).type(torch.FloatTensor)

        if self.transform:
            img = self.transform(img)

        # Bin values
        bins = np.array(range(-1*self.angle, self.angle, self.binwidth))
        binned_pose = np.digitize([pitch, yaw], bins) - 1

        labels = binned_pose
        cont_labels = torch.FloatTensor([pitch, yaw])

        return img, labels, cont_labels, name

class Mpiigaze(Dataset):
    def __init__(self, pathorg, root, transform, train, angle,fold=0):
        self.transform = transform
        self.root = root
        self.orig_list_len = 0
        self.lines = []
        path=pathorg.copy()
        if train==True:
            path.pop(fold)
        else:
            path=path[fold]
        if isinstance(path, list):
            for i in path:
                with open(i) as f:
                    lines = f.readlines()
                    lines.pop(0)
                    self.orig_list_len += len(lines)
                    for line in lines:
                        gaze2d = line.strip().split(" ")[7]
                        label = np.array(gaze2d.split(",")).astype("float")
                        if abs((label[0]*180/np.pi)) <= angle and abs((label[1]*180/np.pi)) <= angle:
                            self.lines.append(line)
        else:
            with open(path) as f:
                lines = f.readlines()
                lines.pop(0)
                self.orig_list_len += len(lines)
                for line in lines:
                    gaze2d = line.strip().split(" ")[7]
                    label = np.array(gaze2d.split(",")).astype("float")
                    if abs((label[0]*180/np.pi)) <= 42 and abs((label[1]*180/np.pi)) <= 42:
                        self.lines.append(line)

        print("{} items removed from dataset that have an angle > {}".format(self.orig_list_len-len(self.lines),angle))

    def __len__(self):
        return len(self.lines)

    def __getitem__(self, idx):
        line = self.lines[idx]
        line = line.strip().split(" ")

        name = line[3]
        gaze2d = line[7]
        head2d = line[8]
        lefteye = line[1]
        righteye = line[2]
        face = line[0]

        label = np.array(gaze2d.split(",")).astype("float")
        label = torch.from_numpy(label).type(torch.FloatTensor)

        pitch = label[0]* 180 / np.pi
        yaw = label[1]* 180 / np.pi

        img = Image.open(os.path.join(self.root, face))

        # fimg = cv2.imread(os.path.join(self.root, face))
        # fimg = cv2.resize(fimg, (448, 448))/255.0
        # fimg = fimg.transpose(2, 0, 1)
        # img=torch.from_numpy(fimg).type(torch.FloatTensor)

        if self.transform:
            img = self.transform(img)

        # Bin values
        bins = np.array(range(-42, 42,3))
        binned_pose = np.digitize([pitch, yaw], bins) - 1

        labels = binned_pose
        cont_labels = torch.FloatTensor([pitch, yaw])

        return img, labels, cont_labels, name
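The binning step in `__getitem__` turns continuous gaze angles into classification bin indices. A tiny sketch of what `np.digitize` does with the ±42°, 3°-wide bins used above:

```python
import numpy as np

bins = np.array(range(-42, 42, 3))        # 28 bin edges, matching the dataset code
angles = [-10.0, 0.0, 20.5]               # example pitch/yaw values in degrees
print(np.digitize(angles, bins) - 1)      # -> [10 14 20], the class indices
```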
models/L2CS-Net/l2cs/pipeline.py
CHANGED
|
@@ -1,4 +1,5 @@
|
|
| 1 |
import pathlib
|
|
|
|
| 2 |
from typing import Union
|
| 3 |
|
| 4 |
import cv2
|
|
@@ -15,10 +16,10 @@ from .results import GazeResultContainer
|
|
| 15 |
class Pipeline:
|
| 16 |
|
| 17 |
def __init__(
|
| 18 |
-
self,
|
| 19 |
-
weights: pathlib.Path,
|
| 20 |
arch: str,
|
| 21 |
-
device: str = 'cpu',
|
| 22 |
include_detector:bool = True,
|
| 23 |
confidence_threshold:float = 0.5
|
| 24 |
):
|
|
@@ -31,10 +32,18 @@ class Pipeline:
|
|
| 31 |
|
| 32 |
# Create L2CS model
|
| 33 |
self.model = getArch(arch, 90)
|
| 34 |
-
|
|
|
|
|
|
|
|
|
|
| 35 |
self.model.to(self.device)
|
| 36 |
self.model.eval()
|
| 37 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 38 |
# Create RetinaFace if requested
|
| 39 |
if self.include_detector:
|
| 40 |
|
|
@@ -47,6 +56,19 @@ class Pipeline:
|
|
| 47 |
self.idx_tensor = [idx for idx in range(90)]
|
| 48 |
self.idx_tensor = torch.FloatTensor(self.idx_tensor).to(self.device)
|
| 49 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 50 |
def step(self, frame: np.ndarray) -> GazeResultContainer:
|
| 51 |
|
| 52 |
# Creating containers
|
|
@@ -56,9 +78,12 @@ class Pipeline:
|
|
| 56 |
scores = []
|
| 57 |
|
| 58 |
if self.include_detector:
|
|
|
|
| 59 |
faces = self.detector(frame)
|
|
|
|
| 60 |
|
| 61 |
-
if faces is not None:
|
|
|
|
| 62 |
for box, landmark, score in faces:
|
| 63 |
|
| 64 |
# Apply threshold
|
|
@@ -74,7 +99,7 @@ class Pipeline:
|
|
| 74 |
y_min = 0
|
| 75 |
x_max=int(box[2])
|
| 76 |
y_max=int(box[3])
|
| 77 |
-
|
| 78 |
# Crop image
|
| 79 |
img = frame[y_min:y_max, x_min:x_max]
|
| 80 |
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
|
|
@@ -86,8 +111,20 @@ class Pipeline:
|
|
| 86 |
landmarks.append(landmark)
|
| 87 |
scores.append(score)
|
| 88 |
|
|
|
|
|
|
|
| 89 |
# Predict gaze
|
| 90 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 91 |
|
| 92 |
else:
|
| 93 |
|
|
@@ -95,7 +132,8 @@ class Pipeline:
|
|
| 95 |
yaw = np.empty((0,1))
|
| 96 |
|
| 97 |
else:
|
| 98 |
-
|
|
|
|
| 99 |
|
| 100 |
# Save data
|
| 101 |
results = GazeResultContainer(
|
|
@@ -109,7 +147,7 @@ class Pipeline:
|
|
| 109 |
return results
|
| 110 |
|
| 111 |
def predict_gaze(self, frame: Union[np.ndarray, torch.Tensor]):
|
| 112 |
-
|
| 113 |
# Prepare input
|
| 114 |
if isinstance(frame, np.ndarray):
|
| 115 |
img = prep_input_numpy(frame, self.device)
|
|
@@ -117,17 +155,21 @@ class Pipeline:
|
|
| 117 |
img = frame
|
| 118 |
else:
|
| 119 |
raise RuntimeError("Invalid dtype for input")
|
| 120 |
-
|
| 121 |
-
#
|
|
|
|
|
|
|
|
|
|
|
|
|
| 122 |
gaze_pitch, gaze_yaw = self.model(img)
|
| 123 |
-
pitch_predicted = self.softmax(gaze_pitch)
|
| 124 |
-
yaw_predicted = self.softmax(gaze_yaw)
|
| 125 |
-
|
| 126 |
# Get continuous predictions in degrees.
|
| 127 |
pitch_predicted = torch.sum(pitch_predicted.data * self.idx_tensor, dim=1) * 4 - 180
|
| 128 |
yaw_predicted = torch.sum(yaw_predicted.data * self.idx_tensor, dim=1) * 4 - 180
|
| 129 |
-
|
| 130 |
-
pitch_predicted= pitch_predicted.cpu().detach().numpy()* np.pi/180.0
|
| 131 |
-
yaw_predicted= yaw_predicted.cpu().detach().numpy()* np.pi/180.0
|
| 132 |
|
| 133 |
return pitch_predicted, yaw_predicted
|
|
|
|
| 1 |
import pathlib
|
| 2 |
+
import time
|
| 3 |
from typing import Union
|
| 4 |
|
| 5 |
import cv2
|
|
|
|
| 16 |
class Pipeline:
|
| 17 |
|
| 18 |
def __init__(
|
| 19 |
+
self,
|
| 20 |
+
weights: pathlib.Path,
|
| 21 |
arch: str,
|
| 22 |
+
device: str = 'cpu',
|
| 23 |
include_detector:bool = True,
|
| 24 |
confidence_threshold:float = 0.5
|
| 25 |
):
|
|
|
|
| 32 |
|
| 33 |
# Create L2CS model
|
| 34 |
self.model = getArch(arch, 90)
|
| 35 |
+
# PyTorch 2.6+ defaults weights_only=True; these checkpoints need full unpickle
|
| 36 |
+
self.model.load_state_dict(
|
| 37 |
+
torch.load(self.weights, map_location=device, weights_only=False)
|
| 38 |
+
)
|
| 39 |
self.model.to(self.device)
|
| 40 |
self.model.eval()
|
| 41 |
|
| 42 |
+
# Half precision on GPU for ~2x speedup
|
| 43 |
+
self._use_half = (device.type != 'cpu')
|
| 44 |
+
if self._use_half:
|
| 45 |
+
self.model.half()
|
| 46 |
+
|
| 47 |
# Create RetinaFace if requested
|
| 48 |
if self.include_detector:
|
| 49 |
|
|
|
|
         self.idx_tensor = [idx for idx in range(90)]
         self.idx_tensor = torch.FloatTensor(self.idx_tensor).to(self.device)

+        # Warmup: dummy forward pass to avoid cold-start latency
+        self._warmup()
+
+    def _warmup(self):
+        """Run a dummy forward pass to warm up the model and CUDA kernels."""
+        dummy = np.zeros((224, 224, 3), dtype=np.uint8)
+        try:
+            with torch.no_grad():
+                self.predict_gaze(dummy)
+            print("[L2CS] Model warmup complete")
+        except Exception as e:
+            print(f"[L2CS] Warmup failed (non-fatal): {e}")
+
     def step(self, frame: np.ndarray) -> GazeResultContainer:

         # Creating containers
         …
         scores = []

         if self.include_detector:
+            t0 = time.perf_counter()
             faces = self.detector(frame)
+            t_detect = (time.perf_counter() - t0) * 1000

+            if faces is not None:
+                t0 = time.perf_counter()
                 for box, landmark, score in faces:

                     # Apply threshold
                     …
                         y_min = 0
                     x_max=int(box[2])
                     y_max=int(box[3])
+
                     # Crop image
                     img = frame[y_min:y_max, x_min:x_max]
                     img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
                     …
                     landmarks.append(landmark)
                     scores.append(score)

+                t_preprocess = (time.perf_counter() - t0) * 1000
+
                 # Predict gaze
+                t0 = time.perf_counter()
+                with torch.no_grad():
+                    pitch, yaw = self.predict_gaze(np.stack(face_imgs))
+                t_inference = (time.perf_counter() - t0) * 1000
+
+                # Log timing every 30 frames (avoid spamming)
+                if not hasattr(self, '_step_count'):
+                    self._step_count = 0
+                self._step_count += 1
+                if self._step_count % 30 == 1:
+                    print(f"[L2CS timing] detect={t_detect:.1f}ms preprocess={t_preprocess:.1f}ms inference={t_inference:.1f}ms total={t_detect+t_preprocess+t_inference:.1f}ms")

             else:
                 …
                 yaw = np.empty((0,1))

         else:
+            with torch.no_grad():
+                pitch, yaw = self.predict_gaze(frame)

         # Save data
         results = GazeResultContainer(
         …
         return results

     def predict_gaze(self, frame: Union[np.ndarray, torch.Tensor]):
+
         # Prepare input
         if isinstance(frame, np.ndarray):
             img = prep_input_numpy(frame, self.device)
         …
             img = frame
         else:
             raise RuntimeError("Invalid dtype for input")
+
+        # Half precision on GPU
+        if self._use_half:
+            img = img.half()
+
+        # Forward pass (caller should wrap in torch.no_grad())
         gaze_pitch, gaze_yaw = self.model(img)
+        pitch_predicted = self.softmax(gaze_pitch.float())
+        yaw_predicted = self.softmax(gaze_yaw.float())
+
         # Get continuous predictions in degrees.
         pitch_predicted = torch.sum(pitch_predicted.data * self.idx_tensor, dim=1) * 4 - 180
         yaw_predicted = torch.sum(yaw_predicted.data * self.idx_tensor, dim=1) * 4 - 180
+
+        pitch_predicted = pitch_predicted.cpu().detach().numpy() * np.pi / 180.0
+        yaw_predicted = yaw_predicted.cpu().detach().numpy() * np.pi / 180.0

         return pitch_predicted, yaw_predicted
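The decode at the end of `predict_gaze` is a soft-argmax: L2CS treats gaze as 90 classification bins of 4° each, takes the expectation under the softmax, recenters by −180°, and converts to radians. Note also that the logits are cast back with `.float()` before the softmax, so the new half-precision path only covers the backbone forward pass. A minimal numpy sketch of the same decode arithmetic (the function name is illustrative, not from the repo):

```python
import numpy as np

def decode_bins(logits):
    """Soft-argmax over 90 gaze bins (4 deg each), returned in radians."""
    e = np.exp(logits - logits.max(axis=-1, keepdims=True))
    probs = e / e.sum(axis=-1, keepdims=True)            # softmax
    idx = np.arange(90, dtype=np.float64)                # same role as self.idx_tensor
    degrees = (probs * idx).sum(axis=-1) * 4.0 - 180.0   # expectation -> degrees
    return degrees * np.pi / 180.0                       # radians, as predict_gaze returns

# All probability mass on bin 45 -> 45 * 4 - 180 = 0 deg, i.e. looking straight ahead.
one_hot = np.full(90, -1e9)
one_hot[45] = 0.0
print(decode_bins(one_hot))  # ~0.0
```

The soft-argmax keeps the output continuous while the network trains with a classification-style head, which is the bin-plus-expectation idea L2CS-Net is built on.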
models/gaze_calibration.py
CHANGED

@@ -73,6 +73,12 @@ class GazeCalibration:
         pt.pitches.append(float(pitch_rad))

     def advance(self):
+        # Log sample count for the point we're leaving
+        if self._current_idx < len(self._points):
+            pt = self._points[self._current_idx]
+            print(f"[CAL] Point {self._current_idx} "
+                  f"target=({pt.target_x:.2f},{pt.target_y:.2f}) "
+                  f"collected {len(pt.yaws)} samples")
         self._current_idx += 1
         return self._current_idx < len(self._targets)

@@ -112,6 +118,17 @@
             W, _, _, _ = np.linalg.lstsq(A, B, rcond=None)
             self._W = W
             self._fitted = True
+            # Log calibration quality
+            predicted = A @ W
+            residuals = B - predicted
+            rmse = float(np.sqrt(np.mean(residuals ** 2)))
+            print(f"[CAL] Fitted with {len(rows_A)} points, "
+                  f"yaw_bias={self._yaw_bias:.4f} pitch_bias={self._pitch_bias:.4f} "
+                  f"RMSE={rmse:.4f}")
+            # Verify center prediction
+            cx, cy = self.predict(self._yaw_bias, self._pitch_bias)
+            print(f"[CAL] Center prediction: ({cx:.3f}, {cy:.3f}) — "
+                  f"should be near (0.5, 0.5)")
             return True
         except np.linalg.LinAlgError:
             return False
@@ -121,7 +138,16 @@
             return 0.5, 0.5
         feat = self._poly_features(yaw_rad - self._yaw_bias, pitch_rad - self._pitch_bias)
         xy = feat @ self._W
-
+        # Allow out-of-bounds values so on_screen detection can work.
+        # Clamp to [-0.5, 1.5] to prevent polynomial extrapolation going wild.
+        return float(np.clip(xy[0], -0.5, 1.5)), float(np.clip(xy[1], -0.5, 1.5))
+
+    def verify(self, yaw_rad, pitch_rad, target_x=0.5, target_y=0.5):
+        """Check if a gaze prediction lands near the expected target.
+        Returns (predicted_x, predicted_y, error, passed)."""
+        px, py = self.predict(yaw_rad, pitch_rad)
+        err = float(np.sqrt((px - target_x) ** 2 + (py - target_y) ** 2))
+        return px, py, err, err < 0.25

     def to_dict(self):
         return {
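The new fit logging makes calibration quality visible at a glance: the RMSE is in normalized screen units (1.0 spans the full screen), and the `verify` helper passes when the prediction lands within 0.25 of the target in the same units. A self-contained sketch of the fit-and-check math, assuming a quadratic feature basis (the repo's `_poly_features` may use a different basis):

```python
import numpy as np

def poly_features(yaw, pitch):
    # Hypothetical quadratic basis, for illustration only.
    return np.array([1.0, yaw, pitch, yaw * pitch, yaw ** 2, pitch ** 2])

# One row per calibration point (debiased yaw/pitch, radians) -> screen (x, y) in [0, 1].
samples = [(-0.30, -0.20), (0.30, -0.20), (0.0, 0.0), (-0.30, 0.20), (0.30, 0.20)]
A = np.stack([poly_features(y, p) for y, p in samples])
B = np.array([[0.1, 0.1], [0.9, 0.1], [0.5, 0.5], [0.1, 0.9], [0.9, 0.9]])

W, _, _, _ = np.linalg.lstsq(A, B, rcond=None)    # same call as the diff
rmse = float(np.sqrt(np.mean((B - A @ W) ** 2)))  # the logged RMSE
cx, cy = poly_features(0.0, 0.0) @ W              # center check at zero bias
err = float(np.hypot(cx - 0.5, cy - 0.5))         # verify()-style distance
print(f"RMSE={rmse:.4f} center=({cx:.3f},{cy:.3f}) pass={err < 0.25}")
```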
models/gaze_eye_fusion.py
CHANGED

@@ -8,12 +8,15 @@ from .gaze_calibration import GazeCalibration
 from .eye_scorer import compute_avg_ear

 _EAR_BLINK = 0.18
-_ON_SCREEN_MARGIN = 0.
+_ON_SCREEN_MARGIN = 0.15
+
+
+_SUSTAINED_CLOSE_FRAMES = 4  # ~250ms at 15fps — ignore brief blinks


 class GazeEyeFusion:

-    def __init__(self, calibration, ear_weight=0.
+    def __init__(self, calibration, ear_weight=0.25, gaze_weight=0.75, focus_threshold=0.42):
         if not calibration.is_fitted:
             raise ValueError("Calibration must be fitted first")
         self._cal = calibration
@@ -22,7 +25,8 @@
         self._threshold = focus_threshold
         self._smooth_x = 0.5
         self._smooth_y = 0.5
-        self._alpha = 0.
+        self._alpha = 0.35
+        self._closed_streak = 0

     def update(self, yaw_rad, pitch_rad, landmarks):
         gx, gy = self._cal.predict(yaw_rad, pitch_rad)
@@ -41,20 +45,32 @@
         ear_score = 1.0
         if landmarks is not None:
             ear = compute_avg_ear(landmarks)
-
+            if ear < _EAR_BLINK:
+                ear_score = 0.0
+                self._closed_streak += 1
+            else:
+                ear_score = min(ear / 0.30, 1.0)
+                self._closed_streak = 0

-        #
-
-        if on_screen:
-
-
-
+        # Gaze score: 1.0 anywhere on screen, gentle falloff near edges,
+        # 0.0 when clearly off screen.
+        if not on_screen:
+            gaze_score = 0.0
+        else:
+            dx = max(0.0, abs(gx - 0.5) - 0.4)
+            dy = max(0.0, abs(gy - 0.5) - 0.4)
+            dist = math.sqrt(dx ** 2 + dy ** 2)
+            gaze_score = max(0.0, 1.0 - dist * 2.5)

-
+        # Sustained eye closure veto — ignore brief blinks (< 4 frames)
+        if self._closed_streak >= _SUSTAINED_CLOSE_FRAMES:
+            score = 0.0
+        else:
+            score = float(np.clip(self._gaze_w * gaze_score + self._ear_w * ear_score, 0, 1))

         return {
-            "gaze_x": round(float(gx), 4),
-            "gaze_y": round(float(gy), 4),
+            "gaze_x": round(float(np.clip(gx, 0, 1)), 4),
+            "gaze_y": round(float(np.clip(gy, 0, 1)), 4),
             "on_screen": on_screen,
             "ear": round(ear, 4) if ear is not None else None,
             "focus_score": round(score, 4),
@@ -64,3 +80,4 @@
     def reset(self):
         self._smooth_x = 0.5
         self._smooth_y = 0.5
+        self._closed_streak = 0
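Two of the new constants are worth sanity-checking. The sustained-closure veto needs 4 consecutive sub-threshold EAR frames (the diff's own comment: ~250 ms at 15 fps), so a normal one-to-three-frame blink never trips it. And the gaze term is flat at 1.0 anywhere within 0.4 of screen center on each axis, then falls linearly to 0.0 over the next 0.4 in clamped gaze units. A quick standalone check of that falloff, using the constants from the diff:

```python
import math

def gaze_score(gx, gy):
    # Edge falloff from GazeEyeFusion.update: flat center band, slope 2.5 outside it.
    dx = max(0.0, abs(gx - 0.5) - 0.4)
    dy = max(0.0, abs(gy - 0.5) - 0.4)
    return max(0.0, 1.0 - math.hypot(dx, dy) * 2.5)

assert gaze_score(0.5, 0.5) == 1.0  # dead center
print(gaze_score(0.9, 0.5))         # 1.0   (edge of the flat band)
print(gaze_score(1.0, 0.5))         # 0.75  (0.1 past the band)
print(gaze_score(1.15, 0.5))        # 0.375 (at the on-screen margin)
print(gaze_score(1.3, 0.5))         # 0.0   (fully off; also the predict() clamp region)
```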
models/xgboost/fetch_sweep_results.py
DELETED

@@ -1,46 +0,0 @@
-import csv
-from clearml import Task
-
-print("Fetching tasks from ClearML...")
-tasks = Task.get_tasks(
-    project_name='FocusGuards Large Group Project',
-    tags=['optuna_manual'],
-    task_filter={'status': ['completed', 'failed']}
-)
-
-results = []
-for t in tasks:
-    if t.get_status() != 'completed': continue
-    params = t.get_parameters()
-
-    # We reported logloss as Loss/Val, and F1 as Summary/val_f1
-    metrics = t.get_last_scalar_metrics()
-    val_loss = metrics.get('Loss', {}).get('Val', {}).get('last', float('inf'))
-    val_f1 = metrics.get('Summary', {}).get('val_f1', {}).get('last', 0.0)
-    val_acc = metrics.get('Summary', {}).get('val_accuracy', {}).get('last', 0.0)
-
-    row = {
-        'task_id': t.id,
-        'val_loss': round(val_loss, 4) if val_loss != float('inf') else val_loss,
-        'val_f1': round(val_f1, 4),
-        'val_acc': round(val_acc, 4),
-    }
-
-    # Default Optuna parameter names parsed back from ClearML storage format
-    for k in ['n_estimators', 'max_depth', 'learning_rate', 'subsample', 'colsample_bytree', 'reg_alpha', 'reg_lambda']:
-        val = params.get(f"General/{k}") or params.get(k)
-        row[k] = val
-
-    results.append(row)
-
-# Sort by lowest validation loss
-results.sort(key=lambda x: x['val_loss'])
-
-filepath = 'models/xgboost/sweep_results_all_40.csv'
-with open(filepath, 'w', newline='') as f:
-    fieldnames = ['task_id', 'val_loss', 'val_f1', 'val_acc', 'n_estimators', 'max_depth', 'learning_rate', 'subsample', 'colsample_bytree', 'reg_alpha', 'reg_lambda']
-    writer = csv.DictWriter(f, fieldnames=fieldnames, extrasaction='ignore')
-    writer.writeheader()
-    writer.writerows(results)
-
-print(f"Successfully grabbed {len(results)} trials and saved to {filepath}")
package-lock.json
CHANGED

@@ -54,7 +54,6 @@
       "integrity": "sha512-CGOfOJqWjg2qW/Mb6zNsDm+u5vFQ8DxXfbM09z69p5Z6+mE1ikP2jUXw+j42Pf1XTYED2Rni5f95npYeuwMDQA==",
       "dev": true,
       "license": "MIT",
-      "peer": true,
       "dependencies": {
         "@babel/code-frame": "^7.29.0",
         "@babel/generator": "^7.29.0",
@@ -1429,7 +1428,6 @@
       "integrity": "sha512-tORuanb01iEzWvMGVGv2ZDhYZVeRMrw453DCSAIn/5yvcSVnMoUMTyf33nQJLahYEnv9xqrTNbgz4qY5EfSh0g==",
      "dev": true,
       "license": "MIT",
-      "peer": true,
       "dependencies": {
         "csstype": "^3.2.2"
       }
@@ -1471,7 +1469,6 @@
       "integrity": "sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg==",
       "dev": true,
       "license": "MIT",
-      "peer": true,
       "bin": {
         "acorn": "bin/acorn"
       },
@@ -1577,7 +1574,6 @@
       }
     ],
     "license": "MIT",
-    "peer": true,
     "dependencies": {
       "baseline-browser-mapping": "^2.9.0",
       "caniuse-lite": "^1.0.30001759",
@@ -1799,7 +1795,6 @@
       "integrity": "sha512-LEyamqS7W5HB3ujJyvi0HQK/dtVINZvd5mAAp9eT5S/ujByGjiZLCzPcHVzuXbpJDJF/cxwHlfceVUDZ2lnSTw==",
       "dev": true,
       "license": "MIT",
-      "peer": true,
       "dependencies": {
         "@eslint-community/eslint-utils": "^4.8.0",
         "@eslint-community/regexpp": "^4.12.1",
@@ -2486,7 +2481,6 @@
       "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==",
       "dev": true,
       "license": "MIT",
-      "peer": true,
       "engines": {
         "node": ">=12"
       },
@@ -2548,7 +2542,6 @@
       "resolved": "https://registry.npmjs.org/react/-/react-19.2.4.tgz",
       "integrity": "sha512-9nfp2hYpCwOjAN+8TZFGhtWEwgvWHXqESH8qT89AT/lWklpLON22Lc8pEtnpsZz7VmawabSU0gCjnj8aC0euHQ==",
       "license": "MIT",
-      "peer": true,
       "engines": {
         "node": ">=0.10.0"
       }
@@ -2782,7 +2775,6 @@
       "integrity": "sha512-w+N7Hifpc3gRjZ63vYBXA56dvvRlNWRczTdmCBBa+CotUzAPf5b7YMdMR/8CQoeYE5LX3W4wj6RYTgonm1b9DA==",
       "dev": true,
       "license": "MIT",
-      "peer": true,
       "dependencies": {
         "esbuild": "^0.27.0",
         "fdir": "^6.5.0",
@@ -2904,7 +2896,6 @@
       "integrity": "sha512-rftlrkhHZOcjDwkGlnUtZZkvaPHCsDATp4pGpuOOMDaTdDDXF91wuVDJoWoPsKX/3YPQ5fHuF3STjcYyKr+Qhg==",
       "dev": true,
       "license": "MIT",
-      "peer": true,
       "funding": {
         "url": "https://github.com/sponsors/colinhacks"
       }
public/test_data.json
DELETED

@@ -1,112 +0,0 @@
-[
-  {
-    "id": 29,
-    "start_time": "2026-03-10T19:27:26.313545",
-    "end_time": "2026-03-10T19:27:59.454192",
-    "duration_seconds": 33,
-    "focus_score": 0.8848484848484849,
-    "total_frames": 165,
-    "focused_frames": 146,
-    "created_at": "2026-03-10 19:27:26"
-  },
-  {
-    "id": 27,
-    "start_time": "2026-03-10T15:09:42.264889",
-    "end_time": "2026-03-10T15:09:52.066905",
-    "duration_seconds": 9,
-    "focus_score": 0.575,
-    "total_frames": 120,
-    "focused_frames": 69,
-    "created_at": "2026-03-10 15:09:42"
-  },
-  {
-    "id": 20,
-    "start_time": "2026-03-10T12:10:08.838619",
-    "end_time": "2026-03-10T12:10:22.613099",
-    "duration_seconds": 13,
-    "focus_score": 0.3076923076923077,
-    "total_frames": 65,
-    "focused_frames": 20,
-    "created_at": "2026-03-10 12:10:08"
-  },
-  {
-    "id": 19,
-    "start_time": "2026-03-10T12:10:04.664410",
-    "end_time": "2026-03-10T12:21:08.528749",
-    "duration_seconds": 663,
-    "focus_score": 1,
-    "total_frames": 19,
-    "focused_frames": 19,
-    "created_at": "2026-03-10 12:10:04"
-  },
-  {
-    "id": 15,
-    "start_time": "2026-03-10T12:03:07.498863",
-    "end_time": "2026-03-10T12:03:19.454642",
-    "duration_seconds": 11,
-    "focus_score": 0.896551724137931,
-    "total_frames": 58,
-    "focused_frames": 52,
-    "created_at": "2026-03-10 12:03:07"
-  },
-  {
-    "id": 13,
-    "start_time": "2026-03-10T12:02:40.121044",
-    "end_time": "2026-03-10T12:03:03.700510",
-    "duration_seconds": 23,
-    "focus_score": 0.6923076923076923,
-    "total_frames": 117,
-    "focused_frames": 81,
-    "created_at": "2026-03-10 12:02:40"
-  },
-  {
-    "id": 12,
-    "start_time": "2026-03-10T12:02:36.741156",
-    "end_time": "2026-03-10T12:05:59.301680",
-    "duration_seconds": 202,
-    "focus_score": 0.13333333333333333,
-    "total_frames": 15,
-    "focused_frames": 2,
-    "created_at": "2026-03-10 12:02:36"
-  },
-  {
-    "id": 8,
-    "start_time": "2026-03-10T11:35:26.680264",
-    "end_time": "2026-03-10T11:36:07.574547",
-    "duration_seconds": 40,
-    "focus_score": 0.6225490196078431,
-    "total_frames": 204,
-    "focused_frames": 127,
-    "created_at": "2026-03-10 11:35:26"
-  },
-  {
-    "id": 7,
-    "start_time": "2026-03-10T11:35:16.587504",
-    "end_time": "2026-03-10T12:17:35.889266",
-    "duration_seconds": 2539,
-    "focus_score": 0.9387755102040817,
-    "total_frames": 49,
-    "focused_frames": 46,
-    "created_at": "2026-03-10 11:35:16"
-  },
-  {
-    "id": 4,
-    "start_time": "2026-03-10T11:16:31.204287",
-    "end_time": "2026-03-10T11:16:52.632759",
-    "duration_seconds": 21,
-    "focus_score": 0.75,
-    "total_frames": 104,
-    "focused_frames": 78,
-    "created_at": "2026-03-10 11:16:31"
-  },
-  {
-    "id": 2,
-    "start_time": "2026-03-10T11:15:37.543154",
-    "end_time": "2026-03-10T11:17:41.674593",
-    "duration_seconds": 124,
-    "focus_score": 0.9557522123893806,
-    "total_frames": 113,
-    "focused_frames": 108,
-    "created_at": "2026-03-10 11:15:37"
-  }
-]
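A consistency note on the deleted fixture: each record's `focus_score` is simply `focused_frames / total_frames`, which is easy to confirm:

```python
# focus_score in the deleted fixture is focused_frames / total_frames.
print(146 / 165)  # 0.8848484848484849 (record id 29)
print(69 / 120)   # 0.575              (record id 27)
```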
src/App.css
CHANGED

@@ -39,9 +39,9 @@ body {
   background-color: white;
   display: flex;
   align-items: center;
-  justify-content:
+  justify-content: flex-start;
   gap: 0;
-  padding: 0
+  padding: 0 16px 0 20px;
   box-sizing: border-box;
   box-shadow: 0 2px 5px rgba(0,0,0,0.05);
   position: fixed;
@@ -55,6 +55,16 @@
   white-space: nowrap;
 }

+.top-menu-links {
+  flex: 1;
+  display: flex;
+  align-items: center;
+  justify-content: center;
+  flex-wrap: wrap;
+  gap: 0;
+  min-width: 0;
+}
+
 .menu-btn {
   background: none;
   border: none;
@@ -175,7 +185,7 @@
 }

 .focus-display-shell {
-  background:
+  background: #101010;
 }

 .focus-flow-overlay {
@@ -459,29 +469,182 @@
   display: flex;
   align-items: center;
   justify-content: center;
+  flex-wrap: wrap;
+  gap: 8px;
+  padding: 10px 16px;
+  background: #fff;
+  border: 1px solid #e0e0e0;
+  border-radius: 12px;
+  margin: 10px auto;
+  max-width: 700px;
+  box-shadow: 0 2px 8px rgba(0,0,0,0.06);
+}
+
+/* --- Model info card --- */
+.model-card {
+  width: 60%;
+  margin: 14px auto 0;
+  background: #fff;
+  border: 1px solid #e0e0e0;
+  border-radius: 14px;
+  padding: 18px 22px 14px;
+  box-shadow: 0 2px 10px rgba(0,0,0,0.06);
+  animation: cardFadeIn 0.25s ease;
+  box-sizing: border-box;
+}
+
+.model-card-details {
+  display: grid;
+  grid-template-columns: repeat(3, 1fr);
+  gap: 12px;
+}
+
+@keyframes cardFadeIn {
+  from { opacity: 0; transform: translateY(4px); }
+  to { opacity: 1; transform: translateY(0); }
+}
+
+.model-card-header {
+  display: flex;
+  align-items: center;
+  gap: 10px;
+  margin-bottom: 4px;
+}
+
+.model-card-title {
+  margin: 0;
+  font-size: 1.05rem;
+  color: #1a1a2e;
+}
+
+.model-card-badge {
+  padding: 3px 10px;
+  border-radius: 999px;
+  background: #e7f3ff;
+  color: #007BFF;
+  font-size: 0.7rem;
+  font-weight: 800;
+  letter-spacing: 0.04em;
+  text-transform: uppercase;
+}
+
+.model-card-badge-baseline {
+  padding: 3px 10px;
+  border-radius: 999px;
+  background: #fff3e0;
+  color: #e67e22;
+  font-size: 0.7rem;
+  font-weight: 800;
+  letter-spacing: 0.04em;
+  text-transform: uppercase;
+}
+
+.model-card-tagline {
+  margin: 0 0 12px;
+  color: #667281;
+  font-size: 0.85rem;
+  line-height: 1.4;
+}
+
+.model-card-metrics {
+  display: grid;
+  grid-template-columns: repeat(4, 1fr);
   gap: 8px;
-
-
+  margin-bottom: 14px;
+}
+
+.model-card-metric {
+  text-align: center;
+  padding: 8px 4px;
+  background: #f8fbff;
+  border: 1px solid #e8f0fe;
+  border-radius: 10px;
+}
+
+.model-card-metric-value {
+  display: block;
+  font-size: 1.1rem;
+  font-weight: 800;
+  color: #007BFF;
+  line-height: 1.2;
+}
+
+.model-card-metric-label {
+  display: block;
+  font-size: 0.65rem;
+  color: #8899aa;
+  font-weight: 700;
+  text-transform: uppercase;
+  letter-spacing: 0.04em;
+  margin-top: 2px;
+}
+
+.model-card-section {
+  margin-bottom: 8px;
+}
+
+.model-card-section h4 {
+  margin: 0 0 2px;
+  font-size: 0.78rem;
+  color: #555;
+  font-weight: 800;
+  text-transform: uppercase;
+  letter-spacing: 0.03em;
+}
+
+.model-card-section p {
+  margin: 0;
+  font-size: 0.82rem;
+  color: #4a4a4a;
+  line-height: 1.5;
+}
+
+.model-card-eval {
+  margin-top: 10px;
+  padding: 6px 10px;
+  background: #f5f7fa;
   border-radius: 8px;
-
-
+  font-size: 0.72rem;
+  color: #7a8a9a;
+  font-weight: 600;
+}
+
+@media (max-width: 768px) {
+  .model-card {
+    width: 90%;
+  }
+  .model-card-metrics {
+    grid-template-columns: repeat(2, 1fr);
+  }
+  .model-card-details {
+    grid-template-columns: 1fr;
+  }
 }

 .focus-model-label {
-  color: #
+  color: #666;
   font-size: 13px;
+  font-weight: 700;
   margin-right: 4px;
 }

 .focus-model-button {
-  padding:
+  padding: 6px 16px;
   border-radius: 16px;
-  border: 1px solid #
-  background:
-  color: #
+  border: 1px solid #d0d0d0;
+  background: #f5f5f5;
+  color: #555;
   font-size: 12px;
   font-weight: 600;
   text-transform: uppercase;
+  cursor: pointer;
+  transition: all 0.2s;
+}
+
+.focus-model-button:hover {
+  border-color: #007BFF;
+  color: #007BFF;
+  background: #f0f7ff;
 }

 .focus-model-button.active {
@@ -490,6 +653,87 @@
   color: #fff;
 }

+.focus-model-sep {
+  width: 1px;
+  height: 24px;
+  background: #d0d0d0;
+  margin: 0 4px;
+}
+
+.eye-gaze-toggle {
+  display: inline-flex;
+  align-items: center;
+  gap: 6px;
+  padding: 6px 14px;
+  border-radius: 16px;
+  font-size: 12px;
+  font-weight: 700;
+  cursor: pointer;
+  transition: all 0.25s ease;
+}
+
+.eye-gaze-toggle.off {
+  border: 1px solid #d0d0d0;
+  background: #f5f5f5;
+  color: #888;
+}
+
+.eye-gaze-toggle.off:hover {
+  border-color: #007BFF;
+  color: #007BFF;
+  background: #f0f7ff;
+}
+
+.eye-gaze-toggle.on {
+  border: 2px solid #007BFF;
+  background: #007BFF;
+  color: #fff;
+  box-shadow: 0 2px 8px rgba(0, 123, 255, 0.25);
+}
+
+.eye-gaze-toggle.on:hover {
+  background: #0069d9;
+  border-color: #0069d9;
+}
+
+.eye-gaze-icon {
+  flex-shrink: 0;
+}
+
+.focus-model-button.recalibrate {
+  border: 1px solid #007BFF;
+  background: transparent;
+  color: #007BFF;
+  font-weight: 600;
+  font-size: 11px;
+}
+
+.focus-model-button.recalibrate:hover {
+  background: #f0f7ff;
+}
+
+.focus-system-stats {
+  display: flex;
+  align-items: center;
+  justify-content: center;
+  gap: 12px;
+  padding: 4px 16px;
+  margin: 4px auto;
+  max-width: 400px;
+  font-size: 12px;
+  color: #888;
+}
+
+.focus-system-stats strong {
+  color: #555;
+}
+
+.focus-system-stats-sep {
+  width: 1px;
+  height: 12px;
+  background: #ccc;
+}
+
 #display-area video {
   width: 100%;
   height: 100%;
@@ -546,8 +790,8 @@
 }

 .action-btn.green { background-color: #28a745; }
-.action-btn.
-.action-btn.
+.action-btn.blue { background-color: #007BFF; }
+.action-btn.orange { background-color: #e67e22; }
 .action-btn.red { background-color: #dc3545; }

 /* 4. Frame Control */
@@ -1548,14 +1792,163 @@ border: 2px solid transparent;

 .avatar-circle.user { background-color: #555; }
 .avatar-circle.admin { background-color: #ffaa00; border-color: #fff; box-shadow: 0 0 10px rgba(255, 170, 0, 0.5); }
+
+/* ================ CALIBRATION OVERLAY ================ */
+
+.cal-overlay {
+  position: fixed;
+  top: 0;
+  left: 0;
+  width: 100vw;
+  height: 100vh;
+  background: rgba(8, 15, 28, 0.94);
+  backdrop-filter: blur(6px);
+  z-index: 10000;
+  display: flex;
+  align-items: center;
+  justify-content: center;
+  font-family: 'Nunito', sans-serif;
+}
+
+/* ---- header / instructions ---- */
+.cal-header {
+  position: absolute;
+  top: 36px;
+  left: 50%;
+  transform: translateX(-50%);
+  text-align: center;
+  pointer-events: none;
+}
+
+.cal-eyebrow {
+  display: inline-block;
+  padding: 6px 14px;
+  border-radius: 999px;
+  font-size: 0.82rem;
+  font-weight: 800;
+  letter-spacing: 0.04em;
+  text-transform: uppercase;
+}
+
+.cal-eyebrow-collect {
+  background: rgba(40, 167, 69, 0.18);
+  color: #5ee882;
+}
+
+.cal-eyebrow-verify {
+  background: rgba(0, 123, 255, 0.18);
+  color: #6bb8ff;
+}
+
+.cal-instruction {
+  margin: 10px 0 0;
+  color: rgba(255, 255, 255, 0.7);
+  font-size: 0.95rem;
+  line-height: 1.5;
+}
+
+/* ---- target dot + ring ---- */
+.cal-target {
+  position: absolute;
+  transform: translate(-50%, -50%);
+}
+
+.cal-ring {
+  position: absolute;
+  left: -30px;
+  top: -30px;
+}
+
+.cal-dot {
+  width: 20px;
+  height: 20px;
+  border-radius: 50%;
+  transition: box-shadow 0.3s ease;
+}
+
+/* ---- cancel button (matches focus-flow-secondary) ---- */
+.cal-cancel {
+  position: absolute;
+  bottom: 40px;
+  left: 50%;
+  transform: translateX(-50%);
+  border: 1px solid rgba(255, 255, 255, 0.25);
+  border-radius: 999px;
+  padding: 12px 28px;
+  background: rgba(255, 255, 255, 0.08);
+  color: rgba(255, 255, 255, 0.85);
+  font-family: 'Nunito', sans-serif;
+  font-size: 0.95rem;
+  font-weight: 700;
+  cursor: pointer;
+  transition: background 0.2s ease, border-color 0.2s ease;
+}
+
+.cal-cancel:hover {
+  background: rgba(255, 255, 255, 0.14);
+  border-color: rgba(255, 255, 255, 0.4);
+}
+
+/* ---- done card (matches focus-flow-card style) ---- */
+.cal-done-card {
+  text-align: center;
+  padding: 36px 44px;
+  border-radius: 20px;
+  border: 1px solid rgba(255, 255, 255, 0.08);
+  box-shadow: 0 28px 80px rgba(0, 0, 0, 0.4);
+  animation: fadeIn 0.4s ease;
+}
+
+.cal-done-success {
+  background: linear-gradient(168deg, rgba(40, 167, 69, 0.15) 0%, rgba(20, 30, 48, 0.95) 60%);
+  border-color: rgba(40, 167, 69, 0.3);
+}
+
+.cal-done-fail {
+  background: linear-gradient(168deg, rgba(220, 53, 69, 0.15) 0%, rgba(20, 30, 48, 0.95) 60%);
+  border-color: rgba(220, 53, 69, 0.3);
+}
+
+.cal-done-eyebrow {
+  display: inline-block;
+  padding: 6px 14px;
+  border-radius: 999px;
+  font-size: 0.78rem;
+  font-weight: 800;
+  letter-spacing: 0.06em;
+  text-transform: uppercase;
+  margin-bottom: 14px;
+}
+
+.cal-done-success .cal-done-eyebrow {
+  background: rgba(40, 167, 69, 0.2);
+  color: #5ee882;
+}
+
+.cal-done-fail .cal-done-eyebrow {
+  background: rgba(220, 53, 69, 0.2);
+  color: #f87171;
+}
+
+.cal-done-title {
+  margin: 0 0 8px;
+  font-size: 1.6rem;
+  color: #fff;
+}
+
+.cal-done-subtitle {
+  margin: 0;
+  color: rgba(255, 255, 255, 0.6);
+  font-size: 0.95rem;
+  line-height: 1.5;
+}
 /* ================= Home page 2x2 responsive button grid ================= */
 .home-button-grid {
-  display:
-
-  gap: 20px; /* Spacing between buttons. */
+  display: flex;
+  justify-content: center;
   width: 100%;
-  max-width:
-  margin: 40px auto 0
+  max-width: 360px;
+  margin: 40px auto 0;
 }

 .home-button-grid .btn-main {
src/App.jsx
CHANGED

@@ -14,12 +14,8 @@ function App() {
   const videoManagerRef = useRef(null);
   const [isSessionActive, setIsSessionActive] = useState(false);
   const [sessionResult, setSessionResult] = useState(null);
-  const [role, setRole] = useState('user');

-  // Clear persisted history on the initial page load without showing a prompt.
   useEffect(() => {
-    fetch('/api/history', { method: 'DELETE' }).catch(err => console.error(err));
-
     const callbacks = {
       onSessionStart: () => {
         setIsSessionActive(true);
@@ -37,54 +33,52 @@
     };
   }, []);

-
-
-
-
+  const renderMenuButton = (tabId, label) => (
+    <button
+      className={`menu-btn ${activeTab === tabId ? 'active' : ''}`}
+      onClick={() => setActiveTab(tabId)}
+    >
+      {label}
+    </button>
+  );

   return (
     <div className="app-container">
       <nav id="top-menu">
-        <div className="
-
-
-
-
-
+        <div className="top-menu-links">
+          <button
+            type="button"
+            className={`menu-btn ${activeTab === 'home' ? 'active' : ''}`}
+            onClick={() => setActiveTab('home')}
+          >
+            Home
+          </button>
+          <div className="separator" aria-hidden />
         <button className={`menu-btn ${activeTab === 'focus' ? 'active' : ''}`} onClick={() => setActiveTab('focus')}>
           Start Focus {isSessionActive && <span style={{ marginLeft: '8px', color: '#00FF00' }}>●</span>}
         </button>
         <div className="separator"></div>

-
-          My Achievement
-        </button>
+          {renderMenuButton('achievement', 'My Achievement')}
         <div className="separator"></div>

-
-          My Records
-        </button>
+          {renderMenuButton('records', 'My Records')}
         <div className="separator"></div>

-
-          Customise
-        </button>
+          {renderMenuButton('customise', 'Customise')}
         <div className="separator"></div>

-
-
-        </button>
+          {renderMenuButton('help', 'Help')}
+        </div>
       </nav>

-      {
-
-
+      {activeTab === 'home' && <Home setActiveTab={setActiveTab} />}
+
       <FocusPageLocal
         videoManager={videoManagerRef.current}
         sessionResult={sessionResult}
         setSessionResult={setSessionResult}
         isActive={activeTab === 'focus'}
-        role={role}
       />
       {activeTab === 'achievement' && <Achievement />}
       {activeTab === 'records' && <Records />}
src/components/CalibrationOverlay.jsx
CHANGED

@@ -1,7 +1,8 @@
 import React, { useState, useEffect, useRef, useCallback } from 'react';

 const COLLECT_MS = 2000;
-const CENTER_MS = 3000;
+const CENTER_MS = 3000;
+const VERIFY_MS = 3000;

 function CalibrationOverlay({ calibration, videoManager }) {
   const [progress, setProgress] = useState(0);
@@ -42,7 +43,7 @@ function CalibrationOverlay({ calibration, videoManager }) {
     }

     startRef.current = performance.now();
-    const duration = calibration.index === 0 ? CENTER_MS : COLLECT_MS;
+    const duration = calibration.verifying ? VERIFY_MS : (calibration.index === 0 ? CENTER_MS : COLLECT_MS);

     const tick = () => {
       const pct = Math.min((performance.now() - startRef.current) / duration, 1);
@@ -67,14 +68,18 @@ function CalibrationOverlay({ calibration, videoManager }) {
   if (!calibration || !calibration.active) return null;

   if (calibration.done) {
+    const success = calibration.success;
     return (
-      <div ref={overlayRef}
-        <div
-          <
-            {
+      <div ref={overlayRef} className="cal-overlay">
+        <div className={`cal-done-card ${success ? 'cal-done-success' : 'cal-done-fail'}`}>
+          <div className="cal-done-eyebrow">
+            {success ? 'Complete' : 'Failed'}
+          </div>
+          <h2 className="cal-done-title">
+            {success ? 'Calibration Complete' : 'Calibration Failed'}
           </h2>
-          <p
-            {
+          <p className="cal-done-subtitle">
+            {success
               ? 'Gaze tracking is now active.'
               : 'Not enough samples collected. Try again.'}
           </p>
@@ -84,63 +89,60 @@ function CalibrationOverlay({ calibration, videoManager }) {
   }

   const [tx, ty] = calibration.target || [0.5, 0.5];
+  const isVerifying = calibration.verifying;
+  const accent = isVerifying ? '#007BFF' : '#28a745';
+  const glow = isVerifying ? 'rgba(0, 123, 255, 0.6)' : 'rgba(40, 167, 69, 0.6)';

   return (
-    <div ref={overlayRef}
-      <div
+    <div ref={overlayRef} className="cal-overlay">
+      <div className="cal-header">
+        {isVerifying ? (
+          <>
+            <span className="cal-eyebrow cal-eyebrow-verify">Verification</span>
+            <p className="cal-instruction">
+              Look at the dot to confirm calibration accuracy
+            </p>
+          </>
+        ) : (
+          <>
+            <span className="cal-eyebrow cal-eyebrow-collect">
+              Point {calibration.index + 1} of {calibration.numPoints}
+            </span>
+            <p className="cal-instruction">
+              {calibration.index === 0
+                ? 'Look at the center dot \u2014 this sets your baseline'
+                : 'Hold your gaze steady on the target'}
+            </p>
+          </>
+        )}
       </div>

-      <div
+      <div
+        className="cal-target"
+        style={{ left: `${tx * 100}%`, top: `${ty * 100}%` }}
+      >
-        <svg width="60" height="60"
-          <circle cx="30" cy="30" r="24" fill="none" stroke="rgba(255,255,255,0.
-          <circle
+        <svg width="60" height="60" className="cal-ring">
+          <circle cx="30" cy="30" r="24" fill="none" stroke="rgba(255,255,255,0.12)" strokeWidth="3" />
+          <circle
+            cx="30" cy="30" r="24" fill="none" stroke={accent} strokeWidth="3"
             strokeDasharray={`${progress * 150.8} 150.8`} strokeLinecap="round"
-            transform="rotate(-90, 30, 30)"
+            transform="rotate(-90, 30, 30)"
+          />
         </svg>
-        <div
+        <div
+          className="cal-dot"
+          style={{
+            background: `radial-gradient(circle, #fff 30%, ${accent} 100%)`,
+            boxShadow: `0 0 24px ${glow}`,
+          }}
+        />
       </div>

-      <button onClick={handleCancel} style={{
-        position: 'absolute', bottom: '40px', left: '50%', transform: 'translateX(-50%)',
-        padding: '10px 28px', background: 'rgba(255,255,255,0.1)',
-        border: '1px solid rgba(255,255,255,0.3)', color: '#fff',
-        borderRadius: '20px', cursor: 'pointer', fontSize: '14px',
-      }}>
+      <button onClick={handleCancel} className="cal-cancel">
         Cancel Calibration
       </button>
     </div>
   );
 }

-const overlayStyle = {
-  position: 'fixed', top: 0, left: 0, width: '100vw', height: '100vh',
-  background: 'rgba(0, 0, 0, 0.92)', zIndex: 10000,
-  display: 'flex', alignItems: 'center', justifyContent: 'center',
-};
-
-const messageBoxStyle = {
-  textAlign: 'center', padding: '30px 40px',
-  background: 'rgba(30, 30, 50, 0.9)', borderRadius: '16px',
-  border: '1px solid rgba(255,255,255,0.1)',
-};
-
 export default CalibrationOverlay;
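One magic number in the progress ring is worth decoding: `strokeDasharray` uses 150.8 because that is the circumference of the r = 24 SVG circle, so a dash of `progress * 150.8` fills exactly that fraction of the ring:

```python
import math

# Circumference of the r = 24 progress ring used in strokeDasharray.
print(2 * math.pi * 24)  # 150.796..., rounded to 150.8 in the component
```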
src/components/Customise.jsx
CHANGED

@@ -1,51 +1,9 @@
-import React, {
+import React, { useRef } from 'react';

 function Customise() {
-  const [sensitivity, setSensitivity] = useState(6);
-  const [frameRate, setFrameRate] = useState(30);
-  const [notificationsEnabled, setNotificationsEnabled] = useState(true);
-  const [threshold, setThreshold] = useState(30);
-
   // Reference to the hidden import input.
   const fileInputRef = useRef(null);

-  // 1. Load persisted settings.
-  useEffect(() => {
-    fetch('/api/settings')
-      .then(res => res.json())
-      .then(data => {
-        if (data) {
-          if (data.sensitivity) setSensitivity(data.sensitivity);
-          if (data.frame_rate) setFrameRate(data.frame_rate);
-          if (data.notification_threshold) setThreshold(data.notification_threshold);
-          if (data.notification_enabled !== undefined) setNotificationsEnabled(data.notification_enabled);
-        }
-      })
-      .catch(err => console.error("Failed to load settings", err));
-  }, []);
-
-  // 2. Save settings.
-  const handleSave = async () => {
-    const settings = {
-      sensitivity: parseInt(sensitivity),
-      frame_rate: parseInt(frameRate),
-      notification_enabled: notificationsEnabled,
-      notification_threshold: parseInt(threshold)
-    };
-
-    try {
-      const response = await fetch('/api/settings', {
-        method: 'PUT',
-        headers: { 'Content-Type': 'application/json' },
-        body: JSON.stringify(settings)
-      });
-      if (response.ok) alert("Settings saved successfully!");
-      else alert("Failed to save settings.");
-    } catch (error) {
-      alert("Error saving settings: " + error.message);
-    }
-  };
-
   // 3. Export data.
   const handleExport = async () => {
     try {
@@ -148,41 +106,6 @@ function Customise() {
       <h1 className="page-title">Customise</h1>

       <div className="settings-container">
-        {/* Detection Settings */}
-        <div className="setting-group">
-          <h2>Detection Settings</h2>
-          <div className="setting-item">
-            <label htmlFor="sensitivity-slider">Detection Sensitivity</label>
-            <div className="slider-group">
-              <input type="range" id="sensitivity-slider" min="1" max="10" value={sensitivity} onChange={(e) => setSensitivity(e.target.value)} />
-              <span id="sensitivity-value">{sensitivity}</span>
-            </div>
-            <p className="setting-description">Higher values require stricter focus criteria</p>
-          </div>
-          <div className="setting-item">
-            <label htmlFor="default-framerate">Default Frame Rate</label>
-            <div className="slider-group">
-              <input type="range" id="default-framerate" min="5" max="60" value={frameRate} onChange={(e) => setFrameRate(e.target.value)} />
-              <span id="framerate-value">{frameRate}</span> FPS
-            </div>
-          </div>
-        </div>
-
-        {/* Notifications */}
-        <div className="setting-group">
-          <h2>Notifications</h2>
-          <div className="setting-item">
-            <label>
-              <input type="checkbox" id="enable-notifications" checked={notificationsEnabled} onChange={(e) => setNotificationsEnabled(e.target.checked)} />
-              Enable distraction notifications
-            </label>
-          </div>
-          <div className="setting-item">
-            <label htmlFor="notification-threshold">Alert after (seconds)</label>
-            <input type="number" id="notification-threshold" value={threshold} onChange={(e) => setThreshold(e.target.value)} min="5" max="300" />
-          </div>
-        </div>
-
         {/* Data Management */}
         <div className="setting-group">
           <h2>Data Management</h2>
@@ -214,7 +137,6 @@ function Customise() {
         </div>
       </div>

-      <button id="save-settings" className="btn-main" onClick={handleSave}>Save Settings</button>
       </div>
     </main>
   );
src/components/FocusPage.jsx
DELETED

@@ -1,264 +0,0 @@
-import React, { useState, useEffect } from 'react';
-
-function FocusPage({ videoManager, sessionResult, setSessionResult, isActive, displayVideoRef }) {
-  const [currentFrame, setCurrentFrame] = useState(30);
-  const [timelineEvents, setTimelineEvents] = useState([]);
-
-  const videoRef = displayVideoRef;
-
-  // Helper for formatting a duration in seconds.
-  const formatDuration = (seconds) => {
-    // Show a compact zero state instead of "0m 0s".
-    if (seconds === 0) return "0s";
-
-    const mins = Math.floor(seconds / 60);
-    const secs = Math.floor(seconds % 60);
-    return `${mins}m ${secs}s`;
-  };
-
-  useEffect(() => {
-    if (!videoManager) return;
-
-    // Override the status callback so the timeline updates live.
-    const originalOnStatusUpdate = videoManager.callbacks.onStatusUpdate;
-    videoManager.callbacks.onStatusUpdate = (isFocused) => {
-      setTimelineEvents(prev => {
-        const newEvents = [...prev, { isFocused, timestamp: Date.now() }];
-        if (newEvents.length > 60) newEvents.shift();
-        return newEvents;
-      });
-      // Preserve the original callback if one was already registered.
-      if (originalOnStatusUpdate) originalOnStatusUpdate(isFocused);
-    };
-
-    // Cleanup only restores callbacks and does not force-stop the session.
-    return () => {
-      if (videoManager) {
-        videoManager.callbacks.onStatusUpdate = originalOnStatusUpdate;
-      }
-    };
-  }, [videoManager]);
-
-  const handleStart = async () => {
-    try {
-      if (videoManager) {
-        setSessionResult(null); // Clear any previous summary overlay before starting.
-        setTimelineEvents([]);
-
-        console.log('🎬 Initializing camera...');
-        await videoManager.initCamera(videoRef.current);
-        console.log('✅ Camera initialized');
-
-        console.log('🚀 Starting streaming...');
-        await videoManager.startStreaming();
-        console.log('✅ Streaming started successfully');
-      }
-    } catch (err) {
-      console.error('❌ Start error:', err);
-      let errorMessage = "Failed to start: ";
-
-      if (err.name === 'NotAllowedError') {
-        errorMessage += "Camera permission denied. Please allow camera access.";
-      } else if (err.name === 'NotFoundError') {
-        errorMessage += "No camera found. Please connect a camera.";
-      } else if (err.name === 'NotReadableError') {
-        errorMessage += "Camera is already in use by another application.";
-      } else if (err.message && err.message.includes('HTTPS')) {
-        errorMessage += "Camera requires HTTPS. Please use a secure connection.";
-      } else {
-        errorMessage += err.message || "Unknown error occurred.";
-      }
-
-      alert(errorMessage + "\n\nCheck browser console for details.");
-    }
-  };
-
-  const handleStop = () => {
-    if (videoManager) {
-      videoManager.stopStreaming();
-    }
-  };
-
-  const handlePiP = async () => {
-    try {
-      const sourceVideoEl = videoRef.current;
-      if (!sourceVideoEl) {
-        alert('Video not ready. Please click Start first.');
-        return;
-      }
-
-      if (document.pictureInPictureElement) {
-        await document.exitPictureInPicture();
-        return;
-      }
-
-      sourceVideoEl.disablePictureInPicture = false;
-
-      if (typeof sourceVideoEl.webkitSetPresentationMode === 'function') {
-        sourceVideoEl.play().catch(() => {});
-        sourceVideoEl.webkitSetPresentationMode('picture-in-picture');
-        return;
-      }
-
-      if (!document.pictureInPictureEnabled || typeof sourceVideoEl.requestPictureInPicture !== 'function') {
-        alert('Picture-in-Picture is not supported in this browser.');
-        return;
-      }
-
-      const pipPromise = sourceVideoEl.requestPictureInPicture();
-      sourceVideoEl.play().catch(() => {});
-      await pipPromise;
-    } catch (err) {
-      console.error('PiP error:', err);
-      alert('Failed to enter Picture-in-Picture.');
-    }
-  };
-
-  // Floating window helper.
-  const handleFloatingWindow = () => {
-    handlePiP();
-  };
-
-  // ==========================================
-  // Preview button handler
-  // ==========================================
-  const handlePreview = () => {
-    // Inject placeholder data so the overlay can be previewed on demand.
-    setSessionResult({
-      duration_seconds: 0,
-      focus_score: 0
-    });
-  };
-
-  const handleCloseOverlay = () => {
-    setSessionResult(null);
-  };
-  // ==========================================
-
-  const handleFrameChange = (val) => {
-    setCurrentFrame(val);
-    if (videoManager) {
-      videoManager.setFrameRate(val);
-    }
-  };
-
-  const pageStyle = isActive
-    ? undefined
-    : {
-        position: 'absolute',
-        width: '1px',
-        height: '1px',
-        overflow: 'hidden',
-        opacity: 0,
-        pointerEvents: 'none'
-      };
-
-  return (
-    <main id="page-b" className="page" style={pageStyle}>
-      {/* 1. Camera / display area */}
-      <section id="display-area" style={{ position: 'relative', overflow: 'hidden' }}>
-        <video
-          ref={videoRef}
-          muted
-          playsInline
-          autoPlay
-          style={{ width: '100%', height: '100%', objectFit: 'contain' }}
-        />
-
-        {/* Session result overlay */}
-        {sessionResult && (
-          <div className="session-result-overlay">
-            <h3>Session Complete!</h3>
-            <div className="result-item">
-              <span className="label">Duration:</span>
-              <span className="value">{formatDuration(sessionResult.duration_seconds)}</span>
-            </div>
-            <div className="result-item">
-              <span className="label">Focus Score:</span>
-              <span className="value">{(sessionResult.focus_score * 100).toFixed(1)}%</span>
-            </div>
-
-            {/* Add a lightweight close button for preview mode. */}
-            <button
-              onClick={handleCloseOverlay}
-              style={{
-                marginTop: '20px',
-                padding: '8px 20px',
-                background: 'transparent',
-                border: '1px solid white',
-                color: 'white',
-                borderRadius: '20px',
-                cursor: 'pointer'
-              }}
-            >
-              Close
-            </button>
-          </div>
-        )}
-
-      </section>
-
-      {/* 2. Timeline area */}
-      <section id="timeline-area">
-        <div className="timeline-label">Timeline</div>
-        <div id="timeline-visuals">
-          {timelineEvents.map((event, index) => (
-            <div
-              key={index}
-              className="timeline-block"
-              style={{
-                backgroundColor: event.isFocused ? '#00FF00' : '#FF0000',
-                width: '10px',
-                height: '20px',
-                display: 'inline-block',
-                marginRight: '2px',
-                borderRadius: '2px'
-              }}
-              title={event.isFocused ? 'Focused' : 'Distracted'}
-            />
-          ))}
-        </div>
-        <div id="timeline-line"></div>
-      </section>
-
-      {/* 3. Control buttons */}
-      <section id="control-panel">
-        <button id="btn-cam-start" className="action-btn green" onClick={handleStart}>Start</button>
-        <button id="btn-floating" className="action-btn yellow" onClick={handleFloatingWindow}>Floating Window</button>
-
-        {/* Temporarily repurpose the Models button as a preview action. */}
-        <button
-          id="btn-preview"
-          className="action-btn"
-          style={{ backgroundColor: '#6c5ce7' }} // Use purple so the preview action stands out.
-          onClick={handlePreview}
-        >
-          Preview Result
-        </button>
-
-        <button id="btn-cam-stop" className="action-btn red" onClick={handleStop}>Stop</button>
-      </section>
-
-      {/* 4. Frame control */}
-      <section id="frame-control">
-        <label htmlFor="frame-slider">Frame</label>
-        <input
-          type="range"
-          id="frame-slider"
-          min="1"
-          max="60"
-          value={currentFrame}
-          onChange={(e) => handleFrameChange(e.target.value)}
-        />
-        <input
-          type="number"
-          id="frame-input"
-          value={currentFrame}
-          onChange={(e) => handleFrameChange(e.target.value)}
-        />
-      </section>
-    </main>
-  );
-}
-
-export default FocusPage;
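One detail of the deleted component worth keeping in mind: its `handlePiP` cascade handles Safari, which predates the standard Picture-in-Picture API and exposes the non-standard `webkitSetPresentationMode`, before falling back to `requestPictureInPicture` behind a feature check. The same logic, distilled into a standalone helper (a sketch, not project code):

```js
// Toggle Picture-in-Picture for a <video> element, covering both the
// WebKit (Safari) API and the standard API used by Chromium and Firefox.
async function togglePiP(video) {
  // Already in PiP: exit and stop.
  if (document.pictureInPictureElement) {
    await document.exitPictureInPicture();
    return;
  }
  video.disablePictureInPicture = false;
  // Safari: non-standard presentation-mode API.
  if (typeof video.webkitSetPresentationMode === 'function') {
    video.play().catch(() => {});
    video.webkitSetPresentationMode('picture-in-picture');
    return;
  }
  // Standard API: feature-detect before calling.
  if (!document.pictureInPictureEnabled || typeof video.requestPictureInPicture !== 'function') {
    throw new Error('Picture-in-Picture is not supported in this browser.');
  }
  const pip = video.requestPictureInPicture();
  video.play().catch(() => {});
  await pip;
}
```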
src/components/FocusPageLocal.jsx
CHANGED

@@ -37,7 +37,64 @@ function CameraIcon() {
   );
 }
 
-function FocusPageLocal({ videoManager, sessionResult, setSessionResult, isActiv…
+const MODEL_ORDER = ['hybrid', 'xgboost', 'mlp', 'geometric'];
+
+const MODEL_INFO = {
+  hybrid: {
+    label: 'Hybrid',
+    tagline: 'Best overall — combines ML with geometric scoring',
+    how: 'Fuses XGBoost predictions (30%) with geometric face/eye scores (70%). A logistic regression meta-classifier combines both signals for the final decision.',
+    accuracy: '84.3%',
+    f1: '0.864',
+    auc: '0.880',
+    threshold: '0.46',
+    evaluation: 'Leave-One-Person-Out (9 participants, 144K frames)',
+    features: '10 features: head deviation, face score, eye scores (EAR), gaze offset, pitch, horizontal gaze, PERCLOS',
+    strengths: 'Most robust across different people. Geometric scoring generalises well; ML catches subtle patterns.',
+    badge: 'Recommended',
+  },
+  xgboost: {
+    label: 'XGBoost',
+    tagline: 'Highest raw accuracy — gradient-boosted decision trees',
+    how: 'Ensemble of 600 decision trees (max depth 8). Each tree learns to correct errors from previous trees. Outputs probability of focused state.',
+    accuracy: '84.3%',
+    f1: '0.859',
+    auc: '0.880',
+    threshold: '0.38',
+    evaluation: 'Leave-One-Person-Out (9 participants, 144K frames)',
+    features: '10 features: head deviation, face score, eye scores (EAR), gaze offset, pitch, horizontal gaze, PERCLOS',
+    strengths: 'Strong pattern recognition. Handles non-linear feature interactions. 95.9% accuracy on random split (but LOPO is the fairer test).',
+    badge: null,
+  },
+  mlp: {
+    label: 'MLP',
+    tagline: 'Lightweight neural network — fast and efficient',
+    how: 'Two-layer neural network (64→32 neurons). Takes 10 face features, applies learned weights, outputs focused/unfocused probability via softmax.',
+    accuracy: '82.7%',
+    f1: '0.858',
+    auc: '0.862',
+    threshold: '0.23',
+    evaluation: 'Leave-One-Person-Out (9 participants, 144K frames)',
+    features: '10 features: head deviation, face score, eye scores (EAR), gaze offset, pitch, horizontal gaze, PERCLOS',
+    strengths: 'Fastest inference. Smallest model size. Good baseline. 92.9% accuracy on random split.',
+    badge: null,
+  },
+  geometric: {
+    label: 'Geometric',
+    tagline: 'Baseline only — hardcoded thresholds, no learning',
+    how: 'Uses fixed thresholds on head orientation (70%) and eye openness (30%). No training — just hand-tuned rules on 478 face landmarks. Cannot adapt to new faces or environments.',
+    accuracy: '~77%',
+    f1: '0.772',
+    auc: 'N/A',
+    threshold: '0.55',
+    evaluation: 'Leave-One-Person-Out geometric sweep',
+    features: 'Head yaw/pitch/roll angles, eye aspect ratio (EAR), iris gaze offset, mouth aspect ratio (MAR)',
+    strengths: 'No model files needed. Useful as a fallback. This is the baseline that motivated building the ML models — its fixed thresholds struggle with different face shapes, lighting, and camera angles.',
+    badge: 'Baseline',
+  },
+};
+
+function FocusPageLocal({ videoManager, sessionResult, setSessionResult, isActive }) {
   const [currentFrame, setCurrentFrame] = useState(15);
   const [timelineEvents, setTimelineEvents] = useState([]);
   const [stats, setStats] = useState(null);

@@ -131,6 +188,8 @@ function FocusPageLocal({ videoManager, sessionResult, setSessionResult, isActiv
       setFocusState(FOCUS_STATES.pending);
       setCameraReady(false);
       if (originalOnSessionEnd) originalOnSessionEnd(summary);
+    };
+
     videoManager.callbacks.onCalibrationUpdate = (cal) => {
       setCalibration(cal && cal.active ? { ...cal } : null);
     };

@@ -144,6 +203,7 @@ function FocusPageLocal({ videoManager, sessionResult, setSessionResult, isActiv
     return () => {
       if (videoManager) {
         videoManager.callbacks.onStatusUpdate = originalOnStatusUpdate;
+        videoManager.callbacks.onSessionEnd = originalOnSessionEnd;
         videoManager.callbacks.onCalibrationUpdate = null;
       }
       clearInterval(statsInterval);

@@ -156,7 +216,16 @@ function FocusPageLocal({ videoManager, sessionResult, setSessionResult, isActiv
       .then((res) => res.json())
       .then((data) => {
        if (data.available) setAvailableModels(data.available);
-        if (data.current) setCurrentModel(data.current);
+        if (data.current) {
+          // If L2CS was the active model, switch to a base model + enable boost
+          if (data.current === 'l2cs') {
+            const fallback = data.available.find((m) => m !== 'l2cs') || 'mlp';
+            setCurrentModel(fallback);
+            handleModelChange(fallback);
+          } else {
+            setCurrentModel(data.current);
+          }
+        }
        if (data.l2cs_boost !== undefined) setL2csBoost(data.l2cs_boost);
        if (data.l2cs_boost_available !== undefined) setL2csBoostAvailable(data.l2cs_boost_available);
       })

@@ -214,8 +283,6 @@ function FocusPageLocal({ videoManager, sessionResult, setSessionResult, isActiv
       const result = await res.json();
       if (result.updated) {
         setCurrentModel(modelName);
-        setL2csBoostAvailable(modelName !== 'l2cs' && availableModels.includes('l2cs'));
-        if (modelName === 'l2cs') setL2csBoost(false);
       }
     } catch (err) {
       console.error('Failed to switch model:', err);

@@ -238,7 +305,7 @@ function FocusPageLocal({ videoManager, sessionResult, setSessionResult, isActiv
     }
   };
 
-  const handleBoostToggle = async () => {
+  const handleEyeGazeToggle = async () => {
     const next = !l2csBoost;
     try {
       const res = await fetch('/api/settings', {

@@ -246,9 +313,18 @@ function FocusPageLocal({ videoManager, sessionResult, setSessionResult, isActiv
         headers: { 'Content-Type': 'application/json' },
         body: JSON.stringify({ l2cs_boost: next })
       });
-      if (res.ok) …
+      if (!res.ok) return;
+      setL2csBoost(next);
+
+      if (next && videoManager && videoManager.isStreaming) {
+        // Turning ON → auto-start calibration
+        videoManager.startCalibration();
+      } else if (!next && videoManager) {
+        // Turning OFF → cancel any active calibration
+        videoManager.cancelCalibration();
+      }
     } catch (err) {
-      console.error('Failed to toggle …
+      console.error('Failed to toggle eye gaze:', err);
     }
   };
 

@@ -689,120 +765,61 @@ function FocusPageLocal({ videoManager, sessionResult, setSessionResult, isActiv
           </div>
         )}
 
-        {role === 'admin' && stats && stats.isStreaming ? (
-          <div className="focus-debug-panel">
-            <div>Session: {stats.sessionId}</div>
-            <div>Sent: {stats.framesSent}</div>
-            <div>Processed: {stats.framesProcessed}</div>
-            <div>Latency: {stats.avgLatency.toFixed(0)}ms</div>
-            <div>Status: {stats.currentStatus ? 'Focused' : 'Not Focused'}</div>
-            <div>Confidence: {(stats.lastConfidence * 100).toFixed(1)}%</div>
-            {systemStats && systemStats.cpu_percent != null && (
-              <div style={{ marginTop: '6px', borderTop: '1px solid #444', paddingTop: '4px' }}>
-                <div>CPU: {systemStats.cpu_percent}%</div>
-                <div>RAM: {systemStats.memory_percent}% ({systemStats.memory_used_mb}/{systemStats.memory_total_mb} MB)</div>
-              </div>
-            )}
-          </div>
-        ) : null}
       </section>
 
-      {/* Server CPU / Memory (always visible) */}
-      {systemStats && (systemStats.cpu_percent != null || systemStats.memory_percent != null) && (
-        <section style={{
-          display: 'flex',
-          alignItems: 'center',
-          justifyContent: 'center',
-          gap: '16px',
-          padding: '6px 12px',
-          background: 'rgba(0,0,0,0.3)',
-          borderRadius: '8px',
-          margin: '6px auto',
-          maxWidth: '400px',
-          fontSize: '13px',
-          color: '#aaa'
-        }}>
-          <span title="Server CPU">CPU: <strong style={{ color: '#8f8' }}>{systemStats.cpu_percent}%</strong></span>
-          <span title="Server memory">RAM: <strong style={{ color: '#8af' }}>{systemStats.memory_percent}%</strong> ({systemStats.memory_used_mb}/{systemStats.memory_total_mb} MB)</span>
-          <span style={{ color: '#aaa', fontSize: '13px', marginRight: '4px' }}>Model:</span>
-          {availableModels.map(name => (
-            <button
-              key={name}
-              onClick={() => handleModelChange(name)}
-              style={{
-                padding: '5px 14px',
-                borderRadius: '16px',
-                border: currentModel === name ? '2px solid #007BFF' : '1px solid #555',
-                background: currentModel === name ? '#007BFF' : 'transparent',
-                color: currentModel === name ? '#fff' : '#ccc',
-                fontSize: '12px',
-                fontWeight: currentModel === name ? 'bold' : 'normal',
-                cursor: 'pointer',
-                textTransform: 'uppercase',
-                transition: 'all 0.2s'
-              }}
-            >
-              {name}
-            </button>
-          ))}
-          {l2csBoostAvailable && currentModel !== 'l2cs' && (
-            <button
-              onClick={handleBoostToggle}
-              style={{
-                padding: '5px 14px',
-                borderRadius: '16px',
-                border: l2csBoost ? '2px solid #f59e0b' : '1px solid #555',
-                background: l2csBoost ? 'rgba(245, 158, 11, 0.15)' : 'transparent',
-                color: l2csBoost ? '#f59e0b' : '#888',
-                fontSize: '11px',
-                fontWeight: l2csBoost ? 'bold' : 'normal',
-                cursor: 'pointer',
-                transition: 'all 0.2s',
-                marginLeft: '4px',
-              }}
-            >
-              {l2csBoost ? 'GAZE ON' : 'GAZE'}
-            </button>
-          )}
-          {(currentModel === 'l2cs' || l2csBoost) && stats && stats.isStreaming && (
-            <button
-              onClick={() => videoManager && videoManager.startCalibration()}
-              style={{
-                padding: '5px 14px',
-                borderRadius: '16px',
-                border: '1px solid #4ade80',
-                background: 'transparent',
-                color: '#4ade80',
-                fontSize: '12px',
-                fontWeight: 'bold',
-                cursor: 'pointer',
-                transition: 'all 0.2s',
-                marginLeft: '4px',
-              }}
-            >
-              Calibrate
-            </button>
-          )}
-        </section>
-      )}
-
       {flowStep === FLOW_STEPS.ready ? (
         <>
+          {/* Model selector */}
          {availableModels.length > 0 ? (
            <section className="focus-model-strip">
              <span className="focus-model-label">Model:</span>
-              {availableModels.map((name) => (
+              {MODEL_ORDER.filter((n) => availableModels.includes(n)).map((name) => (
                <button
                  key={name}
                  onClick={() => handleModelChange(name)}
                  className={`focus-model-button ${currentModel === name ? 'active' : ''}`}
                >
-                  {name}
+                  {MODEL_INFO[name]?.label || name}
                </button>
              ))}
+
+              {l2csBoostAvailable && (
+                <>
+                  <span className="focus-model-sep" />
+                  <button
+                    onClick={handleEyeGazeToggle}
+                    className={`eye-gaze-toggle ${l2csBoost ? 'on' : 'off'}`}
+                    title={l2csBoost ? 'Eye gaze tracking active — click to disable' : 'Enable eye gaze tracking (requires calibration)'}
+                  >
+                    <svg width="16" height="16" viewBox="0 0 16 16" className="eye-gaze-icon" aria-hidden="true">
+                      <ellipse cx="8" cy="8" rx="7" ry="4.5" fill="none" stroke="currentColor" strokeWidth="1.4" />
+                      <circle cx="8" cy="8" r="2.2" fill="currentColor" />
+                    </svg>
+                    {l2csBoost ? 'Eye Gaze On' : 'Eye Gaze'}
+                  </button>
+                  {l2csBoost && stats && stats.isStreaming && (
+                    <button
+                      onClick={() => videoManager && videoManager.startCalibration()}
+                      className="focus-model-button recalibrate"
+                      title="Re-run gaze calibration"
+                    >
+                      Recalibrate
+                    </button>
+                  )}
+                </>
+              )}
            </section>
          ) : null}
 
+          {/* Server stats */}
+          {systemStats && systemStats.cpu_percent != null && (
+            <section className="focus-system-stats">
+              <span>CPU: <strong>{systemStats.cpu_percent}%</strong></span>
+              <span className="focus-system-stats-sep" />
+              <span>RAM: <strong>{systemStats.memory_percent}%</strong> ({systemStats.memory_used_mb}/{systemStats.memory_total_mb} MB)</span>
+            </section>
+          )}
+
          <section id="timeline-area">
            <div className="timeline-label">Timeline</div>
            <div id="timeline-visuals">

@@ -811,7 +828,7 @@ function FocusPageLocal({ videoManager, sessionResult, setSessionResult, isActiv
                  key={index}
                  className="timeline-block"
                  style={{
-                    backgroundColor: event.isFocused ? '#…
+                    backgroundColor: event.isFocused ? '#28a745' : '#dc3545',
                    width: '10px',
                    height: '20px',
                    display: 'inline-block',

@@ -830,19 +847,14 @@ function FocusPageLocal({ videoManager, sessionResult, setSessionResult, isActiv
              {isStarting ? 'Starting...' : 'Start'}
            </button>
 
-            <button id="btn-floating" className="action-btn …
+            <button id="btn-floating" className="action-btn blue" onClick={handlePiP}>
              Floating Window
            </button>
-
-            <button …
-              className="action-btn"
-              style={{ backgroundColor: '#ff7a52' }}
-              onClick={handlePreview}
-            >
+
+            <button id="btn-preview" className="action-btn orange" onClick={handlePreview}>
              Preview Result
            </button>
 
-
            <button id="btn-cam-stop" className="action-btn red" onClick={handleStop}>
              Stop
            </button>

@@ -852,6 +864,59 @@ function FocusPageLocal({ videoManager, sessionResult, setSessionResult, isActiv
            <div className="focus-inline-error focus-inline-error-standalone">{cameraError}</div>
          ) : null}
 
+          {/* Model info card — below action buttons */}
+          {MODEL_INFO[currentModel] && (
+            <section className="model-card">
+              <div className="model-card-header">
+                <h3 className="model-card-title">{MODEL_INFO[currentModel].label}</h3>
+                {MODEL_INFO[currentModel].badge && (
+                  <span className={MODEL_INFO[currentModel].badge === 'Baseline' ? 'model-card-badge-baseline' : 'model-card-badge'}>
+                    {MODEL_INFO[currentModel].badge}
+                  </span>
+                )}
+              </div>
+              <p className="model-card-tagline">{MODEL_INFO[currentModel].tagline}</p>
+
+              <div className="model-card-metrics">
+                <div className="model-card-metric">
+                  <span className="model-card-metric-value">{MODEL_INFO[currentModel].accuracy}</span>
+                  <span className="model-card-metric-label">Accuracy</span>
+                </div>
+                <div className="model-card-metric">
+                  <span className="model-card-metric-value">{MODEL_INFO[currentModel].f1}</span>
+                  <span className="model-card-metric-label">F1 Score</span>
+                </div>
+                <div className="model-card-metric">
+                  <span className="model-card-metric-value">{MODEL_INFO[currentModel].auc}</span>
+                  <span className="model-card-metric-label">ROC-AUC</span>
+                </div>
+                <div className="model-card-metric">
+                  <span className="model-card-metric-value">{MODEL_INFO[currentModel].threshold}</span>
+                  <span className="model-card-metric-label">Threshold</span>
+                </div>
+              </div>
+
+              <div className="model-card-details">
+                <div className="model-card-section">
+                  <h4>How it works</h4>
+                  <p>{MODEL_INFO[currentModel].how}</p>
+                </div>
+                <div className="model-card-section">
+                  <h4>Features used</h4>
+                  <p>{MODEL_INFO[currentModel].features}</p>
+                </div>
+                <div className="model-card-section">
+                  <h4>Strengths</h4>
+                  <p>{MODEL_INFO[currentModel].strengths}</p>
+                </div>
+              </div>
+
+              <div className="model-card-eval">
+                Evaluated with {MODEL_INFO[currentModel].evaluation}
+              </div>
+            </section>
+          )}
+
          <section id="frame-control">
            <label htmlFor="frame-slider">Frame Rate (FPS)</label>
            <input

@@ -873,55 +938,6 @@ function FocusPageLocal({ videoManager, sessionResult, setSessionResult, isActiv
          </section>
        </>
      ) : null}
-            ))}
-          </div>
-          <div id="timeline-line"></div>
-        </section>
-
-        {/* 4. Control Buttons */}
-        <section id="control-panel">
-          <button id="btn-cam-start" className="action-btn green" onClick={handleStart}>
-            Start
-          </button>
-
-          <button id="btn-floating" className="action-btn yellow" onClick={handleFloatingWindow}>
-            Floating Window
-          </button>
-
-          <button
-            id="btn-preview"
-            className="action-btn"
-            style={{ backgroundColor: '#6c5ce7' }}
-            onClick={handlePreview}
-          >
-            Preview Result
-          </button>
-
-          <button id="btn-cam-stop" className="action-btn red" onClick={handleStop}>
-            Stop
-          </button>
-        </section>
-
-        {/* 5. Frame Control */}
-        <section id="frame-control">
-          <label htmlFor="frame-slider">Frame Rate (FPS)</label>
-          <input
-            type="range"
-            id="frame-slider"
-            min="10"
-            max="30"
-            value={currentFrame}
-            onChange={(e) => handleFrameChange(e.target.value)}
-          />
-          <input
-            type="number"
-            id="frame-input"
-            min="10"
-            max="30"
-            value={currentFrame}
-            onChange={(e) => handleFrameChange(e.target.value)}
-          />
-        </section>
 
      {/* Calibration overlay (fixed fullscreen, must be outside overflow:hidden containers) */}
      <CalibrationOverlay calibration={calibration} videoManager={videoManager} />
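`MODEL_INFO.hybrid.how` above summarises the fusion as 30% XGBoost probability plus 70% geometric score, combined by a logistic-regression meta-classifier with a 0.46 decision threshold. The real model runs server-side in the Python pipeline; the following is only a toy sketch of the decision rule those numbers describe, with placeholder weights standing in for the trained meta-classifier coefficients:

```js
// Toy sketch of the hybrid decision rule from MODEL_INFO (not the actual
// server-side model). w0/w1/w2 are placeholders, not trained coefficients.
const HYBRID_THRESHOLD = 0.46;
const sigmoid = (z) => 1 / (1 + Math.exp(-z));

function hybridDecision(xgbProb, geomScore, [w0, w1, w2] = [0, 0.3, 0.7]) {
  // With the placeholder weights this reduces to the 30/70 blend passed
  // through a sigmoid; the trained weights would differ.
  const p = sigmoid(w0 + w1 * xgbProb + w2 * geomScore);
  return { probability: p, focused: p >= HYBRID_THRESHOLD };
}

console.log(hybridDecision(0.9, 0.8)); // → { probability: ≈0.70, focused: true }
```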
src/components/Help.jsx
CHANGED

@@ -1,6 +1,24 @@
-import React from 'react';
+import React, { useState } from 'react';
 
 function Help() {
+  const [clearMsg, setClearMsg] = useState('');
+
+  const clearAllHistory = async () => {
+    if (!window.confirm('Delete all saved sessions? My Records and My Achievement will reset.')) return;
+    setClearMsg('');
+    try {
+      const res = await fetch('/api/history', { method: 'DELETE' });
+      const data = await res.json().catch(() => ({}));
+      if (res.ok && data.status === 'success') {
+        setClearMsg('Session history cleared.');
+      } else {
+        setClearMsg(data.message || 'Could not clear history.');
+      }
+    } catch (e) {
+      setClearMsg('Request failed.');
+    }
+  };
+
   return (
     <main id="page-f" className="page">
       <h1 className="page-title">Help</h1>

@@ -47,7 +65,27 @@ function Help() {
 
       <section className="help-section">
         <h2>Privacy & Data</h2>
-        <p>Video frames are processed in real-time on the server and are never stored. Only focus status metadata (timestamps, confidence scores) is saved to the session database.</p>
+        <p>Video frames are processed in real-time on the server and are never stored. Only focus status metadata (timestamps, confidence scores) is saved to the session database. View past runs under <strong>My Records</strong>; stats and badges live under <strong>My Achievement</strong>.</p>
+        <p style={{ marginTop: '12px' }}>
+          <button
+            type="button"
+            onClick={clearAllHistory}
+            style={{
+              padding: '8px 16px',
+              borderRadius: '8px',
+              border: '1px solid #c44',
+              background: 'transparent',
+              color: '#e88',
+              cursor: 'pointer',
+              fontSize: '14px'
+            }}
+          >
+            Clear all session history
+          </button>
+          {clearMsg && (
+            <span style={{ marginLeft: '12px', color: '#aaa', fontSize: '14px' }}>{clearMsg}</span>
+          )}
+        </p>
       </section>
 
      <section className="help-section">
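`clearAllHistory` above treats anything other than an OK response carrying `{ status: 'success' }` as a failure. When debugging, the same contract can be exercised directly from the browser console:

```js
// Manual check of the DELETE /api/history contract used by clearAllHistory.
fetch('/api/history', { method: 'DELETE' })
  .then(async (res) => {
    const data = await res.json().catch(() => ({}));
    console.log(res.status, data); // expect 200 and { status: 'success' }
  })
  .catch((err) => console.error('Request failed:', err));
```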
src/components/Home.jsx
CHANGED

@@ -1,123 +1,15 @@
-import React, { useRef } from 'react';
+import React from 'react';
 
-function Home({ setActiveTab, role, setRole }) {
-  const fileInputRef = useRef(null);
-
-  // 1. Start a fresh focus workflow.
-  const handleNewStart = async () => {
-    await fetch('/api/history', { method: 'DELETE' });
-    setActiveTab('focus');
-  };
-
-  // 2. Restore a backup automatically from local storage.
-  const handleAutoImport = async () => {
-    const backup = localStorage.getItem('focus_magic_backup');
-    if (backup) {
-      try {
-        const sessions = JSON.parse(backup);
-        const response = await fetch('/api/import', {
-          method: 'POST',
-          headers: { 'Content-Type': 'application/json' },
-          body: JSON.stringify(sessions)
-        });
-        if (response.ok) {
-          alert("Auto-recovery successful!");
-        } else {
-          alert("Auto-recovery failed.");
-        }
-      } catch (err) {
-        alert("Error: " + err.message);
-      }
-    } else {
-      alert("No previous backup found. Please use Manual Import.");
-    }
-  };
-
-  // 3. Import a backup file manually.
-  const handleFileChange = async (event) => {
-    const file = event.target.files[0];
-    if (!file) return;
-    const reader = new FileReader();
-    reader.onload = async (e) => {
-      try {
-        const sessions = JSON.parse(e.target.result);
-        const response = await fetch('/api/import', {
-          method: 'POST',
-          headers: { 'Content-Type': 'application/json' },
-          body: JSON.stringify(sessions)
-        });
-        if (response.ok) {
-          alert("Import successful!");
-        }
-      } catch (err) {
-        alert("Error: " + err.message);
-      }
-      event.target.value = '';
-    };
-    reader.readAsText(file);
-  };
-
-  // 4. Toggle between Admin and User modes.
-  const handleAdminToggle = async () => {
-    if (role === 'admin') {
-      if (window.confirm("Switch back to User mode? Current data will be cleared.")) {
-        await fetch('/api/history', { method: 'DELETE' });
-        setRole('user');
-        alert("Switched to User mode.");
-      }
-    } else {
-      const pwd = window.prompt("Enter Admin Password:");
-      if (pwd === "123") {
-        try {
-          await fetch('/api/history', { method: 'DELETE' });
-          const res = await fetch('/test_data.json');
-          if (!res.ok) throw new Error("test_data.json not found");
-          const testData = await res.json();
-          const importRes = await fetch('/api/import', {
-            method: 'POST',
-            headers: { 'Content-Type': 'application/json' },
-            body: JSON.stringify(testData)
-          });
-          if (importRes.ok) {
-            setRole('admin');
-            alert("Admin mode activated!");
-          }
-        } catch (error) {
-          alert("Admin login failed: " + error.message);
-        }
-      } else if (pwd !== null) {
-        alert("Incorrect password!");
-      }
-    }
-  };
-
+function Home({ setActiveTab }) {
   return (
     <main id="page-a" className="page">
       <h1>FocusGuard</h1>
       <p>Your productivity monitor assistant.</p>
 
-      {/* Keep the hidden file input outside the button grid so it never affects layout. */}
-      <input type="file" ref={fileInputRef} style={{ display: 'none' }} accept=".json" onChange={handleFileChange} />
-
-      {/* Render the four main actions inside a clean 2x2 grid. */}
       <div className="home-button-grid">
-
-        <button className="btn-main" onClick={handleNewStart}>
+        <button type="button" className="btn-main" onClick={() => setActiveTab('focus')}>
          Start Focus
        </button>
-
-        <button className="btn-main" onClick={handleAutoImport}>
-          Auto Import History
-        </button>
-
-        <button className="btn-main" onClick={() => fileInputRef.current.click()}>
-          Manual Import History
-        </button>
-
-        <button className="btn-main" onClick={handleAdminToggle}>
-          {role === 'admin' ? 'Switch to User Mode' : 'Admin Login'}
-        </button>
-
      </div>
    </main>
  );
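The `VideoManagerLocal.js` diff below adds a gaze minimap: normalised gaze coordinates (`gaze_x`/`gaze_y` in 0–1 screen space) are clamped and scaled into a 120×80 rectangle anchored at the bottom-right of the canvas, so the dot stays visible even when the gaze estimate runs off-screen. The mapping on its own, extracted from the drawing code that follows:

```js
// Map a normalised gaze point into the minimap rectangle. Clamping keeps
// the dot inside the map even for off-screen gaze estimates.
function gazeToMinimap(gazeX, gazeY, canvasW, canvasH, mapW = 120, mapH = 80, pad = 10) {
  const mapX = canvasW - mapW - pad; // right-aligned
  const mapY = canvasH - mapH - 30;  // sits above the performance stats bar
  const clamp01 = (v) => Math.max(0, Math.min(1, v));
  return {
    x: mapX + clamp01(gazeX) * mapW,
    y: mapY + clamp01(gazeY) * mapH,
  };
}
```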
src/utils/VideoManagerLocal.js
CHANGED
|
@@ -1,6 +1,29 @@
|
|
| 1 |
// src/utils/VideoManagerLocal.js
|
| 2 |
// Local video processing implementation using WebSocket + Canvas, without WebRTC.
|
| 3 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 4 |
export class VideoManagerLocal {
|
| 5 |
constructor(callbacks) {
|
| 6 |
this.callbacks = callbacks || {};
|
|
@@ -33,23 +56,8 @@ export class VideoManagerLocal {
|
|
| 33 |
// Continuous render loop
|
| 34 |
this._animFrameId = null;
|
| 35 |
|
| 36 |
-
// Notification state
|
| 37 |
-
this.notificationEnabled = true;
|
| 38 |
-
this.notificationThreshold = 30;
|
| 39 |
-
this.unfocusedStartTime = null;
|
| 40 |
-
this.lastNotificationTime = null;
|
| 41 |
-
this.notificationCooldown = 60000;
|
| 42 |
-
|
| 43 |
// Calibration state
|
| 44 |
-
this.calibration =
|
| 45 |
-
active: false,
|
| 46 |
-
collecting: false,
|
| 47 |
-
target: null,
|
| 48 |
-
index: 0,
|
| 49 |
-
numPoints: 0,
|
| 50 |
-
done: false,
|
| 51 |
-
success: false,
|
| 52 |
-
};
|
| 53 |
|
| 54 |
// Performance metrics
|
| 55 |
this.stats = {
|
|
@@ -122,10 +130,6 @@ export class VideoManagerLocal {
|
|
| 122 |
}
|
| 123 |
}
|
| 124 |
|
| 125 |
-
// Request notification permission
|
| 126 |
-
await this.requestNotificationPermission();
|
| 127 |
-
await this.loadNotificationSettings();
|
| 128 |
-
|
| 129 |
// Open the WebSocket connection
|
| 130 |
await this.connectWebSocket();
|
| 131 |
|
|
@@ -207,7 +211,10 @@ export class VideoManagerLocal {
|
|
| 207 |
|
| 208 |
socket.onerror = () => {
|
| 209 |
console.error('WebSocket error:', { url: wsUrl, readyState: socket.readyState });
|
| 210 |
-
rejectWithMessage(
|
|
|
|
|
|
|
|
|
|
| 211 |
};
|
| 212 |
|
| 213 |
socket.onclose = (event) => {
|
|
@@ -217,7 +224,11 @@ export class VideoManagerLocal {
|
|
| 217 |
}
|
| 218 |
|
| 219 |
if (!opened) {
|
| 220 |
-
rejectWithMessage(
|
|
|
|
|
|
|
|
|
|
|
|
|
| 221 |
return;
|
| 222 |
}
|
| 223 |
|
|
@@ -319,21 +330,60 @@ export class VideoManagerLocal {
|
|
| 319 |
ctx.fillStyle = '#B4B4B4';
|
| 320 |
ctx.font = '11px Arial';
|
| 321 |
ctx.textAlign = 'right';
|
| 322 |
-
ctx.fillText(
|
| 323 |
ctx.textAlign = 'left';
|
| 324 |
}
|
| 325 |
}
|
| 326 |
-
// Gaze
|
| 327 |
if (data && data.gaze_x !== undefined && data.gaze_y !== undefined) {
|
| 328 |
-
const
|
| 329 |
-
const
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 330 |
ctx.beginPath();
|
| 331 |
-
ctx.arc(
|
| 332 |
-
ctx.fillStyle =
|
| 333 |
ctx.fill();
|
| 334 |
ctx.strokeStyle = '#FFFFFF';
|
| 335 |
-
ctx.lineWidth =
|
| 336 |
ctx.stroke();
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 337 |
}
|
| 338 |
|
| 339 |
// Performance stats
|
|
@@ -376,7 +426,8 @@ export class VideoManagerLocal {
|
|
| 376 |
const latency = now - this._lastSendTime;
|
| 377 |
this.stats.lastLatencies.push(latency);
|
| 378 |
if (this.stats.lastLatencies.length > 10) this.stats.lastLatencies.shift();
|
| 379 |
-
|
|
|
|
| 380 |
}
|
| 381 |
|
| 382 |
this.updateStatus(data.focused);
|
|
@@ -407,6 +458,8 @@ export class VideoManagerLocal {
|
|
| 407 |
gaze_x: data.gaze_x,
|
| 408 |
gaze_y: data.gaze_y,
|
| 409 |
on_screen: data.on_screen,
|
|
|
|
|
|
|
| 410 |
};
|
| 411 |
this.drawDetectionResult(detectionData);
|
| 412 |
break;
|
|
@@ -447,8 +500,20 @@ export class VideoManagerLocal {
|
|
| 447 |
}
|
| 448 |
break;
|
| 449 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 450 |
case 'calibration_done':
|
| 451 |
this.calibration.collecting = false;
|
|
|
|
| 452 |
this.calibration.done = true;
|
| 453 |
this.calibration.success = data.success;
|
| 454 |
if (this.callbacks.onCalibrationUpdate) {
|
|
@@ -463,7 +528,7 @@ export class VideoManagerLocal {
|
|
| 463 |
break;
|
| 464 |
|
| 465 |
case 'calibration_cancelled':
|
| 466 |
-
this.calibration =
|
| 467 |
if (this.callbacks.onCalibrationUpdate) {
|
| 468 |
this.callbacks.onCalibrationUpdate({ ...this.calibration });
|
| 469 |
}
|
|
@@ -494,14 +559,19 @@ export class VideoManagerLocal {
|
|
| 494 |
if (this.ws && this.ws.readyState === WebSocket.OPEN) {
|
| 495 |
this.ws.send(JSON.stringify({ type: 'calibration_cancel' }));
|
| 496 |
}
|
| 497 |
-
this.calibration =
|
| 498 |
if (this.callbacks.onCalibrationUpdate) {
|
| 499 |
this.callbacks.onCalibrationUpdate({ ...this.calibration });
|
| 500 |
}
|
| 501 |
}
|
| 502 |
|
| 503 |
// Face mesh landmark index groups (matches live_demo.py)
|
| 504 |
-
static FACE_OVAL = [
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 505 |
static LEFT_EYE = [33,7,163,144,145,153,154,155,133,173,157,158,159,160,161,246];
|
| 506 |
static RIGHT_EYE = [362,382,381,380,374,373,390,249,263,466,388,387,386,385,384,398];
|
| 507 |
static LEFT_IRIS = [468,469,470,471,472];
|
|
@@ -593,9 +663,28 @@ export class VideoManagerLocal {
|
|
| 593 |
|
| 594 |
// Irises (circles + gaze direction lines)
|
| 595 |
const irisSets = [
|
| 596 |
-
{
|
| 597 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 598 |
];
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 599 |
for (const { iris, center: centerIdx, inner, outer } of irisSets) {
|
| 600 |
const centerPt = _get(iris[0]);
|
| 601 |
if (!centerPt) continue;
|
|
@@ -614,25 +703,65 @@ export class VideoManagerLocal {
|
|
| 614 |
ctx.strokeStyle = '#FF00FF';
|
| 615 |
ctx.lineWidth = 2;
|
| 616 |
ctx.stroke();
|
| 617 |
-
// Iris center dot
|
| 618 |
ctx.beginPath();
|
| 619 |
-
ctx.arc(cx, cy,
|
| 620 |
-
ctx.fillStyle =
|
| 621 |
ctx.fill();
|
| 622 |
-
|
| 623 |
-
|
| 624 |
-
|
| 625 |
-
|
| 626 |
-
|
| 627 |
-
|
| 628 |
-
|
| 629 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 630 |
ctx.beginPath();
|
| 631 |
ctx.moveTo(cx, cy);
|
| 632 |
-
ctx.lineTo(
|
| 633 |
-
ctx.strokeStyle =
|
| 634 |
-
ctx.lineWidth =
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 635 |
ctx.stroke();
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 636 |
}
|
| 637 |
}
|
| 638 |
}
|
|
@@ -661,77 +790,6 @@ export class VideoManagerLocal {
|
|
| 661 |
this.currentStatus = false;
|
| 662 |
}
|
| 663 |
|
| 664 |
-
this.handleNotificationLogic(previousStatus, this.currentStatus);
|
| 665 |
-
}
|
| 666 |
-
|
| 667 |
-
handleNotificationLogic(previousStatus, currentStatus) {
|
| 668 |
-
const now = Date.now();
|
| 669 |
-
|
| 670 |
-
if (previousStatus && !currentStatus) {
|
| 671 |
-
this.unfocusedStartTime = now;
|
| 672 |
-
}
|
| 673 |
-
|
| 674 |
-
if (!previousStatus && currentStatus) {
|
| 675 |
-
this.unfocusedStartTime = null;
|
| 676 |
-
}
|
| 677 |
-
|
| 678 |
-
if (!currentStatus && this.unfocusedStartTime) {
|
| 679 |
-
const unfocusedDuration = (now - this.unfocusedStartTime) / 1000;
|
| 680 |
-
|
| 681 |
-
if (unfocusedDuration >= this.notificationThreshold) {
|
| 682 |
-
const canSendNotification = !this.lastNotificationTime ||
|
| 683 |
-
(now - this.lastNotificationTime) >= this.notificationCooldown;
|
| 684 |
-
|
| 685 |
-
if (canSendNotification) {
|
| 686 |
-
this.sendNotification(
|
| 687 |
-
'Focus Alert',
|
| 688 |
-
`You've been distracted for ${Math.floor(unfocusedDuration)} seconds. Get back to work!`
|
| 689 |
-
);
|
| 690 |
-
this.lastNotificationTime = now;
|
| 691 |
-
}
|
| 692 |
-
}
|
| 693 |
-
}
|
| 694 |
-
}
|
| 695 |
-
|
| 696 |
-
async requestNotificationPermission() {
|
| 697 |
-
if ('Notification' in window && Notification.permission === 'default') {
|
| 698 |
-
try {
|
| 699 |
-
await Notification.requestPermission();
|
| 700 |
-
} catch (error) {
|
| 701 |
-
console.error('Failed to request notification permission:', error);
|
| 702 |
-
}
|
| 703 |
-
}
|
| 704 |
-
}
|
| 705 |
-
|
| 706 |
-
async loadNotificationSettings() {
|
| 707 |
-
try {
|
| 708 |
-
const response = await fetch('/api/settings');
|
| 709 |
-
const settings = await response.json();
|
| 710 |
-
if (settings) {
|
| 711 |
-
this.notificationEnabled = settings.notification_enabled ?? true;
|
| 712 |
-
this.notificationThreshold = settings.notification_threshold ?? 30;
|
| 713 |
-
}
|
| 714 |
-
} catch (error) {
|
| 715 |
-
console.error('Failed to load notification settings:', error);
|
| 716 |
-
}
|
| 717 |
-
}
|
| 718 |
-
|
| 719 |
-
sendNotification(title, message) {
|
| 720 |
-
if (!this.notificationEnabled) return;
|
| 721 |
-
if ('Notification' in window && Notification.permission === 'granted') {
|
| 722 |
-
try {
|
| 723 |
-
const notification = new Notification(title, {
|
| 724 |
-
body: message,
|
| 725 |
-
icon: '/vite.svg',
|
| 726 |
-
badge: '/vite.svg',
|
| 727 |
-
tag: 'focus-guard-distraction',
|
| 728 |
-
requireInteraction: false
|
| 729 |
-
});
|
| 730 |
-
setTimeout(() => notification.close(), 3000);
|
| 731 |
-
} catch (error) {
|
| 732 |
-
console.error('Failed to send notification:', error);
|
| 733 |
-
}
|
| 734 |
-
}
|
| 735 |
}
|
| 736 |
|
| 737 |
async stopStreaming() {
|
|
@@ -821,10 +879,6 @@ export class VideoManagerLocal {
|
|
| 821 |
ctx.clearRect(0, 0, this.displayCanvas.width, this.displayCanvas.height);
|
| 822 |
}
|
| 823 |
|
| 824 |
-
// Reset transient state
|
| 825 |
-
this.unfocusedStartTime = null;
|
| 826 |
-
this.lastNotificationTime = null;
|
| 827 |
-
|
| 828 |
console.log('Streaming stopped');
|
| 829 |
console.log('Stats:', this.stats);
|
| 830 |
}
|
|
|
|
| 1 |
// src/utils/VideoManagerLocal.js
|
| 2 |
// Local video processing implementation using WebSocket + Canvas, without WebRTC.
|
| 3 |
|
| 4 |
+
const DEFAULT_CALIBRATION_STATE = Object.freeze({
|
| 5 |
+
active: false,
|
| 6 |
+
collecting: false,
|
| 7 |
+
target: null,
|
| 8 |
+
index: 0,
|
| 9 |
+
numPoints: 0,
|
| 10 |
+
done: false,
|
| 11 |
+
success: false,
|
| 12 |
+
});
|
| 13 |
+
|
| 14 |
+
const createCalibrationState = (overrides = {}) => ({
|
| 15 |
+
...DEFAULT_CALIBRATION_STATE,
|
| 16 |
+
...overrides,
|
| 17 |
+
});
|
| 18 |
+
|
| 19 |
+
const formatSignedInt = (value) => `${value > 0 ? '+' : ''}${value.toFixed(0)}`;
|
| 20 |
+
|
| 21 |
+
const buildHeadPoseText = (data) => (
|
| 22 |
+
`yaw:${formatSignedInt(data.yaw)} `
|
| 23 |
+
+ `pitch:${formatSignedInt(data.pitch)} `
|
| 24 |
+
+ `roll:${formatSignedInt(data.roll)}`
|
| 25 |
+
);
|
| 26 |
+
|
| 27 |
export class VideoManagerLocal {
|
| 28 |
constructor(callbacks) {
|
| 29 |
this.callbacks = callbacks || {};
|
|
|
|
| 56 |
// Continuous render loop
|
| 57 |
this._animFrameId = null;
|
| 58 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 59 |
// Calibration state
|
| 60 |
+
this.calibration = createCalibrationState();
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 61 |
|
| 62 |
// Performance metrics
|
| 63 |
this.stats = {
|
|
|
|
| 130 |
}
|
| 131 |
}
|
| 132 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 133 |
// Open the WebSocket connection
|
| 134 |
await this.connectWebSocket();
|
| 135 |
|
|
|
|
| 211 |
|
| 212 |
socket.onerror = () => {
|
| 213 |
console.error('WebSocket error:', { url: wsUrl, readyState: socket.readyState });
|
| 214 |
+
rejectWithMessage(
|
| 215 |
+
`Failed to connect to ${wsUrl}. `
|
| 216 |
+
+ 'Check that the backend server is running and reachable.'
|
| 217 |
+
);
|
| 218 |
};
|
| 219 |
|
| 220 |
socket.onclose = (event) => {
|
|
|
|
| 224 |
}
|
| 225 |
|
| 226 |
if (!opened) {
|
| 227 |
+
rejectWithMessage(
|
| 228 |
+
`WebSocket closed before connection was established `
|
| 229 |
+
+ `(${event.code || 'no code'}). `
|
| 230 |
+
+ 'Check that the backend server is running on the expected port.'
|
| 231 |
+
);
|
| 232 |
return;
|
| 233 |
}
|
| 234 |
|
|
|
|
| 330 |
ctx.fillStyle = '#B4B4B4';
|
| 331 |
ctx.font = '11px Arial';
|
| 332 |
ctx.textAlign = 'right';
|
| 333 |
+
ctx.fillText(buildHeadPoseText(data), w - 10, 48);
|
| 334 |
ctx.textAlign = 'left';
|
| 335 |
}
|
| 336 |
}
|
| 337 |
+
// Gaze minimap — small screen representation in bottom-right corner
|
| 338 |
if (data && data.gaze_x !== undefined && data.gaze_y !== undefined) {
|
| 339 |
+
const mapW = 120;
|
| 340 |
+
const mapH = 80;
|
| 341 |
+
const mapPad = 10;
|
| 342 |
+
const mapX = w - mapW - mapPad;
|
| 343 |
+
const mapY = h - mapH - 30; // above the performance stats bar
|
| 344 |
+
|
| 345 |
+
// Background (rounded rect with fallback)
|
| 346 |
+
ctx.fillStyle = 'rgba(0, 0, 0, 0.7)';
|
| 347 |
+
ctx.beginPath();
|
| 348 |
+
if (ctx.roundRect) {
|
| 349 |
+
ctx.roundRect(mapX - 4, mapY - 4, mapW + 8, mapH + 8, 6);
|
| 350 |
+
} else {
|
| 351 |
+
ctx.rect(mapX - 4, mapY - 4, mapW + 8, mapH + 8);
|
| 352 |
+
}
|
| 353 |
+
ctx.fill();
|
| 354 |
+
|
| 355 |
+
// Screen area
|
| 356 |
+
ctx.fillStyle = data.on_screen ? 'rgba(30, 40, 60, 0.9)' : 'rgba(60, 20, 20, 0.9)';
|
| 357 |
+
ctx.fillRect(mapX, mapY, mapW, mapH);
|
| 358 |
+
ctx.strokeStyle = data.on_screen ? 'rgba(100, 180, 255, 0.6)' : 'rgba(255, 100, 100, 0.6)';
|
| 359 |
+
ctx.lineWidth = 1;
|
| 360 |
+
ctx.strokeRect(mapX, mapY, mapW, mapH);
|
| 361 |
+
|
| 362 |
+
// Gaze dot — clamp to minimap bounds for visibility
|
| 363 |
+
const dotX = mapX + Math.max(0, Math.min(1, data.gaze_x)) * mapW;
|
| 364 |
+
const dotY = mapY + Math.max(0, Math.min(1, data.gaze_y)) * mapH;
|
| 365 |
+
const dotColor = data.on_screen ? '#00FF00' : '#FF4444';
|
| 366 |
+
|
| 367 |
+
// Glow
|
| 368 |
+
ctx.beginPath();
|
| 369 |
+
ctx.arc(dotX, dotY, 8, 0, 2 * Math.PI);
|
| 370 |
+
ctx.fillStyle = data.on_screen ? 'rgba(0, 255, 0, 0.15)' : 'rgba(255, 68, 68, 0.15)';
|
| 371 |
+
ctx.fill();
|
| 372 |
+
|
| 373 |
+
// Dot
|
| 374 |
ctx.beginPath();
|
| 375 |
+
ctx.arc(dotX, dotY, 4, 0, 2 * Math.PI);
|
| 376 |
+
ctx.fillStyle = dotColor;
|
| 377 |
ctx.fill();
|
| 378 |
ctx.strokeStyle = '#FFFFFF';
|
| 379 |
+
ctx.lineWidth = 1.5;
|
| 380 |
ctx.stroke();
|
| 381 |
+
|
| 382 |
+
// Label
|
| 383 |
+
ctx.fillStyle = 'rgba(255, 255, 255, 0.6)';
|
| 384 |
+
ctx.font = '9px Arial';
|
| 385 |
+
ctx.textAlign = 'left';
|
| 386 |
+
ctx.fillText('GAZE', mapX + 3, mapY + 10);
|
| 387 |
}
|
| 388 |
|
| 389 |
// Performance stats
|
|
|
|
| 426 |
const latency = now - this._lastSendTime;
|
| 427 |
this.stats.lastLatencies.push(latency);
|
| 428 |
if (this.stats.lastLatencies.length > 10) this.stats.lastLatencies.shift();
|
| 429 |
+
const latencySum = this.stats.lastLatencies.reduce((a, b) => a + b, 0);
|
| 430 |
+
this.stats.avgLatency = latencySum / this.stats.lastLatencies.length;
|
| 431 |
}
|
| 432 |
|
| 433 |
this.updateStatus(data.focused);
|
|
|
|
| 458 |
gaze_x: data.gaze_x,
|
| 459 |
gaze_y: data.gaze_y,
|
| 460 |
on_screen: data.on_screen,
|
| 461 |
+
gaze_yaw: data.gaze_yaw,
|
| 462 |
+
gaze_pitch: data.gaze_pitch,
|
| 463 |
};
|
| 464 |
this.drawDetectionResult(detectionData);
|
| 465 |
break;
|
|
|
|
@@ … +500 @@
         }
         break;

+      case 'calibration_verify':
+        this.calibration.collecting = true;
+        this.calibration.target = data.target;
+        this.calibration.index = -1; // special: verification phase
+        this.calibration.verifying = true;
+        this.calibration.verifyMessage = data.message || 'Verify calibration';
+        if (this.callbacks.onCalibrationUpdate) {
+          this.callbacks.onCalibrationUpdate({ ...this.calibration });
+        }
+        break;
+
       case 'calibration_done':
         this.calibration.collecting = false;
+        this.calibration.verifying = false;
         this.calibration.done = true;
         this.calibration.success = data.success;
         if (this.callbacks.onCalibrationUpdate) {
@@ … +528 @@
         break;

       case 'calibration_cancelled':
+        this.calibration = createCalibrationState();
         if (this.callbacks.onCalibrationUpdate) {
           this.callbacks.onCalibrationUpdate({ ...this.calibration });
         }
@@ … +559 @@
     if (this.ws && this.ws.readyState === WebSocket.OPEN) {
       this.ws.send(JSON.stringify({ type: 'calibration_cancel' }));
     }
+    this.calibration = createCalibrationState();
     if (this.callbacks.onCalibrationUpdate) {
       this.callbacks.onCalibrationUpdate({ ...this.calibration });
     }
   }

   // Face mesh landmark index groups (matches live_demo.py)
+  static FACE_OVAL = [
+    10, 338, 297, 332, 284, 251, 389, 356, 454,
+    323, 361, 288, 397, 365, 379, 378, 400, 377,
+    152, 148, 176, 149, 150, 136, 172, 58, 132,
+    93, 234, 127, 162, 21, 54, 103, 67, 109, 10,
+  ];
   static LEFT_EYE = [33,7,163,144,145,153,154,155,133,173,157,158,159,160,161,246];
   static RIGHT_EYE = [362,382,381,380,374,373,390,249,263,466,388,387,386,385,384,398];
   static LEFT_IRIS = [468,469,470,471,472];
@@ … +663 @@

     // Irises (circles + gaze direction lines)
     const irisSets = [
+      {
+        iris: VideoManagerLocal.LEFT_IRIS,
+        center: VideoManagerLocal.LEFT_IRIS_CENTER,
+        inner: VideoManagerLocal.LEFT_EYE_INNER,
+        outer: VideoManagerLocal.LEFT_EYE_OUTER,
+      },
+      {
+        iris: VideoManagerLocal.RIGHT_IRIS,
+        center: VideoManagerLocal.RIGHT_IRIS_CENTER,
+        inner: VideoManagerLocal.RIGHT_EYE_INNER,
+        outer: VideoManagerLocal.RIGHT_EYE_OUTER,
+      },
     ];
+    // Get L2CS gaze angles + on_screen state from latest detection data
+    const detection = this._lastDetection;
+    const gazeYaw = detection ? detection.gaze_yaw : undefined;
+    const gazePitch = detection ? detection.gaze_pitch : undefined;
+    const onScreen = detection ? detection.on_screen : undefined;
+    const hasL2CSGaze = gazeYaw !== undefined && gazePitch !== undefined;
+    const gazeLineColor = (onScreen === false) ? '#FF0000' : '#00FF00';
+    const gazeLineLength = 100;
+
     for (const { iris, center: centerIdx, inner, outer } of irisSets) {
       const centerPt = _get(iris[0]);
       if (!centerPt) continue;
@@ … +703 @@
       ctx.strokeStyle = '#FF00FF';
       ctx.lineWidth = 2;
       ctx.stroke();
+      // Iris center anchor dot (color-coded)
       ctx.beginPath();
+      ctx.arc(cx, cy, 3, 0, 2 * Math.PI);
+      ctx.fillStyle = gazeLineColor;
       ctx.fill();
+      ctx.strokeStyle = '#FFFFFF';
+      ctx.lineWidth = 1;
+      ctx.stroke();
+
+      // Gaze direction line — use L2CS angles when available, else geometric fallback
+      if (hasL2CSGaze) {
+        // L2CS pitch/yaw in radians -> pixel direction vector
+        // Matches upstream L2CS-Net vis.py draw_gaze formula:
+        //   dx = -length * sin(pitch) * cos(yaw)
+        //   dy = -length * sin(yaw)
+        const dx = -gazeLineLength * Math.sin(gazePitch) * Math.cos(gazeYaw);
+        const dy = -gazeLineLength * Math.sin(gazeYaw);
+        const ex = cx + dx;
+        const ey = cy + dy;
+
+        // Main gaze line (thick, color-coded)
         ctx.beginPath();
         ctx.moveTo(cx, cy);
+        ctx.lineTo(ex, ey);
+        ctx.strokeStyle = gazeLineColor;
+        ctx.lineWidth = 3;
+        ctx.stroke();
+
+        // Arrowhead
+        const angle = Math.atan2(ey - cy, ex - cx);
+        const arrowLen = 10;
+        ctx.beginPath();
+        ctx.moveTo(ex, ey);
+        ctx.lineTo(ex - arrowLen * Math.cos(angle - 0.4), ey - arrowLen * Math.sin(angle - 0.4));
+        ctx.moveTo(ex, ey);
+        ctx.lineTo(ex - arrowLen * Math.cos(angle + 0.4), ey - arrowLen * Math.sin(angle + 0.4));
+        ctx.strokeStyle = gazeLineColor;
+        ctx.lineWidth = 2;
         ctx.stroke();
+      } else {
+        // Geometric fallback: iris displacement from eye center (scaled up)
+        const innerPt = _get(inner);
+        const outerPt = _get(outer);
+        if (innerPt && outerPt) {
+          const eyeCx = (innerPt[0] + outerPt[0]) / 2.0 * w;
+          const eyeCy = (innerPt[1] + outerPt[1]) / 2.0 * h;
+          const fdx = cx - eyeCx;
+          const fdy = cy - eyeCy;
+          const flen = Math.hypot(fdx, fdy);
+          if (flen > 0.5) {
+            const scale = gazeLineLength / flen;
+            ctx.beginPath();
+            ctx.moveTo(cx, cy);
+            ctx.lineTo(cx + fdx * scale, cy + fdy * scale);
+            ctx.strokeStyle = '#00FFFF';
+            ctx.lineWidth = 2;
+            ctx.stroke();
+          }
+        }
       }
     }
   }
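The arrow math above follows the draw_gaze convention quoted in the comments. A quick numeric sanity check (standalone Python sketch, not part of the commit):

```python
import math

def gaze_vector(pitch_rad, yaw_rad, length=100.0):
    # Same formula as the canvas code above (upstream L2CS-Net draw_gaze convention).
    dx = -length * math.sin(pitch_rad) * math.cos(yaw_rad)
    dy = -length * math.sin(yaw_rad)
    return dx, dy

print(gaze_vector(0.0, 0.0))  # (-0.0, -0.0) — centered gaze draws no offset
print(gaze_vector(0.1, 0.2))  # ≈ (-9.78, -19.87) — offset up-left in canvas coords
```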
@@ … +790 @@
       this.currentStatus = false;
     }

   }

   async stopStreaming() {
@@ … +879 @@
       ctx.clearRect(0, 0, this.displayCanvas.width, this.displayCanvas.height);
     }

     console.log('Streaming stopped');
     console.log('Stats:', this.stats);
   }
tests/test_gaze_pipeline.py
ADDED
@@ -0,0 +1,363 @@
+"""
+Diagnostic test for the full gaze pipeline:
+    calibration → predict → fusion → focus decision
+
+Tests that looking at screen center reads as focused,
+and looking away reads as not focused.
+"""
+import math
+import sys
+import os
+import numpy as np
+
+sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+
+from models.gaze_calibration import GazeCalibration, DEFAULT_TARGETS
+from models.gaze_eye_fusion import GazeEyeFusion
+
+
+def _make_landmarks_with_ear(ear_value=0.28):
+    """Create a minimal 478-landmark array with the given EAR.
+    Only the EAR indices (6 per eye) and iris indices need real values."""
+    lm = np.full((478, 3), 0.5, dtype=np.float32)
+    # Left eye EAR landmarks [33, 160, 158, 133, 153, 145]
+    # p1=33, p2=160, p3=158, p4=133, p5=153, p6=145
+    # EAR = (|p2-p6| + |p3-p5|) / (2 * |p1-p4|)
+    # Set them so EAR ≈ ear_value with horizontal dist = 0.1
+    h_dist = 0.1
+    v_dist = ear_value * h_dist  # EAR = v_dist / h_dist when both verticals are equal
+    lm[33] = [0.4, 0.5, 0]    # p1 outer
+    lm[133] = [0.5, 0.5, 0]   # p4 inner
+    lm[160] = [0.45, 0.5 - v_dist/2, 0]  # p2 top
+    lm[145] = [0.45, 0.5 + v_dist/2, 0]  # p6 bottom
+    lm[158] = [0.45, 0.5 - v_dist/2, 0]  # p3 top
+    lm[153] = [0.45, 0.5 + v_dist/2, 0]  # p5 bottom
+    # Right eye — mirror
+    lm[362] = [0.6, 0.5, 0]
+    lm[263] = [0.5, 0.5, 0]
+    lm[385] = [0.55, 0.5 - v_dist/2, 0]
+    lm[380] = [0.55, 0.5 + v_dist/2, 0]
+    lm[387] = [0.55, 0.5 - v_dist/2, 0]
+    lm[373] = [0.55, 0.5 + v_dist/2, 0]
+    return lm
+
+
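A hand check of the EAR geometry constructed above: both vertical distances equal v_dist and the horizontal distance is h_dist, so the ratio collapses to v_dist / h_dist (standalone sketch, not part of the commit):

```python
h_dist, ear_value = 0.1, 0.28
v_dist = ear_value * h_dist
# EAR = (|p2-p6| + |p3-p5|) / (2 * |p1-p4|) = (v + v) / (2h) = v / h
ear = (v_dist + v_dist) / (2 * h_dist)
print(round(ear, 6))  # 0.28 — exactly the requested value
```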
+def simulate_calibration(noise_std=0.01):
+    """Simulate a 9-point calibration where the user looks at each target.
+
+    For each target (screen_x, screen_y), we generate synthetic gaze angles:
+        yaw   ≈ (screen_x - 0.5) * 0.7 radians (maps 0..1 to roughly ±20°)
+        pitch ≈ (screen_y - 0.5) * 0.5 radians (maps 0..1 to roughly ±14°)
+    plus some noise to simulate real jitter.
+    """
+    cal = GazeCalibration()
+
+    # Simulate gaze angle for a given screen target
+    def target_to_gaze(tx, ty):
+        yaw = (tx - 0.5) * 0.7    # ~±20° across screen width
+        pitch = (ty - 0.5) * 0.5  # ~±14° across screen height
+        return yaw, pitch
+
+    for i, (tx, ty) in enumerate(DEFAULT_TARGETS):
+        base_yaw, base_pitch = target_to_gaze(tx, ty)
+        n_samples = 45 if i == 0 else 30  # center gets more
+        for _ in range(n_samples):
+            yaw = base_yaw + np.random.normal(0, noise_std)
+            pitch = base_pitch + np.random.normal(0, noise_std)
+            cal.collect_sample(yaw, pitch)
+        cal.advance()
+
+    ok = cal.fit()
+    return cal, ok
+
+
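To make the synthetic linear model concrete: the right edge of the calibration grid at screen_x = 0.85 maps to yaw = (0.85 − 0.5) · 0.7 = 0.245 rad ≈ 14°, and the line inverts cleanly back to screen coordinates (standalone sketch, not part of the commit):

```python
import math

def target_to_gaze(tx, ty):
    # Same linear model as simulate_calibration() above.
    return (tx - 0.5) * 0.7, (ty - 0.5) * 0.5

yaw, pitch = target_to_gaze(0.85, 0.15)
print(math.degrees(yaw), math.degrees(pitch))  # ≈ 14.04°, ≈ -10.03°
print(yaw / 0.7 + 0.5, pitch / 0.5 + 0.5)      # ≈ 0.85, ≈ 0.15 — inverse recovers the target
```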
+def test_calibration_accuracy():
+    """Test that calibration maps screen positions correctly."""
+    print("\n" + "="*60)
+    print("TEST 1: Calibration accuracy")
+    print("="*60)
+
+    np.random.seed(42)
+    cal, ok = simulate_calibration(noise_std=0.008)
+    assert ok, "Calibration fit failed!"
+    print(f"  Calibration fitted: {ok}")
+
+    # Test prediction at each target
+    max_error = 0
+    for tx, ty in DEFAULT_TARGETS:
+        yaw = (tx - 0.5) * 0.7
+        pitch = (ty - 0.5) * 0.5
+        px, py = cal.predict(yaw, pitch)
+        err = math.sqrt((px - tx)**2 + (py - ty)**2)
+        max_error = max(max_error, err)
+        status = "OK" if err < 0.1 else "BAD"
+        print(f"  Target ({tx:.2f},{ty:.2f}) → Predicted ({px:.3f},{py:.3f}) "
+              f"error={err:.4f} [{status}]")
+
+    print(f"\n  Max error: {max_error:.4f}")
+    assert max_error < 0.15, f"Calibration error too high: {max_error:.4f}"
+    print("  PASSED")
+
+
+def test_fusion_focused_at_center():
+    """Test that looking at screen center = focused."""
+    print("\n" + "="*60)
+    print("TEST 2: Looking at screen center → FOCUSED")
+    print("="*60)
+
+    np.random.seed(42)
+    cal, ok = simulate_calibration()
+    assert ok
+
+    fusion = GazeEyeFusion(cal)
+    lm = _make_landmarks_with_ear(0.28)  # eyes open
+
+    # Looking at center: yaw≈0, pitch≈0
+    center_yaw = (0.5 - 0.5) * 0.7    # = 0
+    center_pitch = (0.5 - 0.5) * 0.5  # = 0
+
+    # Run a few frames to let the EMA settle
+    for _ in range(10):
+        result = fusion.update(center_yaw, center_pitch, lm)
+
+    print(f"  gaze_x={result['gaze_x']:.3f} gaze_y={result['gaze_y']:.3f}")
+    print(f"  on_screen={result['on_screen']}")
+    print(f"  focus_score={result['focus_score']:.3f} (threshold=0.42)")
+    print(f"  focused={result['focused']}")
+    print(f"  ear={result['ear']:.4f}")
+
+    assert result["on_screen"], "Should be on screen!"
+    assert result["focused"], f"Should be focused! score={result['focus_score']}"
+    assert 0.35 < result["gaze_x"] < 0.65, f"gaze_x should be near 0.5, got {result['gaze_x']}"
+    assert 0.35 < result["gaze_y"] < 0.65, f"gaze_y should be near 0.5, got {result['gaze_y']}"
+    print("  PASSED")
+
+
+def test_fusion_focused_at_edges():
+    """Test that looking at screen edges still = focused."""
+    print("\n" + "="*60)
+    print("TEST 3: Looking at screen edges → FOCUSED")
+    print("="*60)
+
+    np.random.seed(42)
+    cal, ok = simulate_calibration()
+    assert ok
+    lm = _make_landmarks_with_ear(0.28)
+
+    edge_targets = [
+        (0.15, 0.15, "top-left"),
+        (0.85, 0.15, "top-right"),
+        (0.15, 0.85, "bottom-left"),
+        (0.85, 0.85, "bottom-right"),
+        (0.5, 0.15, "top-center"),
+        (0.5, 0.85, "bottom-center"),
+    ]
+
+    all_pass = True
+    for tx, ty, label in edge_targets:
+        fusion = GazeEyeFusion(cal)
+        yaw = (tx - 0.5) * 0.7
+        pitch = (ty - 0.5) * 0.5
+        for _ in range(10):
+            result = fusion.update(yaw, pitch, lm)
+
+        status = "PASS" if result["focused"] else "FAIL"
+        if not result["focused"]:
+            all_pass = False
+        print(f"  {label:15s} → gaze=({result['gaze_x']:.3f},{result['gaze_y']:.3f}) "
+              f"on_screen={result['on_screen']} score={result['focus_score']:.3f} "
+              f"[{status}]")
+
+    assert all_pass, "Some edge positions reported unfocused!"
+    print("  PASSED")
+
+
+def test_fusion_unfocused_off_screen():
+    """Test that looking far away = not focused."""
+    print("\n" + "="*60)
+    print("TEST 4: Looking far off screen → NOT FOCUSED")
+    print("="*60)
+
+    np.random.seed(42)
+    cal, ok = simulate_calibration()
+    assert ok
+    lm = _make_landmarks_with_ear(0.28)
+
+    off_screen_targets = [
+        (2.0, 0.5, "far right"),
+        (-1.0, 0.5, "far left"),
+        (0.5, 2.0, "far down"),
+        (0.5, -1.0, "far up"),
+    ]
+
+    all_pass = True
+    for tx, ty, label in off_screen_targets:
+        fusion = GazeEyeFusion(cal)
+        yaw = (tx - 0.5) * 0.7
+        pitch = (ty - 0.5) * 0.5
+        for _ in range(10):
+            result = fusion.update(yaw, pitch, lm)
+
+        status = "PASS" if not result["focused"] else "FAIL"
+        if result["focused"]:
+            all_pass = False
+        print(f"  {label:15s} → gaze=({result['gaze_x']:.3f},{result['gaze_y']:.3f}) "
+              f"on_screen={result['on_screen']} score={result['focus_score']:.3f} "
+              f"[{status}]")
+
+    assert all_pass, "Some off-screen positions reported focused!"
+    print("  PASSED")
+
+
+def test_fusion_with_closed_eyes():
+    """Test that sustained closed eyes = not focused, but brief blinks are OK."""
+    print("\n" + "="*60)
+    print("TEST 5: Sustained closed eyes → NOT FOCUSED, brief blink → still FOCUSED")
+    print("="*60)
+
+    np.random.seed(42)
+    cal, ok = simulate_calibration()
+    assert ok
+
+    lm_closed = _make_landmarks_with_ear(0.10)  # eyes almost closed
+    lm_open = _make_landmarks_with_ear(0.28)
+
+    # 5a: Brief blink (2 frames closed) should NOT trigger unfocused
+    fusion = GazeEyeFusion(cal)
+    for _ in range(8):
+        fusion.update(0, 0, lm_open)
+    for _ in range(2):  # 2-frame blink
+        result = fusion.update(0, 0, lm_closed)
+    print(f"  Brief blink (2 frames): focused={result['focused']} score={result['focus_score']:.3f}")
+    assert result["focused"], "Brief blink should NOT trigger unfocused!"
+
+    # 5b: Sustained closure (6+ frames) SHOULD trigger unfocused
+    fusion2 = GazeEyeFusion(cal)
+    for _ in range(10):
+        result2 = fusion2.update(0, 0, lm_closed)
+
+    print(f"  Sustained closure (10 frames): focused={result2['focused']} score={result2['focus_score']:.3f}")
+    assert not result2["focused"], f"Sustained closed eyes should be unfocused! score={result2['focus_score']}"
+    print("  PASSED")
+
+
+def test_l2cs_cosine_scoring():
+    """Test the L2CSPipeline cosine scoring directly."""
+    print("\n" + "="*60)
+    print("TEST 6: L2CS cosine scoring (no calibration)")
+    print("="*60)
+
+    YAW_THRESHOLD = 22.0
+    PITCH_THRESHOLD = 20.0
+
+    test_angles = [
+        (0, 0, "dead center"),
+        (5, 3, "slightly off"),
+        (10, 8, "moderate off"),
+        (15, 12, "near edge"),
+        (20, 18, "at threshold"),
+        (25, 22, "beyond threshold"),
+        (35, 30, "way off"),
+    ]
+
+    for yaw_deg, pitch_deg, label in test_angles:
+        yaw_t = min(yaw_deg / YAW_THRESHOLD, 1.0)
+        pitch_t = min(pitch_deg / PITCH_THRESHOLD, 1.0)
+        yaw_score = 0.5 * (1.0 + math.cos(math.pi * yaw_t))
+        pitch_score = 0.5 * (1.0 + math.cos(math.pi * pitch_t))
+        gaze_score = 0.55 * yaw_score + 0.45 * pitch_score
+        focused = gaze_score >= 0.52
+
+        print(f"  yaw={yaw_deg:3d}° pitch={pitch_deg:3d}° → "
+              f"score={gaze_score:.3f} focused={focused} [{label}]")
+
+    print("  (informational — no assertion)")
+
+
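Worked through for one row above: at yaw = 5°, pitch = 3°, yaw_t = 5/22 ≈ 0.227 gives yaw_score ≈ 0.878, and pitch_t = 0.15 gives pitch_score ≈ 0.946, so the blend is 0.55·0.878 + 0.45·0.946 ≈ 0.908 — well above the 0.52 threshold. A standalone version of the same arithmetic (illustrative, not part of the commit):

```python
import math

YAW_THRESHOLD, PITCH_THRESHOLD = 22.0, 20.0

def cosine_gaze_score(yaw_deg, pitch_deg):
    # Raised-cosine falloff: 1.0 at dead center, 0.0 at or beyond the thresholds.
    yaw_t = min(yaw_deg / YAW_THRESHOLD, 1.0)
    pitch_t = min(pitch_deg / PITCH_THRESHOLD, 1.0)
    yaw_score = 0.5 * (1.0 + math.cos(math.pi * yaw_t))
    pitch_score = 0.5 * (1.0 + math.cos(math.pi * pitch_t))
    return 0.55 * yaw_score + 0.45 * pitch_score

print(cosine_gaze_score(0, 0))    # 1.0
print(cosine_gaze_score(5, 3))    # ≈ 0.908
print(cosine_gaze_score(25, 22))  # 0.0 — both axes clipped at their thresholds
```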
+def test_derotation_consistency():
+    """Test that derotation produces stable results."""
+    print("\n" + "="*60)
+    print("TEST 7: Derotation consistency")
+    print("="*60)
+
+    def _derotate_gaze(pitch_rad, yaw_rad, roll_deg):
+        roll_rad = -math.radians(roll_deg)
+        cos_r, sin_r = math.cos(roll_rad), math.sin(roll_rad)
+        return (yaw_rad * sin_r + pitch_rad * cos_r,
+                yaw_rad * cos_r - pitch_rad * sin_r)
+
+    pitch, yaw = 0.1, 0.2  # radians
+
+    results = []
+    for roll_deg in [0, 5, -5, 10, -10, 15]:
+        dr_pitch, dr_yaw = _derotate_gaze(pitch, yaw, roll_deg)
+        results.append((roll_deg, dr_pitch, dr_yaw))
+        print(f"  roll={roll_deg:+4d}° → pitch={dr_pitch:.4f} yaw={dr_yaw:.4f}")
+
+    # At roll=0, the inputs should pass through unchanged
+    assert abs(results[0][1] - pitch) < 0.001, "Derotation at roll=0 should be identity for pitch"
+    # Note: the formula returns (pitch', yaw'), so at roll=0 it reduces to
+    #   (yaw*sin(0) + pitch*cos(0), yaw*cos(0) - pitch*sin(0)) = (pitch, yaw)
+    print(f"\n  Note: _derotate_gaze returns (pitch', yaw') = "
+          f"(yaw*sin(-roll) + pitch*cos(-roll), yaw*cos(-roll) - pitch*sin(-roll))")
+    print("  At roll=0: returns (pitch, yaw) — identity ✓")
+    print("  PASSED")
+
+
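The helper above is a 2D rotation of the (pitch, yaw) vector by −roll, so roll = 0 is the identity and the gaze magnitude is preserved for any roll (standalone check, not part of the commit):

```python
import math

def derotate(pitch, yaw, roll_deg):
    # Same arithmetic as _derotate_gaze above: rotate (pitch, yaw) by -roll.
    r = -math.radians(roll_deg)
    c, s = math.cos(r), math.sin(r)
    return yaw * s + pitch * c, yaw * c - pitch * s

p, y = 0.1, 0.2
assert derotate(p, y, 0) == (p, y)  # roll=0 passes through unchanged
dp, dy = derotate(p, y, 10)
assert abs(math.hypot(dp, dy) - math.hypot(p, y)) < 1e-12  # rotation preserves magnitude
```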
+def test_calibration_with_verification_points():
+    """Simulate a full calibration + verification workflow.
+    After calibrating, test 5 verification targets, most of which
+    were not in the calibration grid."""
+    print("\n" + "="*60)
+    print("TEST 8: Calibration + verification targets")
+    print("="*60)
+
+    np.random.seed(42)
+    cal, ok = simulate_calibration(noise_std=0.005)
+    assert ok
+
+    # Verification points (the quarter positions are NOT in the calibration grid)
+    verify_targets = [
+        (0.3, 0.3, "upper-left quarter"),
+        (0.7, 0.3, "upper-right quarter"),
+        (0.5, 0.5, "dead center"),
+        (0.3, 0.7, "lower-left quarter"),
+        (0.7, 0.7, "lower-right quarter"),
+    ]
+
+    lm = _make_landmarks_with_ear(0.28)
+    all_pass = True
+
+    for tx, ty, label in verify_targets:
+        fusion = GazeEyeFusion(cal)
+        yaw = (tx - 0.5) * 0.7
+        pitch = (ty - 0.5) * 0.5
+        for _ in range(15):
+            result = fusion.update(yaw, pitch, lm)
+
+        px, py = result["gaze_x"], result["gaze_y"]
+        err = math.sqrt((px - tx)**2 + (py - ty)**2)
+        status = "PASS" if result["focused"] and err < 0.2 else "FAIL"
+        if status == "FAIL":
+            all_pass = False
+
+        print(f"  {label:25s} target=({tx:.1f},{ty:.1f}) → "
+              f"gaze=({px:.3f},{py:.3f}) err={err:.3f} "
+              f"focused={result['focused']} [{status}]")
+
+    assert all_pass, "Verification targets failed!"
+    print("  PASSED")
+
+
+if __name__ == "__main__":
+    test_calibration_accuracy()
+    test_fusion_focused_at_center()
+    test_fusion_focused_at_edges()
+    test_fusion_unfocused_off_screen()
+    test_fusion_with_closed_eyes()
+    test_l2cs_cosine_scoring()
+    test_derotation_consistency()
+    test_calibration_with_verification_points()
+
+    print("\n" + "="*60)
+    print("ALL TESTS PASSED")
+    print("="*60)
ui/pipeline.py
CHANGED
@@ -22,6 +22,7 @@ from models.face_mesh import FaceMeshDetector
 from models.head_pose import HeadPoseEstimator
 from models.eye_scorer import EyeBehaviourScorer, compute_mar, MAR_YAWN_THRESHOLD
 from models.collect_features import FEATURE_NAMES, TemporalTracker, extract_features
+from models.eye_scorer import compute_avg_ear
 
 # Same 10 features used for MLP training (prepare_dataset) and inference
 MLP_FEATURE_NAMES = SELECTED_FEATURES["face_orientation"]
@@ -653,13 +654,22 @@
         self.close()
 
 
+def _is_git_lfs_pointer(path: str) -> bool:
+    # *.pkl in repo are often LFS stubs; torch.load sees "v" from "version ..." and dies
+    try:
+        with open(path, "rb") as f:
+            return f.read(64).startswith(b"version https://git-lfs.github.com/spec/v1")
+    except OSError:
+        return False
+
+
 def _resolve_l2cs_weights():
     for p in [
+        os.path.join(_PROJECT_ROOT, "checkpoints", "L2CSNet_gaze360.pkl"),
         os.path.join(_PROJECT_ROOT, "models", "L2CS-Net", "models", "L2CSNet_gaze360.pkl"),
         os.path.join(_PROJECT_ROOT, "models", "L2CSNet_gaze360.pkl"),
-        os.path.join(_PROJECT_ROOT, "checkpoints", "L2CSNet_gaze360.pkl"),
     ]:
-        if os.path.isfile(p):
+        if os.path.isfile(p) and not _is_git_lfs_pointer(p):
             return p
     return None
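For context on `_is_git_lfs_pointer` above: an LFS stub is a small plain-text file whose first line is fixed by the LFS spec, so sniffing the first bytes is enough. A standalone sketch (not part of the commit; the oid/size values are made up for illustration):

```python
# A Git LFS pointer stub is plain text; the first line is fixed by the spec.
stub = (
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:aabbccdd...\n"   # placeholder digest, not a real one
    "size 268435456\n"           # placeholder size
)
print(stub.encode()[:64].startswith(b"version https://git-lfs.github.com/spec/v1"))  # True
```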
@@ -671,17 +681,22 @@ def is_l2cs_weights_available():
 class L2CSPipeline:
     # Uses in-tree l2cs.Pipeline (RetinaFace + ResNet50) for gaze estimation
     # and MediaPipe for head pose, EAR, MAR, and roll de-rotation.
+    # L2CS inference is throttled to every Nth frame to reduce latency;
+    # intermediate frames reuse the last gaze result.
 
     YAW_THRESHOLD = 22.0
     PITCH_THRESHOLD = 20.0
+    _SKIP_CPU = 5  # run L2CS every 5th frame on CPU
+    _SKIP_GPU = 1  # run every frame on GPU (fast enough)
 
-    def __init__(self, weights_path=None, arch="ResNet50", device=
+    def __init__(self, weights_path=None, arch="ResNet50", device=None,
                  threshold=0.52, detector=None):
         resolved = weights_path or _resolve_l2cs_weights()
         if resolved is None or not os.path.isfile(resolved):
             raise FileNotFoundError(
-                "L2CS weights
-                "
+                "L2CS weights missing or Git LFS not pulled. "
+                "Run: git lfs pull or python download_l2cs_weights.py "
+                "(real .pkl in checkpoints/ or models/L2CS-Net/models/)"
             )
 
         # add in-tree L2CS-Net to import path
@@ -691,7 +706,14 @@ class L2CSPipeline:
         from l2cs import Pipeline as _L2CSPipeline
 
         import torch
-
+
+        # Auto-detect GPU if no device specified
+        if device is None:
+            device = "cuda" if torch.cuda.is_available() else "cpu"
+        self._device_str = device
+        self._on_gpu = device.startswith("cuda")
+
+        # torch.device passed explicitly for reliable CPU/CUDA selection
         self._pipeline = _L2CSPipeline(
             weights=pathlib.Path(resolved), arch=arch, device=torch.device(device),
         )
@@ -704,10 +726,21 @@
         self._threshold = threshold
         self._smoother = _OutputSmoother()
 
+        # Frame skipping: GPU is fast enough to run every frame
+        self.L2CS_SKIP_FRAMES = self._SKIP_GPU if self._on_gpu else self._SKIP_CPU
+        self._frame_count = 0
+        self._last_l2cs_result = None  # cached (derotated pitch_rad, yaw_rad)
+        self._calibrating = False  # set True during calibration to disable frame skipping
+
+        # Blink tolerance: hold score steady during brief blinks
+        self._blink_streak = 0
+        self._BLINK_EAR = 0.18
+        self._BLINK_GRACE = 5  # ignore blinks shorter than this many frames (~300 ms)
+
         print(
             f"[L2CS] Loaded {resolved} | arch={arch} device={device} "
             f"yaw_thresh={self.YAW_THRESHOLD} pitch_thresh={self.PITCH_THRESHOLD} "
-            f"threshold={threshold}"
+            f"threshold={threshold} skip_frames={self.L2CS_SKIP_FRAMES}"
         )
 
     @staticmethod
@@ -728,8 +761,9 @@
             "yaw": None, "pitch": None, "roll": None, "mar": None, "is_yawning": False,
         }
 
-        # MediaPipe: head pose, eye/mouth scores
+        # MediaPipe: head pose, eye/mouth scores (runs every frame — fast)
         roll_deg = 0.0
+        blinking = False
         if landmarks is not None:
             angles = self._head_pose.estimate(landmarks, w, h)
             if angles is not None:
@@ -740,19 +774,54 @@
             out["mar"] = compute_mar(landmarks)
             out["is_yawning"] = out["mar"] > MAR_YAWN_THRESHOLD
 
-
-
+            # Detect blink — EAR drops below threshold
+            ear = compute_avg_ear(landmarks)
+            if ear < self._BLINK_EAR:
+                self._blink_streak += 1
+                blinking = True
+            else:
+                self._blink_streak = 0
+
+        # During a brief blink, L2CS gaze angles are unreliable (eyes closed).
+        # Hold the previous score steady until the blink ends or becomes sustained.
+        if blinking and self._blink_streak < self._BLINK_GRACE:
+            # Brief blink — freeze score, skip L2CS inference
+            out["raw_score"] = self._smoother._score
+            out["is_focused"] = out["raw_score"] >= self._threshold
+            # Keep previous gaze angles for visualization continuity
+            if self._last_l2cs_result is not None:
+                out["gaze_pitch"] = self._last_l2cs_result[0]
+                out["gaze_yaw"] = self._last_l2cs_result[1]
+            return out
+
+        # L2CS gaze — throttled: only run every Nth frame, reuse cached result otherwise.
+        # During calibration, run every frame for accurate sample collection.
+        self._frame_count += 1
+        if self._calibrating:
+            run_l2cs = True
+        else:
+            # N == 1 means every frame (count % 1 is never 1, so special-case it)
+            run_l2cs = (self.L2CS_SKIP_FRAMES <= 1
+                        or self._frame_count % self.L2CS_SKIP_FRAMES == 1
+                        or self._last_l2cs_result is None)
+
+        if run_l2cs:
+            results = self._pipeline.step(bgr_frame)
+            if results is not None and results.pitch.shape[0] > 0:
+                raw_pitch = float(results.pitch[0])
+                raw_yaw = float(results.yaw[0])
+                # Derotate immediately and cache the derotated result
+                # so cached frames don't get re-derotated with a different roll.
+                dr_pitch, dr_yaw = self._derotate_gaze(raw_pitch, raw_yaw, roll_deg)
+                self._last_l2cs_result = (dr_pitch, dr_yaw)
+            else:
+                self._last_l2cs_result = None
 
-        if
+        if self._last_l2cs_result is None:
             smoothed = self._smoother.update(0.0, landmarks is not None)
             out["raw_score"] = smoothed
             out["is_focused"] = smoothed >= self._threshold
             return out
 
-        pitch_rad =
-
-
-        pitch_rad, yaw_rad = self._derotate_gaze(pitch_rad, yaw_rad, roll_deg)
+        pitch_rad, yaw_rad = self._last_l2cs_result
+        # Already derotated above — use directly
         out["gaze_pitch"] = pitch_rad
         out["gaze_yaw"] = yaw_rad
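The throttling cadence above is easy to tabulate; a standalone sketch of which frames run inference (illustrative, not part of the commit; it mirrors the rule in step(), including the N == 1 special case):

```python
def runs_inference(frame_count, skip_frames, have_cached):
    # Mirrors the throttling rule in L2CSPipeline.step() above.
    return skip_frames <= 1 or frame_count % skip_frames == 1 or not have_cached

# CPU (N=5): inference on frames 1, 6, 11, ...; cached gaze reused in between.
print([f for f in range(1, 13) if runs_inference(f, 5, True)])  # [1, 6, 11]
# GPU (N=1): every frame.
print([f for f in range(1, 6) if runs_inference(f, 1, True)])   # [1, 2, 3, 4, 5]
```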
@@ -773,12 +842,20 @@
         if out["is_yawning"]:
             gaze_score = 0.0
 
+        # Sustained closed eyes — let the score drop
+        if self._blink_streak >= self._BLINK_GRACE:
+            gaze_score = 0.0
+
         out["raw_score"] = self._smoother.update(float(gaze_score), True)
         out["is_focused"] = out["raw_score"] >= self._threshold
         return out
 
     def reset_session(self):
         self._smoother.reset()
+        self._frame_count = 0
+        self._last_l2cs_result = None
+        self._calibrating = False
+        self._blink_streak = 0
 
     def close(self):
         if self._owns_detector: