// final_v2/src/components/FocusPageLocal.jsx
// Commit 5440fdf (Yingtao-Zheng) — "Fix: still fixing same bug for eye gaze"
import React, { useState, useEffect, useRef } from 'react';
import CalibrationOverlay from './CalibrationOverlay';
// Steps of the first-run tutorial flow overlay.
const FLOW_STEPS = {
  intro: 'intro',           // welcome card with setup notes
  permission: 'permission', // camera-permission walkthrough card
  ready: 'ready'            // no overlay; normal page controls
};

// Focus-detection states mirrored from backend status updates.
// Values double as CSS class suffixes on the focus-state pill.
const FOCUS_STATES = {
  pending: 'pending',
  focused: 'focused',
  notFocused: 'not-focused'
};
// Decorative smiley icon shown on the tutorial intro card (aria-hidden).
function HelloIcon() {
  return (
    <svg width="96" height="96" viewBox="0 0 96 96" aria-hidden="true">
      <circle cx="48" cy="48" r="40" fill="#007BFF" />
      <path d="M30 38c0-4 2.7-7 6-7s6 3 6 7" fill="none" stroke="#fff" strokeWidth="6" strokeLinecap="round" />
      <path d="M54 38c0-4 2.7-7 6-7s6 3 6 7" fill="none" stroke="#fff" strokeWidth="6" strokeLinecap="round" />
      <path d="M30 52c3 11 10 17 18 17s15-6 18-17" fill="none" stroke="#fff" strokeWidth="6" strokeLinecap="round" />
    </svg>
  );
}
// Decorative camera icon shown on the camera-permission card (aria-hidden).
function CameraIcon() {
  return (
    <svg width="110" height="110" viewBox="0 0 110 110" aria-hidden="true">
      <rect x="30" y="36" width="50" height="34" rx="5" fill="none" stroke="#007BFF" strokeWidth="6" />
      <path d="M24 72h62c0 9-7 16-16 16H40c-9 0-16-7-16-16Z" fill="none" stroke="#007BFF" strokeWidth="6" />
      <path d="M55 28v8" stroke="#007BFF" strokeWidth="6" strokeLinecap="round" />
      <circle cx="55" cy="36" r="14" fill="none" stroke="#007BFF" strokeWidth="6" />
      <circle cx="55" cy="36" r="4" fill="#007BFF" />
      <path d="M46 83h18" stroke="#007BFF" strokeWidth="6" strokeLinecap="round" />
    </svg>
  );
}
// Display order for the model selector buttons (filtered against the
// backend's available-model list at render time).
const MODEL_ORDER = ['hybrid', 'xgboost', 'mlp', 'geometric'];

// Static metadata for each selectable focus-detection model. Drives the
// model selector labels and the informational model card. Metric strings
// are hardcoded from offline evaluation runs (see `evaluation` per entry);
// `badge` is an optional label rendered next to the title.
const MODEL_INFO = {
  hybrid: {
    label: 'Hybrid',
    tagline: 'Best overall — combines ML with geometric scoring',
    how: 'Fuses XGBoost predictions (30%) with geometric face/eye scores (70%). Uses a weighted blend tuned with LOPO evaluation.',
    accuracy: 'N/A',
    f1: '0.8409',
    auc: 'N/A',
    threshold: '0.46',
    evaluation: 'LOPO tuning (9 participants, 144K frames)',
    features: '10 features: head deviation, face score, eye scores (EAR), gaze offset, pitch, horizontal gaze, PERCLOS',
    strengths: 'Most robust across different people. Latest LOPO mean F1 is 0.8409 at w_mlp=0.3.',
    badge: 'Recommended',
  },
  xgboost: {
    label: 'XGBoost',
    tagline: 'Highest raw accuracy — gradient-boosted decision trees',
    how: 'Ensemble of 600 decision trees (max depth 8). Each tree learns to correct errors from previous trees. Outputs probability of focused state.',
    accuracy: '95.87%',
    f1: '0.9585',
    auc: '0.9908',
    threshold: '0.38',
    evaluation: 'Random split test (15%) + LOPO thresholds',
    features: '10 features: head deviation, face score, eye scores (EAR), gaze offset, pitch, horizontal gaze, PERCLOS',
    strengths: 'Strong pattern recognition and fast inference. LOPO: AUC 0.8695, optimal threshold 0.280, F1 0.8549.',
    badge: null,
  },
  mlp: {
    label: 'MLP',
    tagline: 'Lightweight neural network — fast and efficient',
    how: 'Two-layer neural network (64→32 neurons). Takes 10 face features, applies learned weights, outputs focused/unfocused probability via softmax.',
    accuracy: '92.92%',
    f1: '0.9287',
    auc: '0.9714',
    threshold: '0.23',
    evaluation: 'Random split test (15%) + LOPO thresholds',
    features: '10 features: head deviation, face score, eye scores (EAR), gaze offset, pitch, horizontal gaze, PERCLOS',
    strengths: 'Fastest inference and smallest model size. LOPO: AUC 0.8624, optimal threshold 0.228, F1 0.8578.',
    badge: null,
  },
  geometric: {
    label: 'Geometric',
    tagline: 'Baseline only — hardcoded thresholds, no learning',
    how: 'Uses fixed thresholds on head orientation (70%) and eye openness (30%). No training — just hand-tuned rules on 478 face landmarks. Cannot adapt to new faces or environments.',
    accuracy: 'N/A',
    f1: '0.8195',
    auc: 'N/A',
    threshold: '0.55',
    evaluation: 'LOPO geometric sweep',
    features: 'Head yaw/pitch/roll angles, eye aspect ratio (EAR), iris gaze offset, mouth aspect ratio (MAR)',
    strengths: 'No model files needed. Useful fallback when model checkpoints are unavailable.',
    badge: 'Baseline',
  },
};
// Main Focus page. Drives the local camera preview, the detection session
// lifecycle, model selection, Picture-in-Picture, and the first-run
// tutorial overlays. `videoManager` is an external controller that owns
// the camera stream, streaming state, and backend callbacks.
function FocusPageLocal({ videoManager, sessionResult, setSessionResult, isActive, isTutorialActive, setIsTutorialActive, setHasSeenTutorial }) {
  const [currentFrame, setCurrentFrame] = useState(15);        // target FPS (slider range 10-30)
  const [timelineEvents, setTimelineEvents] = useState([]);    // rolling focused/unfocused history (capped at 60)
  const [stats, setStats] = useState(null);                    // polled from videoManager.getStats()
  const [systemStats, setSystemStats] = useState(null);        // CPU/RAM polled from /api/stats/system
  const [availableModels, setAvailableModels] = useState([]);  // model names reported by /api/models
  const [currentModel, setCurrentModel] = useState('mlp');
  const [flowStep, setFlowStep] = useState(FLOW_STEPS.ready);  // which tutorial overlay (if any) is shown
  const [cameraReady, setCameraReady] = useState(false);
  const [isStarting, setIsStarting] = useState(false);
  const [focusState, setFocusState] = useState(FOCUS_STATES.pending);
  const [cameraError, setCameraError] = useState('');
  const [calibration, setCalibration] = useState(null);        // gaze-calibration progress; null when inactive
  const [l2csBoost, setL2csBoost] = useState(false);           // eye-gaze (L2CS) boost enabled
  const [l2csBoostAvailable, setL2csBoostAvailable] = useState(false);
  const [showEyeGazeModal, setShowEyeGazeModal] = useState(false);
  const [eyeGazeDontShow, setEyeGazeDontShow] = useState(false); // "don't show this again" checkbox
  const localVideoRef = useRef(null);     // hidden <video> bound to the raw camera stream
  const displayCanvasRef = useRef(null);  // visible canvas the preview/processed frames are drawn to
  const pipVideoRef = useRef(null);       // hidden <video> that feeds Picture-in-Picture
  const pipStreamRef = useRef(null);      // captured stream attached to the PiP video
  const previewFrameRef = useRef(null);   // requestAnimationFrame id for the idle preview loop
useEffect(() => {
if (isTutorialActive) {
setFlowStep(FLOW_STEPS.intro);
} else {
setFlowStep(FLOW_STEPS.ready);
}
}, [isTutorialActive]);
const formatDuration = (seconds) => {
if (seconds === 0) return '0s';
const mins = Math.floor(seconds / 60);
const secs = Math.floor(seconds % 60);
return `${mins}m ${secs}s`;
};
const stopPreviewLoop = () => {
if (previewFrameRef.current) {
cancelAnimationFrame(previewFrameRef.current);
previewFrameRef.current = null;
}
};
const startPreviewLoop = () => {
stopPreviewLoop();
const renderPreview = () => {
const canvas = displayCanvasRef.current;
const video = localVideoRef.current;
if (!canvas || !video || !cameraReady || videoManager?.isStreaming) {
previewFrameRef.current = null;
return;
}
if (video.readyState >= 2) {
const ctx = canvas.getContext('2d');
ctx.drawImage(video, 0, 0, canvas.width, canvas.height);
}
previewFrameRef.current = requestAnimationFrame(renderPreview);
};
previewFrameRef.current = requestAnimationFrame(renderPreview);
};
const getErrorMessage = (err) => {
if (err?.name === 'NotAllowedError') {
return 'Camera permission denied. Please allow camera access.';
}
if (err?.name === 'NotFoundError') {
return 'No camera found. Please connect a camera.';
}
if (err?.name === 'NotReadableError') {
return 'Camera is already in use by another application.';
}
if (err?.target?.url) {
return `WebSocket connection failed: ${err.target.url}. Check that the backend server is running.`;
}
return err?.message || 'Failed to start focus session.';
};
useEffect(() => {
if (!videoManager) return;
const originalOnStatusUpdate = videoManager.callbacks.onStatusUpdate;
const originalOnSessionEnd = videoManager.callbacks.onSessionEnd;
videoManager.callbacks.onStatusUpdate = (isFocused) => {
setTimelineEvents((prev) => {
const newEvents = [...prev, { isFocused, timestamp: Date.now() }];
if (newEvents.length > 60) newEvents.shift();
return newEvents;
});
setFocusState(isFocused ? FOCUS_STATES.focused : FOCUS_STATES.notFocused);
if (originalOnStatusUpdate) originalOnStatusUpdate(isFocused);
};
videoManager.callbacks.onSessionEnd = (summary) => {
setFocusState(FOCUS_STATES.pending);
setCameraReady(false);
if (originalOnSessionEnd) originalOnSessionEnd(summary);
};
videoManager.callbacks.onCalibrationUpdate = (cal) => {
setCalibration(cal && cal.active ? { ...cal } : null);
};
const statsInterval = setInterval(() => {
if (videoManager && videoManager.getStats) {
setStats(videoManager.getStats());
}
}, 1000);
return () => {
if (videoManager) {
videoManager.callbacks.onStatusUpdate = originalOnStatusUpdate;
videoManager.callbacks.onSessionEnd = originalOnSessionEnd;
videoManager.callbacks.onCalibrationUpdate = null;
}
clearInterval(statsInterval);
};
}, [videoManager]);
useEffect(() => {
fetch('/api/models')
.then((res) => res.json())
.then((data) => {
if (data.available) setAvailableModels(data.available);
if (data.current) {
if (data.current === 'l2cs') {
const fallback = data.available.find((m) => m !== 'l2cs') || 'mlp';
setCurrentModel(fallback);
handleModelChange(fallback);
} else {
setCurrentModel(data.current);
}
}
if (data.l2cs_boost !== undefined) setL2csBoost(data.l2cs_boost);
if (data.l2cs_boost_available !== undefined) setL2csBoostAvailable(data.l2cs_boost_available);
})
.catch((err) => console.error('Failed to fetch models:', err));
}, []);
useEffect(() => {
if (flowStep === FLOW_STEPS.ready && cameraReady && !videoManager?.isStreaming) {
startPreviewLoop();
return;
}
stopPreviewLoop();
}, [cameraReady, flowStep, videoManager?.isStreaming]);
useEffect(() => {
if (!isActive) {
stopPreviewLoop();
}
}, [isActive]);
useEffect(() => {
return () => {
stopPreviewLoop();
if (pipVideoRef.current) {
pipVideoRef.current.pause();
pipVideoRef.current.srcObject = null;
}
if (pipStreamRef.current) {
pipStreamRef.current.getTracks().forEach((t) => t.stop());
pipStreamRef.current = null;
}
};
}, []);
useEffect(() => {
const fetchSystem = () => {
fetch('/api/stats/system')
.then(res => res.json())
.then(data => setSystemStats(data))
.catch(() => setSystemStats(null));
};
fetchSystem();
const interval = setInterval(fetchSystem, 3000);
return () => clearInterval(interval);
}, []);
const handleModelChange = async (modelName) => {
try {
const res = await fetch('/api/settings', {
method: 'PUT',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ model_name: modelName })
});
const result = await res.json();
if (result.updated) {
setCurrentModel(modelName);
}
} catch (err) {
console.error('Failed to switch model:', err);
}
};
const closeTutorial = () => {
setFlowStep(FLOW_STEPS.ready);
setIsTutorialActive(false);
setHasSeenTutorial(true);
};
const handleEnableCamera = async () => {
if (!videoManager) return;
try {
setCameraError('');
await videoManager.initCamera(localVideoRef.current, displayCanvasRef.current);
setCameraReady(true);
closeTutorial();
setFocusState(FOCUS_STATES.pending);
} catch (err) {
const errorMessage = getErrorMessage(err);
setCameraError(errorMessage);
console.error('Camera init error:', err);
}
};
const applyEyeGazeChange = async (enable, withCalibration = true) => {
try {
const res = await fetch('/api/settings', {
method: 'PUT',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ l2cs_boost: enable })
});
if (!res.ok) return;
setL2csBoost(enable);
if (enable && withCalibration && videoManager && videoManager.isStreaming) {
videoManager.startCalibration();
} else if (!enable && videoManager) {
videoManager.cancelCalibration();
}
} catch (err) {
console.error('Failed to toggle eye gaze:', err);
}
};
const handleEyeGazeToggle = async () => {
const next = !l2csBoost;
if (next && !eyeGazeDontShow) {
setShowEyeGazeModal(true);
return;
}
await applyEyeGazeChange(next, false);
};
const handleEyeGazeModalAction = async (withCalibration) => {
setShowEyeGazeModal(false);
await applyEyeGazeChange(true, withCalibration);
};
const handleStart = async () => {
try {
setIsStarting(true);
setSessionResult(null);
setTimelineEvents([]);
setFocusState(FOCUS_STATES.pending);
setCameraError('');
if (!cameraReady) {
await videoManager.initCamera(localVideoRef.current, displayCanvasRef.current);
setCameraReady(true);
setFlowStep(FLOW_STEPS.ready);
}
await videoManager.startStreaming();
} catch (err) {
const errorMessage = getErrorMessage(err);
setCameraError(errorMessage);
setFocusState(FOCUS_STATES.pending);
console.error('Start error:', err);
alert(`Failed to start: ${errorMessage}\n\nCheck browser console for details.`);
} finally {
setIsStarting(false);
}
};
const handleStop = async () => {
if (videoManager) {
await videoManager.stopStreaming();
}
try {
if (document.pictureInPictureElement === pipVideoRef.current) {
await document.exitPictureInPicture();
}
} catch (_) {}
if (pipVideoRef.current) {
pipVideoRef.current.pause();
pipVideoRef.current.srcObject = null;
}
if (pipStreamRef.current) {
pipStreamRef.current.getTracks().forEach((t) => t.stop());
pipStreamRef.current = null;
}
stopPreviewLoop();
setFocusState(FOCUS_STATES.pending);
setCameraReady(false);
};
const handlePiP = async () => {
try {
if (!videoManager || !videoManager.isStreaming) {
alert('Please start the video first.');
return;
}
if (!displayCanvasRef.current) {
alert('Video not ready.');
return;
}
if (document.pictureInPictureElement === pipVideoRef.current) {
await document.exitPictureInPicture();
console.log('PiP exited');
return;
}
if (!document.pictureInPictureEnabled) {
alert('Picture-in-Picture is not supported in this browser.');
return;
}
const pipVideo = pipVideoRef.current;
if (!pipVideo) {
alert('PiP video element not ready.');
return;
}
const isSafariPiP = typeof pipVideo.webkitSetPresentationMode === 'function';
let stream = pipStreamRef.current;
if (!stream) {
const capture = displayCanvasRef.current.captureStream;
if (typeof capture === 'function') {
stream = capture.call(displayCanvasRef.current, 30);
}
if (!stream || stream.getTracks().length === 0) {
const cameraStream = localVideoRef.current?.srcObject;
if (!cameraStream) {
alert('Camera stream not ready.');
return;
}
stream = cameraStream;
}
pipStreamRef.current = stream;
}
if (!stream || stream.getTracks().length === 0) {
alert('Failed to capture video stream from canvas.');
return;
}
pipVideo.srcObject = stream;
if (pipVideo.readyState < 2) {
await new Promise((resolve) => {
const onReady = () => {
pipVideo.removeEventListener('loadeddata', onReady);
pipVideo.removeEventListener('canplay', onReady);
resolve();
};
pipVideo.addEventListener('loadeddata', onReady);
pipVideo.addEventListener('canplay', onReady);
setTimeout(resolve, 600);
});
}
try {
await pipVideo.play();
} catch (_) {}
if (isSafariPiP) {
try {
pipVideo.webkitSetPresentationMode('picture-in-picture');
console.log('PiP activated (Safari)');
return;
} catch (e) {
const cameraStream = localVideoRef.current?.srcObject;
if (cameraStream && cameraStream !== pipVideo.srcObject) {
pipVideo.srcObject = cameraStream;
try {
await pipVideo.play();
} catch (_) {}
pipVideo.webkitSetPresentationMode('picture-in-picture');
console.log('PiP activated (Safari fallback)');
return;
}
throw e;
}
}
if (typeof pipVideo.requestPictureInPicture === 'function') {
await pipVideo.requestPictureInPicture();
console.log('PiP activated');
} else {
alert('Picture-in-Picture is not supported in this browser.');
}
} catch (err) {
console.error('PiP error:', err);
alert(`Failed to enter Picture-in-Picture: ${err.message}`);
}
};
const handleFloatingWindow = () => {
handlePiP();
};
const handleFrameChange = (val) => {
const rate = parseInt(val, 10);
setCurrentFrame(rate);
if (videoManager) {
videoManager.setFrameRate(rate);
}
};
const handlePreview = () => {
if (!videoManager || !videoManager.isStreaming) {
alert('Please start a session first.');
return;
}
const currentStats = videoManager.getStats();
if (!currentStats.sessionId) {
alert('No active session.');
return;
}
const sessionDuration = Math.floor((Date.now() - (videoManager.sessionStartTime || Date.now())) / 1000);
const totalFrames = currentStats.framesProcessed || 0;
const focusedFrames = currentStats.focusedFrames ?? 0;
const focusScore = totalFrames > 0 ? focusedFrames / totalFrames : 0;
setSessionResult({
duration_seconds: sessionDuration,
focus_score: focusScore,
total_frames: totalFrames,
focused_frames: focusedFrames
});
};
const handleCloseOverlay = () => {
setSessionResult(null);
};
const pageStyle = isActive
? undefined
: {
position: 'absolute',
width: '1px',
height: '1px',
overflow: 'hidden',
opacity: 0,
pointerEvents: 'none'
};
const focusStateLabel = {
[FOCUS_STATES.pending]: 'Pending',
[FOCUS_STATES.focused]: 'Focused',
[FOCUS_STATES.notFocused]: 'Not Focused'
}[focusState];
const introHighlights = [
{
title: 'Live focus tracking',
text: 'Head pose, gaze, and eye openness are read continuously during the session.'
},
{
title: 'Quick setup',
text: 'Front-facing light and a stable camera angle give the cleanest preview.'
},
{
title: 'Private by default',
text: 'Only session metadata is stored, not the raw camera footage.'
},
{
title: 'Sync across devices',
text: 'Your history auto-saves to this browser. To switch devices, use the Data Management tools at the bottom of the My Records tab to export or import your data.'
}
];
const permissionSteps = [
{
title: 'Allow browser access',
text: 'Approve the camera prompt so the preview can appear immediately.'
},
{
title: 'Check your framing',
text: 'Keep your face visible and centered for more stable landmark detection.'
},
{
title: 'Start when ready',
text: 'After the preview appears, use the page controls to begin or stop.'
}
];
  // Render the tutorial overlay: the intro card (setup notes) for the
  // 'intro' step, the camera-permission card for the 'permission' step
  // (unless the camera is already ready), otherwise nothing.
  const renderIntroCard = () => {
    if (flowStep === FLOW_STEPS.intro) {
      return (
        <div className="focus-flow-overlay">
          <div className="focus-flow-card">
            <div className="focus-flow-header">
              <div>
                <div className="focus-flow-eyebrow">Focus Session</div>
                <h2>Before you begin</h2>
              </div>
              <div className="focus-flow-icon">
                <HelloIcon />
              </div>
            </div>
            <p className="focus-flow-lead">
              The focus page uses your live camera preview to estimate attention in real time.
              Review the setup notes below, then continue to camera access.
            </p>
            <div className="focus-flow-grid">
              {introHighlights.map((item) => (
                <article key={item.title} className="focus-flow-panel">
                  <h3>{item.title}</h3>
                  <p>{item.text}</p>
                </article>
              ))}
            </div>
            <div className="focus-flow-glasses-note">
              <strong>Wearing glasses?</strong> Glasses may reduce detection accuracy on some models. If results seem inaccurate, try switching to a different model (e.g. Geometric or MLP).
            </div>
            <div className="focus-flow-footer">
              <div className="focus-flow-note">
                You can still change frame rate and available model options after the preview loads.
              </div>
              <div style={{ display: 'flex', gap: '10px' }}>
                <button className="focus-flow-secondary" onClick={closeTutorial}>
                  Skip
                </button>
                <button className="focus-flow-button" onClick={() => setFlowStep(FLOW_STEPS.permission)}>
                  Continue
                </button>
              </div>
            </div>
          </div>
        </div>
      );
    }
    if (flowStep === FLOW_STEPS.permission && !cameraReady) {
      return (
        <div className="focus-flow-overlay">
          <div className="focus-flow-card">
            <div className="focus-flow-header">
              <div>
                <div className="focus-flow-eyebrow">Camera Setup</div>
                <h2>Enable camera access</h2>
              </div>
              <div className="focus-flow-icon">
                <CameraIcon />
              </div>
            </div>
            <p className="focus-flow-lead">
              Once access is granted, your preview appears here and the rest of the Focus page
              behaves like the other dashboard screens.
            </p>
            <div className="focus-flow-steps">
              {permissionSteps.map((item, index) => (
                <div key={item.title} className="focus-flow-step">
                  <div className="focus-flow-step-number">{index + 1}</div>
                  <div className="focus-flow-step-copy">
                    <h3>{item.title}</h3>
                    <p>{item.text}</p>
                  </div>
                </div>
              ))}
            </div>
            {cameraError ? <div className="focus-inline-error">{cameraError}</div> : null}
            <div className="focus-flow-footer">
              <button
                type="button"
                className="focus-flow-secondary"
                onClick={() => setFlowStep(FLOW_STEPS.intro)}
              >
                Back
              </button>
              <button className="focus-flow-button" onClick={handleEnableCamera}>
                Enable Camera
              </button>
            </div>
          </div>
        </div>
      );
    }
    return null;
  };
  // Render the eye-gaze explainer modal shown before enabling the L2CS
  // boost: warns about CPU cost, explains calibration, and offers
  // Skip (enable without calibration) vs Start Calibration.
  const renderEyeGazeModal = () => {
    if (!showEyeGazeModal) return null;
    return (
      <div className="focus-flow-overlay" style={{ zIndex: 2000 }}>
        <div className="focus-flow-card">
          <div className="focus-flow-header">
            <div>
              <div className="focus-flow-eyebrow">Eye Gaze Tracking</div>
              <h2>Before you enable</h2>
            </div>
            <div className="focus-flow-icon">
              <svg width="96" height="96" viewBox="0 0 96 96" aria-hidden="true">
                <ellipse cx="48" cy="48" rx="38" ry="24" fill="none" stroke="#007BFF" strokeWidth="5" />
                <circle cx="48" cy="48" r="13" fill="none" stroke="#007BFF" strokeWidth="5" />
                <circle cx="48" cy="48" r="5" fill="#007BFF" />
              </svg>
            </div>
          </div>
          <p className="focus-flow-lead">
            Eye gaze tracking runs an additional deep neural network (L2CS-Net) alongside your current model.
            Please read the notes below before proceeding.
          </p>
          <div className="focus-flow-grid">
            <article className="focus-flow-panel focus-flow-panel-warn">
              <h3>Performance impact</h3>
              <p>Enabling eye gaze tracking increases CPU usage and may reduce frame rate. If the system feels sluggish, consider disabling it.</p>
            </article>
            <article className="focus-flow-panel">
              <h3>Calibration (recommended)</h3>
              <p>For best accuracy, calibrate by looking at 9 screen positions one at a time, followed by 1 validation point. The whole process takes about 30 seconds.</p>
            </article>
          </div>
          <div className="focus-flow-steps">
            <div className="focus-flow-step">
              <div className="focus-flow-step-number">1</div>
              <div className="focus-flow-step-copy">
                <h3>Click "Start Calibration"</h3>
                <p>A dot will appear on screen. Look directly at it and keep your gaze steady. It will cycle through 9 positions then show a final validation dot.</p>
              </div>
            </div>
            <div className="focus-flow-step">
              <div className="focus-flow-step-number">2</div>
              <div className="focus-flow-step-copy">
                <h3>Or skip for now</h3>
                <p>Click "Skip" to enable eye gaze tracking without calibrating. You can recalibrate at any time using the "Recalibrate" button during a session.</p>
              </div>
            </div>
          </div>
          <label className="eye-gaze-modal-checkbox">
            <input
              type="checkbox"
              checked={eyeGazeDontShow}
              onChange={(e) => setEyeGazeDontShow(e.target.checked)}
            />
            Don't show this again
          </label>
          <div className="focus-flow-footer">
            <button
              type="button"
              className="focus-flow-secondary"
              onClick={() => handleEyeGazeModalAction(false)}
            >
              Skip
            </button>
            <button
              className="focus-flow-button"
              onClick={() => handleEyeGazeModalAction(true)}
            >
              Start Calibration
            </button>
          </div>
        </div>
      </div>
    );
  };
  return (
    <main id="page-b" className="page" style={pageStyle}>
      {renderIntroCard()}
      {renderEyeGazeModal()}
      {/* Display area: two hidden videos (camera source, PiP feed) plus the
          visible canvas that shows the preview / processed frames. */}
      <section id="display-area" className="focus-display-shell">
        <video
          ref={pipVideoRef}
          muted
          playsInline
          autoPlay
          style={{
            position: 'absolute',
            width: '1px',
            height: '1px',
            opacity: 0,
            pointerEvents: 'none'
          }}
        />
        <video
          ref={localVideoRef}
          muted
          playsInline
          autoPlay
          style={{ display: 'none' }}
        />
        <canvas
          ref={displayCanvasRef}
          width={640}
          height={480}
          style={{
            width: '100%',
            height: '100%',
            objectFit: 'contain',
            backgroundColor: '#101010'
          }}
        />
        {flowStep === FLOW_STEPS.ready ? (
          <>
            <div className={`focus-state-pill ${focusState}`}>
              <span className="focus-state-dot" />
              {focusStateLabel}
            </div>
            {!cameraReady && !videoManager?.isStreaming ? (
              <div className="focus-idle-overlay">
                <p>Camera is paused.</p>
                <span>Use Start to enable the camera and begin detection.</span>
              </div>
            ) : null}
          </>
        ) : null}
        {/* Session summary overlay (from Preview Result or session end). */}
        {sessionResult && (
          <div className="session-result-overlay">
            <h3>Session Complete!</h3>
            <div className="result-item">
              <span className="label">Duration:</span>
              <span className="value">{formatDuration(sessionResult.duration_seconds)}</span>
            </div>
            <div className="result-item">
              <span className="label">Focus Score:</span>
              <span className="value">{(sessionResult.focus_score * 100).toFixed(1)}%</span>
            </div>
            <button
              onClick={handleCloseOverlay}
              style={{
                marginTop: '20px',
                padding: '8px 20px',
                background: 'transparent',
                border: '1px solid white',
                color: 'white',
                borderRadius: '20px',
                cursor: 'pointer'
              }}
            >
              Close
            </button>
          </div>
        )}
      </section>
      {flowStep === FLOW_STEPS.ready ? (
        <>
          {availableModels.length > 0 ? (
            <section className="focus-model-strip">
              <span className="focus-model-label">Model:</span>
              {MODEL_ORDER.filter((n) => availableModels.includes(n)).map((name) => (
                <button
                  key={name}
                  onClick={() => handleModelChange(name)}
                  className={`focus-model-button ${currentModel === name ? 'active' : ''}`}
                >
                  {MODEL_INFO[name]?.label || name}
                </button>
              ))}
{l2csBoostAvailable && (
<>
<span className="focus-model-sep" />
<button
onClick={handleEyeGazeToggle}
className={`eye-gaze-toggle ${l2csBoost ? 'on' : 'off'}`}
title={l2csBoost ? 'Eye gaze tracking activeclick to disable' : 'Enable eye gaze tracking (requires calibration)'}
>
<svg width="16" height="16" viewBox="0 0 16 16" className="eye-gaze-icon" aria-hidden="true">
<ellipse cx="8" cy="8" rx="7" ry="4.5" fill="none" stroke="currentColor" strokeWidth="1.4" />
<circle cx="8" cy="8" r="2.2" fill="currentColor" />
</svg>
{l2csBoost ? 'Eye Gaze On' : 'Eye Gaze'}
</button>
{l2csBoost && stats && stats.isStreaming && (
<button
onClick={() => videoManager && videoManager.startCalibration()}
className="focus-model-button recalibrate"
title="Re-run gaze calibration"
>
Recalibrate
</button>
)}
</>
)}
            </section>
          ) : null}
          {/* Live CPU/RAM readout polled from /api/stats/system. */}
          {systemStats && systemStats.cpu_percent != null && (
            <section className="focus-system-stats">
              <span>CPU: <strong>{systemStats.cpu_percent}%</strong></span>
              <span className="focus-system-stats-sep" />
              <span>RAM: <strong>{systemStats.memory_percent}%</strong> ({systemStats.memory_used_mb}/{systemStats.memory_total_mb} MB)</span>
            </section>
          )}
          {/* Rolling timeline: one block per status event (max 60 kept). */}
          <section id="timeline-area">
            <div className="timeline-label">Timeline</div>
            <div id="timeline-visuals">
              {timelineEvents.map((event, index) => (
                <div
                  key={index}
                  className="timeline-block"
                  style={{
                    backgroundColor: event.isFocused ? '#43ff6f' : '#ff2c41',
                    width: '10px',
                    height: '20px',
                    borderRadius: '2px',
                    flexShrink: 0
                  }}
                  title={event.isFocused ? 'Focused' : 'Distracted'}
                />
              ))}
            </div>
            <div id="timeline-line" />
          </section>
          <section id="control-panel">
            <button id="btn-cam-start" className="action-btn green" onClick={handleStart} disabled={isStarting}>
              {isStarting ? 'Starting...' : 'Start'}
            </button>
            <button id="btn-floating" className="action-btn blue" onClick={handlePiP}>
              Floating Window
            </button>
            <button id="btn-preview" className="action-btn orange" onClick={handlePreview}>
              Preview Result
            </button>
            <button id="btn-cam-stop" className="action-btn red" onClick={handleStop}>
              Stop
            </button>
          </section>
          {cameraError ? (
            <div className="focus-inline-error focus-inline-error-standalone">{cameraError}</div>
          ) : null}
          {/* Informational card describing the currently selected model. */}
          {MODEL_INFO[currentModel] && (
            <section className="model-card">
              <div className="model-card-header">
                <h3 className="model-card-title">{MODEL_INFO[currentModel].label}</h3>
                {MODEL_INFO[currentModel].badge && (
                  <span className={MODEL_INFO[currentModel].badge === 'Baseline' ? 'model-card-badge-baseline' : 'model-card-badge'}>
                    {MODEL_INFO[currentModel].badge}
                  </span>
                )}
              </div>
              <p className="model-card-tagline">{MODEL_INFO[currentModel].tagline}</p>
              <div className="model-card-metrics">
                <div className="model-card-metric">
                  <span className="model-card-metric-value">{MODEL_INFO[currentModel].accuracy}</span>
                  <span className="model-card-metric-label">Accuracy</span>
                </div>
                <div className="model-card-metric">
                  <span className="model-card-metric-value">{MODEL_INFO[currentModel].f1}</span>
                  <span className="model-card-metric-label">F1 Score</span>
                </div>
                <div className="model-card-metric">
                  <span className="model-card-metric-value">{MODEL_INFO[currentModel].auc}</span>
                  <span className="model-card-metric-label">ROC-AUC</span>
                </div>
                <div className="model-card-metric">
                  <span className="model-card-metric-value">{MODEL_INFO[currentModel].threshold}</span>
                  <span className="model-card-metric-label">Threshold</span>
                </div>
              </div>
              <div className="model-card-details">
                <div className="model-card-section">
                  <h4>How it works</h4>
                  <p>{MODEL_INFO[currentModel].how}</p>
                </div>
                <div className="model-card-section">
                  <h4>Features used</h4>
                  <p>{MODEL_INFO[currentModel].features}</p>
                </div>
                <div className="model-card-section">
                  <h4>Strengths</h4>
                  <p>{MODEL_INFO[currentModel].strengths}</p>
                </div>
              </div>
              <div className="model-card-eval">
                Evaluated with {MODEL_INFO[currentModel].evaluation}
              </div>
            </section>
          )}
          {/* Frame-rate control: slider and number input share one state. */}
          <section id="frame-control">
            <label htmlFor="frame-slider">Frame Rate (FPS)</label>
            <input
              type="range"
              id="frame-slider"
              min="10"
              max="30"
              value={currentFrame}
              onChange={(e) => handleFrameChange(e.target.value)}
            />
            <input
              type="number"
              id="frame-input"
              min="10"
              max="30"
              value={currentFrame}
              onChange={(e) => handleFrameChange(e.target.value)}
            />
          </section>
        </>
      ) : null}
      <CalibrationOverlay calibration={calibration} videoManager={videoManager} />
    </main>
  );
}

export default FocusPageLocal;