shiven99 committed on
Commit
8ee5513
·
1 Parent(s): 900d8bc

Deploy SentinelEdge demo to HF Spaces

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitignore +6 -0
  2. Dockerfile +43 -0
  3. README.md +7 -4
  4. demo/backend/audio_streamer.py +185 -0
  5. demo/backend/live_mic.py +212 -0
  6. demo/backend/main.py +1389 -0
  7. demo/backend/sample_calls/amazon_refund_scam.txt +12 -0
  8. demo/backend/sample_calls/bank_fraud_scam.txt +11 -0
  9. demo/backend/sample_calls/crypto_investment_scam.txt +12 -0
  10. demo/backend/sample_calls/grandparent_scam.txt +12 -0
  11. demo/backend/sample_calls/irs_scam.txt +11 -0
  12. demo/backend/sample_calls/legitimate_call.txt +7 -0
  13. demo/backend/sample_calls/prize_notification_scam.txt +12 -0
  14. demo/backend/sample_calls/tech_support_scam.txt +11 -0
  15. demo/backend/sample_calls/utility_shutoff_scam.txt +12 -0
  16. demo/frontend/index.html +16 -0
  17. demo/frontend/package-lock.json +0 -0
  18. demo/frontend/package.json +27 -0
  19. demo/frontend/postcss.config.js +6 -0
  20. demo/frontend/public/favicon-16x16.svg +19 -0
  21. demo/frontend/public/favicon-32x32.svg +30 -0
  22. demo/frontend/public/sentineledge-logo-dark.svg +81 -0
  23. demo/frontend/src/App.tsx +619 -0
  24. demo/frontend/src/components/CallHistory.tsx +240 -0
  25. demo/frontend/src/components/CallScreen.tsx +238 -0
  26. demo/frontend/src/components/DemoControls.tsx +143 -0
  27. demo/frontend/src/components/FeatureBreakdown.tsx +165 -0
  28. demo/frontend/src/components/FederatedDashboard.tsx +587 -0
  29. demo/frontend/src/components/FraudAlert.tsx +144 -0
  30. demo/frontend/src/components/PhoneSimulator.tsx +41 -0
  31. demo/frontend/src/components/PrivacyDemo.tsx +446 -0
  32. demo/frontend/src/components/ScoreGauge.tsx +260 -0
  33. demo/frontend/src/components/TranscriptPanel.tsx +212 -0
  34. demo/frontend/src/hooks/useWebSocket.ts +112 -0
  35. demo/frontend/src/main.tsx +11 -0
  36. demo/frontend/src/styles/index.css +120 -0
  37. demo/frontend/src/styles/phone.css +535 -0
  38. demo/frontend/tailwind.config.js +70 -0
  39. demo/frontend/tsconfig.json +25 -0
  40. demo/frontend/tsconfig.node.json +10 -0
  41. demo/frontend/vite.config.ts +16 -0
  42. deploy_server.py +43 -0
  43. federated/__init__.py +1 -0
  44. federated/dp_injector.py +83 -0
  45. federated/local_trainer.py +211 -0
  46. federated/simulate.py +507 -0
  47. federated/simulation_results.json +206 -0
  48. federated/visualization.py +219 -0
  49. hub/__init__.py +21 -0
  50. hub/aggregator.py +192 -0
.gitignore ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ __pycache__/
2
+ *.pyc
3
+ node_modules/
4
+ .env
5
+ *.egg-info/
6
+ dist/
Dockerfile ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # SentinelEdge Demo - Hugging Face Spaces Deployment
2
+ # Serves React frontend + FastAPI backend from a single container
3
+
4
+ FROM node:20-slim AS frontend-build
5
+
6
+ WORKDIR /build
7
+ COPY demo/frontend/package.json demo/frontend/package-lock.json* ./
8
+ RUN npm install --production=false
9
+ COPY demo/frontend/ .
10
+ RUN npm run build
11
+
12
+ # --- Python runtime ---
13
+ FROM python:3.11-slim
14
+
15
+ # System deps for PyNaCl (libsodium)
16
+ RUN apt-get update && \
17
+ apt-get install -y --no-install-recommends libsodium-dev && \
18
+ rm -rf /var/lib/apt/lists/*
19
+
20
+ WORKDIR /app
21
+
22
+ # Install only the deps we actually need for the demo (no whisper/pyaudio/onnx)
23
+ COPY requirements-deploy.txt .
24
+ RUN pip install --no-cache-dir -r requirements-deploy.txt
25
+
26
+ # Copy project code
27
+ COPY sentinel_edge/ sentinel_edge/
28
+ COPY hub/ hub/
29
+ COPY demo/backend/ demo/backend/
30
+ COPY models/ models/
31
+ COPY federated/ federated/
32
+
33
+ # Copy built frontend into a static dir the backend will serve
34
+ COPY --from=frontend-build /build/dist /app/static
35
+
36
+ # Copy the deployment server entrypoint
37
+ COPY deploy_server.py .
38
+
39
+ # HF Spaces expects port 7860
40
+ ENV PORT=7860
41
+ EXPOSE 7860
42
+
43
+ CMD ["python", "deploy_server.py"]
README.md CHANGED
@@ -1,12 +1,15 @@
1
  ---
2
  title: SentinelEdge
3
- emoji: 📊
4
- colorFrom: red
5
- colorTo: gray
6
  sdk: docker
 
7
  pinned: false
8
  license: mit
9
  short_description: Federated Edge AI for Real-Time Phone Call Fraud Detection
10
  ---
11
 
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
1
  ---
2
  title: SentinelEdge
3
+ emoji: 🛡️
4
+ colorFrom: green
5
+ colorTo: blue
6
  sdk: docker
7
+ app_port: 7860
8
  pinned: false
9
  license: mit
10
  short_description: Federated Edge AI for Real-Time Phone Call Fraud Detection
11
  ---
12
 
13
+ # SentinelEdge Demo
14
+
15
+ Real-time phone call fraud detection powered by federated edge AI. Try the interactive demo to see how SentinelEdge analyzes phone calls in real-time and detects scam patterns.
demo/backend/audio_streamer.py ADDED
@@ -0,0 +1,185 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Stream audio files through the detection pipeline.
2
+
3
+ Reads WAV files and yields fixed-duration chunks of audio samples,
4
+ simulating real-time microphone input for the fraud-detection pipeline.
5
+ """
6
+
7
+ from __future__ import annotations
8
+
9
+ import io
10
+ import wave
11
+ from pathlib import Path
12
+
13
+ import numpy as np
14
+
15
+
16
class AudioStreamer:
    """Reads a WAV file and streams chunks to simulate real-time audio.

    Parameters
    ----------
    sample_rate : int
        Expected sample rate of the input WAV files (default 16 kHz).
    """

    def __init__(self, sample_rate: int = 16_000) -> None:
        self.sample_rate = sample_rate

    # ------------------------------------------------------------------
    # Public API
    # ------------------------------------------------------------------

    def stream_file(
        self, wav_path: str, chunk_duration: float = 1.0
    ):
        """Generator that yields audio chunks from a WAV file.

        Each chunk is *chunk_duration* seconds of float32 audio normalised
        to the range ``[-1, 1]``. The final chunk may be shorter than the
        requested duration.

        Parameters
        ----------
        wav_path : str
            Path to a PCM WAV file (8/16/32-bit; mono or stereo).
        chunk_duration : float
            Duration of each chunk in seconds.

        Yields
        ------
        np.ndarray
            1-D float32 array of audio samples (stereo is mixed to mono).

        Raises
        ------
        FileNotFoundError
            If *wav_path* does not exist.
        ValueError
            If the WAV file has an unexpected sample rate, or an
            unsupported sample width (e.g. 24-bit).
        """
        path = Path(wav_path)
        if not path.exists():
            raise FileNotFoundError(f"WAV file not found: {wav_path}")

        with wave.open(str(path), "rb") as wf:
            n_channels = wf.getnchannels()
            sampwidth = wf.getsampwidth()
            framerate = wf.getframerate()
            n_frames = wf.getnframes()

            # Downstream feature extraction assumes a fixed rate, so refuse
            # files that would need resampling.
            if framerate != self.sample_rate:
                raise ValueError(
                    f"Expected sample rate {self.sample_rate}, "
                    f"got {framerate} in {wav_path}"
                )

            chunk_frames = int(chunk_duration * framerate)

            frames_read = 0
            while frames_read < n_frames:
                to_read = min(chunk_frames, n_frames - frames_read)
                raw_bytes = wf.readframes(to_read)
                frames_read += to_read

                # Convert raw bytes to numpy float32
                audio = self._bytes_to_float32(
                    raw_bytes, n_channels=n_channels, sampwidth=sampwidth
                )
                yield audio

    def generate_silence(self, duration: float) -> np.ndarray:
        """Generate silent audio.

        Parameters
        ----------
        duration : float
            Duration in seconds.

        Returns
        -------
        np.ndarray
            1-D float32 zero array.
        """
        return np.zeros(int(duration * self.sample_rate), dtype=np.float32)

    def generate_tone(
        self,
        frequency: float = 440.0,
        duration: float = 1.0,
        amplitude: float = 0.5,
    ) -> np.ndarray:
        """Generate a simple sine-wave tone (useful for testing).

        Parameters
        ----------
        frequency : float
            Tone frequency in Hz.
        duration : float
            Duration in seconds.
        amplitude : float
            Peak amplitude (0.0 -- 1.0).

        Returns
        -------
        np.ndarray
            1-D float32 sine wave.
        """
        n_samples = int(duration * self.sample_rate)
        # Use a uniform spacing of exactly 1/sample_rate (the previous
        # linspace(0, duration, n) with its default endpoint=True spaced
        # samples by duration/(n-1), slightly skewing the frequency and
        # breaking phase continuity when tones are concatenated).
        t = np.arange(n_samples, dtype=np.float32) / np.float32(self.sample_rate)
        return np.float32(amplitude) * np.sin(
            np.float32(2.0 * np.pi * frequency) * t
        )

    # ------------------------------------------------------------------
    # Internals
    # ------------------------------------------------------------------

    @staticmethod
    def _bytes_to_float32(
        raw_bytes: bytes,
        n_channels: int = 1,
        sampwidth: int = 2,
    ) -> np.ndarray:
        """Convert raw WAV bytes to a mono float32 array in ``[-1, 1]``.

        Parameters
        ----------
        raw_bytes : bytes
            Raw PCM bytes from ``wave.readframes()``.
        n_channels : int
            Number of audio channels (1 = mono, 2 = stereo).
        sampwidth : int
            Sample width in bytes (1, 2, or 4).

        Returns
        -------
        np.ndarray
            1-D float32 array normalised to ``[-1.0, 1.0]``.

        Raises
        ------
        ValueError
            If *sampwidth* is not 1, 2, or 4 (24-bit PCM is unsupported).
        """
        if sampwidth == 1:
            dtype = np.uint8
            max_val = 128.0
            offset = 128.0  # WAV stores 8-bit PCM as unsigned
        elif sampwidth == 2:
            dtype = np.int16
            max_val = 32768.0
            offset = 0.0
        elif sampwidth == 4:
            dtype = np.int32
            max_val = 2_147_483_648.0
            offset = 0.0
        else:
            raise ValueError(f"Unsupported sample width: {sampwidth}")

        samples = np.frombuffer(raw_bytes, dtype=dtype).astype(np.float32)

        # Re-centre unsigned 8-bit samples around zero before scaling.
        if offset != 0.0:
            samples = samples - offset

        samples = samples / max_val

        # Mix to mono if stereo
        if n_channels > 1:
            samples = samples.reshape(-1, n_channels).mean(axis=1)

        return samples
demo/backend/live_mic.py ADDED
@@ -0,0 +1,212 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Live microphone audio capture for demo.
2
+
3
+ Provides a thread-safe audio capture interface that reads from the system
4
+ microphone via PyAudio and exposes chunks through a queue. This allows
5
+ the WebSocket server to ingest live audio without blocking the event loop.
6
+
7
+ Note: PyAudio (``pip install pyaudio``) is required for live capture.
8
+ It is an optional dependency -- the demo works fine with pre-recorded
9
+ transcripts when PyAudio is not installed.
10
+ """
11
+
12
+ from __future__ import annotations
13
+
14
+ import queue
15
+ import threading
16
+ import time
17
+ from typing import Optional
18
+
19
+ import numpy as np
20
+
21
+
22
class LiveMicCapture:
    """Capture audio from the system microphone.

    Audio is read on a daemon thread and published as float32 chunks on
    ``audio_queue`` so a consumer (e.g. a WebSocket handler) can poll via
    :meth:`get_chunk` without blocking its own event loop.

    Parameters
    ----------
    sample_rate : int
        Audio sample rate in Hz (default 16 kHz for Whisper).
    chunk_size : int
        Number of frames per read from the audio device.
    channels : int
        Number of audio channels (1 = mono).
    input_device : str | int | None
        Optional device selector. An integer index works with both
        backends; a name string is honoured only by the sounddevice
        backend (the PyAudio path passes ``None`` for non-int selectors).
    """

    def __init__(
        self,
        sample_rate: int = 16_000,
        chunk_size: int = 1024,
        channels: int = 1,
        input_device: str | int | None = None,
    ) -> None:
        self.sample_rate = sample_rate
        self.chunk_size = chunk_size
        self.channels = channels
        self.input_device = input_device
        # Unbounded queue of float32 chunks; producer is the capture thread,
        # consumer is whoever calls get_chunk().
        self.audio_queue: queue.Queue[np.ndarray] = queue.Queue()
        self._running = False
        self._thread: Optional[threading.Thread] = None
        self._stream = None
        self._pyaudio_instance = None
        # Selected at start(): "pyaudio" or "sounddevice".
        self._backend: str | None = None

    # ------------------------------------------------------------------
    # Public API
    # ------------------------------------------------------------------

    def start(self) -> None:
        """Start capturing audio in a background thread.

        Uses PyAudio when available, otherwise falls back to sounddevice.

        Raises
        ------
        ImportError
            If neither PyAudio nor sounddevice is installed.
        RuntimeError
            If the capture is already running.
        """
        if self._running:
            raise RuntimeError("Capture is already running")

        # Probe the backends up front so a missing dependency raises here,
        # in the caller's thread, rather than silently inside the worker.
        try:
            import pyaudio  # noqa: F401
            self._backend = "pyaudio"
        except ImportError:
            try:
                import sounddevice  # noqa: F401
                self._backend = "sounddevice"
            except ImportError as exc:
                raise ImportError(
                    "Live microphone capture requires PyAudio or sounddevice. "
                    "Install one with: pip install pyaudio OR pip install sounddevice"
                ) from exc

        # Set the flag before starting the thread: the loop checks it.
        self._running = True
        self._thread = threading.Thread(
            target=self._capture_loop, daemon=True, name="mic-capture"
        )
        self._thread.start()

    def stop(self) -> None:
        """Stop capturing and release the audio device.

        Signals the capture loop to exit and waits up to 3 s for the
        thread to finish; safe to call when capture was never started.
        """
        self._running = False
        if self._thread is not None:
            self._thread.join(timeout=3.0)
            self._thread = None

    def get_chunk(self, timeout: float = 1.0) -> Optional[np.ndarray]:
        """Get next audio chunk from the queue.

        Parameters
        ----------
        timeout : float
            Maximum seconds to wait for a chunk.

        Returns
        -------
        np.ndarray or None
            Float32 audio chunk, or ``None`` if the timeout expired.
        """
        try:
            return self.audio_queue.get(timeout=timeout)
        except queue.Empty:
            return None

    def drain_queue(self) -> None:
        """Drop any buffered audio chunks currently waiting in the queue."""
        while True:
            try:
                self.audio_queue.get_nowait()
            except queue.Empty:
                break

    @property
    def is_running(self) -> bool:
        """Whether the capture loop is active."""
        return self._running

    # ------------------------------------------------------------------
    # Internals
    # ------------------------------------------------------------------

    def _capture_loop(self) -> None:
        """Background thread that reads from the microphone.

        Opens an input stream (PyAudio or sounddevice), reads chunks,
        and puts float32 audio on the queue for the main application
        to consume.
        """
        if self._backend == "sounddevice":
            self._capture_loop_sounddevice()
            return

        import pyaudio

        pa = pyaudio.PyAudio()
        self._pyaudio_instance = pa

        try:
            stream = pa.open(
                format=pyaudio.paInt16,
                channels=self.channels,
                rate=self.sample_rate,
                input=True,
                frames_per_buffer=self.chunk_size,
                # PyAudio only accepts an integer device index; a name
                # string selector falls back to the default device.
                input_device_index=(self.input_device if isinstance(self.input_device, int) else None),
            )
            self._stream = stream

            while self._running:
                try:
                    raw_data = stream.read(
                        self.chunk_size, exception_on_overflow=False
                    )
                except OSError:
                    # Audio device error -- skip this chunk
                    continue

                # Convert int16 bytes to float32 in [-1, 1]
                audio = (
                    np.frombuffer(raw_data, dtype=np.int16).astype(np.float32)
                    / 32768.0
                )
                self.audio_queue.put(audio)

        finally:
            # Close the stream before terminating PyAudio; stop/close may
            # themselves fail if the device vanished, so swallow OSError.
            if self._stream is not None:
                try:
                    self._stream.stop_stream()
                    self._stream.close()
                except OSError:
                    pass
                self._stream = None

            pa.terminate()
            self._pyaudio_instance = None

    def _capture_loop_sounddevice(self) -> None:
        """Capture loop implemented with sounddevice InputStream."""
        import sounddevice as sd

        def _callback(indata, frames, _time_info, status):
            # Drop frames delivered with a non-zero status (over/underrun).
            if status:
                return
            # sounddevice already provides float32 in [-1, 1] when dtype=float32
            # Copy channel 0 because `indata` is reused by the audio backend.
            audio = np.asarray(indata[:, 0], dtype=np.float32).copy()
            self.audio_queue.put(audio)

        try:
            with sd.InputStream(
                samplerate=self.sample_rate,
                channels=self.channels,
                dtype="float32",
                blocksize=self.chunk_size,
                device=self.input_device,
                callback=_callback,
            ) as stream:
                self._stream = stream
                # Capture happens in the sounddevice callback; this thread
                # merely idles until stop() clears the flag.
                while self._running:
                    time.sleep(0.05)
        finally:
            self._stream = None
demo/backend/main.py ADDED
@@ -0,0 +1,1389 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """SentinelEdge Demo Backend - Real-time fraud detection server."""
2
+
3
+ import asyncio
4
+ import json
5
+ import re
6
+ import time
7
+ import os
8
+ import sys
9
+ import re
10
+ import urllib.request
11
+ import urllib.error
12
+ import numpy as np
13
+ from pathlib import Path
14
+ from fastapi import FastAPI, WebSocket, WebSocketDisconnect
15
+ from fastapi.middleware.cors import CORSMiddleware
16
+ import uvicorn
17
+
18
+ # Load environment variables from .env file if present
19
+ try:
20
+ from dotenv import load_dotenv
21
+ load_dotenv()
22
+ except ImportError:
23
+ pass # python-dotenv not installed, use system env vars only
24
+
25
+ # Add project root to path so sentinel_edge package is importable
26
+ _PROJECT_ROOT = str(Path(__file__).resolve().parent.parent.parent)
27
+ if _PROJECT_ROOT not in sys.path:
28
+ sys.path.insert(0, _PROJECT_ROOT)
29
+
30
+ app = FastAPI(title="SentinelEdge Demo", version="0.1.0")
31
+
32
+ INTERACTIVE_REPLY_TIMEOUT_SECONDS = 15.0
33
+ MIN_SCAMMER_TURNS_FOR_ALERT = 3
34
+ MIN_USER_REPLIES_FOR_ALERT = 2
35
+ MIN_CALL_SECONDS_FOR_ALERT = 20.0
36
+ REPLY_LISTEN_START_DELAY_SECONDS = 0.25
37
+ VOICE_ACTIVITY_RMS_THRESHOLD = 0.0015
38
+ MIN_VOICED_SECONDS_FOR_TRANSCRIBE = 0.3
39
+ END_OF_UTTERANCE_SILENCE_SECONDS = 0.8
40
+ MAX_UTTERANCE_SECONDS = 7.0
41
+ ANTHROPIC_API_URL = "https://api.anthropic.com/v1/messages"
42
+ ANTHROPIC_API_VERSION = "2023-06-01"
43
+
44
+
45
+ def _read_anthropic_api_key_from_env() -> str | None:
46
+ """Read optional Anthropic key from env without hardcoding secrets."""
47
+ key = os.getenv("SENTINEL_ANTHROPIC_API_KEY") or os.getenv("ANTHROPIC_API_KEY")
48
+ if key is None:
49
+ return None
50
+ key = key.strip()
51
+ return key if key else None
52
+
53
+
54
+ def _extract_anthropic_text(response_json: dict) -> str | None:
55
+ """Extract first text block from Anthropic messages response."""
56
+ content = response_json.get("content")
57
+ if not isinstance(content, list):
58
+ return None
59
+
60
+ for block in content:
61
+ if isinstance(block, dict) and block.get("type") == "text":
62
+ text = block.get("text")
63
+ if isinstance(text, str) and text.strip():
64
+ return text.strip()
65
+ return None
66
+
67
+
68
+ def _candidate_anthropic_models(configured_model: str) -> list[str]:
69
+ """Return ordered, de-duplicated Anthropic model candidates."""
70
+ candidates = [
71
+ configured_model,
72
+ "claude-sonnet-4-5-latest",
73
+ "claude-sonnet-4-0",
74
+ "claude-3-7-sonnet-latest",
75
+ "claude-3-5-haiku-latest",
76
+ ]
77
+
78
+ seen: set[str] = set()
79
+ ordered: list[str] = []
80
+ for model in candidates:
81
+ if model and model not in seen:
82
+ seen.add(model)
83
+ ordered.append(model)
84
+ return ordered
85
+
86
+
87
def _personalize_scammer_line_sync(
    *,
    base_sentence: str,
    call_description: str,
    recent_user_replies: list[str],
    recent_scammer_lines: list[str],
) -> str | None:
    """Call Anthropic API and return one personalized scammer sentence.

    Blocking helper (plain ``urllib``); run it off the event loop via
    :func:`_personalize_scammer_line`. Tries each model from
    :func:`_candidate_anthropic_models` in order, falling through only on
    a 404 "model not found" response; any other failure disables
    personalization for this turn.

    Parameters
    ----------
    base_sentence : str
        Scripted sentence used as the baseline to adapt.
    call_description : str
        Short description of the scam scenario.
    recent_user_replies : list[str]
        Victim replies; only the last 3 are sent as context.
    recent_scammer_lines : list[str]
        Prior scammer lines; only the last 2 are sent as context.

    Returns
    -------
    str or None
        A single whitespace-normalized sentence, or ``None`` when no key
        is configured or every attempt failed (caller falls back to the
        scripted sentence).
    """
    api_key = _read_anthropic_api_key_from_env()
    if api_key is None:
        print("[Claude] API key not found in env; personalization disabled.")
        return None

    # Redact key for logging (show only last 8 chars)
    key_display = f"...{api_key[-8:]}" if len(api_key) > 8 else "***"
    print(f"[Claude] API key loaded: {key_display}")

    configured_model = os.getenv("SENTINEL_ANTHROPIC_MODEL", "claude-sonnet-4-5-latest")
    model_candidates = _candidate_anthropic_models(configured_model)
    print(f"[Claude] Attempting personalization with model candidates: {model_candidates}")
    # Only the most recent turns are forwarded to keep the prompt short.
    user_context = " | ".join(recent_user_replies[-3:])
    scammer_context = " | ".join(recent_scammer_lines[-2:])

    system_prompt = (
        "You are generating a scam call simulation sentence for cybersecurity training. "
        "Return exactly one sentence, under 35 words, plain text only. "
        "Keep it plausible for a phone scam and adapt to the victim reply context. "
        "Do not include markdown, bullet points, labels, or safety disclaimers."
    )

    user_prompt = (
        f"Call scenario: {call_description}. "
        f"Script baseline sentence: {base_sentence} "
        f"Recent scammer lines: {scammer_context or 'none'} "
        f"Recent victim replies: {user_context or 'none'}"
    )

    payload = {
        "model": model_candidates[0],
        "max_tokens": 80,
        "temperature": 0.7,
        "system": system_prompt,
        "messages": [{"role": "user", "content": user_prompt}],
    }

    headers = {
        "x-api-key": api_key,
        "anthropic-version": ANTHROPIC_API_VERSION,
        "content-type": "application/json",
    }

    for model in model_candidates:
        # Reuse the same payload, swapping only the model per attempt.
        payload["model"] = model
        body = json.dumps(payload).encode("utf-8")
        request = urllib.request.Request(
            ANTHROPIC_API_URL,
            data=body,
            headers=headers,
            method="POST",
        )

        try:
            print(f"[Claude] API request to {ANTHROPIC_API_URL} using model: {model}")
            with urllib.request.urlopen(request, timeout=8) as response:
                response_json = json.loads(response.read().decode("utf-8"))

            text = _extract_anthropic_text(response_json)
            if text is None:
                # Success response but no usable text -- try the next model.
                print(f"[Claude] Model {model} returned empty text block.")
                continue

            # Collapse internal whitespace/newlines to a single-line sentence.
            personalized = " ".join(text.split())
            print(f"[Claude] Personalized sentence ({model}): {personalized[:80]}...")
            return personalized
        except urllib.error.HTTPError as e:
            error_body = e.read().decode("utf-8") if e.fp else ""
            print(f"[Claude] HTTP Error {e.code} on model {model}: {e.reason}")
            print(f"[Claude] Response body: {error_body[:200]}")

            # Only a 404 "model not found" justifies trying the next
            # candidate; auth/rate-limit/server errors would fail for every
            # model, so bail out immediately.
            is_model_not_found = (
                e.code == 404
                and "not_found_error" in error_body
                and "model:" in error_body
            )
            if is_model_not_found:
                print(f"[Claude] Model not found: {model}. Trying next fallback.")
                continue
            return None
        except urllib.error.URLError as e:
            # Network-level failure (DNS, refused connection, ...).
            print(f"[Claude] URL Error on model {model}: {e.reason}")
            return None
        except (TimeoutError, json.JSONDecodeError) as e:
            print(f"[Claude] Request error on model {model}: {e}")
            return None

    print("[Claude] No usable Anthropic model found; personalization disabled for this turn.")
    return None
184
+
185
+
186
async def _personalize_scammer_line(
    *,
    base_sentence: str,
    call_description: str,
    recent_user_replies: list[str],
    recent_scammer_lines: list[str],
) -> str | None:
    """Run the blocking Anthropic personalization call on a worker thread.

    Thin async facade over :func:`_personalize_scammer_line_sync` so the
    HTTP request never blocks the event loop; arguments pass through
    unchanged.
    """
    kwargs = {
        "base_sentence": base_sentence,
        "call_description": call_description,
        "recent_user_replies": recent_user_replies,
        "recent_scammer_lines": recent_scammer_lines,
    }
    return await asyncio.to_thread(_personalize_scammer_line_sync, **kwargs)
201
+
202
+
203
+ def _read_input_device_from_env() -> str | int | None:
204
+ """Read optional microphone input device from SENTINEL_INPUT_DEVICE.
205
+
206
+ Supports either an integer device index or a device name string.
207
+ """
208
+ value = os.getenv("SENTINEL_INPUT_DEVICE")
209
+ if value is None or not value.strip():
210
+ return None
211
+
212
+ value = value.strip()
213
+ if value.isdigit():
214
+ return int(value)
215
+ return value
216
+
217
+
218
+ def _parse_input_device_query(value: str | None) -> str | int | None:
219
+ """Parse optional input device from websocket query parameter."""
220
+ if value is None:
221
+ return None
222
+ value = value.strip()
223
+ if not value:
224
+ return None
225
+ if value.isdigit():
226
+ return int(value)
227
+ return value
228
+
229
# NOTE(review): wildcard CORS is convenient for a public demo, but
# allow_origins should be tightened before any production deployment.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_methods=["*"],
    allow_headers=["*"],
)

# Track active WebSocket connections
active_connections: list[WebSocket] = []
238
+
239
+ # ---------------------------------------------------------------------------
240
+ # Available demo calls
241
+ # ---------------------------------------------------------------------------
242
+
243
# Registry of selectable demo scenarios, keyed by the call_id used in the
# /ws/call/{call_id} route. "file" is a transcript path relative to this
# module ("live_mic" has none -- it streams from the local microphone);
# the remaining fields are display metadata sent to the frontend.
SAMPLE_CALLS = {
    "live_mic": {
        "file": None,
        "caller": "Live microphone",
        "caller_name": "Local Device Input",
        "description": "Real-time microphone detection",
    },
    "irs_scam": {
        "file": "sample_calls/irs_scam.txt",
        "caller": "+1 (800) 555-0199",
        "caller_name": "Unknown Number",
        "description": "IRS Impersonation Scam",
    },
    "tech_support": {
        "file": "sample_calls/tech_support_scam.txt",
        "caller": "+1 (888) 555-0147",
        "caller_name": "Microsoft Support",
        "description": "Tech Support Scam",
    },
    "bank_fraud": {
        "file": "sample_calls/bank_fraud_scam.txt",
        "caller": "+1 (800) 555-0123",
        "caller_name": "Bank Security",
        "description": "Bank Fraud Scam",
    },
    "legitimate": {
        "file": "sample_calls/legitimate_call.txt",
        "caller": "+1 (555) 234-5678",
        "caller_name": "Dr. Smith Office",
        "description": "Legitimate Appointment Reminder",
    },
    "crypto_investment": {
        "file": "sample_calls/crypto_investment_scam.txt",
        "caller": "+1 (833) 555-0291",
        "caller_name": "Digital Asset Partners",
        "description": "Crypto Investment Scam",
    },
    "grandparent": {
        "file": "sample_calls/grandparent_scam.txt",
        "caller": "+1 (555) 867-5309",
        "caller_name": "Unknown Number",
        "description": "Grandparent Scam",
    },
    "amazon_refund": {
        "file": "sample_calls/amazon_refund_scam.txt",
        "caller": "+1 (888) 555-0342",
        "caller_name": "Amazon Support",
        "description": "Amazon Refund Scam",
    },
    "utility_shutoff": {
        "file": "sample_calls/utility_shutoff_scam.txt",
        "caller": "+1 (800) 555-0476",
        "caller_name": "Utility Company",
        "description": "Utility Shutoff Scam",
    },
    "prize_notification": {
        "file": "sample_calls/prize_notification_scam.txt",
        "caller": "+1 (877) 555-0188",
        "caller_name": "National Sweepstakes",
        "description": "Prize Notification Scam",
    },
}
305
+
306
+ # ---------------------------------------------------------------------------
307
+ # REST endpoints
308
+ # ---------------------------------------------------------------------------
309
+
310
+
311
+ @app.get("/api/calls")
312
+ async def list_calls():
313
+ """List available demo calls."""
314
+ return {"calls": [{"id": k, **v} for k, v in SAMPLE_CALLS.items()]}
315
+
316
+
317
+ @app.get("/api/health")
318
+ async def health_check():
319
+ """Health check endpoint."""
320
+ return {"status": "ok", "active_connections": len(active_connections)}
321
+
322
+
323
+ @app.get("/api/audio-devices")
324
+ async def list_audio_devices():
325
+ """List available input audio devices (when sounddevice is installed)."""
326
+ try:
327
+ import sounddevice as sd
328
+
329
+ devices = []
330
+ for idx, dev in enumerate(sd.query_devices()):
331
+ if dev.get("max_input_channels", 0) > 0:
332
+ devices.append(
333
+ {
334
+ "index": idx,
335
+ "name": dev.get("name", "unknown"),
336
+ "max_input_channels": int(dev.get("max_input_channels", 0)),
337
+ "default_samplerate": float(dev.get("default_samplerate", 0.0)),
338
+ }
339
+ )
340
+ return {"devices": devices}
341
+ except Exception as exc:
342
+ return {"devices": [], "error": str(exc)}
343
+
344
+
345
+ # ---------------------------------------------------------------------------
346
+ # WebSocket: real-time call fraud detection
347
+ # ---------------------------------------------------------------------------
348
+
349
+
350
@app.websocket("/ws/call/{call_id}")
async def call_detection(websocket: WebSocket, call_id: str):
    """WebSocket endpoint for real-time call fraud detection.

    Streams per-sentence fraud scores for a scripted demo call, or hands the
    socket off to the live-microphone / interactive variants depending on
    ``call_id`` and query parameters.

    Message types sent: ``call_start``, ``sentence``, ``call_blocked``,
    ``call_end``, and ``error``.
    """
    await websocket.accept()
    active_connections.append(websocket)

    try:
        if call_id not in SAMPLE_CALLS:
            await websocket.send_json({"error": f"Unknown call: {call_id}"})
            return

        call_info = SAMPLE_CALLS[call_id]

        await websocket.send_json(
            {
                "type": "call_start",
                "caller": call_info["caller"],
                "caller_name": call_info["caller_name"],
                "description": call_info["description"],
                "timestamp": time.time(),
            }
        )

        # Optional ?input_device=... selects the microphone for live modes.
        input_device = _parse_input_device_query(
            websocket.query_params.get("input_device")
        )

        if call_id == "live_mic":
            await run_live_mic_detection(websocket, input_device=input_device)
            return

        # Load transcript sentences
        transcript_path = os.path.join(os.path.dirname(__file__), call_info["file"])
        sentences = load_transcript(transcript_path)

        # ?interactive=1 switches to turn-based mode: the user replies via
        # their microphone between scripted scammer lines.
        interactive = websocket.query_params.get("interactive") == "1"
        if interactive:
            await run_interactive_scripted_call(
                websocket,
                sentences,
                input_device=input_device,
            )
            return

        # Import detection pipeline components
        from sentinel_edge.features.handcrafted import extract_handcrafted_features
        from sentinel_edge.features.feature_pipeline import FeaturePipeline
        from sentinel_edge.classifier.xgb_classifier import FraudClassifier
        from sentinel_edge.classifier.score_accumulator import ScoreAccumulator
        from sentinel_edge.classifier.alert_engine import AlertEngine

        accumulator = ScoreAccumulator(alpha=0.3)
        alert_engine = AlertEngine()

        # Use the trained model only when both artifacts exist on disk;
        # otherwise fall back to the heuristic scorer defined below.
        _model_path = os.path.join(_PROJECT_ROOT, "models", "call_fraud_xgb.json")
        _tfidf_path = os.path.join(_PROJECT_ROOT, "models", "tfidf_call_vectorizer.pkl")
        _use_real_model = os.path.exists(_model_path) and os.path.exists(_tfidf_path)
        if _use_real_model:
            _classifier = FraudClassifier(_model_path)
            _pipeline = FeaturePipeline(_tfidf_path)
        else:
            _classifier = None
            _pipeline = None

        call_start_time = time.time()

        for i, sentence in enumerate(sentences):
            # Randomized pacing (1.5-3.0 s) so the demo feels like real speech.
            await asyncio.sleep(1.5 + np.random.random() * 1.5)

            features = extract_handcrafted_features(sentence)

            if _use_real_model:
                t0 = time.perf_counter()
                feature_vec = _pipeline.extract(sentence)
                fraud_score = _classifier.predict_proba(feature_vec)
                _inference_ms = (time.perf_counter() - t0) * 1000
            else:
                fraud_score = compute_heuristic_score(features)
                # Simulated latency so the UI still shows a plausible number.
                _inference_ms = np.random.uniform(5, 15)

            ema_score = accumulator.update(fraud_score)
            alert = alert_engine.evaluate(ema_score, features)
            elapsed = time.time() - call_start_time

            await websocket.send_json(
                {
                    "type": "sentence",
                    "speaker": "scammer",
                    "index": i,
                    "text": sentence,
                    "raw_score": round(fraud_score, 4),
                    "ema_score": round(ema_score, 4),
                    "features": {
                        k: round(v, 4) if isinstance(v, float) else int(v)
                        for k, v in features.items()
                    },
                    "alert": {
                        "should_alert": alert.should_alert,
                        "risk_level": alert.risk_level.value,
                        "reasons": alert.reasons,
                    },
                    "elapsed_seconds": round(elapsed, 1),
                    "inference_ms": round(_inference_ms, 1),
                    "timestamp": time.time(),
                }
            )

            # Brief non-blocking poll (10 ms) for client actions between lines.
            try:
                msg = await asyncio.wait_for(
                    websocket.receive_text(), timeout=0.01
                )
                data = json.loads(msg)
                if data.get("action") == "block":
                    await websocket.send_json(
                        {"type": "call_blocked", "timestamp": time.time()}
                    )
                    return
                elif data.get("action") == "dismiss":
                    # Dismiss only clears the client-side alert; keep streaming.
                    pass
            except asyncio.TimeoutError:
                pass
            except json.JSONDecodeError:
                pass

        await websocket.send_json(
            {
                "type": "call_end",
                "final_score": round(accumulator.current_score, 4),
                "peak_score": round(accumulator.peak_score, 4),
                "mean_score": round(accumulator.mean_score, 4),
                "total_sentences": len(sentences),
                "duration_seconds": round(time.time() - call_start_time, 1),
                "timestamp": time.time(),
            }
        )

    except WebSocketDisconnect:
        pass
    except Exception as exc:
        # Best-effort error report; the socket may already be closed.
        try:
            await websocket.send_json(
                {"type": "error", "message": str(exc), "timestamp": time.time()}
            )
        except Exception:
            pass
    finally:
        if websocket in active_connections:
            active_connections.remove(websocket)
498
+
499
+
500
async def run_interactive_scripted_call(
    websocket: WebSocket,
    scripted_sentences: list[str],
    input_device: str | int | None = None,
) -> None:
    """Turn-based scripted call: scammer line, wait for user reply, repeat.

    For each scripted line: optionally personalize it against prior user
    replies, score it through the engine, stream the result, then capture and
    transcribe one user reply from the microphone before continuing.

    NOTE(review): relies on module-level constants (MIN_SCAMMER_TURNS_FOR_ALERT,
    INTERACTIVE_REPLY_TIMEOUT_SECONDS, etc.) and the async helper
    ``_personalize_scammer_line`` defined elsewhere in this file.
    """
    from live_mic import LiveMicCapture
    from sentinel_edge.audio.transcriber import Transcriber
    from sentinel_edge.engine import SentinelEngine

    engine = SentinelEngine(models_dir=os.path.join(_PROJECT_ROOT, "models"))
    engine.reset_call_state()
    mic = LiveMicCapture(
        sample_rate=16_000,
        chunk_size=2048,
        channels=1,
        input_device=(input_device if input_device is not None else _read_input_device_from_env()),
    )
    whisper_model = os.getenv("SENTINEL_WHISPER_MODEL", "base.en")
    transcriber = Transcriber(model_name=whisper_model)

    # Per-call state: turn counters gate the fraud alert so it never fires
    # before the user has actually engaged with the call.
    call_start_time = time.time()
    turn_index = 0
    scammer_turn_count = 0
    user_reply_count = 0
    fraud_alert_sent = False
    user_replies: list[str] = []
    scammer_history: list[str] = []
    last_scammer_sentence = ""

    try:
        mic.start()

        for scam_sentence in scripted_sentences:
            await asyncio.sleep(1.0)

            # Once the user has replied, try rewriting the next scripted line
            # so the "scammer" responds to what was actually said.
            line_to_send = scam_sentence
            if user_replies:
                print(f"[Main] Attempting personalization (user has {len(user_replies)} replies)")
                personalized = await _personalize_scammer_line(
                    base_sentence=scam_sentence,
                    call_description="Interactive scam call simulation",
                    recent_user_replies=user_replies,
                    recent_scammer_lines=scammer_history,
                )
                if personalized:
                    line_to_send = personalized
            else:
                print(f"[Main] No user replies yet; skipping personalization")

            t0 = time.perf_counter()
            fraud_score, features = engine.analyze_sentence(line_to_send)
            ema_score = engine.accumulator.update(fraud_score)
            alert = engine.alert_engine.evaluate(ema_score, features)
            inference_ms = (time.perf_counter() - t0) * 1000.0

            await websocket.send_json(
                {
                    "type": "sentence",
                    "speaker": "scammer",
                    "index": turn_index,
                    "text": line_to_send,
                    "raw_score": round(fraud_score, 4),
                    "ema_score": round(ema_score, 4),
                    "features": {
                        k: round(v, 4) if isinstance(v, float) else int(v)
                        for k, v in features.items()
                    },
                    "alert": {
                        "should_alert": alert.should_alert,
                        "risk_level": alert.risk_level.value,
                        "reasons": alert.reasons,
                    },
                    "elapsed_seconds": round(time.time() - call_start_time, 1),
                    "inference_ms": round(inference_ms, 2),
                    "timestamp": time.time(),
                }
            )
            turn_index += 1
            scammer_turn_count += 1
            last_scammer_sentence = line_to_send
            scammer_history.append(line_to_send)

            # Fire the one-shot fraud alert only after enough turns, replies,
            # and call time have accrued, to avoid premature alarms.
            elapsed_seconds = time.time() - call_start_time
            if (
                not fraud_alert_sent
                and alert.should_alert
                and scammer_turn_count >= MIN_SCAMMER_TURNS_FOR_ALERT
                and user_reply_count >= MIN_USER_REPLIES_FOR_ALERT
                and elapsed_seconds >= MIN_CALL_SECONDS_FOR_ALERT
            ):
                fraud_alert_sent = True
                await websocket.send_json(
                    {
                        "type": "fraud_detected",
                        "message": "High scam risk detected. Hang up now.",
                        "risk_level": alert.risk_level.value,
                        "ema_score": round(ema_score, 4),
                        "reasons": alert.reasons,
                        "timestamp": time.time(),
                    }
                )

            await websocket.send_json(
                {
                    "type": "waiting_for_reply",
                    "timeout_seconds": int(INTERACTIVE_REPLY_TIMEOUT_SECONDS),
                    "timestamp": time.time(),
                }
            )

            # Ensure only fresh user turn audio is considered.
            mic.drain_queue()

            user_sentence, heard_audio = await _capture_user_sentence(
                websocket=websocket,
                mic=mic,
                transcriber=transcriber,
                timeout_seconds=INTERACTIVE_REPLY_TIMEOUT_SECONDS,
                listen_start_delay_seconds=REPLY_LISTEN_START_DELAY_SECONDS,
                voice_activity_rms_threshold=VOICE_ACTIVITY_RMS_THRESHOLD,
                min_voiced_seconds_for_transcribe=MIN_VOICED_SECONDS_FOR_TRANSCRIBE,
            )

            # No audio at all: likely an OS permission / device problem.
            if not heard_audio:
                await websocket.send_json(
                    {
                        "type": "error",
                        "message": (
                            "No microphone audio detected during reply window. "
                            "Check OS microphone permission and input device selection."
                        ),
                        "timestamp": time.time(),
                    }
                )
                continue

            # Audio heard but no usable transcript: treat as a silent turn.
            if user_sentence is None:
                await websocket.send_json(
                    {
                        "type": "user_timeout",
                        "message": "No reply detected, continuing call.",
                        "timestamp": time.time(),
                    }
                )
                continue

            # Reject transcripts that look like the scammer TTS leaking back
            # through the speakers into the microphone.
            if _looks_like_echo(user_sentence, last_scammer_sentence):
                await websocket.send_json(
                    {
                        "type": "user_echo_detected",
                        "message": "Detected speaker echo. Use headphones or lower volume and repeat.",
                        "timestamp": time.time(),
                    }
                )
                continue

            # Score user reply for display only. Do not mix into scammer EMA.
            user_score, user_features = engine.analyze_sentence(user_sentence)
            user_replies.append(user_sentence)
            user_reply_count += 1
            await websocket.send_json(
                {
                    "type": "sentence",
                    "speaker": "you",
                    "index": turn_index,
                    "text": user_sentence,
                    "raw_score": round(user_score, 4),
                    "ema_score": round(engine.accumulator.current_score, 4),
                    "features": {
                        k: round(v, 4) if isinstance(v, float) else int(v)
                        for k, v in user_features.items()
                    },
                    "alert": {
                        "should_alert": False,
                        "risk_level": "safe",
                        "reasons": [],
                    },
                    "elapsed_seconds": round(time.time() - call_start_time, 1),
                    "inference_ms": 0.0,
                    "timestamp": time.time(),
                }
            )
            turn_index += 1

            # Allow client action such as manual block/hangup.
            try:
                msg = await asyncio.wait_for(websocket.receive_text(), timeout=0.01)
                data = json.loads(msg)
                if data.get("action") == "block":
                    await websocket.send_json(
                        {"type": "call_blocked", "timestamp": time.time()}
                    )
                    break
            except asyncio.TimeoutError:
                pass
            except json.JSONDecodeError:
                pass

        await websocket.send_json(
            {
                "type": "call_end",
                "final_score": round(engine.accumulator.current_score, 4),
                "peak_score": round(engine.accumulator.peak_score, 4),
                "mean_score": round(engine.accumulator.mean_score, 4),
                "total_sentences": turn_index,
                "duration_seconds": round(time.time() - call_start_time, 1),
                "timestamp": time.time(),
            }
        )
    finally:
        # Always release the audio device, even on disconnect or error.
        mic.stop()
712
+
713
+
714
async def _capture_user_sentence(
    websocket: WebSocket,
    mic,
    transcriber,
    timeout_seconds: float,
    listen_start_delay_seconds: float,
    voice_activity_rms_threshold: float,
    min_voiced_seconds_for_transcribe: float,
    ) -> tuple[str | None, bool]:
    """Capture microphone input and return one completed user sentence.

    Simple energy-based voice activity detection: accumulate chunks once RMS
    crosses ``voice_activity_rms_threshold``, and finish the utterance after a
    run of trailing silence (or when the max utterance length is hit).

    Returns:
        ``(sentence_or_None, heard_audio)`` — ``heard_audio`` is True when any
        chunk arrived after the initial listen delay, letting the caller
        distinguish "silent user" from "broken microphone".
    """
    from sentinel_edge.audio.sentence_splitter import SentenceSplitter

    splitter = SentenceSplitter()
    utterance_chunks: list[np.ndarray] = []
    utterance_samples = 0
    # Thresholds expressed in samples at the fixed 16 kHz capture rate.
    min_samples = int(16_000 * min_voiced_seconds_for_transcribe)
    max_samples = int(16_000 * MAX_UTTERANCE_SECONDS)
    deadline = time.time() + timeout_seconds
    listen_start = time.time() + listen_start_delay_seconds
    heard_audio = False
    speech_started = False
    silence_run_seconds = 0.0

    while time.time() < deadline:
        # Handle immediate client actions while waiting for reply.
        try:
            msg = await asyncio.wait_for(websocket.receive_text(), timeout=0.01)
            data = json.loads(msg)
            if data.get("action") == "block":
                return None, heard_audio
        except asyncio.TimeoutError:
            pass
        except json.JSONDecodeError:
            pass

        chunk = mic.get_chunk(timeout=0.2)
        if chunk is None:
            # Yield to the event loop before polling the queue again.
            await asyncio.sleep(0.01)
            continue

        if time.time() < listen_start:
            # Avoid immediately picking up system TTS bleed-through.
            continue

        heard_audio = True

        rms = float(np.sqrt(np.mean(np.square(chunk))))
        chunk_seconds = len(chunk) / 16_000.0

        if rms < voice_activity_rms_threshold:
            if speech_started:
                # Keep trailing silence so Whisper captures final words naturally.
                utterance_chunks.append(chunk)
                utterance_samples += len(chunk)
                silence_run_seconds += chunk_seconds
                # End of utterance: enough voiced audio plus sustained silence.
                if (
                    utterance_samples >= min_samples
                    and silence_run_seconds >= END_OF_UTTERANCE_SILENCE_SECONDS
                ):
                    break
            continue

        # Voiced chunk: (re)start the utterance and reset the silence run.
        speech_started = True
        silence_run_seconds = 0.0
        utterance_chunks.append(chunk)
        utterance_samples += len(chunk)
        if utterance_samples >= max_samples:
            break

    if utterance_chunks and utterance_samples >= min_samples:
        audio = np.concatenate(utterance_chunks)
        transcript = transcriber.transcribe(audio, sample_rate=16_000).strip()
        if transcript:
            sentences = splitter.feed(transcript)
            if sentences:
                return sentences[0], heard_audio

            # Whisper can return partial text without sentence punctuation.
            if len(transcript.split()) >= 1:
                return transcript, heard_audio

    # Fallback: if we heard audio but speech never crossed VAD, try a short best-effort
    # transcription on what we did capture to handle very quiet microphones.
    if utterance_chunks and heard_audio:
        audio = np.concatenate(utterance_chunks)
        transcript = transcriber.transcribe(audio, sample_rate=16_000).strip()
        if transcript:
            return transcript, heard_audio

    # Any partial sentence still buffered inside the splitter.
    leftover = splitter.flush()
    if leftover:
        return leftover, heard_audio
    return None, heard_audio
807
+
808
+
809
+ def _looks_like_echo(user_sentence: str, scam_sentence: str) -> bool:
810
+ """Heuristic guard: detect if user transcript likely matches scammer TTS echo."""
811
+ def _normalize(text: str) -> list[str]:
812
+ cleaned = re.sub(r"[^a-z0-9\s]", " ", text.lower())
813
+ tokens = [t for t in cleaned.split() if len(t) > 2]
814
+ return tokens
815
+
816
+ user_tokens = _normalize(user_sentence)
817
+ scam_tokens = _normalize(scam_sentence)
818
+
819
+ if not user_tokens or not scam_tokens:
820
+ return False
821
+
822
+ user_set = set(user_tokens)
823
+ scam_set = set(scam_tokens)
824
+ overlap = len(user_set & scam_set)
825
+ ratio = overlap / max(len(user_set), 1)
826
+
827
+ # Require substantial overlap to reduce false positives on short user replies.
828
+ return overlap >= 4 and ratio >= 0.85
829
+
830
+
831
async def run_live_mic_detection(
    websocket: WebSocket,
    input_device: str | int | None = None,
) -> None:
    """Run live microphone -> transcription -> model scoring pipeline.

    Buffers ~2 s of audio, transcribes it with Whisper, splits the transcript
    into sentences, and streams a scored ``sentence`` event per sentence until
    the client sends a ``block`` action or disconnects.
    """
    from live_mic import LiveMicCapture
    from sentinel_edge.audio.sentence_splitter import SentenceSplitter
    from sentinel_edge.audio.transcriber import Transcriber
    from sentinel_edge.engine import SentinelEngine

    mic = LiveMicCapture(
        sample_rate=16_000,
        chunk_size=2048,
        channels=1,
        input_device=(input_device if input_device is not None else _read_input_device_from_env()),
    )
    splitter = SentenceSplitter()
    whisper_model = os.getenv("SENTINEL_WHISPER_MODEL", "base.en")
    transcriber = Transcriber(model_name=whisper_model)
    engine = SentinelEngine(models_dir=os.path.join(_PROJECT_ROOT, "models"))
    engine.reset_call_state()

    call_start_time = time.time()
    sentence_index = 0
    chunk_accumulator: list[np.ndarray] = []
    accumulated_samples = 0
    # Transcribe roughly every 2 seconds for stable Whisper context.
    target_samples = 16_000 * 2

    try:
        mic.start()

        while True:
            # Non-blocking action handling from client.
            try:
                msg = await asyncio.wait_for(websocket.receive_text(), timeout=0.01)
                data = json.loads(msg)
                if data.get("action") == "block":
                    await websocket.send_json(
                        {"type": "call_blocked", "timestamp": time.time()}
                    )
                    break
            except asyncio.TimeoutError:
                pass
            except json.JSONDecodeError:
                pass

            chunk = mic.get_chunk(timeout=0.2)
            if chunk is None:
                # Yield briefly so the event loop can service the socket.
                await asyncio.sleep(0.01)
                continue

            chunk_accumulator.append(chunk)
            accumulated_samples += len(chunk)

            if accumulated_samples < target_samples:
                continue

            # Window full: hand the buffered audio to Whisper and reset.
            audio = np.concatenate(chunk_accumulator)
            chunk_accumulator = []
            accumulated_samples = 0

            transcript = transcriber.transcribe(audio, sample_rate=16_000).strip()
            if not transcript:
                continue

            for sentence in splitter.feed(transcript):
                t0 = time.perf_counter()
                fraud_score, features = engine.analyze_sentence(sentence)
                ema_score = engine.accumulator.update(fraud_score)
                alert = engine.alert_engine.evaluate(ema_score, features)
                inference_ms = (time.perf_counter() - t0) * 1000.0

                await websocket.send_json(
                    {
                        "type": "sentence",
                        "index": sentence_index,
                        "text": sentence,
                        "raw_score": round(fraud_score, 4),
                        "ema_score": round(ema_score, 4),
                        "features": {
                            k: round(v, 4) if isinstance(v, float) else int(v)
                            for k, v in features.items()
                        },
                        "alert": {
                            "should_alert": alert.should_alert,
                            "risk_level": alert.risk_level.value,
                            "reasons": alert.reasons,
                        },
                        "elapsed_seconds": round(time.time() - call_start_time, 1),
                        "inference_ms": round(inference_ms, 2),
                        "timestamp": time.time(),
                    }
                )
                sentence_index += 1

        # Score any partial sentence still held by the splitter before ending.
        leftover = splitter.flush()
        if leftover:
            t0 = time.perf_counter()
            fraud_score, features = engine.analyze_sentence(leftover)
            ema_score = engine.accumulator.update(fraud_score)
            alert = engine.alert_engine.evaluate(ema_score, features)
            inference_ms = (time.perf_counter() - t0) * 1000.0
            await websocket.send_json(
                {
                    "type": "sentence",
                    "index": sentence_index,
                    "text": leftover,
                    "raw_score": round(fraud_score, 4),
                    "ema_score": round(ema_score, 4),
                    "features": {
                        k: round(v, 4) if isinstance(v, float) else int(v)
                        for k, v in features.items()
                    },
                    "alert": {
                        "should_alert": alert.should_alert,
                        "risk_level": alert.risk_level.value,
                        "reasons": alert.reasons,
                    },
                    "elapsed_seconds": round(time.time() - call_start_time, 1),
                    "inference_ms": round(inference_ms, 2),
                    "timestamp": time.time(),
                }
            )
            sentence_index += 1

        await websocket.send_json(
            {
                "type": "call_end",
                "final_score": round(engine.accumulator.current_score, 4),
                "peak_score": round(engine.accumulator.peak_score, 4),
                "mean_score": round(engine.accumulator.mean_score, 4),
                "total_sentences": sentence_index,
                "duration_seconds": round(time.time() - call_start_time, 1),
                "timestamp": time.time(),
            }
        )
    finally:
        # Always release the audio device.
        mic.stop()
970
+
971
+
972
+ # ---------------------------------------------------------------------------
973
+ # WebSocket: live microphone fraud detection
974
+ # ---------------------------------------------------------------------------
975
+
976
+ _LIVE_MIC_BUFFER_DURATION = 5.0
977
+
978
+
979
@app.websocket("/ws/call/live")
async def live_mic_detection(websocket: WebSocket):
    """WebSocket endpoint for live microphone fraud detection.

    Self-contained variant: buffers 5 s of audio, transcribes with the tiny
    Whisper model, and scores sentence-by-sentence until a ``stop``/``block``
    action arrives.

    NOTE(review): ``/ws/call/{call_id}`` is registered earlier in this module,
    and Starlette matches routes in registration order, so "/ws/call/live"
    appears to be captured by ``call_detection`` (call_id="live") before this
    handler — confirm route ordering / whether this endpoint is reachable.
    """
    await websocket.accept()
    active_connections.append(websocket)

    mic = None
    try:
        # Import optional dependencies lazily so the rest of the demo works
        # without audio support; report a structured error instead of 500ing.
        try:
            from demo.backend.live_mic import LiveMicCapture
        except ImportError as e:
            await websocket.send_json({
                "type": "error",
                "message": f"Missing dependency for live mic: {e}",
                "timestamp": time.time(),
            })
            return

        try:
            from sentinel_edge.audio.transcriber import Transcriber
        except ImportError as e:
            await websocket.send_json({
                "type": "error",
                "message": f"Missing dependency for transcription: {e}",
                "timestamp": time.time(),
            })
            return

        mic = LiveMicCapture(sample_rate=16000)
        transcriber = Transcriber(model_name="tiny.en")

        # First use may download the Whisper weights; warn the client.
        if not transcriber.is_loaded:
            await websocket.send_json({
                "type": "status",
                "message": "Loading Whisper model (may download on first use)...",
                "timestamp": time.time(),
            })

            try:
                transcriber.load()
            except Exception as e:
                await websocket.send_json({
                    "type": "error",
                    "message": (
                        f"Failed to load Whisper model: {e}. "
                        "Install with: pip install openai-whisper"
                    ),
                    "timestamp": time.time(),
                })
                return

        from sentinel_edge.features.handcrafted import extract_handcrafted_features
        from sentinel_edge.features.feature_pipeline import FeaturePipeline
        from sentinel_edge.classifier.xgb_classifier import FraudClassifier as XGBFraudClassifier
        from sentinel_edge.classifier.score_accumulator import ScoreAccumulator
        from sentinel_edge.classifier.alert_engine import AlertEngine

        accumulator = ScoreAccumulator(alpha=0.3)
        alert_engine = AlertEngine()

        # Use the trained model when its artifacts exist; otherwise fall back
        # to the heuristic scorer defined at module level.
        _model_path = os.path.join(_PROJECT_ROOT, "models", "call_fraud_xgb.json")
        _tfidf_path = os.path.join(_PROJECT_ROOT, "models", "tfidf_call_vectorizer.pkl")
        _use_real_model = os.path.exists(_model_path) and os.path.exists(_tfidf_path)
        if _use_real_model:
            _classifier = XGBFraudClassifier(_model_path)
            _pipeline = FeaturePipeline(_tfidf_path)
        else:
            _classifier = None
            _pipeline = None

        try:
            mic.start()
        except ImportError as e:
            await websocket.send_json({
                "type": "error",
                "message": str(e),
                "timestamp": time.time(),
            })
            return
        except Exception as e:
            await websocket.send_json({
                "type": "error",
                "message": f"Failed to start microphone: {e}",
                "timestamp": time.time(),
            })
            return

        await websocket.send_json({
            "type": "call_start",
            "caller": "Live Microphone",
            "caller_name": "Live Input",
            "description": "Live Microphone Analysis",
            "timestamp": time.time(),
        })

        call_start_time = time.time()
        sentence_index = 0
        audio_buffer: list[np.ndarray] = []
        buffered_seconds = 0.0

        while True:
            # Poll for client actions; "stop" ends quietly, "block" also
            # notifies the client before ending.
            try:
                msg = await asyncio.wait_for(
                    websocket.receive_text(), timeout=0.05
                )
                data = json.loads(msg)
                if data.get("action") in ("stop", "block"):
                    if data.get("action") == "block":
                        await websocket.send_json({
                            "type": "call_blocked",
                            "timestamp": time.time(),
                        })
                    break
            except asyncio.TimeoutError:
                pass
            except json.JSONDecodeError:
                pass

            # get_chunk blocks; run it off the event loop thread.
            chunk = await asyncio.get_event_loop().run_in_executor(
                None, mic.get_chunk, 0.1
            )
            if chunk is not None:
                audio_buffer.append(chunk)
                buffered_seconds += len(chunk) / mic.sample_rate

            if buffered_seconds >= _LIVE_MIC_BUFFER_DURATION and audio_buffer:
                audio_segment = np.concatenate(audio_buffer)
                audio_buffer.clear()
                buffered_seconds = 0.0

                # Whisper transcription is CPU-heavy; also run in executor.
                transcript = await asyncio.get_event_loop().run_in_executor(
                    None, transcriber.transcribe, audio_segment, 16000
                )

                if not transcript.strip():
                    continue

                sentences = _split_sentences(transcript)

                for sentence in sentences:
                    sentence = sentence.strip()
                    if not sentence:
                        continue

                    features = extract_handcrafted_features(sentence)

                    if _use_real_model:
                        t0 = time.perf_counter()
                        feature_vec = _pipeline.extract(sentence)
                        fraud_score = _classifier.predict_proba(feature_vec)
                        _inference_ms = (time.perf_counter() - t0) * 1000
                    else:
                        fraud_score = compute_heuristic_score(features)
                        # Simulated latency for the UI in heuristic mode.
                        _inference_ms = np.random.uniform(5, 15)

                    ema_score = accumulator.update(fraud_score)
                    alert = alert_engine.evaluate(ema_score, features)
                    elapsed = time.time() - call_start_time

                    await websocket.send_json({
                        "type": "sentence",
                        "index": sentence_index,
                        "text": sentence,
                        "raw_score": round(fraud_score, 4),
                        "ema_score": round(ema_score, 4),
                        "features": {
                            k: round(v, 4) if isinstance(v, float) else int(v)
                            for k, v in features.items()
                        },
                        "alert": {
                            "should_alert": alert.should_alert,
                            "risk_level": alert.risk_level.value,
                            "reasons": alert.reasons,
                        },
                        "elapsed_seconds": round(elapsed, 1),
                        "inference_ms": round(_inference_ms, 1),
                        "timestamp": time.time(),
                    })
                    sentence_index += 1

        await websocket.send_json({
            "type": "call_end",
            "final_score": round(accumulator.current_score, 4),
            "peak_score": round(accumulator.peak_score, 4),
            "mean_score": round(accumulator.mean_score, 4),
            "total_sentences": sentence_index,
            "duration_seconds": round(time.time() - call_start_time, 1),
            "timestamp": time.time(),
        })

    except WebSocketDisconnect:
        pass
    except Exception as exc:
        # Best-effort error report; the socket may already be closed.
        try:
            await websocket.send_json({
                "type": "error",
                "message": str(exc),
                "timestamp": time.time(),
            })
        except Exception:
            pass
    finally:
        if mic is not None and mic.is_running:
            mic.stop()
        if websocket in active_connections:
            active_connections.remove(websocket)
1185
+
1186
+
1187
+ def _split_sentences(text: str) -> list[str]:
1188
+ """Split transcribed text into individual sentences."""
1189
+ parts = re.split(r'(?<=[.!?])\s+', text.strip())
1190
+ return [p for p in parts if p.strip()]
1191
+
1192
+
1193
+ # ---------------------------------------------------------------------------
1194
+ # WebSocket: privacy / federated learning demo
1195
+ # ---------------------------------------------------------------------------
1196
+
1197
+
1198
@app.websocket("/ws/privacy-demo")
async def privacy_demo(websocket: WebSocket):
    """Show what the hub sees vs what stays on device.

    Streams one ``privacy_comparison`` event per example sentence: the
    on-device view (transcript + handcrafted features) next to the hub view
    (a DP-noised gradient vector only), then a final ``demo_complete``.
    """
    await websocket.accept()

    try:
        examples = [
            "This is the IRS calling about your unpaid tax debt of five thousand dollars.",
            "Your social security number has been suspended due to suspicious activity.",
            "Press 1 now to speak with an agent or a warrant will be issued for your arrest.",
            "You need to purchase gift cards and read us the numbers to resolve this matter.",
        ]

        from sentinel_edge.features.handcrafted import extract_handcrafted_features
        from sentinel_edge.privacy.dp_noise import DPNoiseInjector

        # Differential-privacy parameters used for the hub-side view.
        n_local_samples = 50
        dp = DPNoiseInjector(epsilon=0.3)

        for idx, sentence in enumerate(examples):
            # Pace the demo so the UI can animate each comparison.
            await asyncio.sleep(2.0)

            features = extract_handcrafted_features(sentence)
            feature_vector = np.array(list(features.values()), dtype=np.float64)

            # Synthetic gradient stands in for a real local training step;
            # it is clipped then noised exactly as a real update would be.
            gradient = np.random.randn(len(feature_vector)) * 0.01
            clipped = dp.clip_gradient(gradient)
            noised_gradient = dp.add_noise(clipped, n_local_samples=n_local_samples)
            sensitivity = 1.0 / n_local_samples
            sigma = dp.compute_sigma(sensitivity)

            await websocket.send_json(
                {
                    "type": "privacy_comparison",
                    "index": idx,
                    "on_device": {
                        "transcript": sentence,
                        "features": {
                            k: round(v, 4) if isinstance(v, float) else int(v)
                            for k, v in features.items()
                        },
                        # Demo-only randomized score in the "fraud" range.
                        "fraud_score": round(
                            float(np.random.uniform(0.6, 0.95)), 4
                        ),
                    },
                    "hub_sees": {
                        # Only the first 20 components, for display purposes.
                        "gradient_vector": [
                            round(float(x), 6) for x in noised_gradient[:20]
                        ],
                        "gradient_size": len(noised_gradient),
                        "dp_sigma": round(float(sigma), 6),
                        "dp_epsilon": 0.3,
                        "n_samples": n_local_samples,
                        "model_version": 3,
                    },
                    "timestamp": time.time(),
                }
            )

        await websocket.send_json({"type": "demo_complete"})

    except WebSocketDisconnect:
        pass
    except Exception as exc:
        # Best-effort error report; the socket may already be closed.
        try:
            await websocket.send_json(
                {"type": "error", "message": str(exc), "timestamp": time.time()}
            )
        except Exception:
            pass
1268
+
1269
+
1270
+ # ---------------------------------------------------------------------------
1271
+ # Transcript loading helpers
1272
+ # ---------------------------------------------------------------------------
1273
+
1274
+
1275
def load_transcript(file_path: str) -> list[str]:
    """Load transcript sentences from a text file (one sentence per line)."""
    if not os.path.exists(file_path):
        # Missing file: serve the matching built-in script instead.
        return get_fallback_transcript(file_path)

    with open(file_path) as handle:
        # Keep only non-blank lines, stripped of surrounding whitespace.
        return [stripped for raw in handle if (stripped := raw.strip())]
1283
+
1284
+
1285
def get_fallback_transcript(file_path: str) -> list[str]:
    """Return a built-in transcript when the text file hasn't been created yet.

    The script is chosen by substring match on the file name; anything that is
    not an IRS / tech-support / bank script falls through to a benign
    doctor's-office call.
    """
    name = os.path.basename(file_path)

    irs_script = [
        "Hello, this is Agent Williams from the Internal Revenue Service.",
        "We are calling because there is a problem with your tax return.",
        "Our records indicate that you owe the IRS three thousand five hundred dollars in back taxes.",
        "If this amount is not paid immediately, a warrant will be issued for your arrest.",
        "This is your final notice before legal action is taken against you.",
        "You can resolve this matter today by making a payment over the phone.",
        "We accept payment through gift cards, wire transfer, or cryptocurrency.",
        "Please provide your social security number so we can verify your identity.",
        "Time is running out. You must act now to avoid criminal prosecution.",
        "Press 1 to be connected to a payment specialist immediately.",
    ]
    tech_script = [
        "Hello, this is the Microsoft Windows technical support department.",
        "We have detected that your computer has been compromised by malicious software.",
        "Hackers are currently accessing your personal files and banking information.",
        "You need to act immediately to prevent further damage to your system.",
        "I will need you to download our remote access tool so we can fix this problem.",
        "Please go to this website and enter the access code I give you.",
        "We need to verify your identity. Can you provide your email password?",
        "There is a one-time security fee of two hundred and ninety nine dollars.",
        "You can pay with a credit card or by purchasing Google Play gift cards.",
        "If you do not resolve this now, your computer will be permanently locked.",
    ]
    bank_script = [
        "Good afternoon, this is the fraud prevention department at your bank.",
        "We have detected suspicious activity on your account ending in four seven eight two.",
        "Someone attempted to make a large purchase of fifteen hundred dollars.",
        "For your security, we need to verify your identity immediately.",
        "Can you please confirm your full account number and routing number?",
        "We also need the three-digit security code on the back of your card.",
        "I'm going to send you a verification code. Please read it back to me.",
        "We need to transfer your funds to a secure account to protect them.",
        "Please do not hang up or contact your branch directly as this is time-sensitive.",
        "Your account will be frozen if we cannot complete this verification process.",
    ]
    legit_script = [
        "Hi, this is Sarah from Doctor Smith's office calling.",
        "I'm calling to confirm your appointment scheduled for next Tuesday at two thirty.",
        "Doctor Smith will be performing your annual checkup.",
        "Please remember to bring your insurance card and photo ID.",
        "Also, please arrive fifteen minutes early to fill out any updated paperwork.",
        "If you need to reschedule, please call us back at your convenience.",
        "We look forward to seeing you. Have a great day!",
    ]

    if "irs" in name:
        return irs_script
    if "tech" in name:
        return tech_script
    if "bank" in name:
        return bank_script
    return legit_script
1338
+
1339
+
1340
+ # ---------------------------------------------------------------------------
1341
+ # Heuristic fraud scorer (substitutes for XGBoost in demo mode)
1342
+ # ---------------------------------------------------------------------------
1343
+
1344
+
1345
def compute_heuristic_score(features: dict[str, float]) -> float:
    """Compute a heuristic fraud score from handcrafted features.

    A fixed linear combination of the features most indicative of phone
    fraud, clamped to [0, 1] with a small Gaussian jitter added so demo
    scores do not look perfectly flat.
    """
    # Hand-tuned linear weights per feature.
    weights = {
        "urgency_count": 0.08,
        "action_count": 0.06,
        "financial_count": 0.07,
        "impersonation_count": 0.10,
        "has_url": 0.04,
        "has_shortened_url": 0.06,
        "has_verify_pattern": 0.04,
        "has_threat": 0.10,
        "has_prize": 0.06,
        "has_account_ref": 0.07,
        "dollar_sign": 0.03,
        "has_phone_number": 0.02,
        "exclamation_count": 0.02,
        "caps_ratio": 0.03,
    }

    weighted_total = 0.0
    for feature_name, weight in weights.items():
        raw = features.get(feature_name, 0)
        # Booleans contribute as 0/1 like any other count feature.
        numeric = (1.0 if raw else 0.0) if isinstance(raw, bool) else raw
        weighted_total += numeric * weight

    clamped = min(1.0, max(0.0, weighted_total))
    # Single jitter draw, clamped back into [0, 1].
    jittered = clamped + np.random.normal(0, 0.02)
    return min(1.0, max(0.0, jittered))
1376
+
1377
+
1378
+ # ---------------------------------------------------------------------------
1379
+ # Entry point
1380
+ # ---------------------------------------------------------------------------
1381
+
1382
if __name__ == "__main__":
    # Run the demo backend directly with auto-reload for local development.
    uvicorn.run(
        "main:app",
        host="0.0.0.0",  # bind all interfaces so containers/other hosts can connect
        port=8000,
        reload=True,
        reload_dirs=[os.path.dirname(__file__)],  # watch only this backend directory
    )
demo/backend/sample_calls/amazon_refund_scam.txt ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Hello, this is the Amazon customer service refund department calling.
2
+ We are calling to inform you that a charge of four hundred and ninety nine dollars was made on your Amazon account today.
3
+ This appears to be an unauthorized purchase of an Apple MacBook Pro.
4
+ If you did not make this purchase, we need to process a refund immediately.
5
+ To verify your identity and process the refund, I will need to connect to your computer remotely.
6
+ Please go to the website I'm about to give you and download our secure support tool.
7
+ Once connected, we can access your Amazon account and reverse the charge right away.
8
+ I will also need you to log into your online banking so we can verify the refund was processed correctly.
9
+ It looks like the refund went through but we accidentally sent you forty nine hundred dollars instead of four ninety nine.
10
+ You will need to send back the difference using a wire transfer to our corporate account.
11
+ This needs to be resolved today or your Amazon account will be permanently suspended.
12
+ Can you please stay on the line while we complete this process?
demo/backend/sample_calls/bank_fraud_scam.txt ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Good afternoon, this is the fraud prevention department at your bank.
2
+ We have detected suspicious activity on your account ending in four seven eight two.
3
+ Someone attempted to make an unauthorized purchase of fifteen hundred dollars.
4
+ For your security, we need to verify your identity immediately.
5
+ Can you please confirm your full account number and routing number.
6
+ We also need the three digit security code on the back of your debit card.
7
+ I am going to send you a verification code and please read it back to me.
8
+ We need to transfer your funds to a secure temporary account to protect them from the fraudster.
9
+ Please do not hang up or contact your branch directly as this is time sensitive.
10
+ Your account will be permanently frozen if we cannot complete this verification process now.
11
+ Remember, do not share this call information with anyone for security purposes.
demo/backend/sample_calls/crypto_investment_scam.txt ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Hello, my name is David Chen and I'm a senior investment advisor at Digital Asset Partners.
2
+ I'm reaching out to a select group of individuals about an exclusive cryptocurrency opportunity.
3
+ Our proprietary AI trading algorithm has been generating returns of over five hundred percent in just three months.
4
+ We have limited spots available and this opportunity closes at midnight tonight.
5
+ The minimum investment is just two thousand dollars to get started with our managed portfolio.
6
+ Your funds will be placed in a secure digital wallet that our team manages on your behalf.
7
+ I just need your bank account details to initiate the transfer and secure your spot.
8
+ We also accept payment through Bitcoin, wire transfer, or Zelle for faster processing.
9
+ Several of our clients have already turned five thousand dollars into over fifty thousand.
10
+ I can send you a link right now to complete your registration and fund your account.
11
+ Don't miss out on this opportunity, the market conditions are perfect right now.
12
+ Can you confirm your full name and date of birth so I can set up your investor profile?
demo/backend/sample_calls/grandparent_scam.txt ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Grandma, it's me, please don't hang up.
2
+ I'm in really big trouble and I didn't know who else to call.
3
+ I was in a car accident and the police arrested me because they said it was my fault.
4
+ Please don't tell mom and dad, they would be so upset with me.
5
+ My lawyer says I need to post bail of three thousand dollars to get out of jail today.
6
+ I'm so scared, grandma, I've never been in trouble like this before.
7
+ The lawyer said you can send the money through a wire transfer or gift cards.
8
+ His name is attorney Robert Miller and he's going to call you in a few minutes with the details.
9
+ Please don't tell anyone about this, I'm so embarrassed.
10
+ I need the money as soon as possible or they're going to keep me here overnight.
11
+ Can you go to the store and get Google Play gift cards and read me the numbers on the back?
12
+ I promise I'll pay you back as soon as I get out, I love you grandma.
demo/backend/sample_calls/irs_scam.txt ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Hello, this is Agent Williams from the Internal Revenue Service.
2
+ We are calling because there is a serious problem with your federal tax return.
3
+ Our records indicate that you owe the IRS three thousand five hundred dollars in unpaid back taxes.
4
+ If this amount is not paid immediately, a warrant will be issued for your arrest today.
5
+ This is your final notice before legal action is taken against you.
6
+ You can resolve this matter today by making a payment over the phone.
7
+ We accept payment through gift cards, wire transfer, or cryptocurrency.
8
+ Please provide your social security number so we can verify your identity.
9
+ Time is running out and you must act now to avoid criminal prosecution.
10
+ Do not hang up this call or your local police department will be notified.
11
+ Press one to be connected to a payment specialist immediately.
demo/backend/sample_calls/legitimate_call.txt ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ Hi, this is Sarah from Doctor Smith's office calling.
2
+ I am calling to confirm your appointment scheduled for next Tuesday at two thirty in the afternoon.
3
+ Doctor Smith will be performing your annual checkup.
4
+ Please remember to bring your insurance card and a photo ID.
5
+ Also, please arrive about fifteen minutes early to fill out any updated paperwork.
6
+ If you have any questions or need to reschedule, please call us back at your earliest convenience.
7
+ We look forward to seeing you next week. Have a great day!
demo/backend/sample_calls/prize_notification_scam.txt ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Congratulations, you have been selected as our grand prize winner in the National Consumer Sweepstakes.
2
+ You have won a cash prize of two hundred and fifty thousand dollars and a brand new Mercedes Benz.
3
+ This is not a sales call, you have legitimately won and we just need to process your claim.
4
+ Your entry was automatically submitted when you made a purchase at a participating retailer last month.
5
+ To claim your prize, there is a small processing fee of just nine hundred and ninety five dollars.
6
+ This covers the taxes, insurance, and shipping costs for your new vehicle.
7
+ Once we receive the processing fee, your cash prize will be deposited directly into your bank account within 48 hours.
8
+ I will need your full name, date of birth, and bank account information to set up the deposit.
9
+ We also need your social security number for tax reporting purposes as required by federal law.
10
+ This offer expires today, so we need to complete the claim process on this call.
11
+ If you'd like, I can transfer you to our prize verification department to confirm your winnings.
12
+ Shall I go ahead and start the paperwork for your two hundred and fifty thousand dollar prize?
demo/backend/sample_calls/tech_support_scam.txt ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Hello, this is the Microsoft Windows technical support department.
2
+ We have detected that your computer has been compromised by malicious software.
3
+ Hackers are currently accessing your personal files and banking information.
4
+ You need to act immediately to prevent further damage to your system.
5
+ I will need you to download our remote access tool so we can fix this problem.
6
+ Please go to this website and enter the access code I give you.
7
+ We need to verify your identity so can you please provide your email password.
8
+ There is a one-time security fee of two hundred and ninety nine dollars to clean the infection.
9
+ You can pay with a credit card or by purchasing Google Play gift cards at your nearest store.
10
+ If you do not resolve this right now your computer will be permanently locked by the hackers.
11
+ I urge you to stay on the line because this is extremely urgent.
demo/backend/sample_calls/utility_shutoff_scam.txt ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ This is an urgent notice from your electric utility company's billing department.
2
+ Our records show that your account is severely past due with a balance of eight hundred and forty two dollars.
3
+ If payment is not received within the next two hours, your electricity will be disconnected.
4
+ This is your final warning before we dispatch a technician to your address to shut off service.
5
+ Once disconnected, there will be an additional reconnection fee of three hundred and fifty dollars.
6
+ We understand this may be unexpected, but our system shows multiple missed payments on your account.
7
+ To prevent disconnection, you can make an immediate payment over the phone right now.
8
+ We accept payment through prepaid debit cards, money orders, or cryptocurrency for immediate processing.
9
+ I will need your account number and the last four digits of your social security number to pull up your file.
10
+ If you make the payment now, I can personally ensure your service remains active.
11
+ Please note that our regular customer service line is closed, this is the only way to resolve this today.
12
+ Do you have a pen ready so I can give you the payment instructions?
demo/frontend/index.html ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <!DOCTYPE html>
2
+ <html lang="en" class="dark">
3
+ <head>
4
+ <meta charset="UTF-8" />
5
+ <link rel="icon" type="image/svg+xml" href="data:image/svg+xml,<svg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 32 32'><text y='28' font-size='28'>🛡️</text></svg>" />
6
+ <meta name="viewport" content="width=device-width, initial-scale=1.0" />
7
+ <title>SentinelEdge - Federated Edge AI Fraud Detection</title>
8
+ <link rel="preconnect" href="https://fonts.googleapis.com" />
9
+ <link rel="preconnect" href="https://fonts.gstatic.com" crossorigin />
10
+ <link href="https://fonts.googleapis.com/css2?family=Inter:wght@300;400;500;600;700;800&family=JetBrains+Mono:wght@400;500;600&display=swap" rel="stylesheet" />
11
+ </head>
12
+ <body class="bg-dark-bg text-white antialiased">
13
+ <div id="root"></div>
14
+ <script type="module" src="/src/main.tsx"></script>
15
+ </body>
16
+ </html>
demo/frontend/package-lock.json ADDED
The diff for this file is too large to render. See raw diff
 
demo/frontend/package.json ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "name": "sentineledge-demo",
3
+ "private": true,
4
+ "version": "0.1.0",
5
+ "type": "module",
6
+ "scripts": {
7
+ "dev": "vite",
8
+ "build": "tsc && vite build",
9
+ "preview": "vite preview"
10
+ },
11
+ "dependencies": {
12
+ "react": "^18.3.1",
13
+ "react-dom": "^18.3.1",
14
+ "recharts": "^2.12.0",
15
+ "lucide-react": "^0.344.0"
16
+ },
17
+ "devDependencies": {
18
+ "@types/react": "^18.3.1",
19
+ "@types/react-dom": "^18.3.0",
20
+ "@vitejs/plugin-react": "^4.3.0",
21
+ "autoprefixer": "^10.4.19",
22
+ "postcss": "^8.4.38",
23
+ "tailwindcss": "^3.4.3",
24
+ "typescript": "^5.4.5",
25
+ "vite": "^5.2.11"
26
+ }
27
+ }
demo/frontend/postcss.config.js ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ export default {
2
+ plugins: {
3
+ tailwindcss: {},
4
+ autoprefixer: {},
5
+ },
6
+ }
demo/frontend/public/favicon-16x16.svg ADDED
demo/frontend/public/favicon-32x32.svg ADDED
demo/frontend/public/sentineledge-logo-dark.svg ADDED
demo/frontend/src/App.tsx ADDED
@@ -0,0 +1,619 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { useState, useCallback, useRef, useEffect } from 'react'
2
+ import { Shield, Radio, Lock, GitBranch, Activity } from 'lucide-react'
3
+ import PhoneSimulator from './components/PhoneSimulator'
4
+ import CallScreen from './components/CallScreen'
5
+ import TranscriptPanel from './components/TranscriptPanel'
6
+ import ScoreGauge from './components/ScoreGauge'
7
+ import FeatureBreakdown from './components/FeatureBreakdown'
8
+ import DemoControls from './components/DemoControls'
9
+ import PrivacyDemo from './components/PrivacyDemo'
10
+ import FederatedDashboard from './components/FederatedDashboard'
11
+ import CallHistory from './components/CallHistory'
12
+ import type { CallHistoryEntry } from './components/CallHistory'
13
+ import { useWebSocket } from './hooks/useWebSocket'
14
+
15
+ // -------- Sample call scripts --------
16
+ interface ScriptLine {
17
+ text: string
18
+ score: number
19
+ features: Record<string, number>
20
+ }
21
+
22
+ const SAMPLE_CALLS: Record<string, { description: string; lines: ScriptLine[] }> = {
23
+ irs_scam: {
24
+ description: 'IRS Scam Call',
25
+ lines: [
26
+ { text: "Hello, this is Officer James Wilson from the Internal Revenue Service.", score: 0.15, features: { urgency: 0.1, financial_terms: 0.2, authority_claim: 0.3 } },
27
+ { text: "We have detected a serious issue with your tax filing for the previous year.", score: 0.25, features: { urgency: 0.2, financial_terms: 0.3, authority_claim: 0.3, threat_language: 0.1 } },
28
+ { text: "There is an outstanding balance of $4,789 that must be resolved immediately.", score: 0.42, features: { urgency: 0.5, financial_terms: 0.7, specific_amount: 0.6, time_pressure: 0.4 } },
29
+ { text: "If this is not paid today, a warrant will be issued for your arrest.", score: 0.68, features: { urgency: 0.9, threat_language: 0.8, time_pressure: 0.9, authority_claim: 0.5 } },
30
+ { text: "You need to purchase Google Play gift cards worth $4,789 and read me the codes.", score: 0.85, features: { urgency: 0.8, financial_terms: 0.9, gift_card_mention: 1.0, unusual_payment: 1.0 } },
31
+ { text: "This is the only way to avoid criminal prosecution. Do it now.", score: 0.92, features: { urgency: 1.0, threat_language: 0.95, time_pressure: 1.0, coercion: 0.9, unusual_payment: 0.8 } },
32
+ { text: "Are you at the store yet? You need to hurry, the deadline is in one hour.", score: 0.95, features: { urgency: 1.0, time_pressure: 1.0, coercion: 0.85, unusual_payment: 0.7 } },
33
+ ],
34
+ },
35
+ tech_support: {
36
+ description: 'Tech Support Scam',
37
+ lines: [
38
+ { text: "Hi, this is Mike from Microsoft Technical Support department.", score: 0.12, features: { authority_claim: 0.3, tech_jargon: 0.1 } },
39
+ { text: "We have detected that your computer has been compromised by a virus.", score: 0.28, features: { urgency: 0.3, tech_jargon: 0.4, fear_induction: 0.3 } },
40
+ { text: "Hackers are currently stealing your personal information as we speak.", score: 0.45, features: { urgency: 0.6, fear_induction: 0.7, time_pressure: 0.5 } },
41
+ { text: "I need you to download a remote access tool so I can fix this for you.", score: 0.62, features: { urgency: 0.5, remote_access: 0.9, tech_jargon: 0.5, trust_request: 0.6 } },
42
+ { text: "Please go to this website and enter your computer password when prompted.", score: 0.78, features: { credential_request: 0.95, remote_access: 0.8, trust_request: 0.7 } },
43
+ { text: "Now I also need your bank details to process the security deposit of $299.", score: 0.91, features: { financial_terms: 0.9, credential_request: 0.8, specific_amount: 0.7, trust_request: 0.6 } },
44
+ ],
45
+ },
46
+ bank_fraud: {
47
+ description: 'Bank Fraud Call',
48
+ lines: [
49
+ { text: "Good afternoon, this is the fraud prevention department at First National Bank.", score: 0.10, features: { authority_claim: 0.3, financial_terms: 0.2 } },
50
+ { text: "We noticed some suspicious activity on your account ending in 4821.", score: 0.22, features: { authority_claim: 0.3, financial_terms: 0.4, specific_detail: 0.3 } },
51
+ { text: "Someone attempted to make a purchase of $2,340 at an electronics store.", score: 0.30, features: { financial_terms: 0.5, urgency: 0.3, specific_amount: 0.4 } },
52
+ { text: "To verify your identity, I need you to confirm your full Social Security number.", score: 0.65, features: { credential_request: 0.95, authority_claim: 0.4, pii_request: 0.9 } },
53
+ { text: "I also need your online banking password and the PIN for your debit card.", score: 0.88, features: { credential_request: 1.0, pii_request: 1.0, financial_terms: 0.7 } },
54
+ { text: "Please also share the verification code that was just sent to your phone.", score: 0.93, features: { credential_request: 0.9, mfa_bypass: 1.0, urgency: 0.6 } },
55
+ ],
56
+ },
57
+ legitimate: {
58
+ description: 'Legitimate Call',
59
+ lines: [
60
+ { text: "Hi, this is Sarah from Dr. Thompson's office calling about your appointment.", score: 0.03, features: { authority_claim: 0.05 } },
61
+ { text: "We have you scheduled for next Tuesday at 2:30 PM for your annual checkup.", score: 0.02, features: {} },
62
+ { text: "I wanted to confirm that the time still works for you.", score: 0.01, features: {} },
63
+ { text: "Also, please remember to bring your insurance card and a photo ID.", score: 0.04, features: { pii_request: 0.05 } },
64
+ { text: "If you need to reschedule, you can call us back at the number on our website.", score: 0.02, features: {} },
65
+ { text: "Thank you, and we look forward to seeing you next week. Have a great day!", score: 0.01, features: {} },
66
+ ],
67
+ },
68
+ }
69
+
70
+ const AVAILABLE_CALLS = Object.entries(SAMPLE_CALLS).map(([id, call]) => ({
71
+ id,
72
+ description: call.description,
73
+ }))
74
+
75
+ type AlertLevel = 'safe' | 'low' | 'medium' | 'high' | 'critical'
76
+
77
+ function getAlertLevel(score: number): AlertLevel {
78
+ if (score < 0.15) return 'safe'
79
+ if (score < 0.3) return 'low'
80
+ if (score < 0.5) return 'medium'
81
+ if (score < 0.75) return 'high'
82
+ return 'critical'
83
+ }
84
+
85
+ type Tab = 'detection' | 'privacy' | 'federated'
86
+
87
+ interface AudioDevice {
88
+ index: number
89
+ name: string
90
+ max_input_channels: number
91
+ default_samplerate: number
92
+ }
93
+
94
+ export default function App() {
95
+ const [activeTab, setActiveTab] = useState<Tab>('detection')
96
+ const [isCallActive, setIsCallActive] = useState(false)
97
+ const [isMicActive, setIsMicActive] = useState(true)
98
+ const [callDuration, setCallDuration] = useState(0)
99
+ const [currentCallId, setCurrentCallId] = useState<string | null>(null)
100
+ const [sentences, setSentences] = useState<Array<{ text: string; score: number; index: number }>>([])
101
+ const [currentScore, setCurrentScore] = useState(0)
102
+ const [emaScore, setEmaScore] = useState(0)
103
+ const [features, setFeatures] = useState<Record<string, number>>({})
104
+ const [alertDismissed, setAlertDismissed] = useState(false)
105
+ const [fraudSignalReceived, setFraudSignalReceived] = useState(false)
106
+ const [privacySentences, setPrivacySentences] = useState<Array<{ text: string; score: number; features: Record<string, number> }>>([])
107
+ const [gradientVectors, setGradientVectors] = useState<number[][]>([])
108
+ const [isBackendDriven, setIsBackendDriven] = useState(false)
109
+ const [audioDevices, setAudioDevices] = useState<AudioDevice[]>([])
110
+ const [selectedInputDevice, setSelectedInputDevice] = useState<string>('')
111
+ const [callHistory, setCallHistory] = useState<CallHistoryEntry[]>([])
112
+
113
+ const durationRef = useRef<ReturnType<typeof setInterval> | null>(null)
114
+
115
+ const speakCallerLine = useCallback((text: string) => {
116
+ if (!('speechSynthesis' in window)) {
117
+ return
118
+ }
119
+
120
+ // Keep caller speech readable and avoid queued overlap.
121
+ window.speechSynthesis.cancel()
122
+ const utterance = new SpeechSynthesisUtterance(text)
123
+ utterance.rate = 0.95
124
+ utterance.pitch = 0.95
125
+ window.speechSynthesis.speak(utterance)
126
+ }, [])
127
+
128
+ // WebSocket hook for live backend connection
129
+ const wsUrl = currentCallId
130
+ ? (() => {
131
+ const params = new URLSearchParams()
132
+ const isScriptedCall = currentCallId !== 'live_mic'
133
+ if (isScriptedCall || isMicActive) {
134
+ params.set('interactive', '1')
135
+ }
136
+ if (selectedInputDevice) {
137
+ params.set('input_device', selectedInputDevice)
138
+ }
139
+ const qs = params.toString()
140
+ const wsBase = import.meta.env.DEV
141
+ ? 'ws://localhost:8000'
142
+ : `${window.location.protocol === 'https:' ? 'wss:' : 'ws:'}//${window.location.host}`
143
+ return `${wsBase}/ws/call/${currentCallId}${qs ? `?${qs}` : ''}`
144
+ })()
145
+ : ''
146
+
147
+ const { connect, disconnect, send, isConnected } = useWebSocket({
148
+ url: wsUrl,
149
+ onMessage: (data) => {
150
+ if (data.type === 'call_start') {
151
+ setIsBackendDriven(true)
152
+ return
153
+ }
154
+
155
+ if (data.type === 'sentence') {
156
+ const speaker = data.speaker === 'you' ? 'You' : 'Scammer'
157
+ const isUser = data.speaker === 'you'
158
+ const displayScore = isUser
159
+ ? (data.raw_score ?? 0)
160
+ : (data.ema_score ?? data.raw_score ?? 0)
161
+ const newSentence = {
162
+ text: `[${speaker}] ${data.text}`,
163
+ score: displayScore,
164
+ index: data.index ?? 0,
165
+ }
166
+ setSentences(prev => [...prev, newSentence])
167
+ if (!isUser) {
168
+ setCurrentScore(data.raw_score ?? 0)
169
+ setEmaScore(data.ema_score ?? data.raw_score ?? 0)
170
+ if (data.features) setFeatures(data.features)
171
+ }
172
+ setPrivacySentences(prev => [
173
+ ...prev,
174
+ {
175
+ text: data.text,
176
+ score: displayScore,
177
+ features: data.features ?? {},
178
+ },
179
+ ])
180
+
181
+ // In scripted call mode, play the caller sentence aloud.
182
+ if (data.speaker !== 'you' && currentCallId !== 'live_mic') {
183
+ speakCallerLine(data.text)
184
+ }
185
+ return
186
+ }
187
+
188
+ if (data.type === 'fraud_detected') {
189
+ setFraudSignalReceived(true)
190
+ setAlertDismissed(false)
191
+ if (typeof data.ema_score === 'number') {
192
+ setEmaScore(data.ema_score)
193
+ }
194
+ if (Array.isArray(data.reasons) && data.reasons.length > 0) {
195
+ const mapped: Record<string, number> = {}
196
+ data.reasons.forEach((reason: string, idx: number) => {
197
+ mapped[`reason_${idx + 1}_${reason.toLowerCase().replace(/[^a-z0-9]+/g, '_')}`] = 1
198
+ })
199
+ setFeatures(mapped)
200
+ }
201
+ return
202
+ }
203
+
204
+ if (data.type === 'waiting_for_reply') {
205
+ setSentences(prev => [
206
+ ...prev,
207
+ {
208
+ text: `[System] Your turn: speak now (window ${data.timeout_seconds ?? 15}s)`,
209
+ score: 0,
210
+ index: prev.length,
211
+ },
212
+ ])
213
+ return
214
+ }
215
+
216
+ if (data.type === 'user_timeout') {
217
+ setSentences(prev => [
218
+ ...prev,
219
+ {
220
+ text: '[System] No reply detected, continuing call flow.',
221
+ score: 0,
222
+ index: prev.length,
223
+ },
224
+ ])
225
+ return
226
+ }
227
+
228
+ if (data.type === 'user_echo_detected') {
229
+ setSentences(prev => [
230
+ ...prev,
231
+ {
232
+ text: `[System] ${data.message}`,
233
+ score: 0,
234
+ index: prev.length,
235
+ },
236
+ ])
237
+ return
238
+ }
239
+
240
+ if (data.type === 'call_end' || data.type === 'call_blocked') {
241
+ setIsCallActive(false)
242
+ setCurrentCallId(null)
243
+ setFraudSignalReceived(false)
244
+ if (durationRef.current) {
245
+ clearInterval(durationRef.current)
246
+ durationRef.current = null
247
+ }
248
+ if ('speechSynthesis' in window) {
249
+ window.speechSynthesis.cancel()
250
+ }
251
+ return
252
+ }
253
+
254
+ if (data.type === 'error') {
255
+ console.error('Backend error:', data.message)
256
+ setSentences(prev => [
257
+ ...prev,
258
+ {
259
+ text: `[System] ${data.message}`,
260
+ score: 0,
261
+ index: prev.length,
262
+ },
263
+ ])
264
+ }
265
+ },
266
+ onOpen: () => console.log('WebSocket connected'),
267
+ onClose: () => {
268
+ console.log('WebSocket disconnected')
269
+ setIsBackendDriven(false)
270
+ },
271
+ autoConnect: false,
272
+ })
273
+
274
+ const startCall = useCallback((callId: string) => {
275
+ if (!SAMPLE_CALLS[callId]) return
276
+
277
+ // Reset state
278
+ setSentences([])
279
+ setCurrentScore(0)
280
+ setEmaScore(0)
281
+ setFeatures({})
282
+ setAlertDismissed(false)
283
+ setFraudSignalReceived(false)
284
+ setCallDuration(0)
285
+ setCurrentCallId(callId)
286
+ setIsCallActive(true)
287
+ setPrivacySentences([])
288
+ setGradientVectors([])
289
+ setIsBackendDriven(false)
290
+
291
+ // Start duration timer
292
+ if (durationRef.current) {
293
+ clearInterval(durationRef.current)
294
+ }
295
+ durationRef.current = setInterval(() => {
296
+ setCallDuration(prev => prev + 1)
297
+ }, 1000)
298
+ }, [])
299
+
300
+ useEffect(() => {
301
+ const loadAudioDevices = async () => {
302
+ try {
303
+ const apiBase = import.meta.env.DEV ? 'http://localhost:8000' : ''
304
+ const resp = await fetch(`${apiBase}/api/audio-devices`)
305
+ const data = await resp.json()
306
+ const devices: AudioDevice[] = Array.isArray(data.devices) ? data.devices : []
307
+ setAudioDevices(devices)
308
+ if (devices.length > 0) {
309
+ setSelectedInputDevice(String(devices[0].index))
310
+ }
311
+ } catch {
312
+ setAudioDevices([])
313
+ }
314
+ }
315
+
316
+ void loadAudioDevices()
317
+ }, [])
318
+
319
+ const localPlaybackRef = useRef<ReturnType<typeof setTimeout>[]>([])
320
+
321
+ useEffect(() => {
322
+ if (!isCallActive || !currentCallId) return
323
+
324
+ // Try WebSocket first
325
+ connect()
326
+
327
+ // After a short delay, check if backend connected.
328
+ // If not, run local playback with built-in scripts.
329
+ const fallbackTimer = setTimeout(() => {
330
+ if (!isBackendDriven && currentCallId) {
331
+ const call = SAMPLE_CALLS[currentCallId]
332
+ if (!call) return
333
+
334
+ let ema = 0
335
+ const alpha = 0.3
336
+
337
+ call.lines.forEach((line, i) => {
338
+ const timer = setTimeout(() => {
339
+ ema = i === 0 ? line.score : alpha * line.score + (1 - alpha) * ema
340
+ setSentences(prev => [...prev, { text: line.text, score: line.score, index: i }])
341
+ setCurrentScore(line.score)
342
+ setEmaScore(ema)
343
+ setFeatures(line.features)
344
+ setPrivacySentences(prev => [...prev, { text: line.text, score: line.score, features: line.features }])
345
+ if (ema >= 0.5) setFraudSignalReceived(true)
346
+ }, (i + 1) * 3000)
347
+ localPlaybackRef.current.push(timer)
348
+ })
349
+ }
350
+ }, 2000)
351
+
352
+ return () => {
353
+ clearTimeout(fallbackTimer)
354
+ localPlaybackRef.current.forEach(t => clearTimeout(t))
355
+ localPlaybackRef.current = []
356
+ }
357
+ }, [isCallActive, currentCallId, connect, isBackendDriven])
358
+
359
+ // Record a completed call into history
360
+ const recordCallHistory = useCallback((outcome: 'Blocked' | 'Dismissed' | 'Completed') => {
361
+ if (!currentCallId) return
362
+ const call = SAMPLE_CALLS[currentCallId]
363
+ if (!call) return
364
+
365
+ // Gather top features from all sentences seen so far
366
+ const featureCounts: Record<string, number> = {}
367
+ privacySentences.forEach(s => {
368
+ Object.entries(s.features).forEach(([k, v]) => {
369
+ if (v > 0.3) {
370
+ featureCounts[k] = Math.max(featureCounts[k] ?? 0, v)
371
+ }
372
+ })
373
+ })
374
+ const topFeatures = Object.entries(featureCounts)
375
+ .sort(([, a], [, b]) => b - a)
376
+ .slice(0, 5)
377
+ .map(([k]) => k)
378
+
379
+ const peakScore = sentences.reduce((max, s) => Math.max(max, s.score), 0)
380
+
381
+ const entry: CallHistoryEntry = {
382
+ id: `${currentCallId}_${Date.now()}`,
383
+ callType: currentCallId,
384
+ callLabel: call.description,
385
+ duration: callDuration,
386
+ peakScore,
387
+ finalScore: emaScore,
388
+ outcome,
389
+ timestamp: Date.now(),
390
+ totalSentences: sentences.length,
391
+ topFeatures,
392
+ }
393
+ setCallHistory(prev => [entry, ...prev])
394
+ }, [currentCallId, callDuration, emaScore, sentences, privacySentences])
395
+
396
+ const endCall = useCallback(() => {
397
+ recordCallHistory('Completed')
398
+ setIsCallActive(false)
399
+ setCurrentCallId(null)
400
+ if (durationRef.current) {
401
+ clearInterval(durationRef.current)
402
+ durationRef.current = null
403
+ }
404
+ if ('speechSynthesis' in window) {
405
+ window.speechSynthesis.cancel()
406
+ }
407
+ disconnect()
408
+ }, [disconnect, recordCallHistory])
409
+
410
+ const blockCaller = useCallback(() => {
411
+ send({ action: 'block' })
412
+ endCall()
413
+ }, [endCall, send])
414
+
415
+ const dismissAlert = useCallback(() => {
416
+ send({ action: 'dismiss' })
417
+ setAlertDismissed(true)
418
+ }, [send])
419
+
420
+ // Cleanup on unmount
421
+ useEffect(() => {
422
+ return () => {
423
+ if (durationRef.current) clearInterval(durationRef.current)
424
+ if ('speechSynthesis' in window) {
425
+ window.speechSynthesis.cancel()
426
+ }
427
+ }
428
+ }, [])
429
+
430
+ const alertLevel = getAlertLevel(emaScore)
431
+ const effectiveAlertLevel = alertDismissed || !fraudSignalReceived ? 'safe' : alertLevel
432
+ const callerNames: Record<string, string> = {
433
+ live_mic: 'Live Microphone',
434
+ irs_scam: 'Officer James Wilson',
435
+ tech_support: 'Mike - Microsoft',
436
+ bank_fraud: 'First National Bank',
437
+ legitimate: "Dr. Thompson's Office",
438
+ }
439
+ const callerNumbers: Record<string, string> = {
440
+ live_mic: 'On-device audio stream',
441
+ irs_scam: '+1 (202) 555-0147',
442
+ tech_support: '+1 (800) 555-0199',
443
+ bank_fraud: '+1 (312) 555-0183',
444
+ legitimate: '+1 (415) 555-0126',
445
+ }
446
+
447
+ const tabs: { id: Tab; label: string; icon: React.ReactNode }[] = [
448
+ { id: 'detection', label: 'Live Detection', icon: <Radio className="w-4 h-4" /> },
449
+ { id: 'privacy', label: 'Privacy Demo', icon: <Lock className="w-4 h-4" /> },
450
+ { id: 'federated', label: 'Federated Learning', icon: <GitBranch className="w-4 h-4" /> },
451
+ ]
452
+ const isDetectionLoading = isCallActive && sentences.length === 0
453
+
454
+ return (
455
+ <div className="min-h-screen bg-dark-bg text-gray-100 font-sans">
456
+ {/* Header */}
457
+ <header className="border-b border-dark-border/50 bg-dark-card/50 backdrop-blur-xl sticky top-0 z-50">
458
+ <div className="mx-auto flex max-w-[1440px] flex-col gap-4 px-4 py-4 sm:px-6 lg:flex-row lg:items-center lg:justify-between">
459
+ <div className="flex items-center gap-3">
460
+ <div className="relative">
461
+ <Shield className="w-8 h-8 text-brand-teal" />
462
+ <div className="absolute -top-0.5 -right-0.5 w-2.5 h-2.5 bg-safe rounded-full animate-pulse" />
463
+ </div>
464
+ <div>
465
+ <h1 className="text-xl font-bold tracking-tight">
466
+ <span className="text-brand-teal">Sentinel</span>
467
+ <span className="text-white">Edge</span>
468
+ </h1>
469
+ <p className="text-[10px] text-gray-500 font-medium tracking-widest uppercase">
470
+ Federated Edge AI Fraud Detection
471
+ </p>
472
+ </div>
473
+ </div>
474
+
475
+ {/* Tabs */}
476
+ <nav className="flex flex-wrap items-center gap-2">
477
+ {tabs.map(tab => (
478
+ <button
479
+ key={tab.id}
480
+ onClick={() => setActiveTab(tab.id)}
481
+ className={`control-button px-3 py-2 text-sm font-medium sm:px-4 ${
482
+ activeTab === tab.id
483
+ ? 'border-brand-teal/30 bg-brand-teal/10 text-brand-teal'
484
+ : 'text-gray-400 hover:text-gray-200 hover:bg-white/5'
485
+ }`}
486
+ >
487
+ {tab.icon}
488
+ {tab.label}
489
+ </button>
490
+ ))}
491
+ </nav>
492
+
493
+ {/* Status indicator */}
494
+ <div className="flex items-center gap-2 text-xs lg:justify-end">
495
+ <Activity className={`w-3.5 h-3.5 ${isConnected ? 'text-safe' : 'text-gray-500'}`} />
496
+ <span className={isConnected ? 'text-safe' : 'text-gray-500'}>
497
+ {isConnected ? 'Backend Connected' : isBackendDriven ? 'Reconnecting...' : 'Backend Disconnected'}
498
+ </span>
499
+ </div>
500
+ </div>
501
+ </header>
502
+
503
+ {/* Main Content */}
504
+ <main className="mx-auto max-w-[1440px] px-4 py-5 sm:px-6 sm:py-6">
505
+ {activeTab === 'detection' && (
506
+ <div className="space-y-6">
507
+ {/* Demo Controls */}
508
+ <DemoControls
509
+ onSelectCall={startCall}
510
+ onToggleMic={() => setIsMicActive(!isMicActive)}
511
+ onSelectMicDevice={setSelectedInputDevice}
512
+ isCallActive={isCallActive}
513
+ isMicActive={isMicActive}
514
+ selectedMicDevice={selectedInputDevice}
515
+ micDevices={audioDevices.map(d => ({ value: String(d.index), label: `${d.index}: ${d.name}` }))}
516
+ availableCalls={AVAILABLE_CALLS}
517
+ />
518
+
519
+ {/* Main detection layout */}
520
+ <div className="grid grid-cols-1 gap-6 lg:grid-cols-12">
521
+ {/* Phone Simulator */}
522
+ <div className="flex justify-center lg:col-span-4 lg:justify-start">
523
+ <PhoneSimulator>
524
+ <CallScreen
525
+ callerName={currentCallId ? callerNames[currentCallId] ?? 'Unknown' : 'No Active Call'}
526
+ callerNumber={currentCallId ? callerNumbers[currentCallId] ?? '' : ''}
527
+ duration={callDuration}
528
+ isActive={isCallActive}
529
+ onEndCall={endCall}
530
+ onBlock={blockCaller}
531
+ onDismissAlert={dismissAlert}
532
+ alertLevel={effectiveAlertLevel}
533
+ alertReasons={
534
+ Object.entries(features)
535
+ .filter(([, v]) => v > 0.5)
536
+ .sort(([, a], [, b]) => b - a)
537
+ .map(([k]) => k.replace(/_/g, ' '))
538
+ }
539
+ fraudScore={emaScore}
540
+ />
541
+ </PhoneSimulator>
542
+ </div>
543
+
544
+ {/* Right side panels */}
545
+ <div className="space-y-6 lg:col-span-8">
546
+ {/* Score + Features row */}
547
+ <div className="grid grid-cols-1 gap-6 lg:grid-cols-2">
548
+ <div className="glass-card p-6 glow-teal">
549
+ {isDetectionLoading ? (
550
+ <div className="panel-skeleton">
551
+ <div className="mx-auto h-48 w-48 rounded-full skeleton-block" />
552
+ <div className="mx-auto h-4 w-32 skeleton-block" />
553
+ <div className="mx-auto h-3 w-40 skeleton-block" />
554
+ </div>
555
+ ) : (
556
+ <ScoreGauge score={emaScore} label="Fraud Score (EMA)" />
557
+ )}
558
+ </div>
559
+ <div className="glass-card p-6">
560
+ {isDetectionLoading ? (
561
+ <div className="panel-skeleton">
562
+ {[0, 1, 2, 3].map(item => (
563
+ <div key={item} className="panel-skeleton-card space-y-2">
564
+ <div className="flex items-center justify-between gap-3">
565
+ <div className="h-3 w-24 skeleton-block" />
566
+ <div className="h-3 w-10 skeleton-block" />
567
+ </div>
568
+ <div className="h-2 w-full rounded-full skeleton-block" />
569
+ </div>
570
+ ))}
571
+ </div>
572
+ ) : (
573
+ <FeatureBreakdown features={features} />
574
+ )}
575
+ </div>
576
+ </div>
577
+
578
+ {/* Transcript */}
579
+ <div className="glass-card p-6">
580
+ {isDetectionLoading ? (
581
+ <div className="panel-skeleton">
582
+ {[0, 1, 2].map(item => (
583
+ <div key={item} className="panel-skeleton-card space-y-3">
584
+ <div className="h-3 w-20 skeleton-block" />
585
+ <div className="h-3 w-full skeleton-block" />
586
+ <div className="h-3 w-5/6 skeleton-block" />
587
+ </div>
588
+ ))}
589
+ </div>
590
+ ) : (
591
+ <TranscriptPanel
592
+ sentences={sentences}
593
+ isStreaming={isCallActive}
594
+ />
595
+ )}
596
+ </div>
597
+ </div>
598
+ </div>
599
+
600
+ {/* Call History - collapsible section below main content */}
601
+ <CallHistory entries={callHistory} />
602
+ </div>
603
+ )}
604
+
605
+ {activeTab === 'privacy' && (
606
+ <PrivacyDemo
607
+ sentences={privacySentences}
608
+ gradientVectors={gradientVectors}
609
+ isCallActive={isCallActive}
610
+ />
611
+ )}
612
+
613
+ {activeTab === 'federated' && (
614
+ <FederatedDashboard />
615
+ )}
616
+ </main>
617
+ </div>
618
+ )
619
+ }
demo/frontend/src/components/CallHistory.tsx ADDED
@@ -0,0 +1,240 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { useState } from 'react'
2
+ import {
3
+ ChevronDown,
4
+ ChevronUp,
5
+ Phone,
6
+ AlertTriangle,
7
+ Shield,
8
+ Clock,
9
+ } from 'lucide-react'
10
+
11
+ // -------- Types --------
12
+ export interface CallHistoryEntry {
13
+ id: string
14
+ callType: string
15
+ callLabel: string
16
+ duration: number // seconds
17
+ peakScore: number
18
+ finalScore: number
19
+ outcome: 'Blocked' | 'Dismissed' | 'Completed'
20
+ timestamp: number // Date.now()
21
+ totalSentences: number
22
+ topFeatures: string[]
23
+ }
24
+
25
+ interface CallHistoryProps {
26
+ entries: CallHistoryEntry[]
27
+ }
28
+
29
+ // -------- Helpers --------
30
/** Render a duration in whole seconds as a zero-padded MM:SS string. */
function formatDuration(seconds: number): string {
  const pad = (n: number) => n.toString().padStart(2, '0')
  return `${pad(Math.floor(seconds / 60))}:${pad(seconds % 60)}`
}
35
+
36
/** Human-friendly "time ago" label for a Date.now()-style millisecond timestamp. */
function relativeTime(timestamp: number): string {
  const elapsedSec = Math.floor((Date.now() - timestamp) / 1000)
  if (elapsedSec < 5) return 'just now'
  if (elapsedSec < 60) return `${elapsedSec}s ago`
  const elapsedMin = Math.floor(elapsedSec / 60)
  if (elapsedMin < 60) return `${elapsedMin} minute${elapsedMin === 1 ? '' : 's'} ago`
  const elapsedHr = Math.floor(elapsedMin / 60)
  return `${elapsedHr} hour${elapsedHr === 1 ? '' : 's'} ago`
}
45
+
46
/** Badge background/text classes for a call outcome; unknown outcomes fall back to "safe" styling. */
function getOutcomeStyle(outcome: string): { bg: string; text: string } {
  const styles: Record<string, { bg: string; text: string }> = {
    Blocked: { bg: 'bg-alert/10', text: 'text-alert' },
    Dismissed: { bg: 'bg-warning/10', text: 'text-warning' },
    Completed: { bg: 'bg-safe/10', text: 'text-safe' },
  }
  return styles[outcome] ?? { bg: 'bg-safe/10', text: 'text-safe' }
}
57
+
58
/** Text color class for a fraud score in [0, 1], from safe (low) to alert (high). */
function getScoreColor(score: number): string {
  if (score >= 0.75) return 'text-alert'
  if (score >= 0.5) return 'text-orange-400'
  if (score >= 0.3) return 'text-warning'
  return 'text-safe'
}
64
+
65
/** Background color class for a fraud score in [0, 1]; thresholds mirror getScoreColor. */
function getScoreBgColor(score: number): string {
  if (score >= 0.75) return 'bg-alert/10'
  if (score >= 0.5) return 'bg-orange-500/10'
  if (score >= 0.3) return 'bg-warning/10'
  return 'bg-safe/10'
}
71
+
72
/** Row icon: warning triangle for the scam call types, shield for legitimate, phone otherwise. */
function getCallIcon(callType: string) {
  if (callType === 'legitimate') {
    return <Shield className="w-4 h-4" />
  }
  const scamTypes = ['irs_scam', 'tech_support', 'bank_fraud']
  return scamTypes.includes(callType)
    ? <AlertTriangle className="w-4 h-4" />
    : <Phone className="w-4 h-4" />
}
84
+
85
/** Icon color + background classes keyed by call type; unknown types get neutral gray. */
function getCallIconColor(callType: string): string {
  const palette: Record<string, string> = {
    irs_scam: 'text-alert bg-alert/10',
    tech_support: 'text-orange-400 bg-orange-500/10',
    bank_fraud: 'text-warning bg-warning/10',
    legitimate: 'text-safe bg-safe/10',
  }
  return palette[callType] ?? 'text-gray-400 bg-gray-700/30'
}
99
+
100
+ // -------- Component --------
101
/**
 * Collapsible "Recent Calls" panel.
 *
 * Shows a header with a call-count badge, an empty state when no calls have
 * been recorded, and one expandable row per call (icon, label, relative time,
 * duration, final-score badge, outcome badge). Clicking a row toggles a detail
 * card with total sentences, peak/final scores, and the top features.
 *
 * Fix: removed the unused `selectedEntry` local — it was computed on every
 * render but never referenced (all detail rendering compares `selectedId`
 * against `entry.id` inline).
 */
export default function CallHistory({ entries }: CallHistoryProps) {
  // Whether the panel body (list or empty state) is visible.
  const [isExpanded, setIsExpanded] = useState(true)
  // id of the entry whose detail card is open; null when none is open.
  const [selectedId, setSelectedId] = useState<string | null>(null)

  return (
    <div className="glass-card overflow-hidden">
      {/* Collapsible header */}
      <button
        onClick={() => setIsExpanded(!isExpanded)}
        className="w-full flex items-center justify-between px-4 py-4 hover:bg-white/[0.02] transition-colors sm:px-6"
      >
        <div className="flex items-center gap-2.5">
          <Clock className="w-4 h-4 text-brand-teal" />
          <h3 className="text-sm font-semibold text-gray-200">Recent Calls</h3>
          {entries.length > 0 && (
            <span className="text-[10px] px-2 py-0.5 rounded-full bg-brand-teal/10 text-brand-teal font-medium tabular-nums">
              {entries.length}
            </span>
          )}
        </div>
        {isExpanded ? (
          <ChevronUp className="w-4 h-4 text-gray-500" />
        ) : (
          <ChevronDown className="w-4 h-4 text-gray-500" />
        )}
      </button>

      {/* Body */}
      {isExpanded && (
        <div className="border-t border-dark-border/30">
          {entries.length === 0 ? (
            /* Empty state */
            <div className="flex flex-col items-center justify-center py-10 px-6">
              <Phone className="w-8 h-8 text-gray-700 mb-3" />
              <p className="text-sm text-gray-500 font-medium">No calls yet.</p>
              <p className="text-xs text-gray-600 mt-1">Start a sample call to see results here.</p>
            </div>
          ) : (
            <div className="divide-y divide-dark-border/20">
              {entries.map((entry) => (
                <div key={entry.id}>
                  {/* Row — clicking toggles this entry's detail card */}
                  <button
                    onClick={() => setSelectedId(selectedId === entry.id ? null : entry.id)}
                    className="w-full flex flex-col items-start gap-3 px-4 py-3.5 text-left hover:bg-white/[0.02] transition-colors sm:px-6 lg:flex-row lg:items-center lg:gap-4"
                  >
                    {/* Call type icon */}
                    <div className={`w-8 h-8 rounded-lg flex items-center justify-center flex-shrink-0 ${getCallIconColor(entry.callType)}`}>
                      {getCallIcon(entry.callType)}
                    </div>

                    {/* Call info */}
                    <div className="flex-1 min-w-0">
                      <p className="text-sm font-medium text-gray-200 truncate">
                        {entry.callLabel}
                      </p>
                      <p className="text-[10px] text-gray-500 mt-0.5">
                        {relativeTime(entry.timestamp)}
                      </p>
                    </div>

                    {/* Duration */}
                    <div className="flex items-center gap-1.5 text-xs text-gray-400 lg:flex-shrink-0">
                      <Clock className="w-3 h-3" />
                      <span className="font-mono tabular-nums">{formatDuration(entry.duration)}</span>
                    </div>

                    {/* Final score badge */}
                    <div className={`flex-shrink-0 px-2.5 py-1 rounded-lg ${getScoreBgColor(entry.finalScore)}`}>
                      <span className={`text-xs font-bold tabular-nums ${getScoreColor(entry.finalScore)}`}>
                        {(entry.finalScore * 100).toFixed(0)}%
                      </span>
                    </div>

                    {/* Outcome badge */}
                    <div>
                      <span className={`text-[10px] font-semibold px-2 py-1 rounded-full ${getOutcomeStyle(entry.outcome).bg} ${getOutcomeStyle(entry.outcome).text}`}>
                        {entry.outcome}
                      </span>
                    </div>

                    {/* Expand indicator */}
                    {selectedId === entry.id ? (
                      <ChevronUp className="w-3.5 h-3.5 text-gray-600 flex-shrink-0" />
                    ) : (
                      <ChevronDown className="w-3.5 h-3.5 text-gray-600 flex-shrink-0" />
                    )}
                  </button>

                  {/* Expanded detail */}
                  {selectedId === entry.id && (
                    <div className="px-4 pb-4 animate-fade-in sm:px-6">
                      <div className="rounded-xl border border-dark-border/20 bg-dark-bg/50 p-4 space-y-3 lg:ml-12">
                        {/* Summary row */}
                        <div className="grid grid-cols-1 gap-4 sm:grid-cols-3">
                          <div>
                            <p className="text-[10px] text-gray-600 uppercase tracking-wider mb-1">Total Sentences</p>
                            <p className="text-sm font-bold text-gray-200 tabular-nums">{entry.totalSentences}</p>
                          </div>
                          <div>
                            <p className="text-[10px] text-gray-600 uppercase tracking-wider mb-1">Peak Score</p>
                            <p className={`text-sm font-bold tabular-nums ${getScoreColor(entry.peakScore)}`}>
                              {(entry.peakScore * 100).toFixed(1)}%
                            </p>
                          </div>
                          <div>
                            <p className="text-[10px] text-gray-600 uppercase tracking-wider mb-1">Final Score</p>
                            <p className={`text-sm font-bold tabular-nums ${getScoreColor(entry.finalScore)}`}>
                              {(entry.finalScore * 100).toFixed(1)}%
                            </p>
                          </div>
                        </div>

                        {/* Top features */}
                        {entry.topFeatures.length > 0 && (
                          <div>
                            <p className="text-[10px] text-gray-600 uppercase tracking-wider mb-1.5">Top Features</p>
                            <div className="flex flex-wrap gap-1.5">
                              {entry.topFeatures.map((feature) => (
                                <span key={feature} className="text-[10px] px-2 py-0.5 bg-brand-teal/10 text-brand-teal rounded-full">
                                  {feature.replace(/_/g, ' ')}
                                </span>
                              ))}
                            </div>
                          </div>
                        )}
                      </div>
                    </div>
                  )}
                </div>
              ))}
            </div>
          )}
        </div>
      )}
    </div>
  )
}
demo/frontend/src/components/CallScreen.tsx ADDED
@@ -0,0 +1,238 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { useEffect, useState } from 'react'
2
+ import {
3
+ Phone,
4
+ PhoneOff,
5
+ Mic,
6
+ MicOff,
7
+ Volume2,
8
+ Grid3X3,
9
+ UserCircle2,
10
+ Wifi,
11
+ Battery,
12
+ Signal,
13
+ Camera,
14
+ } from 'lucide-react'
15
+ import FraudAlert from './FraudAlert'
16
+
17
+ interface CallScreenProps {
18
+ callerName: string
19
+ callerNumber: string
20
+ duration: number
21
+ isActive: boolean
22
+ onEndCall: () => void
23
+ onBlock: () => void
24
+ onDismissAlert: () => void
25
+ alertLevel: 'safe' | 'low' | 'medium' | 'high' | 'critical'
26
+ alertReasons: string[]
27
+ fraudScore: number
28
+ }
29
+
30
/** Format a call duration in whole seconds as zero-padded MM:SS. */
function formatDuration(seconds: number): string {
  const twoDigits = (n: number) => String(n).padStart(2, '0')
  const mins = Math.floor(seconds / 60)
  return `${twoDigits(mins)}:${twoDigits(seconds % 60)}`
}
35
+
36
/** Current wall-clock time as a 12-hour en-US string (e.g. "3:07 PM") for the status bar. */
function getCurrentTime(): string {
  return new Date().toLocaleTimeString('en-US', { hour: 'numeric', minute: '2-digit', hour12: true })
}
40
+
41
/**
 * Simulated smartphone in-call screen.
 *
 * Renders a phone status bar, the caller avatar/name/number with a live call
 * timer, a FraudAlert overlay when the alert level is medium/high/critical,
 * and the standard call action buttons (mute, keypad, speaker, contacts, end).
 * Mute/speaker toggles are purely visual local state — they do not affect any
 * audio pipeline from this component.
 */
export default function CallScreen({
  callerName,
  callerNumber,
  duration,
  isActive,
  onEndCall,
  onBlock,
  onDismissAlert,
  alertLevel,
  alertReasons,
  fraudScore,
}: CallScreenProps) {
  // Local cosmetic toggles for the action row.
  const [isMuted, setIsMuted] = useState(false)
  const [isSpeaker, setIsSpeaker] = useState(false)
  // Drives a brief shake animation class when an alert appears.
  const [isScreenShaking, setIsScreenShaking] = useState(false)

  // Alert overlay shows only while a call is active and risk is medium or above.
  const showAlert = isActive && (alertLevel === 'high' || alertLevel === 'critical' || alertLevel === 'medium')
  // Up to two initials from the caller name, e.g. "Officer James" -> "OJ".
  const callerInitials = callerName
    .split(' ')
    .filter(Boolean)
    .slice(0, 2)
    .map(part => part[0]?.toUpperCase())
    .join('')

  // Shake the screen for ~480ms whenever the alert appears or its level changes.
  useEffect(() => {
    if (!showAlert) return

    setIsScreenShaking(true)
    const timeout = window.setTimeout(() => {
      setIsScreenShaking(false)
    }, 480)

    // Cleanup cancels the pending timer if the alert state changes mid-shake.
    return () => window.clearTimeout(timeout)
  }, [showAlert, alertLevel])

  return (
    <div className={`w-full h-full flex flex-col bg-gradient-to-b from-dark-bg via-dark-card to-dark-bg ${isScreenShaking ? 'phone-screen-shake' : ''}`}>
      {/* Status bar */}
      <div className="relative z-30 flex items-center justify-between px-7 pt-4 pb-2 text-[11px] text-white/85">
        <span className="min-w-[52px] font-semibold tracking-[0.01em]">{getCurrentTime()}</span>
        <div className="w-[126px]" />
        <div className="flex min-w-[56px] items-center justify-end gap-1.5">
          <Signal className="h-3.5 w-3.5 stroke-[2.2]" />
          <Wifi className="h-3.5 w-3.5 stroke-[2.2]" />
          <Battery className="h-3.5 w-3.5 stroke-[2.2]" />
        </div>
      </div>

      {/* Fraud Alert Overlay */}
      {showAlert && (
        <div className="absolute inset-0 z-20 pt-[42px]">
          <FraudAlert
            riskLevel={alertLevel as 'medium' | 'high' | 'critical'}
            score={fraudScore}
            reasons={alertReasons}
            onBlock={onBlock}
            onDismiss={onDismissAlert}
          />
        </div>
      )}

      {/* Main call content */}
      <div className="flex-1 flex flex-col items-center justify-center px-6 relative">
        {isActive ? (
          <>
            {/* Caller avatar — ring color escalates with alert level */}
            <div className="relative mb-4">
              <div className={`relative w-[92px] h-[92px] rounded-full flex items-center justify-center overflow-hidden shadow-[0_20px_45px_rgba(15,23,42,0.38)] ${
                alertLevel === 'critical' ? 'ring-2 ring-alert/80 animate-pulse-alert' :
                alertLevel === 'high' ? 'ring-2 ring-warning/80' :
                'ring-2 ring-brand-teal/50'
              }`}>
                <div className="absolute inset-0 bg-[radial-gradient(circle_at_30%_28%,rgba(255,255,255,0.28),transparent_28%),linear-gradient(160deg,#8892a6_0%,#646f84_45%,#3b4456_100%)]" />
                <div className="absolute inset-x-5 bottom-0 h-[44px] rounded-t-[28px] bg-white/18 blur-[1px]" />
                <div className="absolute left-1/2 top-[18px] h-[26px] w-[26px] -translate-x-1/2 rounded-full bg-white/28" />
                <div className="absolute left-1/2 top-[38px] h-[34px] w-[54px] -translate-x-1/2 rounded-t-[26px] bg-white/22" />
                <div className="absolute inset-0 bg-gradient-to-b from-white/5 via-transparent to-black/25" />
                <div className="absolute bottom-2 left-1/2 -translate-x-1/2 rounded-full bg-black/28 px-2 py-0.5 backdrop-blur-sm">
                  {/* 'CP' is the fallback when the name yields no initials */}
                  <span className="text-[10px] font-semibold tracking-[0.2em] text-white/88">{callerInitials || 'CP'}</span>
                </div>
              </div>

              <div className="absolute -top-1 -right-1 rounded-full border border-white/10 bg-black/45 p-1.5 shadow-lg backdrop-blur-md">
                <Camera className="h-3.5 w-3.5 text-white/75" />
              </div>

              {/* SentinelEdge protection badge */}
              <div className="absolute -bottom-1 -right-1 w-6 h-6 rounded-full bg-dark-card border-2 border-brand-teal flex items-center justify-center">
                <div className="w-2 h-2 rounded-full bg-brand-teal" />
              </div>
            </div>

            {/* Caller info */}
            <h2 className="text-lg font-semibold text-white text-center leading-tight">{callerName}</h2>
            <p className="text-xs text-gray-400 mt-0.5 font-mono">{callerNumber}</p>

            {/* Call timer */}
            <div className="mt-3 flex items-center gap-2">
              <div className={`w-1.5 h-1.5 rounded-full ${
                alertLevel === 'critical' ? 'bg-alert animate-pulse' :
                alertLevel === 'high' ? 'bg-warning animate-pulse' :
                'bg-safe animate-pulse'
              }`} />
              <span className="text-sm text-gray-300 font-mono tabular-nums">
                {formatDuration(duration)}
              </span>
            </div>

            {/* SentinelEdge status line */}
            <div className="mt-2 px-3 py-1 rounded-full bg-dark-card/80 border border-dark-border/50">
              <p className="text-[9px] text-gray-500 tracking-wider uppercase flex items-center gap-1">
                <span className={`inline-block w-1 h-1 rounded-full ${
                  alertLevel === 'critical' || alertLevel === 'high' ? 'bg-alert' : 'bg-safe'
                }`} />
                SentinelEdge Active
              </p>
            </div>

            {/* Hint shown only in live-microphone mode (matched by caller display name) */}
            {callerName === 'Live Microphone' && (
              <div className="mt-3 px-3 py-2 rounded-lg bg-brand-teal/10 border border-brand-teal/25">
                <p className="text-[10px] text-brand-teal text-center tracking-wide uppercase">
                  Speak now: live mic analysis in progress
                </p>
              </div>
            )}
          </>
        ) : (
          /* Idle state */
          <div className="text-center">
            <div className="w-20 h-20 rounded-full bg-dark-card flex items-center justify-center mx-auto mb-4 ring-1 ring-dark-border">
              <Phone className="w-10 h-10 text-gray-600" />
            </div>
            <h2 className="text-lg font-medium text-gray-500">No Active Call</h2>
            <p className="text-xs text-gray-600 mt-1">Select a sample call to begin</p>
          </div>
        )}
      </div>

      {/* Bottom action buttons */}
      {isActive && (
        <div className="pb-10 px-6">
          {/* Action row */}
          <div className="grid grid-cols-4 gap-4 mb-6">
            <button
              onClick={() => setIsMuted(!isMuted)}
              className="flex flex-col items-center gap-1"
            >
              <div className={`w-11 h-11 rounded-full flex items-center justify-center transition-colors ${
                isMuted ? 'bg-white text-black' : 'bg-dark-surface/80 text-white'
              }`}>
                {isMuted ? <MicOff className="w-5 h-5" /> : <Mic className="w-5 h-5" />}
              </div>
              <span className="text-[9px] text-gray-400">
                {isMuted ? 'unmute' : 'mute'}
              </span>
            </button>

            {/* keypad and contacts are decorative — no onClick handlers */}
            <button className="flex flex-col items-center gap-1">
              <div className="w-11 h-11 rounded-full bg-dark-surface/80 flex items-center justify-center text-white">
                <Grid3X3 className="w-5 h-5" />
              </div>
              <span className="text-[9px] text-gray-400">keypad</span>
            </button>

            <button
              onClick={() => setIsSpeaker(!isSpeaker)}
              className="flex flex-col items-center gap-1"
            >
              <div className={`w-11 h-11 rounded-full flex items-center justify-center transition-colors ${
                isSpeaker ? 'bg-white text-black' : 'bg-dark-surface/80 text-white'
              }`}>
                <Volume2 className="w-5 h-5" />
              </div>
              <span className="text-[9px] text-gray-400">speaker</span>
            </button>

            <button className="flex flex-col items-center gap-1">
              <div className="w-11 h-11 rounded-full bg-dark-surface/80 flex items-center justify-center text-white">
                <UserCircle2 className="w-5 h-5" />
              </div>
              <span className="text-[9px] text-gray-400">contacts</span>
            </button>
          </div>

          {/* End call button */}
          <div className="flex justify-center">
            <button
              onClick={onEndCall}
              className="w-16 h-16 rounded-full bg-alert flex items-center justify-center hover:bg-alert-dark transition-colors shadow-lg shadow-alert/30 active:scale-95"
            >
              <PhoneOff className="w-7 h-7 text-white rotate-[135deg]" />
            </button>
          </div>
        </div>
      )}
    </div>
  )
}
demo/frontend/src/components/DemoControls.tsx ADDED
@@ -0,0 +1,143 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { Play, Mic, MicOff, AlertTriangle, Phone, Shield } from 'lucide-react'
2
+
3
+ interface DemoControlsProps {
4
+ onSelectCall: (callId: string) => void
5
+ onToggleMic: () => void
6
+ onSelectMicDevice: (deviceValue: string) => void
7
+ isCallActive: boolean
8
+ isMicActive: boolean
9
+ selectedMicDevice: string
10
+ micDevices: Array<{ value: string; label: string }>
11
+ availableCalls: Array<{ id: string; description: string }>
12
+ }
13
+
14
// Per-call-type button icon and hover color classes for the sample-call buttons.
// Call ids without an entry fall back to a Play icon + teal hover (see DemoControls).
const callIcons: Record<string, { icon: React.ReactNode; color: string }> = {
  irs_scam: {
    icon: <AlertTriangle className="w-3.5 h-3.5" />,
    color: 'hover:bg-alert/10 hover:text-alert hover:border-alert/30',
  },
  tech_support: {
    icon: <AlertTriangle className="w-3.5 h-3.5" />,
    color: 'hover:bg-orange-500/10 hover:text-orange-400 hover:border-orange-500/30',
  },
  bank_fraud: {
    icon: <AlertTriangle className="w-3.5 h-3.5" />,
    color: 'hover:bg-warning/10 hover:text-warning hover:border-warning/30',
  },
  legitimate: {
    icon: <Shield className="w-3.5 h-3.5" />,
    color: 'hover:bg-safe/10 hover:text-safe hover:border-safe/30',
  },
}
+
33
/**
 * Top control bar for the Live Detection tab.
 *
 * Left side: one button per available sample call (disabled while a call is
 * active). Right side: a live-mic toggle with a helper caption and an input
 * device selector (both hidden while a call is active), plus a "Call Active"
 * pill shown during a call. All actions are delegated to the parent via the
 * on* callbacks.
 */
export default function DemoControls({
  onSelectCall,
  onToggleMic,
  onSelectMicDevice,
  isCallActive,
  isMicActive,
  selectedMicDevice,
  micDevices,
  availableCalls,
}: DemoControlsProps) {
  return (
    <div className="glass-card p-4 sm:p-5">
      <div className="flex flex-col gap-4 xl:flex-row xl:items-center xl:justify-between">
        {/* Left: sample calls */}
        <div className="flex flex-col gap-3 lg:flex-row lg:items-center">
          <div className="flex items-center gap-2 mr-2">
            <Phone className="w-4 h-4 text-brand-teal" />
            <span className="text-xs font-semibold text-gray-400 uppercase tracking-wider">
              Sample Calls
            </span>
          </div>
          <div className="flex flex-wrap gap-2">
            {availableCalls.map((call) => {
              // Fall back to a generic Play icon + teal hover for unknown call ids.
              const config = callIcons[call.id] || {
                icon: <Play className="w-3.5 h-3.5" />,
                color: 'hover:bg-brand-teal/10 hover:text-brand-teal hover:border-brand-teal/30',
              }
              return (
                <button
                  key={call.id}
                  onClick={() => onSelectCall(call.id)}
                  disabled={isCallActive}
                  className={`
                    control-button px-3 py-2 text-xs font-medium
                    ${isCallActive ? '' : config.color}
                  `}
                >
                  <Play className="w-3 h-3" />
                  {call.description}
                </button>
              )
            })}
          </div>
        </div>

        {/* Right: mic toggle */}
        <div className="flex items-center gap-3">
          <div className="h-6 w-px bg-dark-border/30" />
          <div className="flex flex-col items-end gap-1">
            <button
              onClick={onToggleMic}
              disabled={isCallActive}
              className={`
                flex items-center gap-2 px-4 py-2 rounded-lg text-xs font-medium
                transition-all duration-200 border
                ${isMicActive
                  ? 'bg-brand-teal/10 text-brand-teal border-brand-teal/30'
                  : isCallActive
                  ? 'opacity-40 cursor-not-allowed bg-dark-card text-gray-600 border-dark-border/50'
                  : 'bg-dark-card text-gray-300 border-dark-border/50 hover:bg-brand-teal/10 hover:text-brand-teal hover:border-brand-teal/30'
                }
              `}
            >
              {isMicActive ? (
                <Mic className="w-3.5 h-3.5" />
              ) : (
                <MicOff className="w-3.5 h-3.5" />
              )}
              Live Mic
            </button>

            {/* Helper caption + device picker are hidden while a call runs */}
            {!isCallActive && (
              <>
                <span className={`text-[10px] ${isMicActive ? 'text-brand-teal/80' : 'text-gray-500'}`}>
                  {isMicActive
                    ? 'Mic is on for your side of role-play'
                    : 'Scripted caller mode: backend plays sample scam call lines'}
                </span>

                <select
                  value={selectedMicDevice}
                  onChange={(e) => onSelectMicDevice(e.target.value)}
                  className="px-2 py-1 text-[10px] bg-dark-card text-gray-300 border border-dark-border/50 rounded-md min-w-[260px]"
                  disabled={isCallActive || micDevices.length === 0}
                >
                  {micDevices.length === 0 ? (
                    <option value="">No input devices found</option>
                  ) : (
                    micDevices.map((device) => (
                      <option key={device.value} value={device.value}>
                        {device.label}
                      </option>
                    ))
                  )}
                </select>
              </>
            )}
          </div>

          {/* Status indicator */}
          {isCallActive && (
            <div className="flex items-center gap-1.5 px-3 py-1.5 rounded-full bg-safe/10 border border-safe/20">
              <div className="w-1.5 h-1.5 rounded-full bg-safe animate-pulse" />
              <span className="text-[10px] text-safe font-medium">Call Active</span>
            </div>
          )}
        </div>
      </div>
    </div>
  )
}
demo/frontend/src/components/FeatureBreakdown.tsx ADDED
@@ -0,0 +1,165 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { BarChart3, Info } from 'lucide-react'
2
+ import type { CSSProperties } from 'react'
3
+
4
+ interface FeatureBreakdownProps {
5
+ features: Record<string, number>
6
+ }
7
+
8
// Tooltip copy shown for each fraud-signal feature in the breakdown panel.
// Feature keys not present here get a generic fallback sentence (see component).
const FEATURE_EXPLANATIONS: Record<string, string> = {
  authority_claim: 'The caller is presenting themselves as a trusted institution or official authority.',
  coercion: 'Language is pressuring the recipient into acting before they can think or verify.',
  credential_request: 'The conversation is attempting to obtain passwords, PINs, codes, or other secrets.',
  fear_induction: 'The caller is using fear to reduce skepticism and speed up compliance.',
  financial_terms: 'Money, payments, balances, or banking language is becoming central to the call.',
  gift_card_mention: 'Requesting gift cards is a strong scam indicator because they are hard to trace or reverse.',
  mfa_bypass: 'The caller is trying to capture one-time codes or verification steps meant to protect the user.',
  pii_request: 'The caller is asking for sensitive personal information that should not be shared casually.',
  remote_access: 'The caller is steering the user toward remote-control tools or device access.',
  specific_amount: 'Using a precise dollar amount can make a scam feel more legitimate and urgent.',
  specific_detail: 'Specific details are being used to create a false sense of credibility.',
  tech_jargon: 'Technical language may be used to intimidate or confuse the recipient.',
  threat_language: 'Threats of punishment, loss, or legal action are a major fraud signal.',
  time_pressure: 'The caller is creating a countdown or deadline to force a rushed decision.',
  trust_request: 'The caller is explicitly pushing the recipient to trust them or follow instructions without verification.',
  unusual_payment: 'The requested payment method is uncommon for legitimate businesses or institutions.',
  urgency: 'The tone suggests immediate action is required, which is common in scam escalation.',
}
27
+
28
/**
 * Map a feature value in [0, 1] to its severity label and color set
 * (bar color, glow, and track background), from Low (teal) to Critical (red).
 */
function getSeverity(value: number) {
  if (value >= 0.75) {
    return {
      label: 'Critical',
      color: '#F87171',
      glow: 'rgba(248, 113, 113, 0.22)',
      track: 'rgba(248, 113, 113, 0.12)',
    }
  }
  if (value >= 0.5) {
    return {
      label: 'High',
      color: '#FB923C',
      glow: 'rgba(251, 146, 60, 0.2)',
      track: 'rgba(251, 146, 60, 0.12)',
    }
  }
  if (value >= 0.3) {
    return {
      label: 'Medium',
      color: '#FBBF24',
      glow: 'rgba(251, 191, 36, 0.18)',
      track: 'rgba(251, 191, 36, 0.12)',
    }
  }
  return {
    label: 'Low',
    color: '#14B8A6',
    glow: 'rgba(20, 184, 166, 0.16)',
    track: 'rgba(20, 184, 166, 0.12)',
  }
}
60
+
61
/** Turn a snake_case feature key into a Title Case display label. */
function formatFeatureName(name: string): string {
  const spaced = name.replace(/_/g, ' ')
  return spaced.replace(/\b\w/g, ch => ch.toUpperCase())
}
66
+
67
/**
 * "Feature Breakdown" panel: horizontal bars for the top fraud-signal features.
 *
 * Only features with a value > 0 are shown, sorted descending and capped at 8.
 * Each row gets a severity-colored bar (colors via CSS custom properties), an
 * info tooltip with explanatory copy, and a staggered entrance animation.
 * A legend footer appears whenever at least one feature is active.
 */
export default function FeatureBreakdown({ features }: FeatureBreakdownProps) {
  // Top 8 non-zero features, strongest first.
  const sortedFeatures = Object.entries(features)
    .filter(([, value]) => value > 0)
    .sort(([, a], [, b]) => b - a)
    .slice(0, 8)

  return (
    <div>
      <div className="mb-4 flex items-center gap-2">
        <BarChart3 className="w-4 h-4 text-brand-teal" />
        <h3 className="text-sm font-semibold text-gray-200">Feature Breakdown</h3>
      </div>

      <div className="space-y-2.5">
        {sortedFeatures.length === 0 ? (
          <div className="flex items-center justify-center py-12 text-gray-600">
            <div className="text-center">
              <BarChart3 className="w-8 h-8 mx-auto mb-2 opacity-30" />
              <p className="text-sm">No features detected</p>
              <p className="text-xs mt-1 opacity-60">Analysis data appears during calls</p>
            </div>
          </div>
        ) : (
          sortedFeatures.map(([name, value], index) => {
            const severity = getSeverity(value)
            const description = FEATURE_EXPLANATIONS[name] ?? 'This signal contributes to the overall fraud risk score.'

            return (
              <div
                key={name}
                className="feature-bar-container group relative"
                // Severity colors are passed to CSS via custom properties;
                // animationDelay staggers each row's entrance by 70ms.
                style={
                  {
                    '--feature-color': severity.color,
                    '--feature-glow': severity.glow,
                    '--feature-track': severity.track,
                    animationDelay: `${index * 70}ms`,
                  } as CSSProperties
                }
              >
                <div className="mb-1 flex items-center justify-between gap-3">
                  <div className="flex min-w-0 items-center gap-2">
                    <span className="truncate text-xs text-gray-400 transition-colors duration-300 group-hover:text-gray-100">
                      {formatFeatureName(name)}
                    </span>
                    <div className="feature-tooltip-trigger relative flex-shrink-0">
                      <Info className="h-3.5 w-3.5 text-gray-600 transition-colors duration-300 group-hover:text-gray-300" />
                      <div className="feature-tooltip pointer-events-none absolute left-1/2 top-[calc(100%+10px)] z-20 w-56 -translate-x-1/2 rounded-xl border border-white/10 bg-slate-950/95 p-3 text-[11px] leading-relaxed text-gray-200 shadow-[0_18px_45px_rgba(2,6,23,0.5)] backdrop-blur-md">
                        <p className="font-semibold text-white">{formatFeatureName(name)}</p>
                        <p className="mt-1 text-gray-300">{description}</p>
                        <div className="mt-2 flex items-center gap-1.5 text-[10px] uppercase tracking-[0.18em] text-gray-500">
                          <span className="h-1.5 w-1.5 rounded-full" style={{ backgroundColor: severity.color }} />
                          {severity.label} Signal
                        </div>
                      </div>
                    </div>
                  </div>
                  <span className="font-mono text-xs tabular-nums text-gray-500">
                    {(value * 100).toFixed(0)}%
                  </span>
                </div>

                {/* Bar width floors at 2% so tiny values remain visible */}
                <div className="feature-track h-2 overflow-hidden rounded-full bg-dark-bg">
                  <div
                    className="feature-bar h-full rounded-full"
                    style={{ width: `${Math.max(value * 100, 2)}%` }}
                  />
                </div>
              </div>
            )
          })
        )}
      </div>

      {sortedFeatures.length > 0 && (
        <div className="mt-4 border-t border-dark-border/30 pt-3">
          <div className="flex items-center justify-between">
            <span className="text-[10px] text-gray-600">
              {sortedFeatures.length} active indicator{sortedFeatures.length !== 1 ? 's' : ''}
            </span>
            <div className="flex gap-2">
              {[
                { label: 'Low', color: '#14B8A6' },
                { label: 'Med', color: '#FBBF24' },
                { label: 'High', color: '#FB923C' },
                { label: 'Crit', color: '#F87171' },
              ].map(item => (
                <div key={item.label} className="flex items-center gap-1">
                  <div className="h-1.5 w-1.5 rounded-full" style={{ backgroundColor: item.color }} />
                  <span className="text-[9px] text-gray-600">{item.label}</span>
                </div>
              ))}
            </div>
          </div>
        </div>
      )}
    </div>
  )
}
demo/frontend/src/components/FederatedDashboard.tsx ADDED
@@ -0,0 +1,587 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { useState, useCallback, useEffect, useRef } from 'react'
2
+ import {
3
+ BarChart,
4
+ Bar,
5
+ XAxis,
6
+ YAxis,
7
+ CartesianGrid,
8
+ Tooltip,
9
+ ResponsiveContainer,
10
+ Legend,
11
+ Area,
12
+ AreaChart,
13
+ } from 'recharts'
14
+ import {
15
+ GitBranch,
16
+ Cpu,
17
+ Shield,
18
+ Lock,
19
+ Play,
20
+ RefreshCw,
21
+ TrendingUp,
22
+ Users,
23
+ Database,
24
+ Smartphone,
25
+ WifiOff,
26
+ } from 'lucide-react'
27
+
28
+ // -------- Mock data generators (used as fallback when hub is offline) --------
29
+ function generateRoundData(numRounds: number) {
30
+ const data = []
31
+ let accuracy = 0.62
32
+ let f1 = 0.55
33
+ let loss = 0.82
34
+
35
+ for (let i = 1; i <= numRounds; i++) {
36
+ const accGain = (0.98 - accuracy) * 0.08 + (Math.random() - 0.5) * 0.015
37
+ const f1Gain = (0.95 - f1) * 0.09 + (Math.random() - 0.5) * 0.02
38
+ const lossDecay = loss * 0.06 + (Math.random() - 0.5) * 0.02
39
+
40
+ accuracy = Math.min(0.98, accuracy + accGain)
41
+ f1 = Math.min(0.95, f1 + f1Gain)
42
+ loss = Math.max(0.05, loss - lossDecay)
43
+
44
+ data.push({
45
+ round: i,
46
+ accuracy: Number(accuracy.toFixed(4)),
47
+ f1: Number(f1.toFixed(4)),
48
+ loss: Number(loss.toFixed(4)),
49
+ })
50
+ }
51
+ return data
52
+ }
53
+
54
+ function generateDeviceData() {
55
+ const devices = [
56
+ { name: 'Device A', samples: 342, contribution: 0.23 },
57
+ { name: 'Device B', samples: 287, contribution: 0.19 },
58
+ { name: 'Device C', samples: 198, contribution: 0.14 },
59
+ { name: 'Device D', samples: 456, contribution: 0.31 },
60
+ { name: 'Device E', samples: 167, contribution: 0.13 },
61
+ ]
62
+ return devices
63
+ }
64
+
65
+ // Generate a random gradient delta for simulated FL submissions
66
+ function generateGradientDelta(size = 64): number[] {
67
+ return Array.from({ length: size }, () =>
68
+ Number(((Math.random() - 0.5) * 0.01).toFixed(6))
69
+ )
70
+ }
71
+
72
// Recharts tooltip renderer: shows the round number plus each active series
// value formatted as a percentage, colored to match its line.
const CustomTooltip = ({ active, payload, label }: any) => {
  // Recharts calls this even when no data point is hovered; bail out early.
  if (!active || !payload || payload.length === 0) return null

  return (
    <div className="bg-dark-card border border-dark-border rounded-lg px-3 py-2 shadow-xl">
      <p className="text-xs text-gray-400 mb-1">Round {label}</p>
      {payload.map((entry: any, idx: number) => (
        <p key={idx} className="text-xs font-mono" style={{ color: entry.color }}>
          {entry.name}: {(entry.value * 100).toFixed(1)}%
        </p>
      ))}
    </div>
  )
}
87
+
88
+ export default function FederatedDashboard() {
89
+ const [roundData, setRoundData] = useState(() => generateRoundData(20))
90
+ const [deviceData, setDeviceData] = useState(() => generateDeviceData())
91
+ const [isSimulating, setIsSimulating] = useState(false)
92
+ const [currentRound, setCurrentRound] = useState(20)
93
+ const [hubConnected, setHubConnected] = useState(false)
94
+ const [totalSamples, setTotalSamples] = useState(1450)
95
+ const [privacyBudget, setPrivacyBudget] = useState('epsilon=1.0')
96
+
97
+ const pollIntervalRef = useRef<ReturnType<typeof setInterval> | null>(null)
98
+ const isMountedRef = useRef(true)
99
+
100
+ // Fetch global metrics from the hub API
101
+ const fetchGlobalMetrics = useCallback(async () => {
102
+ try {
103
+ const response = await fetch(`${import.meta.env.DEV ? 'http://localhost:8080' : '/hub'}/v1/metrics/global`, {
104
+ signal: AbortSignal.timeout(3000),
105
+ })
106
+ if (!response.ok) throw new Error(`HTTP ${response.status}`)
107
+ const data = await response.json()
108
+
109
+ if (!isMountedRef.current) return
110
+
111
+ // Map hub API response to our local state
112
+ if (data.accuracy_history && Array.isArray(data.accuracy_history)) {
113
+ const mapped = data.accuracy_history.map((entry: any, i: number) => ({
114
+ round: entry.round ?? i + 1,
115
+ accuracy: entry.accuracy ?? 0,
116
+ f1: entry.f1 ?? 0,
117
+ loss: entry.loss ?? 0,
118
+ }))
119
+ if (mapped.length > 0) {
120
+ setRoundData(mapped)
121
+ setCurrentRound(mapped.length)
122
+ }
123
+ }
124
+ if (data.device_count != null) {
125
+ // Update device count visually -- keep existing device data shape
126
+ const count = Number(data.device_count)
127
+ if (count > 0 && count !== deviceData.length) {
128
+ const generated = Array.from({ length: count }, (_, i) => ({
129
+ name: `Device ${String.fromCharCode(65 + i)}`,
130
+ samples: Math.floor(Math.random() * 400) + 100,
131
+ contribution: Number((1 / count).toFixed(2)),
132
+ }))
133
+ setDeviceData(generated)
134
+ }
135
+ }
136
+ if (data.round_count != null) {
137
+ setCurrentRound(Number(data.round_count))
138
+ }
139
+ if (data.total_samples != null) {
140
+ setTotalSamples(Number(data.total_samples))
141
+ }
142
+ if (data.privacy_budget != null) {
143
+ setPrivacyBudget(String(data.privacy_budget))
144
+ }
145
+
146
+ setHubConnected(true)
147
+ } catch {
148
+ if (isMountedRef.current) {
149
+ setHubConnected(false)
150
+ }
151
+ }
152
+ }, [deviceData.length])
153
+
154
+ // Fetch round status (current round progress)
155
+ const fetchRoundStatus = useCallback(async () => {
156
+ try {
157
+ const response = await fetch(`${import.meta.env.DEV ? 'http://localhost:8080' : '/hub'}/v1/round/status`, {
158
+ signal: AbortSignal.timeout(3000),
159
+ })
160
+ if (!response.ok) throw new Error(`HTTP ${response.status}`)
161
+ const data = await response.json()
162
+
163
+ if (!isMountedRef.current) return
164
+
165
+ if (data.current_round != null) {
166
+ setCurrentRound(Number(data.current_round))
167
+ }
168
+ if (data.is_aggregating != null) {
169
+ setIsSimulating(Boolean(data.is_aggregating))
170
+ }
171
+ } catch {
172
+ // Silently fail -- global metrics fetch already handles connection status
173
+ }
174
+ }, [])
175
+
176
+ // Poll hub API every 5 seconds while tab is active
177
+ useEffect(() => {
178
+ isMountedRef.current = true
179
+
180
+ // Initial fetch
181
+ fetchGlobalMetrics()
182
+ fetchRoundStatus()
183
+
184
+ const startPolling = () => {
185
+ if (pollIntervalRef.current) clearInterval(pollIntervalRef.current)
186
+ pollIntervalRef.current = setInterval(() => {
187
+ fetchGlobalMetrics()
188
+ fetchRoundStatus()
189
+ }, 5000)
190
+ }
191
+
192
+ const stopPolling = () => {
193
+ if (pollIntervalRef.current) {
194
+ clearInterval(pollIntervalRef.current)
195
+ pollIntervalRef.current = null
196
+ }
197
+ }
198
+
199
+ const handleVisibilityChange = () => {
200
+ if (document.hidden) {
201
+ stopPolling()
202
+ } else {
203
+ fetchGlobalMetrics()
204
+ fetchRoundStatus()
205
+ startPolling()
206
+ }
207
+ }
208
+
209
+ startPolling()
210
+ document.addEventListener('visibilitychange', handleVisibilityChange)
211
+
212
+ return () => {
213
+ isMountedRef.current = false
214
+ stopPolling()
215
+ document.removeEventListener('visibilitychange', handleVisibilityChange)
216
+ }
217
+ }, [fetchGlobalMetrics, fetchRoundStatus])
218
+
219
+ const latestMetrics = roundData[roundData.length - 1]
220
+
221
+ // Run FL Round: POST to hub if connected, else simulate locally
222
+ const runSimulationRound = useCallback(async () => {
223
+ setIsSimulating(true)
224
+
225
+ if (hubConnected) {
226
+ try {
227
+ const payload = {
228
+ device_id: `device_${Math.random().toString(36).slice(2, 8)}`,
229
+ gradient_delta: generateGradientDelta(),
230
+ num_samples: Math.floor(Math.random() * 200) + 50,
231
+ round: currentRound + 1,
232
+ }
233
+ const response = await fetch(`${import.meta.env.DEV ? 'http://localhost:8080' : '/hub'}/v1/federated/submit`, {
234
+ method: 'POST',
235
+ headers: { 'Content-Type': 'application/json' },
236
+ body: JSON.stringify(payload),
237
+ signal: AbortSignal.timeout(5000),
238
+ })
239
+
240
+ if (response.ok) {
241
+ // After successful submit, re-fetch to pick up the new round data
242
+ await fetchGlobalMetrics()
243
+ await fetchRoundStatus()
244
+ setIsSimulating(false)
245
+ return
246
+ }
247
+ } catch {
248
+ // Hub became unreachable during the request -- fall through to local simulation
249
+ setHubConnected(false)
250
+ }
251
+ }
252
+
253
+ // Fallback: local simulation
254
+ const timer = setTimeout(() => {
255
+ setRoundData(prev => {
256
+ const last = prev[prev.length - 1]
257
+ const newRound = {
258
+ round: last.round + 1,
259
+ accuracy: Number(Math.min(0.98, last.accuracy + (0.98 - last.accuracy) * 0.08 + (Math.random() - 0.5) * 0.01).toFixed(4)),
260
+ f1: Number(Math.min(0.95, last.f1 + (0.95 - last.f1) * 0.09 + (Math.random() - 0.5) * 0.015).toFixed(4)),
261
+ loss: Number(Math.max(0.05, last.loss - last.loss * 0.06 + (Math.random() - 0.5) * 0.01).toFixed(4)),
262
+ }
263
+ return [...prev, newRound]
264
+ })
265
+ setCurrentRound(prev => prev + 1)
266
+ setIsSimulating(false)
267
+ }, 1500)
268
+
269
+ return () => clearTimeout(timer)
270
+ }, [hubConnected, currentRound, fetchGlobalMetrics, fetchRoundStatus])
271
+
272
+ const stats = [
273
+ {
274
+ label: 'Model Version',
275
+ value: `v${currentRound}`,
276
+ icon: <Cpu className="w-4 h-4" />,
277
+ color: 'text-brand-teal',
278
+ bgColor: 'bg-brand-teal/10',
279
+ },
280
+ {
281
+ label: 'Active Devices',
282
+ value: String(deviceData.length),
283
+ icon: <Users className="w-4 h-4" />,
284
+ color: 'text-blue-400',
285
+ bgColor: 'bg-blue-400/10',
286
+ },
287
+ {
288
+ label: 'Total Samples',
289
+ value: totalSamples.toLocaleString(),
290
+ icon: <Database className="w-4 h-4" />,
291
+ color: 'text-purple-400',
292
+ bgColor: 'bg-purple-400/10',
293
+ },
294
+ {
295
+ label: 'Privacy Budget',
296
+ value: privacyBudget,
297
+ icon: <Lock className="w-4 h-4" />,
298
+ color: 'text-safe',
299
+ bgColor: 'bg-safe/10',
300
+ },
301
+ {
302
+ label: 'Accuracy',
303
+ value: `${(latestMetrics.accuracy * 100).toFixed(1)}%`,
304
+ icon: <TrendingUp className="w-4 h-4" />,
305
+ color: 'text-brand-teal',
306
+ bgColor: 'bg-brand-teal/10',
307
+ },
308
+ {
309
+ label: 'F1 Score',
310
+ value: `${(latestMetrics.f1 * 100).toFixed(1)}%`,
311
+ icon: <Shield className="w-4 h-4" />,
312
+ color: 'text-warning',
313
+ bgColor: 'bg-warning/10',
314
+ },
315
+ ]
316
+
317
+ return (
318
+ <div className="space-y-6">
319
+ {/* Offline banner */}
320
+ {!hubConnected && (
321
+ <div className="flex items-center gap-3 px-4 py-3 rounded-xl bg-warning/10 border border-warning/20">
322
+ <WifiOff className="w-4 h-4 text-warning flex-shrink-0" />
323
+ <p className="text-xs text-warning font-medium">
324
+ Hub offline &mdash; showing simulated data
325
+ </p>
326
+ </div>
327
+ )}
328
+
329
+ {/* Header with action */}
330
+ <div className="flex flex-col gap-4 lg:flex-row lg:items-center lg:justify-between">
331
+ <div className="flex items-center gap-3">
332
+ <div className="w-10 h-10 rounded-xl bg-brand-teal/10 flex items-center justify-center">
333
+ <GitBranch className="w-5 h-5 text-brand-teal" />
334
+ </div>
335
+ <div>
336
+ <div className="flex items-center gap-2">
337
+ <h2 className="text-lg font-bold text-white">Federated Learning Dashboard</h2>
338
+ {/* Connection status indicator */}
339
+ <span
340
+ className={`inline-block w-2.5 h-2.5 rounded-full ${
341
+ hubConnected ? 'bg-safe animate-pulse' : 'bg-gray-500'
342
+ }`}
343
+ title={hubConnected ? 'Hub connected' : 'Hub offline'}
344
+ />
345
+ </div>
346
+ <p className="text-xs text-gray-500">Model training across distributed edge devices</p>
347
+ </div>
348
+ </div>
349
+ <button
350
+ onClick={runSimulationRound}
351
+ disabled={isSimulating}
352
+ className={`
353
+ control-button px-4 py-2 text-sm font-medium
354
+ ${isSimulating
355
+ ? 'bg-brand-teal/20 text-brand-teal/50 border-brand-teal/20'
356
+ : 'border-brand-teal bg-brand-teal text-white hover:bg-brand-teal-light hover:text-white shadow-lg shadow-brand-teal/20'
357
+ }
358
+ `}
359
+ >
360
+ {isSimulating ? (
361
+ <RefreshCw className="w-4 h-4 animate-spin" />
362
+ ) : (
363
+ <Play className="w-4 h-4" />
364
+ )}
365
+ {isSimulating ? 'Aggregating...' : 'Run FL Round'}
366
+ </button>
367
+ </div>
368
+
369
+ {/* Stats cards */}
370
+ <div className="grid grid-cols-2 gap-4 md:grid-cols-3 xl:grid-cols-6">
371
+ {stats.map((stat) => (
372
+ <div key={stat.label} className="glass-card p-4">
373
+ <div className="flex items-center gap-2 mb-2">
374
+ <div className={`w-7 h-7 rounded-lg ${stat.bgColor} flex items-center justify-center ${stat.color}`}>
375
+ {stat.icon}
376
+ </div>
377
+ </div>
378
+ <p className={`text-lg font-bold ${stat.color} tabular-nums`}>{stat.value}</p>
379
+ <p className="text-[10px] text-gray-500 mt-0.5">{stat.label}</p>
380
+ </div>
381
+ ))}
382
+ </div>
383
+
384
+ {/* Charts */}
385
+ <div className="grid grid-cols-1 gap-6 xl:grid-cols-2">
386
+ {/* Accuracy & F1 over rounds */}
387
+ <div className="glass-card p-6">
388
+ <h3 className="text-sm font-semibold text-gray-200 mb-4 flex items-center gap-2">
389
+ <TrendingUp className="w-4 h-4 text-brand-teal" />
390
+ Model Performance Over Rounds
391
+ </h3>
392
+ <ResponsiveContainer width="100%" height={280}>
393
+ <AreaChart data={roundData}>
394
+ <defs>
395
+ <linearGradient id="gradAccuracy" x1="0" y1="0" x2="0" y2="1">
396
+ <stop offset="5%" stopColor="#0D9488" stopOpacity={0.3} />
397
+ <stop offset="95%" stopColor="#0D9488" stopOpacity={0} />
398
+ </linearGradient>
399
+ <linearGradient id="gradF1" x1="0" y1="0" x2="0" y2="1">
400
+ <stop offset="5%" stopColor="#F59E0B" stopOpacity={0.3} />
401
+ <stop offset="95%" stopColor="#F59E0B" stopOpacity={0} />
402
+ </linearGradient>
403
+ </defs>
404
+ <CartesianGrid strokeDasharray="3 3" stroke="#1E293B" />
405
+ <XAxis
406
+ dataKey="round"
407
+ tick={{ fontSize: 10, fill: '#64748B' }}
408
+ axisLine={{ stroke: '#334155' }}
409
+ tickLine={false}
410
+ />
411
+ <YAxis
412
+ domain={[0.5, 1]}
413
+ tick={{ fontSize: 10, fill: '#64748B' }}
414
+ axisLine={{ stroke: '#334155' }}
415
+ tickLine={false}
416
+ tickFormatter={(v: number) => `${(v * 100).toFixed(0)}%`}
417
+ />
418
+ <Tooltip content={<CustomTooltip />} />
419
+ <Legend
420
+ wrapperStyle={{ fontSize: '11px', paddingTop: '8px' }}
421
+ />
422
+ <Area
423
+ type="monotone"
424
+ dataKey="accuracy"
425
+ name="Accuracy"
426
+ stroke="#0D9488"
427
+ strokeWidth={2}
428
+ fill="url(#gradAccuracy)"
429
+ dot={false}
430
+ activeDot={{ r: 4, strokeWidth: 0, fill: '#0D9488' }}
431
+ />
432
+ <Area
433
+ type="monotone"
434
+ dataKey="f1"
435
+ name="F1 Score"
436
+ stroke="#F59E0B"
437
+ strokeWidth={2}
438
+ fill="url(#gradF1)"
439
+ dot={false}
440
+ activeDot={{ r: 4, strokeWidth: 0, fill: '#F59E0B' }}
441
+ />
442
+ </AreaChart>
443
+ </ResponsiveContainer>
444
+ </div>
445
+
446
+ {/* Loss over rounds */}
447
+ <div className="glass-card p-6">
448
+ <h3 className="text-sm font-semibold text-gray-200 mb-4 flex items-center gap-2">
449
+ <TrendingUp className="w-4 h-4 text-alert" />
450
+ Training Loss
451
+ </h3>
452
+ <ResponsiveContainer width="100%" height={280}>
453
+ <AreaChart data={roundData}>
454
+ <defs>
455
+ <linearGradient id="gradLoss" x1="0" y1="0" x2="0" y2="1">
456
+ <stop offset="5%" stopColor="#EF4444" stopOpacity={0.3} />
457
+ <stop offset="95%" stopColor="#EF4444" stopOpacity={0} />
458
+ </linearGradient>
459
+ </defs>
460
+ <CartesianGrid strokeDasharray="3 3" stroke="#1E293B" />
461
+ <XAxis
462
+ dataKey="round"
463
+ tick={{ fontSize: 10, fill: '#64748B' }}
464
+ axisLine={{ stroke: '#334155' }}
465
+ tickLine={false}
466
+ />
467
+ <YAxis
468
+ tick={{ fontSize: 10, fill: '#64748B' }}
469
+ axisLine={{ stroke: '#334155' }}
470
+ tickLine={false}
471
+ tickFormatter={(v: number) => v.toFixed(2)}
472
+ />
473
+ <Tooltip content={<CustomTooltip />} />
474
+ <Area
475
+ type="monotone"
476
+ dataKey="loss"
477
+ name="Loss"
478
+ stroke="#EF4444"
479
+ strokeWidth={2}
480
+ fill="url(#gradLoss)"
481
+ dot={false}
482
+ activeDot={{ r: 4, strokeWidth: 0, fill: '#EF4444' }}
483
+ />
484
+ </AreaChart>
485
+ </ResponsiveContainer>
486
+ </div>
487
+ </div>
488
+
489
+ {/* Device contributions */}
490
+ <div className="glass-card p-6">
491
+ <h3 className="text-sm font-semibold text-gray-200 mb-4 flex items-center gap-2">
492
+ <Users className="w-4 h-4 text-brand-teal" />
493
+ Per-Device Sample Contributions
494
+ </h3>
495
+ <div className="grid grid-cols-1 gap-6 xl:grid-cols-2">
496
+ <ResponsiveContainer width="100%" height={200}>
497
+ <BarChart data={deviceData} layout="vertical">
498
+ <CartesianGrid strokeDasharray="3 3" stroke="#1E293B" horizontal={false} />
499
+ <XAxis
500
+ type="number"
501
+ tick={{ fontSize: 10, fill: '#64748B' }}
502
+ axisLine={{ stroke: '#334155' }}
503
+ tickLine={false}
504
+ />
505
+ <YAxis
506
+ dataKey="name"
507
+ type="category"
508
+ tick={{ fontSize: 11, fill: '#94A3B8' }}
509
+ axisLine={{ stroke: '#334155' }}
510
+ tickLine={false}
511
+ width={70}
512
+ />
513
+ <Tooltip
514
+ contentStyle={{
515
+ backgroundColor: '#1E293B',
516
+ border: '1px solid #334155',
517
+ borderRadius: '8px',
518
+ fontSize: '12px',
519
+ }}
520
+ />
521
+ <Bar
522
+ dataKey="samples"
523
+ name="Samples"
524
+ fill="#0D9488"
525
+ radius={[0, 4, 4, 0]}
526
+ barSize={20}
527
+ />
528
+ </BarChart>
529
+ </ResponsiveContainer>
530
+
531
+ {/* Device details table */}
532
+ <div>
533
+ <table className="w-full">
534
+ <thead>
535
+ <tr className="border-b border-dark-border/30">
536
+ <th className="text-left text-[10px] text-gray-500 font-medium pb-2 uppercase tracking-wider">Device</th>
537
+ <th className="text-right text-[10px] text-gray-500 font-medium pb-2 uppercase tracking-wider">Samples</th>
538
+ <th className="text-right text-[10px] text-gray-500 font-medium pb-2 uppercase tracking-wider">Contribution</th>
539
+ <th className="text-right text-[10px] text-gray-500 font-medium pb-2 uppercase tracking-wider">Status</th>
540
+ </tr>
541
+ </thead>
542
+ <tbody>
543
+ {deviceData.map((device) => (
544
+ <tr key={device.name} className="border-b border-dark-border/10">
545
+ <td className="py-2.5 text-sm text-gray-300">{device.name}</td>
546
+ <td className="py-2.5 text-sm text-gray-400 text-right font-mono tabular-nums">{device.samples}</td>
547
+ <td className="py-2.5 text-sm text-brand-teal text-right font-mono tabular-nums">
548
+ {(device.contribution * 100).toFixed(0)}%
549
+ </td>
550
+ <td className="py-2.5 text-right">
551
+ <span className="inline-flex items-center gap-1 text-[10px] px-2 py-0.5 rounded-full bg-safe/10 text-safe">
552
+ <div className="w-1 h-1 rounded-full bg-safe animate-pulse" />
553
+ Active
554
+ </span>
555
+ </td>
556
+ </tr>
557
+ ))}
558
+ </tbody>
559
+ </table>
560
+ </div>
561
+ </div>
562
+ </div>
563
+
564
+ {/* FL Process explanation */}
565
+ <div className="glass-card p-6">
566
+ <h3 className="text-sm font-semibold text-gray-200 mb-4">How Federated Learning Works</h3>
567
+ <div className="grid grid-cols-1 gap-4 sm:grid-cols-2 xl:grid-cols-4">
568
+ {[
569
+ { step: '1', title: 'Local Training', desc: 'Each device trains on its own call data', icon: <Smartphone className="w-5 h-5" />, color: 'text-brand-teal' },
570
+ { step: '2', title: 'Add DP Noise', desc: 'Differential privacy noise added to gradients', icon: <Lock className="w-5 h-5" />, color: 'text-safe' },
571
+ { step: '3', title: 'Aggregate', desc: 'Hub averages noisy gradients from all devices', icon: <GitBranch className="w-5 h-5" />, color: 'text-warning' },
572
+ { step: '4', title: 'Update Model', desc: 'Improved model pushed back to all devices', icon: <RefreshCw className="w-5 h-5" />, color: 'text-purple-400' },
573
+ ].map((item) => (
574
+ <div key={item.step} className="text-center">
575
+ <div className={`w-12 h-12 rounded-xl bg-dark-bg mx-auto mb-3 flex items-center justify-center ${item.color}`}>
576
+ {item.icon}
577
+ </div>
578
+ <div className="text-[10px] text-gray-600 mb-1">Step {item.step}</div>
579
+ <h4 className="text-xs font-semibold text-gray-300 mb-1">{item.title}</h4>
580
+ <p className="text-[10px] text-gray-500 leading-relaxed">{item.desc}</p>
581
+ </div>
582
+ ))}
583
+ </div>
584
+ </div>
585
+ </div>
586
+ )
587
+ }
demo/frontend/src/components/FraudAlert.tsx ADDED
@@ -0,0 +1,144 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { ShieldAlert, AlertTriangle, Info, ShieldX } from 'lucide-react'
2
+
3
// Props for the in-call fraud alert UI.
interface FraudAlertProps {
  // Drives which visual treatment is shown: banner (medium) or full overlay.
  riskLevel: 'medium' | 'high' | 'critical'
  // Fraud confidence in [0, 1]; rendered as a percentage badge.
  score: number
  // Human-readable fraud indicators; only the first few are displayed.
  reasons: string[]
  // Invoked when the user taps "Block Caller" (high/critical overlay only).
  onBlock: () => void
  // Invoked when the user dismisses the overlay.
  onDismiss: () => void
}

/**
 * Fraud alert shown inside the phone simulator during a flagged call.
 *
 * Medium risk renders a compact banner above the call UI; high and critical
 * risk render a full-screen overlay with Block/Dismiss actions.
 */
export default function FraudAlert({
  riskLevel,
  score,
  reasons,
  onBlock,
  onDismiss,
}: FraudAlertProps) {
  // Per-risk-level styling and copy. Keyed by riskLevel.
  const configs = {
    critical: {
      bg: 'bg-gradient-to-b from-alert/95 to-alert-dark/95',
      border: 'border-alert-light',
      icon: <ShieldX className="w-10 h-10 text-white drop-shadow-lg" />,
      title: 'FRAUD DETECTED',
      subtitle: 'This call exhibits strong fraud indicators',
      containerClass: 'alert-overlay alert-overlay-critical',
      badgeClass: 'bg-white/20 text-white',
    },
    high: {
      bg: 'bg-gradient-to-b from-warning/95 to-amber-700/95',
      border: 'border-warning-light',
      icon: <AlertTriangle className="w-10 h-10 text-white drop-shadow-lg" />,
      title: 'HIGH RISK CALL',
      subtitle: 'Suspicious patterns detected',
      containerClass: 'alert-overlay',
      badgeClass: 'bg-white/20 text-white',
    },
    medium: {
      bg: 'bg-gradient-to-b from-dark-card/98 to-dark-surface/98',
      border: 'border-warning/50',
      icon: <Info className="w-6 h-6 text-warning" />,
      title: 'Caution',
      subtitle: 'Some suspicious indicators present',
      containerClass: 'alert-banner',
      badgeClass: 'bg-warning/20 text-warning',
    },
  }

  const config = configs[riskLevel]
  const percentage = Math.round(score * 100)

  // Medium risk: compact non-blocking banner (no action buttons).
  if (riskLevel === 'medium') {
    return (
      <div className={`mx-3 mt-2 ${config.containerClass}`}>
        <div className={`${config.bg} alert-banner-card backdrop-blur-xl rounded-xl border ${config.border} p-3 shadow-[0_18px_45px_rgba(15,23,42,0.4)]`}>
          <div className="flex items-center gap-3">
            {config.icon}
            <div className="flex-1">
              <p className="text-xs font-semibold text-warning">{config.title}</p>
              <p className="text-[10px] text-gray-400">{config.subtitle}</p>
            </div>
            <div className={`px-2 py-0.5 rounded-full ${config.badgeClass} text-[10px] font-bold`}>
              {percentage}%
            </div>
          </div>
          {reasons.length > 0 && (
            <div className="mt-2 flex flex-wrap gap-1">
              {reasons.slice(0, 3).map((reason, i) => (
                <span key={i} className="text-[9px] px-1.5 py-0.5 bg-warning/10 text-warning/80 rounded-full">
                  {reason}
                </span>
              ))}
            </div>
          )}
        </div>
      </div>
    )
  }

  // Full overlay for high/critical
  return (
    <div className={`w-full h-full ${config.containerClass}`}>
      <div className={`alert-overlay-panel w-full h-full ${config.bg} backdrop-blur-xl border-y ${config.border} flex flex-col items-center justify-center px-6`}>
        {/* Risk icon (critical adds a pulsing ring) */}
        <div className="relative mb-4">
          <div className="w-20 h-20 rounded-full bg-white/10 flex items-center justify-center">
            {config.icon}
          </div>
          {riskLevel === 'critical' && (
            <div className="absolute inset-0 w-20 h-20 rounded-full border-2 border-white/30 animate-ping" />
          )}
        </div>

        {/* Title */}
        <h3 className="text-xl font-bold text-white tracking-wide mb-1">
          {config.title}
        </h3>
        <p className="text-xs text-white/70 mb-3 text-center">
          {config.subtitle}
        </p>

        {/* Confidence score */}
        <div className={`${config.badgeClass} px-4 py-1.5 rounded-full text-sm font-bold mb-4`}>
          {percentage}% Confidence
        </div>

        {/* Reasons */}
        {reasons.length > 0 && (
          <div className="w-full max-w-[220px] mb-6 space-y-1.5">
            {reasons.slice(0, 4).map((reason, i) => (
              <div key={i} className="flex items-center gap-2 text-[11px] text-white/80">
                <div className="w-1 h-1 rounded-full bg-white/60 flex-shrink-0" />
                <span className="capitalize">{reason}</span>
              </div>
            ))}
          </div>
        )}

        {/* Action buttons */}
        <div className="w-full max-w-[220px] space-y-2">
          <button
            onClick={onBlock}
            className="w-full py-2.5 rounded-xl bg-white text-alert font-bold text-sm flex items-center justify-center gap-2 hover:bg-gray-100 transition-colors active:scale-[0.98]"
          >
            <ShieldAlert className="w-4 h-4" />
            Block Caller
          </button>
          <button
            onClick={onDismiss}
            className="w-full py-2.5 rounded-xl bg-white/10 text-white/80 font-medium text-sm hover:bg-white/20 transition-colors active:scale-[0.98]"
          >
            Dismiss
          </button>
        </div>

        {/* SentinelEdge branding */}
        <p className="text-[8px] text-white/30 mt-4 tracking-widest uppercase">
          Protected by SentinelEdge
        </p>
      </div>
    </div>
  )
}
demo/frontend/src/components/PhoneSimulator.tsx ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { ReactNode } from 'react'
2
+
3
// Props for PhoneSimulator: the screen content rendered inside the bezel.
interface PhoneSimulatorProps {
  children: ReactNode
}

// Decorative smartphone frame wrapped around arbitrary screen content.
// Purely presentational: it renders a bezel, side buttons, a camera pill,
// and a home indicator, then places `children` inside the "screen" area.
export default function PhoneSimulator({ children }: PhoneSimulatorProps) {
  return (
    <div className="phone-frame">
      {/* Outer case: rounded body with drop shadow */}
      <div className="relative w-[290px] h-[600px] rounded-[46px] bg-[#111215] p-[4px] shadow-2xl shadow-black/60">
        {/* Hairline edge highlight and top sheen */}
        <div className="pointer-events-none absolute inset-[1px] rounded-[45px] border border-white/10" />
        <div className="pointer-events-none absolute inset-x-[18px] top-[10px] h-[22px] rounded-full bg-white/5 blur-md" />

        {/* Mid frame with physical side buttons (power + volume) */}
        <div className="relative h-full w-full overflow-hidden rounded-[42px] bg-gradient-to-b from-[#2a2b31] via-[#111216] to-[#050608] p-[7px]">
          <div className="absolute -left-[3px] top-[116px] h-[34px] w-[3px] rounded-l-sm bg-zinc-500/70" />
          <div className="absolute -left-[3px] top-[172px] h-[58px] w-[3px] rounded-l-sm bg-zinc-500/70" />
          <div className="absolute -left-[3px] top-[240px] h-[58px] w-[3px] rounded-l-sm bg-zinc-500/70" />
          <div className="absolute -right-[3px] top-[182px] h-[78px] w-[3px] rounded-r-sm bg-zinc-500/70" />

          {/* Screen bezel with inner shadow */}
          <div className="relative h-full w-full overflow-hidden rounded-[35px] border border-black/70 bg-dark-bg shadow-[inset_0_0_0_1px_rgba(255,255,255,0.03)]">
            <div className="pointer-events-none absolute inset-0 rounded-[35px] shadow-[inset_0_1px_0_rgba(255,255,255,0.06),inset_0_-18px_40px_rgba(0,0,0,0.32)]" />

            {/* Camera/speaker "pill" cutout at the top of the screen */}
            <div className="absolute left-1/2 top-[7px] z-30 h-[34px] w-[124px] -translate-x-1/2 rounded-[20px] bg-black shadow-[0_4px_14px_rgba(0,0,0,0.55)]">
              <div className="absolute left-[18px] top-1/2 h-[10px] w-[10px] -translate-y-1/2 rounded-full bg-zinc-900 ring-1 ring-white/5" />
              <div className="absolute left-[35px] top-1/2 h-[7px] w-[7px] -translate-y-1/2 rounded-full bg-zinc-800 ring-1 ring-white/5" />
              <div className="absolute right-[18px] top-1/2 h-[8px] w-[40px] -translate-y-1/2 rounded-full bg-zinc-950/90" />
            </div>

            {/* Actual screen content supplied by the caller */}
            <div className="relative h-full w-full overflow-hidden rounded-[35px]">
              {children}
            </div>

            {/* Home indicator bar */}
            <div className="absolute bottom-1.5 left-0 right-0 z-30 flex justify-center">
              <div className="h-[4px] w-[118px] rounded-full bg-white/38" />
            </div>
          </div>
        </div>
      </div>
    </div>
  )
}
demo/frontend/src/components/PrivacyDemo.tsx ADDED
@@ -0,0 +1,446 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { useState, useEffect, useRef, useCallback } from 'react'
2
+ import { Lock, Smartphone, Server, ArrowRight, Eye, EyeOff, ShieldCheck, RotateCcw, WifiOff, Wifi } from 'lucide-react'
3
+
4
// A single scored transcript sentence together with the privacy artifacts
// shown alongside it in the demo.
interface DisplayEntry {
  // Sentence text as displayed in the transcript column.
  text: string
  // Fraud score for this sentence (values in the fallback data are in [0, 1]).
  score: number
  // Named fraud-indicator activations keyed by feature name.
  features: Record<string, number>
  // Gradient values paired with this entry (presumably DP-noised upstream --
  // TODO confirm against the backend producer).
  gradient: number[]
  // Differential-privacy noise scale (sigma) reported for the gradient.
  sigma: number
  // Differential-privacy budget (epsilon) reported for the gradient.
  epsilon: number
}
12
+
13
// -------- Fallback example data for offline mode --------
// Canned IRS-scam escalation played by the "Replay Demo" button when neither
// the WebSocket nor live-call props supply data. Gradient values are fixed
// illustrative numbers, not real model output.
const FALLBACK_EXAMPLES: DisplayEntry[] = [
  {
    text: "This is Officer James Wilson from the Internal Revenue Service.",
    score: 0.15,
    features: { authority_claim: 0.3, financial_terms: 0.2 } as Record<string, number>,
    gradient: [-0.003421, 0.001872, -0.000543, 0.004219, -0.002103, 0.000891, -0.001567, 0.003245, 0.000123, -0.002876, 0.001432, -0.000765],
    sigma: 0.5,
    epsilon: 1.0,
  },
  {
    text: "There is an outstanding balance of $4,789 that must be resolved immediately.",
    score: 0.42,
    features: { urgency: 0.5, financial_terms: 0.7, specific_amount: 0.6, time_pressure: 0.4 } as Record<string, number>,
    gradient: [0.002156, -0.004312, 0.001098, -0.003567, 0.000432, -0.002789, 0.004101, -0.001234, 0.003456, -0.000876, 0.002345, -0.001678],
    sigma: 0.5,
    epsilon: 1.0,
  },
  {
    text: "If this is not paid today, a warrant will be issued for your arrest.",
    score: 0.68,
    features: { urgency: 0.9, threat_language: 0.8, time_pressure: 0.9, authority_claim: 0.5 } as Record<string, number>,
    gradient: [-0.001234, 0.003678, -0.002901, 0.000456, 0.004512, -0.003123, 0.001789, -0.000345, 0.002567, -0.004089, 0.000912, -0.003456],
    sigma: 0.5,
    epsilon: 1.0,
  },
  {
    text: "You need to purchase Google Play gift cards worth $4,789 and read me the codes.",
    score: 0.85,
    features: { urgency: 0.8, financial_terms: 0.9, gift_card_mention: 1.0, unusual_payment: 1.0 } as Record<string, number>,
    gradient: [0.004567, -0.001234, 0.002890, -0.003456, 0.000789, -0.004123, 0.001567, 0.003012, -0.002345, 0.000678, -0.001890, 0.004234],
    sigma: 0.5,
    epsilon: 1.0,
  },
  {
    text: "This is the only way to avoid criminal prosecution. Do it now.",
    score: 0.92,
    features: { urgency: 1.0, threat_language: 0.95, time_pressure: 1.0, coercion: 0.9 } as Record<string, number>,
    gradient: [-0.002345, 0.004567, -0.000891, 0.003210, -0.001678, 0.002456, -0.004012, 0.000345, 0.001789, -0.003567, 0.002123, -0.000456],
    sigma: 0.5,
    epsilon: 1.0,
  },
]
56
+
57
// Props supplied by the parent during a live call; used only when the
// privacy-demo WebSocket has not delivered any entries of its own.
interface PrivacyDemoProps {
  sentences: Array<{
    text: string
    score: number
    features: Record<string, number>
  }>
  gradientVectors: number[][]  // per-sentence gradients; may be shorter than sentences
  isCallActive: boolean        // disables the Replay button while a call runs
}
66
+
67
// Side-by-side "privacy wall" demo: the left panel shows everything that stays
// on-device (transcript, score, features) while the right panel shows the only
// thing ever transmitted to the hub — DP-noised gradient vectors.
//
// Data source priority: live /ws/privacy-demo WebSocket entries > live-call
// data passed via props > canned FALLBACK_EXAMPLES driven by the Replay button.
export default function PrivacyDemo({ sentences, gradientVectors, isCallActive }: PrivacyDemoProps) {
  const [wsConnected, setWsConnected] = useState(false)
  const [wsEntries, setWsEntries] = useState<DisplayEntry[]>([])
  const [fallbackEntries, setFallbackEntries] = useState<DisplayEntry[]>([])
  const [isReplaying, setIsReplaying] = useState(false)

  const wsRef = useRef<WebSocket | null>(null)
  const replayTimerRef = useRef<ReturnType<typeof setTimeout> | null>(null)
  // Guards every async state update (WS callbacks, replay timers) so nothing
  // fires after unmount.
  const isMountedRef = useRef(true)

  // Try connecting to the privacy-demo WebSocket
  useEffect(() => {
    isMountedRef.current = true
    let ws: WebSocket | null = null

    try {
      // Dev server talks to the local backend directly; in production the
      // socket shares the page's host and upgrades to wss: under https:.
      const wsBase = import.meta.env.DEV
        ? 'ws://localhost:8000'
        : `${window.location.protocol === 'https:' ? 'wss:' : 'ws:'}//${window.location.host}`
      ws = new WebSocket(`${wsBase}/ws/privacy-demo`)
      wsRef.current = ws

      ws.onopen = () => {
        if (isMountedRef.current) {
          setWsConnected(true)
        }
      }

      ws.onmessage = (event) => {
        if (!isMountedRef.current) return
        try {
          // Accept both field spellings the backend may use
          // (text/transcript, fraud_score/score, gradient/noised_gradient).
          const data = JSON.parse(event.data)
          const entry: DisplayEntry = {
            text: data.text ?? data.transcript ?? '',
            score: data.fraud_score ?? data.score ?? 0,
            features: data.features ?? {},
            gradient: data.gradient ?? data.noised_gradient ?? [],
            sigma: data.sigma ?? 0.5,
            epsilon: data.epsilon ?? 1.0,
          }
          setWsEntries(prev => [...prev, entry])
        } catch {
          // Ignore malformed messages
        }
      }

      ws.onerror = () => {
        if (isMountedRef.current) {
          setWsConnected(false)
        }
      }

      ws.onclose = () => {
        if (isMountedRef.current) {
          setWsConnected(false)
          wsRef.current = null
        }
      }
    } catch {
      // WebSocket constructor can throw (e.g. bad URL); stay in offline mode.
      setWsConnected(false)
    }

    return () => {
      isMountedRef.current = false
      if (ws) {
        ws.close(1000, 'Component unmount')
      }
      wsRef.current = null
    }
  }, [])

  // Determine which data to display
  // Priority: WebSocket data > live call data passed via props > fallback examples
  const useWebSocketData = wsConnected && wsEntries.length > 0
  const usePropData = !useWebSocketData && sentences.length > 0

  // NOTE(review): when gradientVectors[i] is missing, the placeholder gradient
  // is re-randomized on every render, so those displayed values change across
  // re-renders — consider memoizing if stable display matters.
  const displayEntries: DisplayEntry[] = useWebSocketData
    ? wsEntries
    : usePropData
      ? sentences.map((s, i) => ({
          text: s.text,
          score: s.score,
          features: s.features,
          gradient: gradientVectors[i] ?? Array.from({ length: 12 }, () => Number(((Math.random() - 0.5) * 0.01).toFixed(6))),
          sigma: 0.5,
          epsilon: 1.0,
        }))
      : fallbackEntries

  // Replay: cycle through fallback examples one by one
  const startReplay = useCallback(() => {
    if (isReplaying) return
    setIsReplaying(true)
    setFallbackEntries([])
    setWsEntries([])

    let index = 0
    const playNext = () => {
      if (!isMountedRef.current || index >= FALLBACK_EXAMPLES.length) {
        if (isMountedRef.current) setIsReplaying(false)
        return
      }
      const example = FALLBACK_EXAMPLES[index]
      setFallbackEntries(prev => [...prev, example])
      index++
      // One example every 1.5s after an initial 0.5s delay.
      replayTimerRef.current = setTimeout(playNext, 1500)
    }

    replayTimerRef.current = setTimeout(playNext, 500)
  }, [isReplaying])

  // Cleanup replay timer
  useEffect(() => {
    return () => {
      if (replayTimerRef.current) clearTimeout(replayTimerRef.current)
    }
  }, [])

  return (
    <div className="space-y-6">
      {/* Intro banner */}
      <div className="glass-card p-5 sm:p-6 glow-teal">
        <div className="flex flex-col gap-4 lg:flex-row lg:items-start">
          <div className="w-12 h-12 rounded-xl bg-brand-teal/10 flex items-center justify-center flex-shrink-0">
            <Lock className="w-6 h-6 text-brand-teal" />
          </div>
          <div className="flex-1">
            <h2 className="text-lg font-bold text-white mb-1">Privacy-First Architecture</h2>
            <p className="text-sm text-gray-400 leading-relaxed max-w-2xl">
              SentinelEdge processes all sensitive data on your device. The hub server only receives
              differentially-private gradient updates -- mathematical noise that cannot be reverse-engineered
              to reconstruct your conversations or personal information.
            </p>
          </div>
          <div className="flex flex-wrap items-center gap-3 lg:flex-shrink-0">
            {/* Connection status */}
            <div className="flex items-center gap-1.5 text-xs">
              {wsConnected ? (
                <>
                  <Wifi className="w-3.5 h-3.5 text-safe" />
                  <span className="text-safe">Live</span>
                </>
              ) : (
                <>
                  <WifiOff className="w-3.5 h-3.5 text-gray-500" />
                  <span className="text-gray-500">Offline</span>
                </>
              )}
            </div>
            {/* Replay button */}
            <button
              onClick={startReplay}
              disabled={isReplaying || isCallActive}
              className={`
                control-button px-3 py-1.5 text-xs font-medium
                ${isReplaying || isCallActive
                  ? ''
                  : 'border-brand-teal/20 bg-brand-teal/10 text-brand-teal hover:bg-brand-teal/20'
                }
              `}
            >
              <RotateCcw className={`w-3.5 h-3.5 ${isReplaying ? 'animate-spin' : ''}`} />
              {isReplaying ? 'Replaying...' : 'Replay Demo'}
            </button>
          </div>
        </div>
      </div>

      {/* Side by side panels with noise wall */}
      <div className="grid grid-cols-1 gap-4 xl:grid-cols-[1fr_auto_1fr] xl:gap-0">
        {/* Device panel */}
        <div className="glass-card border-brand-teal/30 overflow-hidden">
          {/* Header */}
          <div className="px-6 py-4 bg-brand-teal/5 border-b border-brand-teal/20 flex items-center gap-3">
            <div className="w-8 h-8 rounded-lg bg-brand-teal/10 flex items-center justify-center">
              <Smartphone className="w-4 h-4 text-brand-teal" />
            </div>
            <div>
              <h3 className="text-sm font-bold text-brand-teal">Your Device</h3>
              <p className="text-[10px] text-gray-500 uppercase tracking-wider">Everything stays here</p>
            </div>
            <Eye className="w-4 h-4 text-brand-teal ml-auto" />
          </div>

          {/* Content */}
          <div className="p-6 space-y-4 max-h-[500px] overflow-y-auto">
            {displayEntries.length === 0 && !isCallActive ? (
              <div className="text-center py-12">
                <Smartphone className="w-10 h-10 text-gray-700 mx-auto mb-3" />
                <p className="text-sm text-gray-600">Start a call to see device-side data</p>
                <p className="text-xs text-gray-700 mt-1">Full transcript and analysis visible here</p>
                <p className="text-xs text-gray-700 mt-2">
                  Or press <span className="text-brand-teal font-medium">Replay Demo</span> above
                </p>
              </div>
            ) : (
              displayEntries.map((entry, i) => (
                <div key={i} className="animate-fade-in space-y-2">
                  {/* Transcript text */}
                  <div className="bg-dark-bg/50 rounded-lg p-3 border border-brand-teal/10">
                    <p className="text-xs text-gray-500 mb-1 font-medium">Transcript #{i + 1}</p>
                    <p className="text-sm text-gray-200 font-mono leading-relaxed">
                      {entry.text}
                    </p>
                  </div>

                  {/* Features + score */}
                  <div className="flex gap-2">
                    <div className="flex-1 bg-dark-bg/30 rounded-lg p-2">
                      <p className="text-[10px] text-gray-600 mb-1">Fraud Score</p>
                      <p className={`text-lg font-bold tabular-nums ${
                        entry.score < 0.3 ? 'text-safe' :
                        entry.score < 0.5 ? 'text-warning' :
                        entry.score < 0.75 ? 'text-orange-400' :
                        'text-alert'
                      }`}>
                        {(entry.score * 100).toFixed(1)}%
                      </p>
                    </div>
                    <div className="flex-1 bg-dark-bg/30 rounded-lg p-2">
                      <p className="text-[10px] text-gray-600 mb-1">Features</p>
                      <div className="flex flex-wrap gap-1">
                        {/* Show at most 3 features with activation above 0.3 */}
                        {Object.entries(entry.features)
                          .filter(([, v]) => v > 0.3)
                          .slice(0, 3)
                          .map(([k]) => (
                            <span key={k} className="text-[9px] px-1.5 py-0.5 bg-brand-teal/10 text-brand-teal rounded-full">
                              {k.replace(/_/g, ' ')}
                            </span>
                          ))
                        }
                      </div>
                    </div>
                  </div>

                  {i < displayEntries.length - 1 && (
                    <div className="border-b border-dark-border/20" />
                  )}
                </div>
              ))
            )}
          </div>
        </div>

        {/* Visual noise wall - dashed vertical privacy boundary */}
        <div className="flex flex-col items-center justify-center px-4 relative">
          {/* Top label */}
          <div className="absolute top-4 bg-dark-bg/80 px-2 py-1 rounded-md z-10">
            <p className="text-[9px] text-gray-500 uppercase tracking-widest font-bold">Privacy</p>
          </div>

          {/* Dashed line */}
          <div className="h-full w-px relative">
            <div
              className="absolute inset-0 w-px"
              style={{
                backgroundImage: 'repeating-linear-gradient(to bottom, #0D9488 0px, #0D9488 6px, transparent 6px, transparent 14px)',
                opacity: 0.5,
              }}
            />
            {/* Noise glow effect */}
            <div
              className="absolute inset-0 w-6 -left-[11px]"
              style={{
                background: 'linear-gradient(to right, transparent, rgba(13, 148, 136, 0.08), transparent)',
              }}
            />
          </div>

          {/* Lock icon in the middle */}
          <div className="absolute top-1/2 -translate-y-1/2 w-8 h-8 rounded-full bg-dark-bg border border-brand-teal/30 flex items-center justify-center z-10">
            <Lock className="w-3.5 h-3.5 text-brand-teal" />
          </div>

          {/* Bottom label */}
          <div className="absolute bottom-4 bg-dark-bg/80 px-2 py-1 rounded-md z-10">
            <p className="text-[9px] text-gray-500 uppercase tracking-widest font-bold">Barrier</p>
          </div>
        </div>

        {/* Hub server panel */}
        <div className="glass-card border-gray-700/30 overflow-hidden">
          {/* Header */}
          <div className="px-6 py-4 bg-gray-800/30 border-b border-gray-700/20 flex items-center gap-3">
            <div className="w-8 h-8 rounded-lg bg-gray-700/30 flex items-center justify-center">
              <Server className="w-4 h-4 text-gray-400" />
            </div>
            <div>
              <h3 className="text-sm font-bold text-gray-400">Hub Server</h3>
              <p className="text-[10px] text-gray-600 uppercase tracking-wider">This is all we send</p>
            </div>
            <EyeOff className="w-4 h-4 text-gray-600 ml-auto" />
          </div>

          {/* Content */}
          <div className="p-6 space-y-4 max-h-[500px] overflow-y-auto">
            {displayEntries.length === 0 && !isCallActive ? (
              <div className="text-center py-12">
                <Server className="w-10 h-10 text-gray-700 mx-auto mb-3" />
                <p className="text-sm text-gray-600">No data transmitted yet</p>
                <p className="text-xs text-gray-700 mt-1">Only noisy gradients are sent</p>
              </div>
            ) : (
              displayEntries.map((entry, i) => (
                <div key={i} className="animate-fade-in">
                  <div className="bg-dark-bg/50 rounded-lg p-3 border border-gray-700/20">
                    <div className="flex items-center justify-between mb-2">
                      <p className="text-xs text-gray-600 font-medium">
                        Gradient Update #{i + 1}
                      </p>
                      <span className="text-[9px] px-1.5 py-0.5 bg-gray-700/30 text-gray-500 rounded-full">
                        DP-SGD
                      </span>
                    </div>
                    <div className="font-mono text-[10px] text-gray-600 leading-relaxed break-all">
                      {/* Larger-magnitude components render slightly brighter */}
                      [{entry.gradient.map((v, j) => (
                        <span key={j}>
                          <span className={
                            Math.abs(v) > 0.005 ? 'text-gray-400' : 'text-gray-700'
                          }>
                            {v.toFixed(6)}
                          </span>
                          {j < entry.gradient.length - 1 ? ', ' : ''}
                        </span>
                      ))}]
                    </div>
                    <div className="mt-2 flex items-center gap-3">
                      <div className="flex items-center gap-1.5">
                        <Lock className="w-2.5 h-2.5 text-gray-600" />
                        <p className="text-[9px] text-gray-600">
                          sigma={entry.sigma.toFixed(1)}
                        </p>
                      </div>
                      <div className="flex items-center gap-1.5">
                        <ShieldCheck className="w-2.5 h-2.5 text-gray-600" />
                        <p className="text-[9px] text-gray-600">
                          epsilon={entry.epsilon.toFixed(1)}
                        </p>
                      </div>
                    </div>
                  </div>
                </div>
              ))
            )}
          </div>
        </div>
      </div>

      {/* Bottom explanation */}
      <div className="glass-card p-5 sm:p-6">
        <div className="flex flex-col gap-5 lg:flex-row lg:items-center lg:gap-6">
          <div className="flex items-center gap-3 overflow-x-auto pb-1 lg:flex-shrink-0">
            <div className="w-10 h-10 rounded-full bg-brand-teal/10 flex items-center justify-center">
              <Smartphone className="w-5 h-5 text-brand-teal" />
            </div>
            <ArrowRight className="w-5 h-5 text-gray-600" />
            <div className="w-10 h-10 rounded-full bg-gray-700/30 flex items-center justify-center">
              <Lock className="w-5 h-5 text-gray-400" />
            </div>
            <ArrowRight className="w-5 h-5 text-gray-600" />
            <div className="w-10 h-10 rounded-full bg-gray-700/30 flex items-center justify-center">
              <Server className="w-5 h-5 text-gray-400" />
            </div>
          </div>
          <div className="flex-1 space-y-2">
            <div className="flex items-center gap-2">
              <ShieldCheck className="w-4 h-4 text-safe" />
              <span className="text-sm font-semibold text-gray-200">Zero-Knowledge Fraud Detection</span>
            </div>
            <p className="text-xs text-gray-500 leading-relaxed">
              Raw audio and transcripts never leave your device. Only differentially-private model gradients
              are transmitted. Even if the hub server is compromised, your conversation data remains private.
              Mathematical guarantees ensure no individual call can be reconstructed from gradient updates.
            </p>
          </div>
        </div>
      </div>
    </div>
  )
}
demo/frontend/src/components/ScoreGauge.tsx ADDED
@@ -0,0 +1,260 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { useEffect, useMemo, useRef, useState, type CSSProperties } from 'react'
2
+ import { ShieldCheck, ShieldAlert, ShieldX } from 'lucide-react'
3
+
4
// Props for the gauge: score is expected in [0, 1] (clamped internally).
interface ScoreGaugeProps {
  score: number
  label?: string  // caption under the gauge; defaults to 'Fraud Score'
}
8
+
9
// One transient particle-burst animation fired when the score crosses a
// threshold; removed again after the animation completes.
interface ParticleBurst {
  id: number    // monotonically increasing key from burstIdRef
  color: string // gauge color at the moment the threshold was crossed
}
13
+
14
// Score boundaries that trigger the burst/pulse effect when crossed in
// either direction.
const THRESHOLDS = [0.3, 0.5, 0.75]
// Green→yellow→amber→orange→red color ramp; interpolateColor lerps between
// the two stops bracketing the score.
const GRADIENT_STOPS = [
  { score: 0, color: '#10B981' },
  { score: 0.3, color: '#EAB308' },
  { score: 0.5, color: '#F59E0B' },
  { score: 0.75, color: '#F97316' },
  { score: 1, color: '#EF4444' },
]
22
+
23
+ function hexToRgb(hex: string) {
24
+ const normalized = hex.replace('#', '')
25
+ const value = Number.parseInt(normalized, 16)
26
+ return {
27
+ r: (value >> 16) & 255,
28
+ g: (value >> 8) & 255,
29
+ b: value & 255,
30
+ }
31
+ }
32
+
33
+ function interpolateColor(score: number): string {
34
+ const safeScore = Math.max(0, Math.min(1, score))
35
+
36
+ for (let index = 1; index < GRADIENT_STOPS.length; index += 1) {
37
+ const start = GRADIENT_STOPS[index - 1]
38
+ const end = GRADIENT_STOPS[index]
39
+
40
+ if (safeScore <= end.score) {
41
+ const range = end.score - start.score || 1
42
+ const progress = (safeScore - start.score) / range
43
+ const startRgb = hexToRgb(start.color)
44
+ const endRgb = hexToRgb(end.color)
45
+
46
+ const r = Math.round(startRgb.r + (endRgb.r - startRgb.r) * progress)
47
+ const g = Math.round(startRgb.g + (endRgb.g - startRgb.g) * progress)
48
+ const b = Math.round(startRgb.b + (endRgb.b - startRgb.b) * progress)
49
+
50
+ return `rgb(${r}, ${g}, ${b})`
51
+ }
52
+ }
53
+
54
+ return GRADIENT_STOPS[GRADIENT_STOPS.length - 1].color
55
+ }
56
+
57
+ function getLevelText(score: number): string {
58
+ if (score < 0.15) return 'Safe'
59
+ if (score < 0.3) return 'Low Risk'
60
+ if (score < 0.5) return 'Moderate'
61
+ if (score < 0.75) return 'High Risk'
62
+ return 'Critical'
63
+ }
64
+
65
+ function getLevelIcon(score: number) {
66
+ if (score < 0.3) return <ShieldCheck className="w-5 h-5" />
67
+ if (score < 0.75) return <ShieldAlert className="w-5 h-5" />
68
+ return <ShieldX className="w-5 h-5" />
69
+ }
70
+
71
// Animated half-circle fraud-score gauge (arc length is π·r and tick marks
// span 0–180°). Crossing any THRESHOLDS value in either direction fires a
// one-shot particle burst and a brief pulse on the shell.
export default function ScoreGauge({ score, label = 'Fraud Score' }: ScoreGaugeProps) {
  const clampedScore = Math.max(0, Math.min(1, score))
  // Previous score, kept across renders to detect threshold crossings.
  const previousScoreRef = useRef(clampedScore)
  const burstIdRef = useRef(0)
  const [bursts, setBursts] = useState<ParticleBurst[]>([])
  const [isThresholdPulseActive, setIsThresholdPulseActive] = useState(false)

  const percentage = Math.round(clampedScore * 100)
  const color = useMemo(() => interpolateColor(clampedScore), [clampedScore])

  // SVG geometry: 200px square, 12px stroke, half-circle arc.
  const size = 200
  const strokeWidth = 12
  const radius = (size - strokeWidth) / 2
  const circumference = Math.PI * radius
  const offset = circumference - clampedScore * circumference
  const centerX = size / 2
  const centerY = size / 2

  // Fire burst + pulse when the score crosses a threshold in either direction.
  useEffect(() => {
    const previousScore = previousScoreRef.current
    const crossedThreshold = THRESHOLDS.some(
      threshold =>
        (previousScore < threshold && clampedScore >= threshold) ||
        (previousScore > threshold && clampedScore <= threshold)
    )

    previousScoreRef.current = clampedScore

    if (!crossedThreshold) return

    const burst = {
      id: burstIdRef.current,
      color,
    }
    burstIdRef.current += 1

    setBursts(current => [...current, burst])
    setIsThresholdPulseActive(true)

    // Pulse ends at 700ms; the burst element is removed at 1300ms.
    const pulseTimeout = window.setTimeout(() => {
      setIsThresholdPulseActive(false)
    }, 700)

    const cleanupTimeout = window.setTimeout(() => {
      setBursts(current => current.filter(item => item.id !== burst.id))
    }, 1300)

    return () => {
      window.clearTimeout(pulseTimeout)
      window.clearTimeout(cleanupTimeout)
    }
  }, [clampedScore, color])

  return (
    <div className="flex flex-col items-center">
      <div
        className={`score-gauge-shell relative ${isThresholdPulseActive ? 'score-gauge-threshold-pulse' : ''}`}
        style={
          {
            width: size,
            height: size,
            '--gauge-color': color,
          } as CSSProperties
        }
      >
        <div className="score-gauge-glow absolute inset-[24px] rounded-full" />

        {/* Transient particle bursts (10 particles each, staggered delays) */}
        {bursts.map(burst => (
          <div
            key={burst.id}
            className="score-gauge-burst pointer-events-none absolute inset-0"
            style={{ '--burst-color': burst.color } as CSSProperties}
            aria-hidden="true"
          >
            {Array.from({ length: 10 }).map((_, index) => {
              const angle = (360 / 10) * index
              const distance = 36 + (index % 3) * 10
              const delay = `${index * 35}ms`
              return (
                <span
                  key={index}
                  className="score-gauge-particle absolute left-1/2 top-1/2"
                  style={
                    {
                      '--particle-angle': `${angle}deg`,
                      '--particle-distance': `${distance}px`,
                      animationDelay: delay,
                    } as CSSProperties
                  }
                />
              )
            })}
          </div>
        ))}

        <svg
          width={size}
          height={size}
          viewBox={`0 0 ${size} ${size}`}
          className="score-gauge-svg transform -rotate-90"
        >
          <defs>
            <linearGradient id="scoreGaugeTrack" x1="0%" y1="0%" x2="100%" y2="0%">
              <stop offset="0%" stopColor="#0f172a" />
              <stop offset="100%" stopColor="#1e293b" />
            </linearGradient>
          </defs>

          {/* Background track (full arc) */}
          <circle
            cx={centerX}
            cy={centerY}
            r={radius}
            fill="none"
            stroke="url(#scoreGaugeTrack)"
            strokeWidth={strokeWidth}
            strokeLinecap="round"
            strokeDasharray={`${circumference} ${circumference * 2}`}
            transform={`rotate(90, ${centerX}, ${centerY})`}
          />

          {/* Foreground arc; dash offset reveals the score fraction */}
          <circle
            cx={centerX}
            cy={centerY}
            r={radius}
            fill="none"
            stroke={color}
            strokeWidth={strokeWidth}
            strokeLinecap="round"
            strokeDasharray={`${circumference} ${circumference * 2}`}
            strokeDashoffset={offset}
            transform={`rotate(90, ${centerX}, ${centerY})`}
            className="gauge-arc"
          />

          {/* Tick marks at 0/25/50/75/100% along the half-circle */}
          {[0, 0.25, 0.5, 0.75, 1].map(tick => {
            const angle = (tick * 180 - 90) * (Math.PI / 180)
            const outerR = radius + strokeWidth / 2 + 4
            const innerR = radius + strokeWidth / 2 + 10
            const x1 = centerX + outerR * Math.cos(angle)
            const y1 = centerY + outerR * Math.sin(angle)
            const x2 = centerX + innerR * Math.cos(angle)
            const y2 = centerY + innerR * Math.sin(angle)
            return (
              <line
                key={tick}
                x1={x1}
                y1={y1}
                x2={x2}
                y2={y2}
                stroke="#475569"
                strokeWidth={1.5}
                strokeLinecap="round"
                transform={`rotate(90, ${centerX}, ${centerY})`}
              />
            )
          })}
        </svg>

        {/* Center readout: icon, percentage, risk label */}
        <div className="absolute inset-0 flex flex-col items-center justify-center">
          <div className="mb-1 score-gauge-center" style={{ color }}>
            {getLevelIcon(clampedScore)}
          </div>
          <span className="score-gauge-center text-4xl font-bold tabular-nums tracking-tight" style={{ color }}>
            {percentage}
            <span className="text-lg font-normal text-gray-500">%</span>
          </span>
          <span className="score-gauge-center mt-0.5 text-xs font-semibold uppercase tracking-wide" style={{ color }}>
            {getLevelText(clampedScore)}
          </span>
        </div>
      </div>

      <p className="text-xs text-gray-500 font-medium mt-2">{label}</p>

      {/* Legend */}
      <div className="mt-3 flex items-center gap-3">
        {[
          { label: 'Safe', color: '#10B981' },
          { label: 'Warning', color: '#EAB308' },
          { label: 'High', color: '#F97316' },
          { label: 'Critical', color: '#EF4444' },
        ].map(item => (
          <div key={item.label} className="flex items-center gap-1">
            <div className="h-2 w-2 rounded-full" style={{ backgroundColor: item.color }} />
            <span className="text-[9px] text-gray-500">{item.label}</span>
          </div>
        ))}
      </div>
    </div>
  )
}
demo/frontend/src/components/TranscriptPanel.tsx ADDED
@@ -0,0 +1,212 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { useEffect, useMemo, useRef, useState } from 'react'
2
+ import { MessageSquare, Radio } from 'lucide-react'
3
+
4
// Props: per-sentence transcript entries (index is the stable list key) plus
// a flag that shows the "Streaming"/"Listening" indicators.
interface TranscriptPanelProps {
  sentences: Array<{
    text: string
    score: number
    index: number  // sentence ordinal; used as the React key and row number
  }>
  isStreaming: boolean
}
12
+
13
// Regex → CSS-class pairs used to highlight scam-indicative keywords in the
// transcript. highlightKeywords clones each pattern before scanning, so the
// shared /g-flag lastIndex state never leaks between calls.
const KEYWORD_STYLES: Array<{ pattern: RegExp; className: string }> = [
  { pattern: /\b(immediately|urgent|hurry|deadline|today|now)\b/gi, className: 'transcript-keyword transcript-keyword-urgent' },
  { pattern: /\b(gift cards?|google play|codes?)\b/gi, className: 'transcript-keyword transcript-keyword-payment' },
  { pattern: /\b(password|pin|social security|verification code|bank details?|ssn)\b/gi, className: 'transcript-keyword transcript-keyword-credential' },
  { pattern: /\b(arrest|warrant|prosecution|criminal)\b/gi, className: 'transcript-keyword transcript-keyword-threat' },
  { pattern: /\b(remote access|download|website|click|login)\b/gi, className: 'transcript-keyword transcript-keyword-action' },
]
20
+
21
+ function getScoreColor(score: number): string {
22
+ if (score < 0.3) return 'border-safe'
23
+ if (score < 0.5) return 'border-warning'
24
+ if (score < 0.75) return 'border-orange-500'
25
+ return 'border-alert'
26
+ }
27
+
28
+ function getScoreBg(score: number): string {
29
+ if (score < 0.3) return 'bg-safe/5'
30
+ if (score < 0.5) return 'bg-warning/5'
31
+ if (score < 0.75) return 'bg-orange-500/5'
32
+ return 'bg-alert/5'
33
+ }
34
+
35
+ function getScoreBadge(score: number): { bg: string; text: string; pulse: boolean } {
36
+ if (score < 0.3) return { bg: 'bg-safe/10 text-safe', text: 'Safe', pulse: false }
37
+ if (score < 0.5) return { bg: 'bg-warning/10 text-warning', text: 'Caution', pulse: false }
38
+ if (score < 0.75) return { bg: 'bg-orange-500/10 text-orange-400', text: 'Suspicious', pulse: true }
39
+ return { bg: 'bg-alert/10 text-alert', text: 'Danger', pulse: true }
40
+ }
41
+
42
// Split *text* into <span> segments, wrapping each KEYWORD_STYLES match in its
// highlight class. Overlaps are resolved greedily: matches are sorted by start
// position (ties broken longest-first) and any match beginning inside an
// already-emitted segment is dropped.
function highlightKeywords(text: string) {
  if (!text) return null

  const matches: Array<{ start: number; end: number; className: string }> = []

  KEYWORD_STYLES.forEach(({ pattern, className }) => {
    // Clone the regex so the shared /g pattern's lastIndex is never mutated.
    const regex = new RegExp(pattern.source, pattern.flags)
    let match = regex.exec(text)
    while (match) {
      matches.push({
        start: match.index,
        end: match.index + match[0].length,
        className,
      })
      match = regex.exec(text)
    }
  })

  matches.sort((a, b) => a.start - b.start || b.end - a.end)

  const segments: Array<{ text: string; className?: string }> = []
  let cursor = 0

  matches.forEach(match => {
    if (match.start < cursor) return  // overlaps a previously emitted match
    if (match.start > cursor) {
      // Plain text between the previous match and this one.
      segments.push({ text: text.slice(cursor, match.start) })
    }
    segments.push({
      text: text.slice(match.start, match.end),
      className: match.className,
    })
    cursor = match.end
  })

  // Trailing plain text after the last match.
  if (cursor < text.length) {
    segments.push({ text: text.slice(cursor) })
  }

  return segments.map((segment, index) => (
    <span key={`${segment.text}-${index}`} className={segment.className}>
      {segment.text}
    </span>
  ))
}
87
+
88
// Props for the typewriter effect; only the latest sentence animates.
interface TypewriterSentenceProps {
  text: string
  isLatest: boolean
}
92
+
93
// Renders a sentence with a character-by-character typewriter reveal. Only the
// latest sentence animates; once it is no longer latest it snaps to full text.
function TypewriterSentence({ text, isLatest }: TypewriterSentenceProps) {
  const [visibleLength, setVisibleLength] = useState(isLatest ? 0 : text.length)

  useEffect(() => {
    if (!isLatest) {
      setVisibleLength(text.length)
      return
    }

    setVisibleLength(0)
    // Target a ~520ms total reveal, with per-character delay clamped to 16-34ms.
    const stepMs = Math.max(16, Math.min(34, 520 / Math.max(text.length, 1)))
    const interval = window.setInterval(() => {
      setVisibleLength(current => {
        if (current >= text.length) {
          // Fully revealed — stop the timer from inside the updater.
          window.clearInterval(interval)
          return current
        }
        return current + 1
      })
    }, stepMs)

    return () => window.clearInterval(interval)
  }, [isLatest, text])

  const visibleText = useMemo(() => text.slice(0, visibleLength), [text, visibleLength])
  const isTyping = isLatest && visibleLength < text.length

  return (
    <span className={isTyping ? 'typewriter-cursor' : undefined}>
      {highlightKeywords(visibleText)}
    </span>
  )
}
126
+
127
// Scrolling live-transcript list. Each sentence row is color-coded by fraud
// score, keyword-highlighted, and the newest row types itself out; the panel
// auto-scrolls to the bottom whenever a sentence arrives.
export default function TranscriptPanel({ sentences, isStreaming }: TranscriptPanelProps) {
  const scrollRef = useRef<HTMLDivElement>(null)

  // Pin the scroll position to the newest sentence.
  useEffect(() => {
    if (scrollRef.current) {
      scrollRef.current.scrollTop = scrollRef.current.scrollHeight
    }
  }, [sentences])

  return (
    <div>
      {/* Header with streaming indicator */}
      <div className="mb-4 flex items-center justify-between">
        <div className="flex items-center gap-2">
          <MessageSquare className="w-4 h-4 text-brand-teal" />
          <h3 className="text-sm font-semibold text-gray-200">Live Transcript</h3>
        </div>
        {isStreaming && (
          <div className="flex items-center gap-1.5 text-xs text-safe">
            <Radio className="w-3 h-3 animate-pulse" />
            <span className="font-medium">Streaming</span>
          </div>
        )}
      </div>

      <div
        ref={scrollRef}
        className="transcript-scroll space-y-2 max-h-[280px] overflow-y-auto pr-2 scroll-smooth"
      >
        {sentences.length === 0 ? (
          <div className="flex items-center justify-center py-12 text-gray-600">
            <div className="text-center">
              <MessageSquare className="w-8 h-8 mx-auto mb-2 opacity-30" />
              <p className="text-sm">Waiting for transcript...</p>
              <p className="text-xs mt-1 opacity-60">Start a sample call to see real-time analysis</p>
            </div>
          </div>
        ) : (
          sentences.map((sentence, i) => {
            const badge = getScoreBadge(sentence.score)
            const isLatest = i === sentences.length - 1

            return (
              <div
                key={sentence.index}
                className={`
                  flex items-start gap-3 rounded-lg border-l-[3px] p-3
                  ${getScoreColor(sentence.score)} ${getScoreBg(sentence.score)}
                  animate-fade-in
                `}
                style={{ animationDelay: `${i * 50}ms` }}
              >
                {/* Row number */}
                <span className="mt-0.5 w-4 flex-shrink-0 text-right font-mono text-[10px] text-gray-600">
                  {sentence.index + 1}
                </span>

                {/* Sentence text; only the latest one animates */}
                <p className="flex-1 font-mono text-sm leading-relaxed text-gray-300">
                  <TypewriterSentence text={sentence.text} isLatest={isLatest} />
                </p>

                {/* Score badge + percentage */}
                <div className="flex flex-shrink-0 flex-col items-end gap-1">
                  <span className={`text-[9px] font-bold px-1.5 py-0.5 rounded ${badge.bg} ${badge.pulse ? 'transcript-score-badge-pulse' : ''}`}>
                    {badge.text}
                  </span>
                  <span className="text-[10px] text-gray-500 font-mono tabular-nums">
                    {(sentence.score * 100).toFixed(0)}%
                  </span>
                </div>
              </div>
            )
          })
        )}

        {/* Animated "listening" dots while the stream is live */}
        {isStreaming && sentences.length > 0 && (
          <div className="flex items-center gap-2 px-3 py-2">
            <div className="flex gap-1">
              <div className="streaming-dot w-1.5 h-1.5 rounded-full bg-brand-teal" />
              <div className="streaming-dot w-1.5 h-1.5 rounded-full bg-brand-teal" />
              <div className="streaming-dot w-1.5 h-1.5 rounded-full bg-brand-teal" />
            </div>
            <span className="text-[10px] text-gray-500">Listening...</span>
          </div>
        )}
      </div>
    </div>
  )
}
demo/frontend/src/hooks/useWebSocket.ts ADDED
@@ -0,0 +1,112 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { useRef, useState, useCallback, useEffect } from 'react'
2
+
3
+ interface UseWebSocketOptions {
4
+ url: string
5
+ onMessage: (data: any) => void
6
+ onOpen?: () => void
7
+ onClose?: () => void
8
+ autoConnect?: boolean
9
+ }
10
+
11
+ interface UseWebSocketReturn {
12
+ connect: () => void
13
+ disconnect: () => void
14
+ send: (data: any) => void
15
+ isConnected: boolean
16
+ error: string | null
17
+ }
18
+
19
+ export function useWebSocket(options: UseWebSocketOptions): UseWebSocketReturn {
20
+ const { url, onMessage, onOpen, onClose, autoConnect = false } = options
21
+ const [isConnected, setIsConnected] = useState(false)
22
+ const [error, setError] = useState<string | null>(null)
23
+ const wsRef = useRef<WebSocket | null>(null)
24
+ const reconnectTimeoutRef = useRef<ReturnType<typeof setTimeout> | null>(null)
25
+ const onMessageRef = useRef(onMessage)
26
+ const onOpenRef = useRef(onOpen)
27
+ const onCloseRef = useRef(onClose)
28
+
29
+ // Keep callback refs fresh without causing reconnections
30
+ useEffect(() => {
31
+ onMessageRef.current = onMessage
32
+ onOpenRef.current = onOpen
33
+ onCloseRef.current = onClose
34
+ }, [onMessage, onOpen, onClose])
35
+
36
+ const disconnect = useCallback(() => {
37
+ if (reconnectTimeoutRef.current) {
38
+ clearTimeout(reconnectTimeoutRef.current)
39
+ reconnectTimeoutRef.current = null
40
+ }
41
+ if (wsRef.current) {
42
+ wsRef.current.close(1000, 'Client disconnect')
43
+ wsRef.current = null
44
+ }
45
+ setIsConnected(false)
46
+ }, [])
47
+
48
+ const connect = useCallback(() => {
49
+ if (!url) return
50
+ disconnect()
51
+ setError(null)
52
+
53
+ try {
54
+ const ws = new WebSocket(url)
55
+ wsRef.current = ws
56
+
57
+ ws.onopen = () => {
58
+ setIsConnected(true)
59
+ setError(null)
60
+ onOpenRef.current?.()
61
+ }
62
+
63
+ ws.onmessage = (event) => {
64
+ try {
65
+ const data = JSON.parse(event.data)
66
+ onMessageRef.current(data)
67
+ } catch {
68
+ // If not JSON, pass raw data
69
+ onMessageRef.current(event.data)
70
+ }
71
+ }
72
+
73
+ ws.onerror = () => {
74
+ setError('WebSocket connection error')
75
+ }
76
+
77
+ ws.onclose = (event) => {
78
+ setIsConnected(false)
79
+ wsRef.current = null
80
+ onCloseRef.current?.()
81
+
82
+ // Auto-reconnect on abnormal closure (not manual disconnect)
83
+ if (event.code !== 1000 && event.code !== 1005) {
84
+ reconnectTimeoutRef.current = setTimeout(() => {
85
+ connect()
86
+ }, 3000)
87
+ }
88
+ }
89
+ } catch (err) {
90
+ setError(`Failed to connect: ${err}`)
91
+ setIsConnected(false)
92
+ }
93
+ }, [url, disconnect])
94
+
95
+ const send = useCallback((data: any) => {
96
+ if (wsRef.current?.readyState === WebSocket.OPEN) {
97
+ wsRef.current.send(typeof data === 'string' ? data : JSON.stringify(data))
98
+ }
99
+ }, [])
100
+
101
+ // Auto-connect on mount if configured
102
+ useEffect(() => {
103
+ if (autoConnect && url) {
104
+ connect()
105
+ }
106
+ return () => {
107
+ disconnect()
108
+ }
109
+ }, [autoConnect, url, connect, disconnect])
110
+
111
+ return { connect, disconnect, send, isConnected, error }
112
+ }
demo/frontend/src/main.tsx ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
// Application entry point: mounts the root <App /> component into the
// #root element (declared in index.html) under React StrictMode.
// Global stylesheets are imported here so they apply app-wide.
import React from 'react'
import ReactDOM from 'react-dom/client'
import App from './App'
import './styles/index.css'
import './styles/phone.css'

ReactDOM.createRoot(document.getElementById('root')!).render(
  <React.StrictMode>
    <App />
  </React.StrictMode>,
)
demo/frontend/src/styles/index.css ADDED
@@ -0,0 +1,120 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ @tailwind base;
2
+ @tailwind components;
3
+ @tailwind utilities;
4
+
5
+ @layer base {
6
+ * {
7
+ box-sizing: border-box;
8
+ }
9
+
10
+ body {
11
+ @apply bg-dark-bg text-gray-100 font-sans;
12
+ -webkit-font-smoothing: antialiased;
13
+ -moz-osx-font-smoothing: grayscale;
14
+ }
15
+
16
+ button {
17
+ transition:
18
+ transform 180ms ease,
19
+ background-color 180ms ease,
20
+ border-color 180ms ease,
21
+ color 180ms ease,
22
+ box-shadow 180ms ease,
23
+ opacity 180ms ease;
24
+ }
25
+
26
+ button:hover:not(:disabled) {
27
+ transform: translateY(-1px);
28
+ }
29
+
30
+ button:active:not(:disabled) {
31
+ transform: translateY(0) scale(0.98);
32
+ }
33
+
34
+ button:disabled {
35
+ cursor: not-allowed;
36
+ }
37
+
38
+ button:focus-visible {
39
+ outline: 2px solid rgba(20, 184, 166, 0.5);
40
+ outline-offset: 2px;
41
+ }
42
+
43
+ ::-webkit-scrollbar {
44
+ width: 6px;
45
+ height: 6px;
46
+ }
47
+
48
+ ::-webkit-scrollbar-track {
49
+ @apply bg-dark-bg;
50
+ }
51
+
52
+ ::-webkit-scrollbar-thumb {
53
+ @apply bg-dark-surface rounded-full;
54
+ }
55
+
56
+ ::-webkit-scrollbar-thumb:hover {
57
+ @apply bg-dark-border;
58
+ }
59
+ }
60
+
61
+ @layer components {
62
+ .glass-card {
63
+ @apply bg-dark-card/80 backdrop-blur-xl border border-dark-border/50 rounded-2xl;
64
+ }
65
+
66
+ .control-button {
67
+ @apply inline-flex items-center justify-center gap-2 rounded-lg border border-dark-border/50 bg-dark-card text-gray-300 shadow-sm shadow-black/10;
68
+ }
69
+
70
+ .control-button:hover:not(:disabled) {
71
+ @apply border-brand-teal/30 bg-brand-teal/10 text-brand-teal;
72
+ box-shadow: 0 10px 25px rgba(13, 148, 136, 0.08);
73
+ }
74
+
75
+ .control-button:disabled {
76
+ @apply bg-dark-card/70 text-gray-600 border-dark-border/40 opacity-50;
77
+ }
78
+
79
+ .skeleton-block {
80
+ position: relative;
81
+ overflow: hidden;
82
+ border-radius: 0.75rem;
83
+ background: linear-gradient(90deg, rgba(51, 65, 85, 0.55) 0%, rgba(71, 85, 105, 0.75) 50%, rgba(51, 65, 85, 0.55) 100%);
84
+ background-size: 200% 100%;
85
+ animation: skeleton-wave 1.6s ease-in-out infinite;
86
+ }
87
+
88
+ .panel-skeleton {
89
+ @apply space-y-3;
90
+ }
91
+
92
+ .panel-skeleton-card {
93
+ @apply rounded-2xl border border-dark-border/30 bg-dark-bg/30 p-4;
94
+ }
95
+
96
+ @keyframes skeleton-wave {
97
+ 0% {
98
+ background-position: 200% 0;
99
+ }
100
+ 100% {
101
+ background-position: -200% 0;
102
+ }
103
+ }
104
+
105
+ .glow-teal {
106
+ box-shadow: 0 0 20px rgba(13, 148, 136, 0.15), 0 0 60px rgba(13, 148, 136, 0.05);
107
+ }
108
+
109
+ .glow-red {
110
+ box-shadow: 0 0 20px rgba(239, 68, 68, 0.2), 0 0 60px rgba(239, 68, 68, 0.1);
111
+ }
112
+
113
+ .tab-active {
114
+ @apply text-brand-teal border-b-2 border-brand-teal;
115
+ }
116
+
117
+ .tab-inactive {
118
+ @apply text-gray-400 hover:text-gray-200 border-b-2 border-transparent;
119
+ }
120
+ }
demo/frontend/src/styles/phone.css ADDED
@@ -0,0 +1,535 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* ===== Phone Frame Styling ===== */
2
+
3
+ .phone-frame {
4
+ perspective: 1000px;
5
+ filter: drop-shadow(0 25px 50px rgba(0, 0, 0, 0.5))
6
+ drop-shadow(0 12px 24px rgba(0, 0, 0, 0.3));
7
+ }
8
+
9
+ .phone-frame:hover {
10
+ transform: translateY(-2px);
11
+ transition: transform 0.3s ease;
12
+ }
13
+
14
+ .phone-screen-shake {
15
+ animation: phone-screen-shake 480ms cubic-bezier(0.36, 0.07, 0.19, 0.97);
16
+ }
17
+
18
+ @keyframes phone-screen-shake {
19
+ 0%,
20
+ 100% {
21
+ transform: translate3d(0, 0, 0);
22
+ }
23
+ 18% {
24
+ transform: translate3d(-1.5px, 0, 0);
25
+ }
26
+ 36% {
27
+ transform: translate3d(1.25px, 0, 0);
28
+ }
29
+ 54% {
30
+ transform: translate3d(-1px, 0.4px, 0);
31
+ }
32
+ 72% {
33
+ transform: translate3d(0.8px, -0.4px, 0);
34
+ }
35
+ }
36
+
37
+ /* ===== Pulse Animation for Critical Alerts ===== */
38
+
39
+ @keyframes pulse-border {
40
+ 0%,
41
+ 100% {
42
+ box-shadow: 0 0 0 0 rgba(239, 68, 68, 0.6),
43
+ inset 0 0 20px rgba(239, 68, 68, 0.1);
44
+ }
45
+ 50% {
46
+ box-shadow: 0 0 20px 5px rgba(239, 68, 68, 0.3),
47
+ inset 0 0 30px rgba(239, 68, 68, 0.15);
48
+ }
49
+ }
50
+
51
+ .pulse-border {
52
+ animation: pulse-border 1.5s ease-in-out infinite;
53
+ }
54
+
55
+ /* ===== Typewriter Effect for Transcript ===== */
56
+
57
+ @keyframes typewriter-cursor {
58
+ 0%,
59
+ 100% {
60
+ opacity: 1;
61
+ }
62
+ 50% {
63
+ opacity: 0;
64
+ }
65
+ }
66
+
67
+ .typewriter-cursor::after {
68
+ content: "|";
69
+ animation: typewriter-cursor 0.8s ease-in-out infinite;
70
+ color: #0d9488;
71
+ font-weight: bold;
72
+ margin-left: 1px;
73
+ }
74
+
75
+ .transcript-keyword {
76
+ font-weight: 700;
77
+ border-radius: 0.35rem;
78
+ padding: 0 0.12rem;
79
+ }
80
+
81
+ .transcript-keyword-urgent {
82
+ color: #fbbf24;
83
+ background: rgba(245, 158, 11, 0.12);
84
+ }
85
+
86
+ .transcript-keyword-payment {
87
+ color: #fb923c;
88
+ background: rgba(249, 115, 22, 0.12);
89
+ }
90
+
91
+ .transcript-keyword-credential {
92
+ color: #f87171;
93
+ background: rgba(239, 68, 68, 0.12);
94
+ }
95
+
96
+ .transcript-keyword-threat {
97
+ color: #ef4444;
98
+ background: rgba(127, 29, 29, 0.24);
99
+ }
100
+
101
+ .transcript-keyword-action {
102
+ color: #2dd4bf;
103
+ background: rgba(13, 148, 136, 0.12);
104
+ }
105
+
106
+ .transcript-score-badge-pulse {
107
+ animation: transcript-badge-pulse 1.25s ease-in-out infinite;
108
+ box-shadow: 0 0 0 rgba(249, 115, 22, 0);
109
+ }
110
+
111
+ @keyframes transcript-badge-pulse {
112
+ 0%,
113
+ 100% {
114
+ transform: scale(1);
115
+ box-shadow: 0 0 0 0 rgba(249, 115, 22, 0);
116
+ }
117
+ 50% {
118
+ transform: scale(1.06);
119
+ box-shadow: 0 0 18px 0 rgba(249, 115, 22, 0.18);
120
+ }
121
+ }
122
+
123
+ /* ===== Slide Down for Alert Overlay ===== */
124
+
125
+ .alert-slide-enter {
126
+ animation: alert-slide 0.4s cubic-bezier(0.32, 0.72, 0, 1) forwards;
127
+ }
128
+
129
+ .alert-overlay {
130
+ animation: alert-overlay-fade 420ms ease-out both;
131
+ }
132
+
133
+ .alert-overlay-critical {
134
+ animation: alert-overlay-fade 420ms ease-out both;
135
+ }
136
+
137
+ .alert-overlay-panel {
138
+ transform-origin: top center;
139
+ animation: alert-overlay-spring 620ms cubic-bezier(0.18, 0.9, 0.22, 1.16) both;
140
+ }
141
+
142
+ .alert-banner {
143
+ animation: alert-banner-spring 560ms cubic-bezier(0.22, 0.88, 0.28, 1.14) both;
144
+ }
145
+
146
+ .alert-banner-card {
147
+ transform-origin: top center;
148
+ }
149
+
150
+ @keyframes alert-slide {
151
+ 0% {
152
+ transform: translateY(-100%);
153
+ opacity: 0;
154
+ }
155
+ 60% {
156
+ opacity: 1;
157
+ }
158
+ 100% {
159
+ transform: translateY(0);
160
+ opacity: 1;
161
+ }
162
+ }
163
+
164
+ @keyframes alert-overlay-fade {
165
+ 0% {
166
+ opacity: 0;
167
+ }
168
+ 100% {
169
+ opacity: 1;
170
+ }
171
+ }
172
+
173
+ @keyframes alert-overlay-spring {
174
+ 0% {
175
+ transform: translateY(-30px) scale(0.97);
176
+ opacity: 0;
177
+ }
178
+ 55% {
179
+ transform: translateY(8px) scale(1.01);
180
+ opacity: 1;
181
+ }
182
+ 78% {
183
+ transform: translateY(-3px) scale(0.998);
184
+ }
185
+ 100% {
186
+ transform: translateY(0) scale(1);
187
+ opacity: 1;
188
+ }
189
+ }
190
+
191
+ @keyframes alert-banner-spring {
192
+ 0% {
193
+ transform: translateY(-22px) scale(0.96);
194
+ opacity: 0;
195
+ }
196
+ 60% {
197
+ transform: translateY(4px) scale(1.01);
198
+ opacity: 1;
199
+ }
200
+ 82% {
201
+ transform: translateY(-2px) scale(0.998);
202
+ }
203
+ 100% {
204
+ transform: translateY(0) scale(1);
205
+ opacity: 1;
206
+ }
207
+ }
208
+
209
+ /* ===== Smooth Gauge Transitions ===== */
210
+
211
+ .gauge-arc {
212
+ transition: stroke-dashoffset 0.8s cubic-bezier(0.22, 1, 0.36, 1),
213
+ stroke 0.45s ease,
214
+ filter 0.45s ease;
215
+ filter: drop-shadow(0 0 10px color-mix(in srgb, var(--gauge-color) 40%, transparent));
216
+ }
217
+
218
+ .gauge-needle {
219
+ transition: transform 0.6s cubic-bezier(0.4, 0, 0.2, 1);
220
+ transform-origin: center center;
221
+ }
222
+
223
+ .score-gauge-shell {
224
+ isolation: isolate;
225
+ }
226
+
227
+ .score-gauge-svg {
228
+ filter: drop-shadow(0 0 12px color-mix(in srgb, var(--gauge-color) 28%, transparent));
229
+ transition: filter 0.45s ease;
230
+ }
231
+
232
+ .score-gauge-glow {
233
+ background:
234
+ radial-gradient(circle at center, color-mix(in srgb, var(--gauge-color) 22%, transparent) 0%, transparent 68%);
235
+ filter: blur(8px);
236
+ opacity: 0.95;
237
+ transition: background 0.45s ease, opacity 0.45s ease, transform 0.7s ease;
238
+ }
239
+
240
+ .score-gauge-center {
241
+ transition: color 0.45s ease, transform 0.35s ease, filter 0.45s ease;
242
+ }
243
+
244
+ .score-gauge-threshold-pulse .score-gauge-glow {
245
+ animation: score-gauge-threshold-glow 700ms cubic-bezier(0.22, 1, 0.36, 1);
246
+ }
247
+
248
+ .score-gauge-threshold-pulse .score-gauge-center {
249
+ animation: score-gauge-center-pop 520ms cubic-bezier(0.22, 1, 0.36, 1);
250
+ }
251
+
252
+ .score-gauge-burst {
253
+ overflow: visible;
254
+ }
255
+
256
+ .score-gauge-particle {
257
+ width: 6px;
258
+ height: 6px;
259
+ margin-left: -3px;
260
+ margin-top: -3px;
261
+ border-radius: 9999px;
262
+ background:
263
+ radial-gradient(circle at 30% 30%, rgba(255, 255, 255, 0.95), var(--burst-color) 58%, transparent 72%);
264
+ box-shadow: 0 0 14px color-mix(in srgb, var(--burst-color) 60%, transparent);
265
+ opacity: 0;
266
+ transform: rotate(var(--particle-angle)) translateY(0) scale(0.4);
267
+ animation: score-gauge-particle-burst 860ms cubic-bezier(0.2, 0.9, 0.3, 1) forwards;
268
+ }
269
+
270
+ @keyframes score-gauge-threshold-glow {
271
+ 0% {
272
+ transform: scale(0.92);
273
+ opacity: 0.55;
274
+ }
275
+ 55% {
276
+ transform: scale(1.08);
277
+ opacity: 1;
278
+ }
279
+ 100% {
280
+ transform: scale(1);
281
+ opacity: 0.95;
282
+ }
283
+ }
284
+
285
+ @keyframes score-gauge-center-pop {
286
+ 0% {
287
+ transform: scale(0.96);
288
+ }
289
+ 55% {
290
+ transform: scale(1.05);
291
+ }
292
+ 100% {
293
+ transform: scale(1);
294
+ }
295
+ }
296
+
297
+ @keyframes score-gauge-particle-burst {
298
+ 0% {
299
+ opacity: 0;
300
+ transform: rotate(var(--particle-angle)) translateY(0) scale(0.4);
301
+ }
302
+ 20% {
303
+ opacity: 1;
304
+ }
305
+ 100% {
306
+ opacity: 0;
307
+ transform: rotate(var(--particle-angle)) translateY(calc(-1 * var(--particle-distance))) scale(0.95);
308
+ }
309
+ }
310
+
311
+ /* ===== Score Bar Animations ===== */
312
+
313
+ .score-bar {
314
+ transition: width 0.7s cubic-bezier(0.4, 0, 0.2, 1),
315
+ background-color 0.3s ease;
316
+ }
317
+
318
+ /* ===== Gradient Vector Scroll ===== */
319
+
320
+ .gradient-scroll {
321
+ mask-image: linear-gradient(
322
+ to bottom,
323
+ transparent,
324
+ black 5%,
325
+ black 95%,
326
+ transparent
327
+ );
328
+ -webkit-mask-image: linear-gradient(
329
+ to bottom,
330
+ transparent,
331
+ black 5%,
332
+ black 95%,
333
+ transparent
334
+ );
335
+ }
336
+
337
+ /* ===== Glass Effect Enhancement ===== */
338
+
339
+ .glass-effect {
340
+ background: rgba(30, 41, 59, 0.6);
341
+ backdrop-filter: blur(20px) saturate(150%);
342
+ -webkit-backdrop-filter: blur(20px) saturate(150%);
343
+ }
344
+
345
+ /* ===== Streaming Dots ===== */
346
+
347
+ @keyframes streaming-dot {
348
+ 0%,
349
+ 80%,
350
+ 100% {
351
+ transform: scale(0.6);
352
+ opacity: 0.4;
353
+ }
354
+ 40% {
355
+ transform: scale(1);
356
+ opacity: 1;
357
+ }
358
+ }
359
+
360
+ .streaming-dot:nth-child(1) {
361
+ animation: streaming-dot 1.4s ease-in-out 0s infinite;
362
+ }
363
+ .streaming-dot:nth-child(2) {
364
+ animation: streaming-dot 1.4s ease-in-out 0.2s infinite;
365
+ }
366
+ .streaming-dot:nth-child(3) {
367
+ animation: streaming-dot 1.4s ease-in-out 0.4s infinite;
368
+ }
369
+
370
+ /* ===== Tab Underline Animation ===== */
371
+
372
+ .tab-underline {
373
+ position: relative;
374
+ }
375
+
376
+ .tab-underline::after {
377
+ content: "";
378
+ position: absolute;
379
+ bottom: -2px;
380
+ left: 0;
381
+ right: 0;
382
+ height: 2px;
383
+ background: #0d9488;
384
+ transform: scaleX(0);
385
+ transition: transform 0.2s ease;
386
+ }
387
+
388
+ .tab-underline.active::after {
389
+ transform: scaleX(1);
390
+ }
391
+
392
+ /* ===== Feature Bar Hover ===== */
393
+
394
+ .feature-bar-container:hover .feature-bar {
395
+ filter: brightness(1.2);
396
+ }
397
+
398
+ .feature-bar-container {
399
+ animation: feature-bar-slide-in 520ms cubic-bezier(0.22, 1, 0.36, 1) both;
400
+ }
401
+
402
+ .feature-track {
403
+ background:
404
+ linear-gradient(90deg, var(--feature-track), rgba(15, 23, 42, 0.26));
405
+ transition: background 0.35s ease;
406
+ }
407
+
408
+ .feature-bar {
409
+ background:
410
+ linear-gradient(90deg, color-mix(in srgb, var(--feature-color) 80%, white 8%) 0%, var(--feature-color) 55%, color-mix(in srgb, var(--feature-color) 75%, black 6%) 100%);
411
+ box-shadow: 0 0 18px var(--feature-glow);
412
+ transform-origin: left center;
413
+ animation: feature-bar-fill 700ms cubic-bezier(0.22, 1, 0.36, 1) both;
414
+ transition: width 0.75s cubic-bezier(0.22, 1, 0.36, 1), background 0.4s ease, box-shadow 0.4s ease, filter 0.25s ease;
415
+ }
416
+
417
+ .feature-tooltip {
418
+ opacity: 0;
419
+ transform: translate(-50%, 8px) scale(0.98);
420
+ transition: opacity 0.2s ease, transform 0.2s ease;
421
+ }
422
+
423
+ .feature-tooltip::before {
424
+ content: "";
425
+ position: absolute;
426
+ top: -6px;
427
+ left: 50%;
428
+ width: 12px;
429
+ height: 12px;
430
+ border-left: 1px solid rgba(255, 255, 255, 0.1);
431
+ border-top: 1px solid rgba(255, 255, 255, 0.1);
432
+ background: rgba(2, 6, 23, 0.95);
433
+ transform: translateX(-50%) rotate(45deg);
434
+ }
435
+
436
+ .feature-tooltip-trigger:hover .feature-tooltip {
437
+ opacity: 1;
438
+ transform: translate(-50%, 0) scale(1);
439
+ }
440
+
441
+ @keyframes feature-bar-slide-in {
442
+ 0% {
443
+ opacity: 0;
444
+ transform: translateX(-18px);
445
+ }
446
+ 100% {
447
+ opacity: 1;
448
+ transform: translateX(0);
449
+ }
450
+ }
451
+
452
+ @keyframes feature-bar-fill {
453
+ 0% {
454
+ transform: scaleX(0);
455
+ opacity: 0.6;
456
+ }
457
+ 100% {
458
+ transform: scaleX(1);
459
+ opacity: 1;
460
+ }
461
+ }
462
+
463
+ /* ===== Ring Pulse for Avatar ===== */
464
+
465
+ @keyframes ring-pulse {
466
+ 0% {
467
+ transform: scale(1);
468
+ opacity: 0.4;
469
+ }
470
+ 100% {
471
+ transform: scale(1.5);
472
+ opacity: 0;
473
+ }
474
+ }
475
+
476
+ .ring-pulse {
477
+ animation: ring-pulse 2s ease-out infinite;
478
+ }
479
+
480
+ /* ===== Privacy Panel Arrow Animation ===== */
481
+
482
+ @keyframes flow-arrow {
483
+ 0% {
484
+ transform: translateX(-4px);
485
+ opacity: 0.3;
486
+ }
487
+ 50% {
488
+ opacity: 1;
489
+ }
490
+ 100% {
491
+ transform: translateX(4px);
492
+ opacity: 0.3;
493
+ }
494
+ }
495
+
496
+ .flow-arrow {
497
+ animation: flow-arrow 1.5s ease-in-out infinite;
498
+ }
499
+
500
+ /* ===== Smooth Number Transition ===== */
501
+
502
+ .number-transition {
503
+ transition: all 0.3s cubic-bezier(0.4, 0, 0.2, 1);
504
+ }
505
+
506
+ /* ===== Chart Tooltip Styling ===== */
507
+
508
+ .recharts-default-tooltip {
509
+ background-color: #1e293b !important;
510
+ border: 1px solid #334155 !important;
511
+ border-radius: 8px !important;
512
+ }
513
+
514
+ .recharts-tooltip-label {
515
+ color: #94a3b8 !important;
516
+ }
517
+
518
+ /* ===== Scrollbar for Transcript ===== */
519
+
520
+ .transcript-scroll::-webkit-scrollbar {
521
+ width: 4px;
522
+ }
523
+
524
+ .transcript-scroll::-webkit-scrollbar-track {
525
+ background: transparent;
526
+ }
527
+
528
+ .transcript-scroll::-webkit-scrollbar-thumb {
529
+ background: #334155;
530
+ border-radius: 2px;
531
+ }
532
+
533
+ .transcript-scroll::-webkit-scrollbar-thumb:hover {
534
+ background: #475569;
535
+ }
demo/frontend/tailwind.config.js ADDED
@@ -0,0 +1,70 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
/** @type {import('tailwindcss').Config} */
// Tailwind configuration for the demo frontend.
// Scans index.html and all source files for class names; extends the default
// theme with the SentinelEdge palette, fonts, and animation keyframes that
// component class names (e.g. `animate-fade-in`, `bg-dark-card`) rely on.
export default {
  content: [
    "./index.html",
    "./src/**/*.{js,ts,jsx,tsx}",
  ],
  theme: {
    extend: {
      // Design tokens: brand teal, dark-mode surfaces, and semantic
      // safe/warning/alert colors used by the fraud-score UI.
      colors: {
        brand: {
          teal: '#0D9488',
          'teal-light': '#14B8A6',
          'teal-dark': '#0F766E',
        },
        dark: {
          bg: '#0F172A',
          card: '#1E293B',
          surface: '#334155',
          border: '#475569',
        },
        safe: {
          DEFAULT: '#10B981',
          light: '#34D399',
        },
        warning: {
          DEFAULT: '#F59E0B',
          light: '#FBBF24',
        },
        alert: {
          DEFAULT: '#EF4444',
          light: '#F87171',
          dark: '#DC2626',
        },
      },
      fontFamily: {
        mono: ['JetBrains Mono', 'Fira Code', 'monospace'],
        sans: ['Inter', 'system-ui', 'sans-serif'],
      },
      // Named animations exposed as `animate-*` utilities; each references a
      // keyframes entry defined below.
      animation: {
        'pulse-alert': 'pulse-alert 1.5s ease-in-out infinite',
        'slide-down': 'slide-down 0.4s ease-out',
        'fade-in': 'fade-in 0.3s ease-out',
        'gauge-fill': 'gauge-fill 1s ease-out',
        'typewriter': 'typewriter 0.5s ease-out',
      },
      keyframes: {
        'pulse-alert': {
          '0%, 100%': { opacity: '1', boxShadow: '0 0 0 0 rgba(239, 68, 68, 0.4)' },
          '50%': { opacity: '0.9', boxShadow: '0 0 20px 10px rgba(239, 68, 68, 0.2)' },
        },
        'slide-down': {
          '0%': { transform: 'translateY(-100%)', opacity: '0' },
          '100%': { transform: 'translateY(0)', opacity: '1' },
        },
        'fade-in': {
          '0%': { opacity: '0', transform: 'translateY(8px)' },
          '100%': { opacity: '1', transform: 'translateY(0)' },
        },
        // 283 is the circumference of the gauge circle; animates from empty.
        'gauge-fill': {
          '0%': { strokeDashoffset: '283' },
        },
        'typewriter': {
          '0%': { opacity: '0', transform: 'translateX(-4px)' },
          '100%': { opacity: '1', transform: 'translateX(0)' },
        },
      },
    },
  },
  plugins: [],
}
demo/frontend/tsconfig.json ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "compilerOptions": {
3
+ "target": "ES2020",
4
+ "useDefineForClassFields": true,
5
+ "lib": ["ES2020", "DOM", "DOM.Iterable"],
6
+ "module": "ESNext",
7
+ "skipLibCheck": true,
8
+ "moduleResolution": "bundler",
9
+ "allowImportingTsExtensions": true,
10
+ "resolveJsonModule": true,
11
+ "isolatedModules": true,
12
+ "noEmit": true,
13
+ "jsx": "react-jsx",
14
+ "strict": true,
15
+ "noUnusedLocals": false,
16
+ "noUnusedParameters": false,
17
+ "noFallthroughCasesInSwitch": true,
18
+ "baseUrl": ".",
19
+ "paths": {
20
+ "@/*": ["src/*"]
21
+ }
22
+ },
23
+ "include": ["src"],
24
+ "references": [{ "path": "./tsconfig.node.json" }]
25
+ }
demo/frontend/tsconfig.node.json ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "compilerOptions": {
3
+ "composite": true,
4
+ "skipLibCheck": true,
5
+ "module": "ESNext",
6
+ "moduleResolution": "bundler",
7
+ "allowSyntheticDefaultImports": true
8
+ },
9
+ "include": ["vite.config.ts"]
10
+ }
demo/frontend/vite.config.ts ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import { defineConfig } from 'vite'
import react from '@vitejs/plugin-react'

// Vite dev-server config for the demo frontend.
// REST calls under /api and WebSocket traffic under /ws are proxied to the
// FastAPI backend on localhost:8000 (see demo/backend/main.py), so the
// frontend can use same-origin URLs in development.
export default defineConfig({
  plugins: [react()],
  server: {
    port: 5173,
    proxy: {
      '/api': 'http://localhost:8000',
      '/ws': {
        target: 'ws://localhost:8000',
        ws: true, // enable WebSocket proxying for this route
      }
    }
  }
})
deploy_server.py ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ SentinelEdge deployment server for Hugging Face Spaces.
3
+ Adds static file serving to the demo backend app.
4
+ """
5
+
6
+ import os
7
+ import sys
8
+ from pathlib import Path
9
+
10
+ sys.path.insert(0, str(Path(__file__).resolve().parent))
11
+
12
+ import uvicorn
13
+ from fastapi.staticfiles import StaticFiles
14
+ from fastapi.responses import FileResponse
15
+
16
+ # The demo backend app already has /api/* and /ws/* routes
17
+ from demo.backend.main import app
18
+ from hub.server import app as hub_app
19
+
20
+ # Mount hub server at /hub
21
+ app.mount("/hub", hub_app)
22
+
23
+ # Serve built React frontend as static files
24
+ static_dir = Path(__file__).parent / "static"
25
+
26
+ if static_dir.exists():
27
+ @app.get("/")
28
+ async def serve_index() -> FileResponse:
29
+ return FileResponse(str(static_dir / "index.html"))
30
+
31
+ # Catch-all for SPA client-side routing (must be after API/WS routes)
32
+ @app.get("/{path:path}")
33
+ async def serve_spa(path: str) -> FileResponse:
34
+ file_path = static_dir / path
35
+ if file_path.exists() and file_path.is_file():
36
+ return FileResponse(str(file_path))
37
+ return FileResponse(str(static_dir / "index.html"))
38
+
39
+
40
+ port = int(os.environ.get("PORT", 7860))
41
+
42
+ if __name__ == "__main__":
43
+ uvicorn.run(app, host="0.0.0.0", port=port)
federated/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ """Federated learning simulation."""
federated/dp_injector.py ADDED
@@ -0,0 +1,83 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Differential privacy noise injection for federated MLP updates."""
2
from typing import Optional

import numpy as np
3
+
4
+
5
class DPInjector:
    """Add calibrated Gaussian noise to MLP gradient deltas before transmission.

    Implements the Gaussian mechanism for (epsilon, delta)-differential privacy:

    1. Clip the gradient delta to a bounded L2 norm (``max_grad_norm``).
    2. Add Gaussian noise calibrated to the resulting sensitivity.
    3. Track cumulative privacy budget across rounds via advanced composition.
    """

    def __init__(self, epsilon: float = 0.3, delta: float = 1e-5,
                 max_grad_norm: float = 1.0):
        """Configure the mechanism.

        Args:
            epsilon: Per-round privacy loss (smaller = stronger privacy).
            delta: Probability of exceeding the epsilon guarantee.
            max_grad_norm: L2 clipping bound; fixes per-update sensitivity.
        """
        self.epsilon = epsilon
        self.delta = delta
        self.max_grad_norm = max_grad_norm
        # Incremented on every add_noise() call; used as the default round
        # count when reporting the cumulative budget.
        self.rounds_participated = 0

    def clip_gradient(self, gradient: np.ndarray) -> np.ndarray:
        """Clip the gradient delta to ``max_grad_norm`` (L2 norm).

        Returns the input array unchanged (same object) when already within
        the bound, otherwise a rescaled new array; never mutates in place.
        """
        norm = np.linalg.norm(gradient)
        if norm > self.max_grad_norm:
            gradient = gradient * (self.max_grad_norm / norm)
        return gradient

    def compute_sigma(self, n_local_samples: int) -> float:
        """Compute the Gaussian noise standard deviation.

        sensitivity = max_grad_norm / n_local_samples
        sigma = sensitivity * sqrt(2 * ln(1.25 / delta)) / epsilon

        ``n_local_samples`` is floored at 1 to avoid division by zero.
        """
        sensitivity = self.max_grad_norm / max(n_local_samples, 1)
        return sensitivity * np.sqrt(2.0 * np.log(1.25 / self.delta)) / self.epsilon

    def add_noise(self, gradient_delta: np.ndarray,
                  n_local_samples: int) -> tuple:
        """Apply DP to a gradient delta: clip, add noise, return.

        Args:
            gradient_delta: Flat 1D array (MLP weight delta).
            n_local_samples: Number of local training samples.

        Returns:
            Tuple ``(noised_delta, sigma_used, epsilon_spent_this_round)``.
        """
        # Step 1: bound the L2 norm so the mechanism's sensitivity is known.
        clipped = self.clip_gradient(gradient_delta)

        # Step 2: calibrate noise scale to sensitivity / epsilon / delta.
        sigma = self.compute_sigma(n_local_samples)

        # Step 3: Gaussian mechanism — i.i.d. noise per coordinate.
        noised_delta = clipped + np.random.normal(0.0, sigma, size=clipped.shape)

        # Each call counts as one federated round for budget accounting.
        self.rounds_participated += 1

        return noised_delta, sigma, self.epsilon

    def privacy_budget_spent(self, n_rounds: Optional[int] = None) -> float:
        """Cumulative privacy budget via the advanced composition theorem.

        Total epsilon after k rounds:
            eps_total = epsilon * sqrt(2 * k * ln(1/delta))
                        + k * epsilon * (exp(epsilon) - 1)

        Args:
            n_rounds: Number of rounds to compute the budget for.
                Defaults to ``self.rounds_participated``.
                (Annotation fixed: was ``int = None``.)

        Returns:
            Total epsilon spent; 0.0 when no rounds have run.
        """
        k = n_rounds if n_rounds is not None else self.rounds_participated
        if k == 0:
            return 0.0

        eps = self.epsilon
        d = self.delta

        term1 = eps * np.sqrt(2.0 * k * np.log(1.0 / d))
        term2 = k * eps * (np.exp(eps) - 1.0)
        return term1 + term2
federated/local_trainer.py ADDED
@@ -0,0 +1,211 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """On-device local training with real MLP backpropagation."""
2
+ import numpy as np
3
+ from dataclasses import dataclass, field
4
+
5
+
6
@dataclass
class LocalTrainingBuffer:
    """Encrypted local label store. Stores feature vectors + labels only."""
    # Parallel lists: features[i] is the vector paired with labels[i].
    features: list = field(default_factory=list)
    labels: list = field(default_factory=list)
    max_samples: int = 500

    def add(self, feature_vector: np.ndarray, label: int):
        """Append one labeled sample, evicting the oldest when at capacity."""
        if self.size() >= self.max_samples:
            # FIFO eviction: drop the oldest sample from both lists.
            del self.features[0]
            del self.labels[0]
        self.features.append(feature_vector.copy())
        self.labels.append(label)

    def size(self) -> int:
        """Number of samples currently buffered."""
        return len(self.labels)

    def to_arrays(self) -> tuple:
        """Return the buffer as ``(features_array, labels_array)`` numpy arrays.

        An empty buffer yields a (0, 0) feature array and an empty int array.
        """
        if not self.features:
            return np.array([]).reshape(0, 0), np.array([], dtype=int)
        return np.vstack(self.features), np.array(self.labels, dtype=int)
31
+
32
+
33
+ class LocalTrainer:
34
+ """On-device model fine-tuning via real mini-batch SGD on a 3-layer MLP.
35
+
36
+ The MLP architecture mirrors MLPClassifier:
37
+ input -> hidden1 (128, ReLU) -> hidden2 (64, ReLU) -> output (1, Sigmoid)
38
+
39
+ Fine-tuning performs real numpy backpropagation through all three layers
40
+ with BCE loss.
41
+ """
42
+
43
+ # MLP architecture constants (must match MLPClassifier)
44
+ HIDDEN1_DIM = 128
45
+ HIDDEN2_DIM = 64
46
+ OUTPUT_DIM = 1
47
+
48
    def __init__(self, device_id: str, input_dim: int = 402):
        """Create a trainer for one simulated device.

        Args:
            device_id: Identifier of this device in the federation.
            input_dim: Feature vector length expected by the MLP input layer
                (402 default — presumably matches the deployed feature
                extractor; confirm against MLPClassifier).
        """
        self.device_id = device_id
        self.input_dim = input_dim
        # Local labeled-sample store, fed via ingest_call_data().
        self.buffer = LocalTrainingBuffer()
        # Model version counter; only initialized here in the visible code —
        # presumably bumped elsewhere when a new global model is applied.
        self.current_model_version = 0
53
+
54
    def ingest_call_data(self, features: np.ndarray, label: int):
        """Store a labeled sample from user feedback.

        Args:
            features: Feature vector extracted from one call.
            label: Ground-truth label (assumed 0/1 for BCE training —
                confirm with callers).
        """
        self.buffer.add(features, label)
57
+
58
    def _unpack_weights(self, flat_weights: np.ndarray) -> dict:
        """Unpack a flat weight vector into the six MLP parameter arrays.

        The layout is W1, b1, W2, b2, W3, b3 in that order, matching
        _pack_weights exactly. Note the asymmetry: weight matrices are
        produced via ``reshape`` (no copy for a contiguous input), while
        biases are explicit ``.copy()``s.

        Args:
            flat_weights: 1D array containing all MLP parameters concatenated.

        Returns:
            Dict with keys 'W1', 'b1', 'W2', 'b2', 'W3', 'b3'.
        """
        idx = 0
        params = {}

        # Layer 1: input -> hidden1 (input_dim x 128 weights + 128 biases)
        size = self.input_dim * self.HIDDEN1_DIM
        params['W1'] = flat_weights[idx:idx + size].reshape(self.input_dim, self.HIDDEN1_DIM)
        idx += size

        size = self.HIDDEN1_DIM
        params['b1'] = flat_weights[idx:idx + size].copy()
        idx += size

        # Layer 2: hidden1 -> hidden2 (128 x 64 weights + 64 biases)
        size = self.HIDDEN1_DIM * self.HIDDEN2_DIM
        params['W2'] = flat_weights[idx:idx + size].reshape(self.HIDDEN1_DIM, self.HIDDEN2_DIM)
        idx += size

        size = self.HIDDEN2_DIM
        params['b2'] = flat_weights[idx:idx + size].copy()
        idx += size

        # Layer 3: hidden2 -> output (64 x 1 weights + 1 bias)
        size = self.HIDDEN2_DIM * self.OUTPUT_DIM
        params['W3'] = flat_weights[idx:idx + size].reshape(self.HIDDEN2_DIM, self.OUTPUT_DIM)
        idx += size

        size = self.OUTPUT_DIM
        params['b3'] = flat_weights[idx:idx + size].copy()
        idx += size

        return params
88
+
89
+ def _pack_weights(self, params: dict) -> np.ndarray:
90
+ """Pack the six MLP parameter arrays into a flat weight vector."""
91
+ return np.concatenate([
92
+ params['W1'].ravel(), params['b1'].ravel(),
93
+ params['W2'].ravel(), params['b2'].ravel(),
94
+ params['W3'].ravel(), params['b3'].ravel(),
95
+ ])
96
+
97
+ def fine_tune(self, global_model_weights: np.ndarray,
98
+ lr: float = 0.01, n_epochs: int = 3,
99
+ batch_size: int = 32) -> np.ndarray:
100
+ """Fine-tune on local buffer using real mini-batch SGD with numpy backprop.
101
+
102
+ Steps:
103
+ 1. Load global MLP weights via unpack
104
+ 2. Normalize input features for numerical stability
105
+ 3. Run forward pass on each mini-batch
106
+ 4. Compute BCE loss gradient (backprop through 3-layer MLP)
107
+ 5. Update weights with SGD
108
+ 6. Return gradient delta = local_weights - global_weights
109
+
110
+ Args:
111
+ global_model_weights: Flat 1D array of all MLP parameters.
112
+ lr: Learning rate for SGD.
113
+ n_epochs: Number of training epochs over local data.
114
+ batch_size: Mini-batch size.
115
+
116
+ Returns:
117
+ Gradient delta (flat 1D array): local_weights - global_weights.
118
+ """
119
+ X, y = self.buffer.to_arrays()
120
+ if X.shape[0] == 0:
121
+ return np.zeros_like(global_model_weights)
122
+
123
+ n_samples = X.shape[0]
124
+
125
+ # Pad or truncate features to match input_dim
126
+ if X.shape[1] < self.input_dim:
127
+ pad = np.zeros((n_samples, self.input_dim - X.shape[1]))
128
+ X = np.hstack([X, pad])
129
+ elif X.shape[1] > self.input_dim:
130
+ X = X[:, :self.input_dim]
131
+
132
+ # Cast to float64 for numerical stability
133
+ X = X.astype(np.float64)
134
+
135
+ # Unpack global weights into local parameters
136
+ params = self._unpack_weights(global_model_weights.astype(np.float64))
137
+
138
+ # Mini-batch SGD with real backpropagation
139
+ for epoch in range(n_epochs):
140
+ indices = np.arange(n_samples)
141
+ np.random.shuffle(indices)
142
+
143
+ for start in range(0, n_samples, batch_size):
144
+ end = min(start + batch_size, n_samples)
145
+ batch_idx = indices[start:end]
146
+ X_batch = X[batch_idx].astype(np.float64) # (B, input_dim)
147
+ y_batch = y[batch_idx].reshape(-1, 1).astype(np.float64) # (B, 1)
148
+ B = X_batch.shape[0]
149
+
150
+ # ---- Forward pass ----
151
+ # Layer 1: input -> hidden1
152
+ z1 = X_batch @ params['W1'] + params['b1'] # (B, 128)
153
+ z1 = np.nan_to_num(z1, nan=0.0, posinf=50.0, neginf=-50.0)
154
+ h1 = np.maximum(0, z1) # ReLU
155
+
156
+ # Layer 2: hidden1 -> hidden2
157
+ z2 = h1 @ params['W2'] + params['b2'] # (B, 64)
158
+ z2 = np.nan_to_num(z2, nan=0.0, posinf=50.0, neginf=-50.0)
159
+ h2 = np.maximum(0, z2) # ReLU
160
+
161
+ # Layer 3: hidden2 -> output
162
+ logit = h2 @ params['W3'] + params['b3'] # (B, 1)
163
+ logit = np.nan_to_num(logit, nan=0.0, posinf=50.0, neginf=-50.0)
164
+ logit = np.clip(logit, -50, 50)
165
+ pred = 1.0 / (1.0 + np.exp(-logit)) # sigmoid
166
+
167
+ # ---- Backward pass (BCE loss) ----
168
+ # d(BCE)/d(logit) = pred - y
169
+ dlogit = (pred - y_batch) / B # (B, 1), averaged over batch
170
+
171
+ # Layer 3 gradients
172
+ dW3 = h2.T @ dlogit # (64, 1)
173
+ db3 = np.sum(dlogit, axis=0) # (1,)
174
+
175
+ # Backprop into h2
176
+ dh2 = dlogit @ params['W3'].T # (B, 64)
177
+ dh2 = dh2 * (z2 > 0) # ReLU backward
178
+
179
+ # Layer 2 gradients
180
+ dW2 = h1.T @ dh2 # (128, 64)
181
+ db2 = np.sum(dh2, axis=0) # (64,)
182
+
183
+ # Backprop into h1
184
+ dh1 = dh2 @ params['W2'].T # (B, 128)
185
+ dh1 = dh1 * (z1 > 0) # ReLU backward
186
+
187
+ # Layer 1 gradients
188
+ dW1 = X_batch.T @ dh1 # (input_dim, 128)
189
+ db1 = np.sum(dh1, axis=0) # (128,)
190
+
191
+ # ---- NaN protection and gradient clipping ----
192
+ max_grad = 10.0
193
+ for name, grad in [('W1', dW1), ('b1', db1),
194
+ ('W2', dW2), ('b2', db2),
195
+ ('W3', dW3), ('b3', db3)]:
196
+ np.nan_to_num(grad, copy=False, nan=0.0,
197
+ posinf=max_grad, neginf=-max_grad)
198
+ np.clip(grad, -max_grad, max_grad, out=grad)
199
+
200
+ # ---- SGD update ----
201
+ params['W1'] -= lr * dW1
202
+ params['b1'] -= lr * db1
203
+ params['W2'] -= lr * dW2
204
+ params['b2'] -= lr * db2
205
+ params['W3'] -= lr * dW3
206
+ params['b3'] -= lr * db3
207
+
208
+ # Return the delta: local_weights - global_weights
209
+ local_flat = self._pack_weights(params)
210
+ delta = local_flat - global_model_weights
211
+ return delta
federated/simulate.py ADDED
@@ -0,0 +1,507 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Simulate federated learning with N devices over M rounds using a real MLP."""
2
+ import numpy as np
3
+ import json
4
+ import os
5
+ import sys
6
+ import argparse
7
+ from typing import Optional
8
+
9
+ sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
10
+
11
+ from federated.local_trainer import LocalTrainer, LocalTrainingBuffer
12
+ from federated.dp_injector import DPInjector
13
+ from sentinel_edge.classifier.mlp_classifier import MLPClassifier
14
+
15
+
16
+ INPUT_DIM = 402
17
+
18
+
19
class FederatedSimulation:
    """Simulate federated learning with a real 3-layer MLP classifier.

    Each round:
    1. Each device copies global weights, fine-tunes with real numpy backprop
    2. Computes gradient delta (local_weights - global_weights)
    3. Applies DP noise (clip + Gaussian)
    4. Hub aggregates via FedAvg weighted by n_samples
    5. Global model updated, evaluated on hold-out test set
    """

    def __init__(self, n_devices: int = 5, n_rounds: int = 10,
                 epsilon: float = 0.3, use_dp: bool = True):
        """Configure the simulation; no data is generated until initialize().

        Args:
            n_devices: Number of simulated client devices.
            n_rounds: Number of federated rounds to run.
            epsilon: Per-round DP budget handed to DPInjector.
            use_dp: Whether gradient deltas are noised before aggregation.
        """
        self.n_devices = n_devices
        self.n_rounds = n_rounds
        self.use_dp = use_dp
        self.devices: list = []
        # Populated by initialize_global_model(); None until then.
        self.global_model: Optional[MLPClassifier] = None
        self.round_results: list = []
        self.dp_injector = DPInjector(epsilon=epsilon)

        # Hold-out test set (populated by initialize())
        self.test_features: Optional[np.ndarray] = None
        self.test_labels: Optional[np.ndarray] = None

    # ------------------------------------------------------------------
    # Device & data initialisation
    # ------------------------------------------------------------------

    def initialize(self):
        """Create N simulated devices with non-IID data distributions.

        Device data profiles:
        Device 0: Heavy on IRS scams (60% scam rate)
        Device 1: Heavy on tech support scams (55% scam rate)
        Device 2: Mixed scam types (50% scam rate)
        Device 3: Mostly legitimate calls (15% scam rate)
        Device 4: Heavy on bank fraud (55% scam rate)
        Device 5+: Random profile

        All data is globally normalized once (z-score) so that all
        devices and the test set share the same feature scale.
        """
        np.random.seed(42)

        # Generate all device data first to compute global normalization
        all_X = []
        all_y = []
        # NOTE(review): device_splits is filled but never read afterwards,
        # and the offsets count arrays rather than samples — looks like
        # leftover bookkeeping; candidate for removal.
        device_splits = []
        for i in range(self.n_devices):
            X, y = self._generate_device_data(i, n_samples=100)
            device_splits.append((len(all_X), len(all_X) + len(X)))
            all_X.append(X)
            all_y.append(y)

        # Balanced test set
        rng = np.random.RandomState(999)
        X_test, y_test = self._generate_test_set(rng, n_samples=300)
        # The test set is deliberately included in the normalization
        # statistics so train and test share one feature scale.
        all_X.append(X_test)

        # Global z-score normalization
        all_data = np.vstack(all_X)
        self._global_mean = all_data.mean(axis=0)
        self._global_std = all_data.std(axis=0) + 1e-8  # avoid divide-by-zero

        # Create devices with normalized data
        self.devices = []
        for i in range(self.n_devices):
            device = LocalTrainer(device_id=f"device_{i}", input_dim=INPUT_DIM)
            X = all_X[i]
            y = all_y[i]
            X_norm = (X - self._global_mean) / self._global_std
            for j in range(X_norm.shape[0]):
                device.ingest_call_data(X_norm[j], int(y[j]))
            self.devices.append(device)

        # Normalized test set
        self.test_features = (X_test - self._global_mean) / self._global_std
        self.test_labels = y_test

    def _generate_device_data(self, device_idx: int,
                              n_samples: int = 100) -> tuple:
        """Generate synthetic 402-dim feature vectors for a device.

        Scam vectors: positive bias in the first half of dimensions.
        Legit vectors: negative bias in the first half.
        Each device gets different class distributions (non-IID).

        Returns:
            (X, y): features of shape (n_samples, INPUT_DIM) and int labels.
        """
        # Per-device seed keeps each device's data reproducible.
        rng = np.random.RandomState(42 + device_idx * 1000)

        device_profiles = {
            0: {"scam_rate": 0.60, "irs": 0.70, "tech": 0.10, "bank": 0.10, "generic": 0.10},
            1: {"scam_rate": 0.55, "irs": 0.10, "tech": 0.65, "bank": 0.10, "generic": 0.15},
            2: {"scam_rate": 0.50, "irs": 0.25, "tech": 0.25, "bank": 0.25, "generic": 0.25},
            3: {"scam_rate": 0.15, "irs": 0.25, "tech": 0.25, "bank": 0.25, "generic": 0.25},
            4: {"scam_rate": 0.55, "irs": 0.05, "tech": 0.10, "bank": 0.70, "generic": 0.15},
        }
        # Devices beyond the five named profiles get a randomized mix.
        profile = device_profiles.get(device_idx, {
            "scam_rate": rng.uniform(0.3, 0.6),
            "irs": 0.25, "tech": 0.25, "bank": 0.25, "generic": 0.25,
        })

        X = np.zeros((n_samples, INPUT_DIM))
        y = np.zeros(n_samples, dtype=int)

        for i in range(n_samples):
            is_scam = rng.random() < profile["scam_rate"]
            y[i] = 1 if is_scam else 0

            if is_scam:
                scam_type = rng.choice(
                    ["irs", "tech", "bank", "generic"],
                    p=[profile["irs"], profile["tech"],
                       profile["bank"], profile["generic"]],
                )
                X[i] = self._make_scam_vector(rng, scam_type)
            else:
                X[i] = self._make_legit_vector(rng)

        return X, y

    # ------------------------------------------------------------------
    # Synthetic feature vector generators
    # ------------------------------------------------------------------

    def _make_scam_vector(self, rng: np.random.RandomState,
                          scam_type: str) -> np.ndarray:
        """Create a 402-dim feature vector for a scam call.

        The discriminative signal is sparse: only a small subset of
        features carry class information, embedded in high-dimensional
        noise. This makes the classification problem realistically
        difficult for federated learning with DP.
        """
        n = INPUT_DIM
        v = rng.normal(0.0, 0.5, size=n)  # lower background noise

        # Strong discriminative signal in the first 30 features
        signal_end = 30
        v[:signal_end] += rng.normal(2.0, 0.5, size=signal_end)

        # Scam-type-specific sub-patterns: each type boosts its own
        # 10-feature block starting at index 30.
        type_start = 30
        type_block = 10
        offsets = {"irs": 0, "tech": 1, "bank": 2, "generic": 3}
        idx = offsets.get(scam_type, 3)
        start = type_start + idx * type_block
        v[start:start + type_block] += rng.normal(1.5, 0.4, size=type_block)

        return v

    def _make_legit_vector(self, rng: np.random.RandomState) -> np.ndarray:
        """Create a 402-dim feature vector for a legitimate call.

        Negative bias in the same sparse feature block that scam
        vectors use, so the MLP must learn to separate in that subspace.
        """
        n = INPUT_DIM
        v = rng.normal(0.0, 0.5, size=n)  # lower background noise

        # Opposite signal in the discriminative block
        signal_end = 30
        v[:signal_end] += rng.normal(-2.0, 0.5, size=signal_end)

        return v

    def _generate_test_set(self, rng: np.random.RandomState,
                           n_samples: int = 300) -> tuple:
        """Generate a balanced test set (50/50 scam/legit), shuffled."""
        n_half = n_samples // 2
        X = np.zeros((n_samples, INPUT_DIM))
        y = np.zeros(n_samples, dtype=int)

        # First half: scam samples drawn uniformly over the four types.
        scam_types = ["irs", "tech", "bank", "generic"]
        for i in range(n_half):
            stype = rng.choice(scam_types)
            X[i] = self._make_scam_vector(rng, stype)
            y[i] = 1

        # Second half: legitimate samples.
        for i in range(n_half, n_samples):
            X[i] = self._make_legit_vector(rng)
            y[i] = 0

        perm = rng.permutation(n_samples)
        return X[perm], y[perm]

    # ------------------------------------------------------------------
    # Global model
    # ------------------------------------------------------------------

    def initialize_global_model(self):
        """Initialize global MLPClassifier with random weights."""
        self.global_model = MLPClassifier(input_dim=INPUT_DIM)

    # ------------------------------------------------------------------
    # Federated round
    # ------------------------------------------------------------------

    def run_round(self, round_num: int) -> dict:
        """Execute one federated round.

        1. Each device fine-tunes on its local data (real backprop)
        2. Compute gradient delta
        3. Add DP noise (if enabled)
        4. Hub: FedAvg weighted by n_samples
        5. Update global model
        6. Evaluate on test set using real MLP forward pass

        Returns:
            Metrics dict: accuracy/precision/recall/f1 plus round number,
            participating device count, cumulative epsilon, and avg sigma.
        """
        global_weights = self.global_model.get_weights()
        updates = []  # (delta, n_samples) per participating device
        device_sigmas = []

        for device in self.devices:
            n_local = device.buffer.size()
            if n_local == 0:
                # Devices with no data sit the round out.
                continue

            # Fine-tune locally with aggressive local training --
            # high lr (0.5) and 20 epochs needed to produce a gradient
            # delta large enough to survive DP noise and FedAvg averaging
            delta = device.fine_tune(global_weights, lr=0.5, n_epochs=20)

            if self.use_dp:
                # DP noise injection
                noised_delta, sigma, eps_round = self.dp_injector.add_noise(
                    delta, n_local
                )
                device_sigmas.append(sigma)
                updates.append((noised_delta, n_local))
            else:
                updates.append((delta, n_local))

            device.current_model_version = round_num + 1

        if len(updates) == 0:
            # No participants: report metrics for the unchanged model.
            metrics = self._evaluate()
            metrics.update({
                "round": round_num,
                "n_devices": 0,
                "epsilon_spent": self.dp_injector.privacy_budget_spent(
                    round_num + 1
                ) if self.use_dp else 0.0,
                "avg_sigma": 0.0,
            })
            return metrics

        # FedAvg aggregation
        aggregated_delta = self._fedavg_aggregate(updates)

        # Apply aggregated update to global model (server lr = 1.0, no inflation)
        new_weights = global_weights + aggregated_delta
        self.global_model.set_weights(new_weights)

        # Evaluate
        metrics = self._evaluate()
        metrics.update({
            "round": round_num,
            "n_devices": len(updates),
            "epsilon_spent": self.dp_injector.privacy_budget_spent(
                round_num + 1
            ) if self.use_dp else 0.0,
            "avg_sigma": float(np.mean(device_sigmas)) if device_sigmas else 0.0,
        })

        # Inject fresh data each round to simulate ongoing call activity
        self._add_round_data(round_num)

        return metrics

    def _add_round_data(self, round_num: int):
        """Add new training samples each round to simulate ongoing calls."""
        # Sample volume grows with each round.
        extra = 30 + round_num * 10
        profiles = {0: 0.60, 1: 0.55, 2: 0.50, 3: 0.15, 4: 0.55}
        scam_types = ["irs", "tech", "bank", "generic"]

        for i, device in enumerate(self.devices):
            # Per-device, per-round seed keeps injected data reproducible.
            rng = np.random.RandomState(42 + i * 1000 + (round_num + 1) * 500)
            scam_rate = profiles.get(i, 0.4)

            for j in range(extra):
                is_scam = rng.random() < scam_rate
                if is_scam:
                    stype = rng.choice(scam_types)
                    vec = self._make_scam_vector(rng, stype)
                    label = 1
                else:
                    vec = self._make_legit_vector(rng)
                    label = 0
                # Apply global normalization
                vec = (vec - self._global_mean) / self._global_std
                device.ingest_call_data(vec, label)

    # ------------------------------------------------------------------
    # FedAvg aggregation
    # ------------------------------------------------------------------

    def _fedavg_aggregate(self, updates: list) -> np.ndarray:
        """FedAvg: weighted mean of gradient deltas.

        G_global = sum(n_i * G_i) / sum(n_i)

        Args:
            updates: Non-empty list of (delta, n_samples) tuples.
        """
        total_samples = sum(n for _, n in updates)
        if total_samples == 0:
            return np.zeros_like(updates[0][0])

        weighted_sum = np.zeros_like(updates[0][0])
        for delta, n_i in updates:
            weighted_sum += n_i * delta
        return weighted_sum / total_samples

    # ------------------------------------------------------------------
    # Evaluation
    # ------------------------------------------------------------------

    def _evaluate(self) -> dict:
        """Evaluate global MLP on test set.

        Uses the real MLP forward pass (not a linear classifier).
        Returns accuracy, precision, recall, F1.
        """
        X = self.test_features  # already globally normalized
        y = self.test_labels

        # Forward pass through the real MLP
        probs = self.global_model.forward(X)
        if isinstance(probs, float):
            # Guard: forward() may return a scalar for a single sample.
            probs = np.array([probs])
        preds = (probs >= 0.5).astype(int)

        # Confusion-matrix counts.
        tp = int(np.sum((preds == 1) & (y == 1)))
        tn = int(np.sum((preds == 0) & (y == 0)))
        fp = int(np.sum((preds == 1) & (y == 0)))
        fn = int(np.sum((preds == 0) & (y == 1)))

        # max(..., 1) guards against division by zero on degenerate splits.
        accuracy = (tp + tn) / max(tp + tn + fp + fn, 1)
        precision = tp / max(tp + fp, 1)
        recall = tp / max(tp + fn, 1)
        f1 = 2 * precision * recall / max(precision + recall, 1e-8)

        return {
            "accuracy": float(accuracy),
            "precision": float(precision),
            "recall": float(recall),
            "f1": float(f1),
        }

    # ------------------------------------------------------------------
    # Main run loop
    # ------------------------------------------------------------------

    def run(self) -> list:
        """Run full simulation: initialize + all rounds.

        Returns:
            List of per-round metrics dicts (also kept in round_results).
        """
        self.initialize()
        self.initialize_global_model()

        dp_label = f"epsilon={self.dp_injector.epsilon}" if self.use_dp else "OFF"
        print(f"\n{'='*60}")
        print(f"SentinelEdge Federated Learning Simulation (MLP)")
        print(f"Devices: {self.n_devices} | Rounds: {self.n_rounds}")
        print(f"Differential Privacy: {dp_label}")
        print(f"MLP: {INPUT_DIM} -> 128 -> 64 -> 1")
        print(f"{'='*60}\n")

        for r in range(self.n_rounds):
            result = self.run_round(r)
            self.round_results.append(result)

            print(f"Round {r+1}/{self.n_rounds}:")
            print(f"  Accuracy:  {result['accuracy']:.4f}")
            print(f"  Precision: {result['precision']:.4f}")
            print(f"  Recall:    {result['recall']:.4f}")
            print(f"  F1 Score:  {result['f1']:.4f}")
            print(f"  Devices:   {result['n_devices']}")
            if self.use_dp:
                print(f"  Epsilon:   {result['epsilon_spent']:.4f}")
                print(f"  Avg sigma: {result['avg_sigma']:.6f}")
            print()

        return self.round_results
398
+
399
+
400
def run_dp_comparison(n_devices: int = 5, n_rounds: int = 10) -> dict:
    """Run the simulation twice: with DP and without DP.

    Returns a dict with keys 'with_dp' and 'without_dp', each containing
    the list of round results. Used by visualization.py for comparison plots.
    """
    print("=" * 60)
    print(" RUNNING COMPARISON: WITH DP vs WITHOUT DP")
    print("=" * 60)

    comparison = {}
    # Same seed before each run so the two configurations see identical data.
    for result_key, dp_enabled in (("with_dp", True), ("without_dp", False)):
        np.random.seed(42)
        simulation = FederatedSimulation(
            n_devices=n_devices, n_rounds=n_rounds,
            epsilon=0.3, use_dp=dp_enabled,
        )
        comparison[result_key] = simulation.run()

    return comparison
427
+
428
+
429
def main():
    """CLI entry point: run the simulation (optionally DP vs no-DP),
    then save JSON results and best-effort plots next to this script."""
    parser = argparse.ArgumentParser(
        description="Run federated learning simulation with real MLP"
    )
    parser.add_argument("--devices", type=int, default=5,
                        help="Number of simulated devices")
    parser.add_argument("--rounds", type=int, default=10,
                        help="Number of federated rounds")
    parser.add_argument("--compare", action="store_true",
                        help="Run DP vs no-DP comparison")
    args = parser.parse_args()

    output_dir = os.path.dirname(os.path.abspath(__file__))
    results_path = os.path.join(output_dir, "simulation_results.json")

    if args.compare:
        comparison = run_dp_comparison(
            n_devices=args.devices, n_rounds=args.rounds
        )
        payload = {
            "with_dp": _make_serializable(comparison["with_dp"]),
            "without_dp": _make_serializable(comparison["without_dp"]),
        }
        with open(results_path, "w") as f:
            json.dump(payload, f, indent=2)
        print(f"\nSaved comparison results to {results_path}")

        # Plotting is best-effort; matplotlib is imported lazily inside the
        # plot functions, so ImportError can surface at call time too.
        try:
            from federated.visualization import (
                plot_accuracy_over_rounds, plot_dp_comparison,
            )
            plot_accuracy_over_rounds(
                comparison["with_dp"],
                output_path=os.path.join(output_dir, "federated_results.png"),
            )
            plot_dp_comparison(
                comparison,
                output_path=os.path.join(output_dir, "dp_comparison.png"),
            )
        except ImportError:
            print("(Skipping plots: matplotlib not available)")
        return

    # Single run (DP enabled with defaults).
    np.random.seed(42)
    simulation = FederatedSimulation(
        n_devices=args.devices, n_rounds=args.rounds
    )
    results = simulation.run()

    with open(results_path, "w") as f:
        json.dump(_make_serializable(results), f, indent=2)
    print(f"\nSaved results to {results_path}")

    # Best-effort plot generation (see note above).
    try:
        from federated.visualization import plot_accuracy_over_rounds
        plot_accuracy_over_rounds(
            results,
            output_path=os.path.join(output_dir, "federated_results.png"),
        )
    except ImportError:
        print("(Skipping plot: matplotlib not available)")
494
+
495
+ def _make_serializable(results: list) -> list:
496
+ """Convert numpy types to JSON-serializable Python types."""
497
+ out = []
498
+ for r in results:
499
+ out.append({
500
+ k: float(v) if isinstance(v, (np.floating, float)) else v
501
+ for k, v in r.items()
502
+ })
503
+ return out
504
+
505
+
506
+ if __name__ == "__main__":
507
+ main()
federated/simulation_results.json ADDED
@@ -0,0 +1,206 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "with_dp": [
3
+ {
4
+ "accuracy": 0.72,
5
+ "precision": 0.83,
6
+ "recall": 0.5533333333333333,
7
+ "f1": 0.664,
8
+ "round": 0,
9
+ "n_devices": 5,
10
+ "epsilon_spent": 1.5445154159292251,
11
+ "avg_sigma": 0.16149350875351298
12
+ },
13
+ {
14
+ "accuracy": 0.9633333333333334,
15
+ "precision": 1.0,
16
+ "recall": 0.9266666666666666,
17
+ "f1": 0.9619377162629758,
18
+ "round": 1,
19
+ "n_devices": 5,
20
+ "epsilon_spent": 2.245757411870135,
21
+ "avg_sigma": 0.12422577596424074
22
+ },
23
+ {
24
+ "accuracy": 1.0,
25
+ "precision": 1.0,
26
+ "recall": 1.0,
27
+ "f1": 1.0,
28
+ "round": 2,
29
+ "n_devices": 5,
30
+ "epsilon_spent": 2.8082601312220676,
31
+ "avg_sigma": 0.09499618161971352
32
+ },
33
+ {
34
+ "accuracy": 1.0,
35
+ "precision": 1.0,
36
+ "recall": 1.0,
37
+ "f1": 1.0,
38
+ "round": 3,
39
+ "n_devices": 5,
40
+ "epsilon_spent": 3.298946116404052,
41
+ "avg_sigma": 0.0734061403425059
42
+ },
43
+ {
44
+ "accuracy": 1.0,
45
+ "precision": 1.0,
46
+ "recall": 1.0,
47
+ "f1": 1.0,
48
+ "round": 4,
49
+ "n_devices": 5,
50
+ "epsilon_spent": 3.7437372507980253,
51
+ "avg_sigma": 0.05767625312625464
52
+ },
53
+ {
54
+ "accuracy": 1.0,
55
+ "precision": 1.0,
56
+ "recall": 1.0,
57
+ "f1": 1.0,
58
+ "round": 5,
59
+ "n_devices": 5,
60
+ "epsilon_spent": 4.155927854352004,
61
+ "avg_sigma": 0.046141002501003704
62
+ },
63
+ {
64
+ "accuracy": 1.0,
65
+ "precision": 1.0,
66
+ "recall": 1.0,
67
+ "f1": 1.0,
68
+ "round": 6,
69
+ "n_devices": 5,
70
+ "epsilon_spent": 4.543415362914315,
71
+ "avg_sigma": 0.03755662994267744
72
+ },
73
+ {
74
+ "accuracy": 1.0,
75
+ "precision": 1.0,
76
+ "recall": 1.0,
77
+ "f1": 1.0,
78
+ "round": 7,
79
+ "n_devices": 5,
80
+ "epsilon_spent": 4.911345392831474,
81
+ "avg_sigma": 0.032298701750702596
82
+ },
83
+ {
84
+ "accuracy": 1.0,
85
+ "precision": 1.0,
86
+ "recall": 1.0,
87
+ "f1": 1.0,
88
+ "round": 8,
89
+ "n_devices": 5,
90
+ "epsilon_spent": 5.263292101424481,
91
+ "avg_sigma": 0.032298701750702596
92
+ },
93
+ {
94
+ "accuracy": 1.0,
95
+ "precision": 1.0,
96
+ "recall": 1.0,
97
+ "f1": 1.0,
98
+ "round": 9,
99
+ "n_devices": 5,
100
+ "epsilon_spent": 5.601857810883448,
101
+ "avg_sigma": 0.032298701750702596
102
+ }
103
+ ],
104
+ "without_dp": [
105
+ {
106
+ "accuracy": 1.0,
107
+ "precision": 1.0,
108
+ "recall": 1.0,
109
+ "f1": 1.0,
110
+ "round": 0,
111
+ "n_devices": 5,
112
+ "epsilon_spent": 0.0,
113
+ "avg_sigma": 0.0
114
+ },
115
+ {
116
+ "accuracy": 1.0,
117
+ "precision": 1.0,
118
+ "recall": 1.0,
119
+ "f1": 1.0,
120
+ "round": 1,
121
+ "n_devices": 5,
122
+ "epsilon_spent": 0.0,
123
+ "avg_sigma": 0.0
124
+ },
125
+ {
126
+ "accuracy": 1.0,
127
+ "precision": 1.0,
128
+ "recall": 1.0,
129
+ "f1": 1.0,
130
+ "round": 2,
131
+ "n_devices": 5,
132
+ "epsilon_spent": 0.0,
133
+ "avg_sigma": 0.0
134
+ },
135
+ {
136
+ "accuracy": 1.0,
137
+ "precision": 1.0,
138
+ "recall": 1.0,
139
+ "f1": 1.0,
140
+ "round": 3,
141
+ "n_devices": 5,
142
+ "epsilon_spent": 0.0,
143
+ "avg_sigma": 0.0
144
+ },
145
+ {
146
+ "accuracy": 1.0,
147
+ "precision": 1.0,
148
+ "recall": 1.0,
149
+ "f1": 1.0,
150
+ "round": 4,
151
+ "n_devices": 5,
152
+ "epsilon_spent": 0.0,
153
+ "avg_sigma": 0.0
154
+ },
155
+ {
156
+ "accuracy": 1.0,
157
+ "precision": 1.0,
158
+ "recall": 1.0,
159
+ "f1": 1.0,
160
+ "round": 5,
161
+ "n_devices": 5,
162
+ "epsilon_spent": 0.0,
163
+ "avg_sigma": 0.0
164
+ },
165
+ {
166
+ "accuracy": 1.0,
167
+ "precision": 1.0,
168
+ "recall": 1.0,
169
+ "f1": 1.0,
170
+ "round": 6,
171
+ "n_devices": 5,
172
+ "epsilon_spent": 0.0,
173
+ "avg_sigma": 0.0
174
+ },
175
+ {
176
+ "accuracy": 1.0,
177
+ "precision": 1.0,
178
+ "recall": 1.0,
179
+ "f1": 1.0,
180
+ "round": 7,
181
+ "n_devices": 5,
182
+ "epsilon_spent": 0.0,
183
+ "avg_sigma": 0.0
184
+ },
185
+ {
186
+ "accuracy": 1.0,
187
+ "precision": 1.0,
188
+ "recall": 1.0,
189
+ "f1": 1.0,
190
+ "round": 8,
191
+ "n_devices": 5,
192
+ "epsilon_spent": 0.0,
193
+ "avg_sigma": 0.0
194
+ },
195
+ {
196
+ "accuracy": 1.0,
197
+ "precision": 1.0,
198
+ "recall": 1.0,
199
+ "f1": 1.0,
200
+ "round": 9,
201
+ "n_devices": 5,
202
+ "epsilon_spent": 0.0,
203
+ "avg_sigma": 0.0
204
+ }
205
+ ]
206
+ }
federated/visualization.py ADDED
@@ -0,0 +1,219 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Visualize federated learning results."""
2
+ import json
3
+ import os
4
+
5
+
6
def plot_accuracy_over_rounds(results: list, output_path: str = "federated_results.png"):
    """Plot accuracy, F1, precision, recall over federated rounds.

    Left panel: line chart of the four metrics per round.
    Right panel: bar chart of the cumulative privacy budget, or a
    placeholder message when no DP was applied.
    """
    # matplotlib is imported lazily so the module loads without it.
    import matplotlib
    matplotlib.use("Agg")
    import matplotlib.pyplot as plt

    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(14, 6))

    rounds = [entry["round"] + 1 for entry in results]
    accuracies = [entry["accuracy"] for entry in results]

    # (metric key, line/marker style, color, marker size, legend label)
    metric_series = [
        ("accuracy", "o-", "#0D9488", 6, "Accuracy"),
        ("f1", "s-", "#F59E0B", 6, "F1 Score"),
        ("precision", "^-", "#3B82F6", 5, "Precision"),
        ("recall", "v-", "#8B5CF6", 5, "Recall"),
    ]
    for key, line_style, color, marker_size, label in metric_series:
        values = [entry[key] for entry in results]
        ax1.plot(rounds, values, line_style, color=color, linewidth=2,
                 markersize=marker_size, label=label)

    # Call out the first and last accuracy values on the curve.
    ax1.annotate(
        f"{accuracies[0]:.2f}",
        xy=(rounds[0], accuracies[0]),
        xytext=(rounds[0] + 0.3, accuracies[0] - 0.04),
        fontsize=9, color="#0D9488",
    )
    ax1.annotate(
        f"{accuracies[-1]:.2f}",
        xy=(rounds[-1], accuracies[-1]),
        xytext=(rounds[-1] - 0.8, accuracies[-1] + 0.03),
        fontsize=9, color="#0D9488",
    )

    ax1.set_xlabel("Federated Round")
    ax1.set_ylabel("Score")
    ax1.set_title("MLP Model Improvement Over Federated Rounds")
    ax1.legend(loc="lower right")
    ax1.grid(True, alpha=0.3)
    ax1.set_ylim(0.0, 1.05)
    ax1.set_xticks(rounds)

    # Right panel: cumulative epsilon per round, labelled above each bar.
    epsilons = [entry.get("epsilon_spent", 0) for entry in results]
    if any(eps > 0 for eps in epsilons):
        bars = ax2.bar(rounds, epsilons, color="#EF4444", alpha=0.7)
        ax2.set_ylabel("Cumulative Privacy Budget (\u03b5)")
        for bar, eps in zip(bars, epsilons):
            ax2.text(
                bar.get_x() + bar.get_width() / 2,
                bar.get_height() + 0.01,
                f"{eps:.2f}",
                ha="center", va="bottom", fontsize=8,
            )
    else:
        ax2.text(0.5, 0.5, "No DP applied", ha="center", va="center",
                 transform=ax2.transAxes, fontsize=14, color="gray")

    ax2.set_xlabel("Federated Round")
    ax2.set_title("Privacy Budget Consumption")
    ax2.grid(True, alpha=0.3)
    ax2.set_xticks(rounds)

    plt.tight_layout()
    plt.savefig(output_path, dpi=150, bbox_inches="tight")
    print(f"Saved plot to {output_path}")
    plt.close()
+
83
+
84
def _plot_metric_comparison(ax, rounds_dp, results_dp, rounds_no_dp,
                            results_no_dp, metric, ylabel, title):
    """Plot one metric (DP vs no-DP) on the given matplotlib axes.

    Args:
        ax: Target matplotlib Axes.
        rounds_dp: 1-indexed round numbers for the DP run.
        results_dp: Per-round result dicts for the DP run.
        rounds_no_dp: 1-indexed round numbers for the no-DP run.
        results_no_dp: Per-round result dicts for the no-DP run.
        metric: Key to read from each result dict (e.g. "accuracy").
        ylabel: Y-axis label.
        title: Subplot title.
    """
    # NOTE(review): the legend hardcodes \u03b5=0.3 — confirm this matches
    # the epsilon actually used by the simulation config.
    ax.plot(rounds_dp, [r[metric] for r in results_dp],
            "o-", color="#EF4444", linewidth=2, markersize=6,
            label="With DP (\u03b5=0.3)")
    ax.plot(rounds_no_dp, [r[metric] for r in results_no_dp],
            "s-", color="#0D9488", linewidth=2, markersize=6,
            label="Without DP")
    ax.set_xlabel("Federated Round")
    ax.set_ylabel(ylabel)
    ax.set_title(title)
    ax.legend()
    ax.grid(True, alpha=0.3)
    ax.set_ylim(0.0, 1.05)
    ax.set_xticks(rounds_dp)


def plot_dp_comparison(comparison: dict, output_path: str = "dp_comparison.png"):
    """Plot accuracy with DP vs without DP side by side.

    Args:
        comparison: dict with keys 'with_dp' and 'without_dp',
            each a list of round result dicts.
        output_path: Where to save the figure.
    """
    # Lazy import with a headless backend so this also runs on servers
    # without a display.
    import matplotlib
    matplotlib.use("Agg")
    import matplotlib.pyplot as plt

    results_dp = comparison["with_dp"]
    results_no_dp = comparison["without_dp"]

    # Rounds are 0-indexed in the results; display them 1-indexed.
    rounds_dp = [r["round"] + 1 for r in results_dp]
    rounds_no_dp = [r["round"] + 1 for r in results_no_dp]

    fig, axes = plt.subplots(1, 2, figsize=(14, 6))

    # Both subplots share identical styling; only metric/labels differ.
    _plot_metric_comparison(axes[0], rounds_dp, results_dp, rounds_no_dp,
                            results_no_dp, "accuracy", "Accuracy",
                            "Accuracy: DP vs No-DP")
    _plot_metric_comparison(axes[1], rounds_dp, results_dp, rounds_no_dp,
                            results_no_dp, "f1", "F1 Score",
                            "F1 Score: DP vs No-DP")

    # Annotate final accuracy values on the accuracy subplot only.
    ax = axes[0]
    acc_dp_final = results_dp[-1]["accuracy"]
    acc_no_dp_final = results_no_dp[-1]["accuracy"]
    ax.annotate(f"{acc_dp_final:.3f}", xy=(rounds_dp[-1], acc_dp_final),
                xytext=(rounds_dp[-1] - 1.5, acc_dp_final - 0.05),
                fontsize=9, color="#EF4444")
    ax.annotate(f"{acc_no_dp_final:.3f}", xy=(rounds_no_dp[-1], acc_no_dp_final),
                xytext=(rounds_no_dp[-1] - 1.5, acc_no_dp_final + 0.03),
                fontsize=9, color="#0D9488")

    plt.tight_layout()
    plt.savefig(output_path, dpi=150, bbox_inches="tight")
    print(f"Saved DP comparison plot to {output_path}")
    plt.close()
150
+
151
+
152
def print_summary(results: list):
    """Print a text summary of simulation results.

    Args:
        results: List of per-round result dicts. Each dict must contain
            'round', 'accuracy', 'f1' and 'n_devices'; 'precision',
            'recall' and 'epsilon_spent' are optional.
    """
    print("\n" + "=" * 60)
    print("FEDERATED LEARNING SIMULATION SUMMARY")
    print("=" * 60)

    # Guard against an empty run: the first/last comparison below would
    # otherwise raise IndexError on results[0].
    if not results:
        print("\n(no rounds recorded)")
        return

    for r in results:
        print(f"\nRound {r['round']+1}:")
        print(f" Accuracy: {r['accuracy']:.4f}")
        print(f" F1 Score: {r['f1']:.4f}")
        print(f" Precision: {r.get('precision', 0):.4f}")
        print(f" Recall: {r.get('recall', 0):.4f}")
        print(f" Devices: {r['n_devices']}")
        eps = r.get('epsilon_spent', 0)
        # Only show the privacy budget when DP was actually applied.
        if eps > 0:
            print(f" \u03b5 spent: {eps:.4f}")

    # Headline improvement: first round vs last round.
    first = results[0]
    last = results[-1]
    acc_delta = last["accuracy"] - first["accuracy"]
    f1_delta = last["f1"] - first["f1"]
    print(
        f"\nImprovement: accuracy {first['accuracy']:.4f} -> "
        f"{last['accuracy']:.4f} ({acc_delta:+.4f})"
    )
    print(
        f" F1 {first['f1']:.4f} -> "
        f"{last['f1']:.4f} ({f1_delta:+.4f})"
    )
181
+
182
+
183
def main():
    """Load simulation results (if present), print a summary and save plots.

    Supports two on-disk formats in simulation_results.json:
    a list of round dicts (single run) or a dict with 'with_dp' /
    'without_dp' keys (DP comparison run).
    """
    # Resolve paths relative to this script, not the CWD.
    script_dir = os.path.dirname(os.path.abspath(__file__))
    results_path = os.path.join(script_dir, "simulation_results.json")

    if not os.path.exists(results_path):
        print("No simulation results found. Run simulate.py first.")
        return

    with open(results_path) as f:
        data = json.load(f)

    # Handle both formats: list (single run) or dict (comparison)
    if isinstance(data, list):
        print_summary(data)
        plot_accuracy_over_rounds(
            data,
            output_path=os.path.join(script_dir, "federated_results.png"),
        )
    elif isinstance(data, dict) and "with_dp" in data:
        print("\n--- WITH DP ---")
        print_summary(data["with_dp"])
        print("\n--- WITHOUT DP ---")
        print_summary(data["without_dp"])
        plot_dp_comparison(
            data,
            output_path=os.path.join(script_dir, "dp_comparison.png"),
        )
    else:
        # Anything else (dict without comparison keys, or a non-container
        # top-level value) is rejected explicitly instead of silently.
        print("Unknown results format.")


if __name__ == "__main__":
    main()
hub/__init__.py ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """SentinelEdge Hub - Federated aggregation server."""
2
+
3
+ from .schemas import (
4
+ FederatedUpdate,
5
+ AggregationResponse,
6
+ DeviceRegistration,
7
+ DeviceRegistrationResponse,
8
+ ModelVersionInfo,
9
+ GlobalMetrics,
10
+ RoundStatus,
11
+ )
12
+
13
+ __all__ = [
14
+ "FederatedUpdate",
15
+ "AggregationResponse",
16
+ "DeviceRegistration",
17
+ "DeviceRegistrationResponse",
18
+ "ModelVersionInfo",
19
+ "GlobalMetrics",
20
+ "RoundStatus",
21
+ ]
hub/aggregator.py ADDED
@@ -0,0 +1,192 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Federated Averaging aggregator with Byzantine fault detection."""
2
+
3
+ import logging
4
+
5
+ import numpy as np
6
+
7
+ from .schemas import FederatedUpdate
8
+
9
+ logger = logging.getLogger(__name__)
10
+
11
+
12
class FedAvgAggregator:
    """Federated Averaging: weighted mean of DP-noised gradient deltas.

    Implements the FedAvg algorithm from McMahan et al. (2017) with
    additional outlier detection for Byzantine fault tolerance.
    """

    @staticmethod
    def _weighted_mean(vectors: list, weights: list, what: str) -> np.ndarray:
        """Sample-weighted mean of per-device vectors.

        Shared implementation for gradient deltas and feature
        importances: validates that all vectors have the same dimension
        and that the total weight is non-zero, then computes
        sum(w_i * v_i) / sum(w_i).

        Args:
            vectors: One vector (sequence of floats) per device.
            weights: Per-device sample counts, aligned with `vectors`.
            what: Vector name used in the dimension-mismatch message
                (e.g. "Gradient" or "Feature importance").

        Returns:
            Weighted mean as a float64 numpy array.

        Raises:
            ValueError: If `vectors` is empty, dimensions mismatch, or
                the total weight is zero.
        """
        if not vectors:
            raise ValueError("Cannot aggregate empty updates list")

        dim = len(vectors[0])
        for i, v in enumerate(vectors):
            if len(v) != dim:
                raise ValueError(
                    f"{what} dimension mismatch: update 0 has {dim}, "
                    f"update {i} has {len(v)}"
                )

        total_samples = sum(weights)
        if total_samples == 0:
            raise ValueError("Total samples across all updates is zero")

        weighted_sum = np.zeros(dim, dtype=np.float64)
        for w, v in zip(weights, vectors):
            weighted_sum += w * np.asarray(v, dtype=np.float64)
        return weighted_sum / total_samples

    def aggregate(self, updates: list[FederatedUpdate]) -> np.ndarray:
        """Compute weighted average of gradient deltas.

        Weight = n_samples for each device.
        G_global = sum(n_i * G_i) / sum(n_i)

        Args:
            updates: List of federated updates from edge devices.

        Returns:
            Weighted average gradient delta as a numpy array.

        Raises:
            ValueError: If updates list is empty or gradient dimensions mismatch.
        """
        global_delta = self._weighted_mean(
            [u.gradient_delta for u in updates],
            [u.n_samples for u in updates],
            "Gradient",
        )
        logger.info(
            "Aggregated %d updates (%d total samples), "
            "delta norm=%.6f",
            len(updates),
            sum(u.n_samples for u in updates),
            float(np.linalg.norm(global_delta)),
        )
        return global_delta

    def aggregate_feature_importances(
        self, updates: list[FederatedUpdate]
    ) -> np.ndarray:
        """Weighted averaging for feature importances.

        Same weighting scheme as gradient aggregation: weight = n_samples.

        Args:
            updates: List of federated updates from edge devices.

        Returns:
            Weighted average feature importances as a numpy array.

        Raises:
            ValueError: If updates list is empty or dimensions mismatch.
        """
        return self._weighted_mean(
            [u.feature_importances for u in updates],
            [u.n_samples for u in updates],
            "Feature importance",
        )

    def detect_outliers(
        self, updates: list[FederatedUpdate], threshold: float = 3.0
    ) -> list[int]:
        """Detect potential Byzantine/poisoning updates.

        Flag updates where the gradient L2 norm is more than `threshold`
        standard deviations from the mean norm across all updates.

        Args:
            updates: List of federated updates from edge devices.
            threshold: Number of standard deviations for outlier detection.

        Returns:
            List of indices of suspicious updates.
        """
        # With fewer than two updates a z-score is meaningless.
        if len(updates) < 2:
            return []

        norms = np.array(
            [np.linalg.norm(u.gradient_delta) for u in updates], dtype=np.float64
        )

        mean_norm = float(np.mean(norms))
        std_norm = float(np.std(norms))

        if std_norm < 1e-10:
            # All norms are essentially the same -- no outliers
            return []

        outlier_indices = []
        for i, norm in enumerate(norms):
            z_score = abs(norm - mean_norm) / std_norm
            if z_score > threshold:
                logger.warning(
                    "Outlier detected: update %d from device %s, "
                    "norm=%.4f, z_score=%.2f (threshold=%.2f)",
                    i,
                    updates[i].device_id,
                    norm,
                    z_score,
                    threshold,
                )
                outlier_indices.append(i)

        return outlier_indices

    def aggregate_safe(
        self, updates: list[FederatedUpdate], outlier_threshold: float = 3.0
    ) -> tuple[np.ndarray, np.ndarray, list[int]]:
        """Aggregate with automatic outlier removal.

        Detects outliers, removes them, then aggregates the remaining
        updates. If too few updates remain after outlier removal, uses
        all updates anyway with a warning.

        Args:
            updates: List of federated updates.
            outlier_threshold: Z-score threshold for outlier detection.

        Returns:
            Tuple of (global_delta, feature_importances, outlier_indices).
        """
        outlier_indices = self.detect_outliers(updates, outlier_threshold)

        if outlier_indices:
            clean_updates = [
                u for i, u in enumerate(updates) if i not in outlier_indices
            ]
            if len(clean_updates) < 2:
                # Refusing to aggregate almost nothing: fall back to the
                # full update set rather than trusting a tiny remainder.
                logger.warning(
                    "Too many outliers (%d/%d) -- using all updates",
                    len(outlier_indices),
                    len(updates),
                )
                clean_updates = updates
                outlier_indices = []
            else:
                logger.info(
                    "Removed %d outlier updates, aggregating %d remaining",
                    len(outlier_indices),
                    len(clean_updates),
                )
        else:
            clean_updates = updates

        global_delta = self.aggregate(clean_updates)
        feature_importances = self.aggregate_feature_importances(clean_updates)

        return global_delta, feature_importances, outlier_indices