diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..9c83142145c4befa8d9aab39a10039e3cd251672
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,92 @@
+# =============================================================================
+# GitPilot - Hugging Face Spaces Dockerfile
+# =============================================================================
+# Follows the official HF Docker Spaces pattern:
+# https://huggingface.co/docs/hub/spaces-sdks-docker
+#
+# Architecture:
+# React UI (Vite build) -> FastAPI backend -> OllaBridge Cloud / any LLM
+# =============================================================================
+
+# -- Stage 1: Build React frontend -------------------------------------------
+FROM node:20-slim AS frontend-builder
+
+WORKDIR /build
+
+COPY frontend/package.json frontend/package-lock.json ./
+RUN npm ci --include=dev  # dev deps needed for the Vite build; --production=false is deprecated in npm >= 9
+
+COPY frontend/ ./
+RUN npm run build
+
+# -- Stage 2: Python runtime -------------------------------------------------
+FROM python:3.12-slim
+
+# System deps needed at runtime
+RUN apt-get update && apt-get install -y --no-install-recommends \
+ git curl ca-certificates \
+ && rm -rf /var/lib/apt/lists/*
+
+# HF Spaces runs containers as UID 1000 — create user early (official pattern)
+RUN useradd -m -u 1000 user
+
+USER user
+
+ENV HOME=/home/user \
+ PATH=/home/user/.local/bin:$PATH \
+ PYTHONUNBUFFERED=1 \
+ GITPILOT_PROVIDER=ollabridge \
+ OLLABRIDGE_BASE_URL=https://ruslanmv-ollabridge.hf.space \
+ GITPILOT_OLLABRIDGE_MODEL=qwen2.5:1.5b \
+ CORS_ORIGINS="*" \
+ GITPILOT_CONFIG_DIR=/tmp/gitpilot
+
+WORKDIR $HOME/app
+
+# ── Install Python dependencies BEFORE copying source code ──────────
+# This ensures pip install layers are cached even when code changes.
+COPY --chown=user pyproject.toml README.md ./
+
+# Step 1: lightweight deps (cached layer)
+RUN pip install --no-cache-dir --upgrade pip && \
+ pip install --no-cache-dir \
+ "fastapi>=0.111.0" \
+ "uvicorn[standard]>=0.30.0" \
+ "httpx>=0.27.0" \
+ "python-dotenv>=1.1.0,<1.2.0" \
+ "typer>=0.12.0,<0.24.0" \
+ "pydantic>=2.7.0,<2.12.0" \
+ "rich>=13.0.0" \
+ "pyjwt[crypto]>=2.8.0"
+
+# Step 2: heavy ML/agent deps (separate layer for better caching)
+RUN pip install --no-cache-dir \
+ "litellm" \
+ "crewai[anthropic]>=0.76.9" \
+ "crewai-tools>=0.13.4" \
+ "anthropic>=0.39.0" \
+ "ibm-watsonx-ai>=1.1.0" \
+ "langchain-ibm>=0.3.0"
+
+# ── Now copy source code (cache-busting only affects layers below) ──
+COPY --chown=user gitpilot ./gitpilot
+
+# Copy built frontend into gitpilot/web/
+COPY --chown=user --from=frontend-builder /build/dist/ ./gitpilot/web/
+
+# Step 3: editable install of gitpilot itself (deps already satisfied)
+RUN pip install --no-cache-dir --no-deps -e .
+
+EXPOSE 7860
+
+# NOTE: Do NOT add a Docker HEALTHCHECK here.
+# HF Spaces has its own HTTP probe on app_port (7860) and ignores the
+# Docker HEALTHCHECK directive.
+
+# Direct CMD — no shell script, fewer failure points.
+CMD ["python", "-m", "uvicorn", "gitpilot.api:app", \
+ "--host", "0.0.0.0", \
+ "--port", "7860", \
+ "--workers", "2", \
+ "--limit-concurrency", "10", \
+ "--timeout-keep-alive", "120"]
diff --git a/README.md b/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..5de6067d4f4f2254ae1c5014e1c868aac1865212
--- /dev/null
+++ b/README.md
@@ -0,0 +1,80 @@
+---
+title: GitPilot
+emoji: "\U0001F916"
+colorFrom: blue
+colorTo: indigo
+sdk: docker
+app_port: 7860
+startup_duration_timeout: 5m
+pinned: true
+license: apache-2.0
+short_description: Enterprise AI Coding Assistant for GitHub Repositories
+---
+
+# GitPilot — Hugging Face Spaces
+
+**Enterprise-grade AI coding assistant** for GitHub repositories with multi-LLM support, visual workflow insights, and intelligent code analysis.
+
+## What This Does
+
+This Space runs the full GitPilot stack:
+1. **React Frontend** — Professional dark-theme UI with chat, file browser, and workflow visualization
+2. **FastAPI Backend** — 80+ API endpoints for repository management, AI chat, planning, and execution
+3. **Multi-Agent AI** — CrewAI orchestration with 7 switchable agent topologies
+
+## LLM Providers
+
+GitPilot connects to your favorite LLM provider. Configure in **Admin / LLM Settings**:
+
+| Provider | Default | API Key Required |
+|---|---|---|
+| **OllaBridge Cloud** (default) | `qwen2.5:1.5b` | No |
+| OpenAI | `gpt-4o-mini` | Yes |
+| Anthropic Claude | `claude-sonnet-4-5` | Yes |
+| Ollama (local) | `llama3` | No |
+| Custom endpoint | Any model | Optional |
+
+## Quick Start
+
+1. Open the Space UI
+2. Enter your **GitHub Token** (Settings -> GitHub)
+3. Select a repository from the sidebar
+4. Start chatting with your AI coding assistant
+
+## API Endpoints
+
+| Endpoint | Description |
+|---|---|
+| `GET /api/health` | Health check |
+| `POST /api/chat/message` | Chat with AI assistant |
+| `POST /api/chat/plan` | Generate implementation plan |
+| `GET /api/repos` | List repositories |
+| `GET /api/settings` | View/update settings |
+| `GET /docs` | Interactive API docs (Swagger) |
+
+## Connect to OllaBridge Cloud
+
+By default, GitPilot connects to [OllaBridge Cloud](https://huggingface.co/spaces/ruslanmv/ollabridge) for LLM inference. This provides free access to open-source models without needing API keys.
+
+To use your own OllaBridge instance:
+1. Go to **Admin / LLM Settings**
+2. Select **OllaBridge** provider
+3. Enter your OllaBridge URL and model
+
+## Environment Variables
+
+Configure via HF Spaces secrets:
+
+| Variable | Description | Default |
+|---|---|---|
+| `GITPILOT_PROVIDER` | LLM provider | `ollabridge` |
+| `OLLABRIDGE_BASE_URL` | OllaBridge Cloud URL | `https://ruslanmv-ollabridge.hf.space` |
+| `GITHUB_TOKEN` | GitHub personal access token | - |
+| `OPENAI_API_KEY` | OpenAI API key (if using OpenAI) | - |
+| `ANTHROPIC_API_KEY` | Anthropic API key (if using Claude) | - |
+
+## Links
+
+- [GitPilot Repository](https://github.com/ruslanmv/gitpilot)
+- [OllaBridge Cloud](https://huggingface.co/spaces/ruslanmv/ollabridge)
+- [Documentation](https://github.com/ruslanmv/gitpilot#readme)
diff --git a/REPO_README.md b/REPO_README.md
new file mode 100644
index 0000000000000000000000000000000000000000..62c078c50fc2200679db161fb2bb4432c973d2e2
--- /dev/null
+++ b/REPO_README.md
@@ -0,0 +1,418 @@
+
+
+
+
+# GitPilot
+
+### The first open-source multi-agent AI coding assistant.
+
+Multiple specialized agents — including Explorer, Planner, Coder, and Reviewer — collaborate seamlessly on every task. By default, GitPilot requests confirmation before executing high-impact actions. Switch to Auto or Plan mode at any time.
+
+[](https://pypi.org/project/gitcopilot/)
+[](https://www.python.org/)
+[](LICENSE)
+[](https://marketplace.visualstudio.com/)
+[](#contributing)
+
+[**Get Started**](#get-started) · [VS Code](#vs-code-extension) · [Web App](#web-app) · [How It Works](#how-it-works) · [Providers](#supported-ai-providers)
+
+
+
+---
+
+
+
+
+
+
+
+
+## Why GitPilot?
+
+Most AI coding tools are a **single model behind a chat box**. GitPilot is fundamentally different: it deploys a **team of four specialized AI agents** that collaborate on every task — just like a real engineering team.
+
+| Agent | Role | What it does |
+|---|---|---|
+| **Explorer** | Context | Reads your full repo, git log, test suite, and dependencies so the plan starts with real knowledge — not guesses |
+| **Planner** | Strategy | Drafts a safe, step-by-step plan with diffs and surfaces risks before any file is touched |
+| **Coder** | Execution | Writes code, runs your tests, and self-corrects on failure — iterating until the suite passes |
+| **Reviewer** | Quality | Validates the output, re-runs the suite, and drafts a commit message and PR summary |
+
+**You control how the agent runs.** Three execution modes — selectable per session from the VS Code compose bar or backend API:
+
+| Mode | Default? | Behavior |
+|---|---|---|
+| **Ask** | Yes | Prompts you before each dangerous action (write, edit, run, commit). You see the diff and click Allow / Deny. |
+| **Auto** | | Executes all tools automatically. Fastest for experienced users who trust the plan. |
+| **Plan** | | Read-only. Generates and displays the plan but blocks all file writes and commands. |
+
+Diffs are shown before they're applied. Tests run before anything is committed. No surprises.
+
+### What else sets GitPilot apart
+
+- 🧭 **Works where you work** — VS Code, web app, and CLI share one login, one history, and one set of approvals.
+- 🧠 **Any LLM, zero lock-in** — OpenAI, Anthropic Claude, IBM Watsonx, Ollama (local & free) or OllaBridge. Switch in settings, no code change.
+- 🔐 **Private by default** — run the entire stack locally with Ollama. No telemetry, no data leaves your machine.
+- 🏢 **Enterprise-ready, Apache 2.0 open source** — 854 passing tests, Docker & Hugging Face deployment recipes, audit the code yourself.
+- 🌍 **Runs anywhere** — laptop, private cloud, air-gapped environments, or managed hosting. Your repo, your rules.
+
+---
+
+## What is GitPilot?
+
+GitPilot is an AI assistant that helps you ship better code, faster — without giving up control. It understands your project, plans changes you can read before they happen, writes the code, runs your tests, and drafts the commit message and pull request for you.
+
+**Works with any language. Runs on any LLM.** Start free and local with Ollama, or bring your own OpenAI, Claude, or Watsonx key.
+
+```
+You: "Add input validation to the login form"
+
+GitPilot:
+ 1. Reading src/auth/login.ts...
+ 2. Planning 3 changes...
+ 3. Editing login.ts → [Apply Patch] [Revert]
+ 4. Running npm test... 3 passed
+ 5. Done — files written to your workspace.
+```
+
+---
+
+## Get Started
+
+### Option 1: VS Code Extension (recommended)
+
+Install the extension, configure your LLM, and start chatting:
+
+```
+1. Open VS Code
+2. Install "GitPilot Workspace" from Extensions
+3. Click the GitPilot icon in the sidebar
+4. Choose your AI provider (OpenAI, Claude, Ollama...)
+5. Start asking questions about your code
+```
+
+### Option 2: Web App
+
+Run the full web interface with Docker:
+
+```bash
+git clone https://github.com/ruslanmv/gitpilot.git
+cd gitpilot
+docker compose up
+```
+
+Open [http://localhost:3000](http://localhost:3000) in your browser.
+
+### Live Demo on Hugging Face
+
+Experience the application in action through our hosted demo environment:
+
+[](https://huggingface.co/spaces/ruslanmv/gitpilot)
+
+🔗 **Access the live demo:**
+[https://huggingface.co/spaces/ruslanmv/gitpilot](https://huggingface.co/spaces/ruslanmv/gitpilot)
+
+### Option 3: Python CLI (fastest)
+
+```bash
+pip install gitcopilot
+gitpilot serve
+```
+
+Open [http://localhost:8000](http://localhost:8000) and you're done.
+
+> **Heads up:** the PyPI package is published as **`gitcopilot`** (the name `gitpilot` was already taken) but the command you run is `gitpilot`. Python **3.11** or **3.12** required.
+
+---
+
+## VS Code Extension
+
+The sidebar panel gives you everything in one place:
+
+| Feature | What it does |
+|---|---|
+| **Chat** | Ask questions, request changes, review code |
+| **Execution Modes** | Bottom bar: `Auto` / `Ask` / `Plan` — controls agent permissions per session |
+| **Plan View** | See the step-by-step plan before changes are made |
+| **Plan Approval** | "Approve & Execute" / "Dismiss" bar — execution waits for your OK |
+| **Tool Approvals** | Per-action Allow / Allow for session / Deny cards (Ask mode) |
+| **Diff Preview** | Review proposed edits in VS Code's native diff viewer |
+| **Apply / Revert** | One click to apply changes, one click to undo |
+| **Quick Actions** | Explain, Review, Fix, Generate Tests, Security Scan |
+| **Smart Commit** | AI-generated commit messages |
+| **Code Lens** | Inline "Explain / Review" hints on functions |
+| **Settings Tab** | Branded settings page (General, Provider, Agent, Editor) |
+| **New Chat** | One click to clear chat and start a fresh session |
+
+### Execution modes
+
+The compose bar includes a mode selector that controls how the multi-agent pipeline runs:
+
+```
+[ Auto | Ask | Plan ] [ Send ] [ New Chat ]
+```
+
+| Mode | VS Code setting | Backend value | What happens |
+|---|---|---|---|
+| **Ask** (default) | `gitpilot.permissionMode: "normal"` | `"normal"` | Each dangerous tool (write, edit, run, commit) shows an approval card |
+| **Auto** | `gitpilot.permissionMode: "auto"` | `"auto"` | Tools execute automatically — no approval prompts |
+| **Plan** | `gitpilot.permissionMode: "plan"` | `"plan"` | Plan is generated and displayed, all writes/commands blocked |
+
+Mode changes are persisted to VS Code settings and synced to the backend via `PUT /api/permissions/mode`.
+
+### How approvals work
+
+```
+You send a request
+ → Explorer reads repo context
+ → Planner drafts step-by-step plan
+ → Plan appears in sidebar (Approve & Execute / Dismiss)
+ → You click Approve
+ → Coder begins execution
+ → Dangerous tool requested (e.g. write_file)
+ → Ask mode: approval card shown (Allow / Allow for session / Deny)
+ → Auto mode: executes immediately
+ → Plan mode: blocked
+ → Tests run, Reviewer validates
+ → Done — Apply Patch or Revert
+```
+
+> **Note:** Simple questions (e.g. "explain this code") may return a direct answer without generating a multi-step plan. This is expected — the planner activates for tasks that require file changes or multi-step execution.
+
+### Code generation and Apply Patch
+
+When you ask GitPilot to create or edit files, the response includes structured `edits` — not just text. The **Apply Patch** button writes them directly to your workspace.
+
+```
+You: "Create a Flask app with app.py, requirements.txt, and README.md"
+
+GitPilot:
+ → LLM generates 3 files with content
+ → Backend extracts structured edits (path + content)
+ → VS Code shows [Apply Patch] [Revert]
+ → You click Apply Patch
+ → 3 files written to disk
+ → Project context refreshes automatically
+ → First file opens in the editor
+```
+
+How it works under the hood:
+- The LLM is instructed to output code blocks with the filename on the fence line (` ```python hello.py`)
+- The backend parses these blocks into `ProposedEdit` objects with file path, kind, and content
+- All paths are sanitized (rejects `../` traversal, absolute paths, drive letters)
+- The extension stores edits in `activeTask.edits` and shows Apply / Revert
+- `PatchApplier` writes files via `vscode.workspace.fs.writeFile`
+- After apply, project context refreshes and the first file opens
+
+> **Note:** For folder-only sessions (no GitHub remote), code generation uses the LLM directly with structured output instructions. For GitHub-connected sessions, the full CrewAI multi-agent pipeline (Explorer → Planner → Coder → Reviewer) handles planning and execution.
+
+### Supported AI Providers
+
+| Provider | Setup | Free? |
+|---|---|---|
+| **Ollama** | Install Ollama, run `ollama pull llama3` | Yes |
+| **OllaBridge** | Works out of the box (cloud Ollama) | Yes |
+| **OpenAI** | Add your API key in settings | Paid |
+| **Claude** | Add your Anthropic API key | Paid |
+| **Watsonx** | Add IBM credentials | Paid |
+
+---
+
+## Web App
+
+The web interface includes:
+
+- Chat with real-time responses
+- GitHub integration (connect your repos)
+- File tree browser
+- Diff viewer with line-by-line changes
+- Pull request creation
+- Session history with checkpoints
+- Multi-repo support
+
+
+
+### Example: File Deletion
+
+
+### Example: Content Generation
+
+
+### Example: File Creation
+
+
+### Example: Multiple Operations
+
+
+### Example: Multi-Agent Topologies
+
+
+---
+
+## How It Works
+
+
+
+
+
+
+
+
+GitPilot uses a multi-agent system powered by CrewAI:
+
+1. **Explorer** reads your repo structure, git log, and key files
+2. **Planner** creates a safe step-by-step plan with diffs
+3. **Coder** writes code and runs tests, self-correcting on failure
+4. **Reviewer** validates the output and summarises what changed
+
+In **Ask** mode (default), you approve every change before it's applied. In **Auto** mode, tools execute without prompts. In **Plan** mode, only the plan is generated — no files are touched.
+
+---
+
+## Project Structure
+
+```
+gitpilot/
+ gitpilot/ Python backend (FastAPI)
+ frontend/ React web app
+ extensions/vscode/ VS Code extension
+ docs/ Documentation and assets
+ tests/ Test suite
+```
+
+---
+
+## Configuration
+
+GitPilot works with environment variables or the settings UI.
+
+**Minimal setup** (Ollama, free, local):
+
+```bash
+# .env
+GITPILOT_PROVIDER=ollama
+OLLAMA_BASE_URL=http://localhost:11434
+GITPILOT_OLLAMA_MODEL=llama3
+```
+
+**Cloud setup** (OpenAI):
+
+```bash
+# .env
+GITPILOT_PROVIDER=openai
+OPENAI_API_KEY=sk-...
+GITPILOT_OPENAI_MODEL=gpt-4o-mini
+```
+
+**Cloud setup** (Claude):
+
+```bash
+# .env
+GITPILOT_PROVIDER=claude
+ANTHROPIC_API_KEY=sk-ant-...
+GITPILOT_CLAUDE_MODEL=claude-sonnet-4-5
+```
+
+All settings can also be changed from the VS Code extension or web UI without editing files.
+
+---
+
+## API
+
+GitPilot exposes a REST + WebSocket API:
+
+| Endpoint | What it does |
+|---|---|
+| `GET /api/status` | Server health check |
+| `POST /api/chat/send` | Send a message, get a response |
+| `POST /api/v2/chat/stream` | Stream agent events (SSE) — accepts `permission_mode` |
+| `WS /ws/v2/sessions/{id}` | Real-time WebSocket streaming |
+| `POST /api/chat/plan` | Generate an execution plan |
+| `POST /api/chat/execute` | Execute a plan |
+| `GET /api/repos` | List connected repositories |
+| `GET /api/sessions` | List chat sessions |
+| `GET /api/permissions` | Current permission policy |
+| `PUT /api/permissions/mode` | Set execution mode: `normal` / `auto` / `plan` |
+| `POST /api/v2/approval/respond` | Approve or deny a tool execution request |
+
+Full API docs at `http://localhost:8000/docs` (Swagger UI).
+
+---
+
+## Deployment
+
+### Hugging Face Spaces
+
+GitPilot runs on Hugging Face Spaces with OllaBridge (free):
+
+```
+Runtime: Docker
+Port: 7860
+Provider: OllaBridge (cloud Ollama)
+```
+
+### Docker Compose
+
+```bash
+docker compose up -d
+# Backend: http://localhost:8000
+# Frontend: http://localhost:3000
+```
+
+### Vercel
+
+The frontend deploys to Vercel. Set `VITE_BACKEND_URL` to your backend.
+
+---
+
+## Contributing
+
+```bash
+# Standard install: runtime backend + frontend + MCP stack
+make install
+# WSL note: the Makefile defaults uv to UV_LINK_MODE=copy to avoid
+# hardlink fallback warnings on /mnt/c checkouts. For best install speed,
+# clone the repo inside the native WSL filesystem (for example ~/workspace).
+
+# Developer/test tooling
+make install-dev
+make test
+
+# Frontend only
+cd frontend
+npm ci
+npm run dev
+
+# VS Code Extension
+cd extensions/vscode
+npm install
+make compile
+# Press F5 in VS Code to launch debug host
+```
+
+---
+
+## License
+
+Apache License 2.0. See [LICENSE](LICENSE).
+
+---
+
+
+
+**GitPilot** is made by [Ruslan Magana Vsevolodovna](https://github.com/ruslanmv)
+
+[Star on GitHub](https://github.com/ruslanmv/gitpilot) • [Report a Bug](https://github.com/ruslanmv/gitpilot/issues) • [Request a Feature](https://github.com/ruslanmv/gitpilot/issues)
+
+
+
+---
+**MCP Context Forge integration** — GitPilot ships a default MCP stack (Forge + PostgreSQL / Milvus / Inspector servers) wired into the agents like Claude Code's built-ins; `make run` brings everything up. No Docker? Use `make run-bare` to start GitPilot core without MCP. See [docs/deploy/install-mcp.md](./docs/deploy/install-mcp.md) and [docs/deploy/production-mcp.md](./docs/deploy/production-mcp.md).
+
+---
+
+## What's New
+
+> **Enterprise-ready foundation:** GitPilot now ships with safer defaults and production-grade controls, including thread-safe feature flags, strict typing, CI coverage enforcement, structured error handling, and a fast `gitpilot doctor` health check. All upgrades are additive, flag-gated, and disabled by default, so existing installations remain stable while teams can adopt new capabilities gradually.
+
+> **Performance, onboarding, and release confidence:** GitPilot now improves runtime efficiency with prompt caching, lazy tool loading, context memoisation, SSE streaming, and safe model warmup. First-time setup is easier with `gitpilot init --wizard`, which creates configuration files atomically with rollback protection and no secret exposure. The platform also adds a stable public API, deprecation handling, MkDocs documentation, broken-link checks, SBOM generation, npm auditing, and Sigstore-based release signing.
diff --git a/frontend/.dockerignore b/frontend/.dockerignore
new file mode 100644
index 0000000000000000000000000000000000000000..9da1acab57b3a83f0649dc5deb28b33600fe4ad3
--- /dev/null
+++ b/frontend/.dockerignore
@@ -0,0 +1,39 @@
+# Node
+node_modules/
+npm-debug.log*
+yarn-debug.log*
+yarn-error.log*
+.pnpm-debug.log*
+
+# Build
+dist/
+build/
+
+# Environment
+.env
+.env.local
+.env.development
+.env.test
+.env.production.local
+
+# IDE
+.vscode/
+.idea/
+*.swp
+*.swo
+*~
+
+# OS
+.DS_Store
+Thumbs.db
+
+# Git
+.git
+.gitignore
+
+# Testing
+coverage/
+.nyc_output/
+
+# Misc
+*.log
diff --git a/frontend/App.jsx b/frontend/App.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..693b5d298bb0232e5a06ae06f5f4fe46f7146f2d
--- /dev/null
+++ b/frontend/App.jsx
@@ -0,0 +1,1229 @@
+import React, { useCallback, useEffect, useMemo, useRef, useState } from "react";
+import StartupScreen from "./components/StartupScreen.jsx";
+import LoginPage from "./components/LoginPage.jsx";
+import RepoSelector from "./components/RepoSelector.jsx";
+import ProjectContextPanel from "./components/ProjectContextPanel.jsx";
+import ChatPanel from "./components/ChatPanel.jsx";
+import LlmSettings from "./components/LlmSettings.jsx";
+import FlowViewer from "./components/FlowViewer.jsx";
+import Footer from "./components/Footer.jsx";
+import ProjectSettingsModal from "./components/ProjectSettingsModal.jsx";
+import SessionSidebar from "./components/SessionSidebar.jsx";
+import ContextBar from "./components/ContextBar.jsx";
+import AddRepoModal from "./components/AddRepoModal.jsx";
+import UserMenu from "./components/UserMenu.jsx";
+import AboutModal from "./components/AboutModal.jsx";
+import {
+ WorkspaceModesTab,
+ SecurityTab,
+ IntegrationsTab,
+ MCPServersTab,
+ SkillsTab,
+ SessionsTab,
+ AdvancedTab,
+} from "./components/AdminTabs";
+import { apiUrl, safeFetchJSON, fetchStatus } from "./utils/api.js";
+import { initApp } from "./utils/appInit.js";
+
+function makeRepoKey(repo) { // Stable "owner/name" identity key for a repo object; null when no repo is given.
+  if (!repo) return null; // nothing selected yet -> no key
+  return repo.full_name || `${repo.owner}/${repo.name}`; // prefer the API-provided full_name, else derive it
+}
+
+function uniq(arr) { // De-duplicate an array, dropping falsy entries; tolerates null/undefined input.
+  return Array.from(new Set((arr || []).filter(Boolean))); // Set keeps first-seen insertion order
+}
+
+function getProviderLabel(status) { // Human-readable LLM provider name from a backend status payload.
+  if (!status) return "Checking..."; // status not fetched yet
+  return (
+    status?.provider?.name || // nested shape: { provider: { name } }
+    status?.provider_name || // flat field fallback — presumably an alternate backend payload shape
+    status?.provider?.provider || // last-resort nested fallback
+    "Checking..." // none of the known fields present
+  );
+}
+
+function getBackendVersion(status) { // Backend version string from status payload; placeholder while loading.
+  if (!status) return "Checking..."; // status not fetched yet
+  return status?.version || status?.app_version || "Checking..."; // either field name may appear in the payload
+}
+
+export default function App() {
+ const frontendVersion = __APP_VERSION__ || "unknown";
+
+ // ---- Multi-repo context state ----
+ const [contextRepos, setContextRepos] = useState([]);
+ // Each entry: { repoKey: "owner/repo", repo: {...}, branch: "main" }
+ const [activeRepoKey, setActiveRepoKey] = useState(null);
+ const [addRepoOpen, setAddRepoOpen] = useState(false);
+
+ const [activePage, setActivePage] = useState("workspace");
+ const [isAuthenticated, setIsAuthenticated] = useState(false);
+ const [isLoading, setIsLoading] = useState(true);
+ const [userInfo, setUserInfo] = useState(null);
+
+ // Startup / enterprise loader state
+ const [startupPhase, setStartupPhase] = useState("booting");
+ const [startupStatusMessage, setStartupStatusMessage] = useState("Starting application...");
+ const [startupDetailMessage, setStartupDetailMessage] = useState(
+ "Initializing authentication, provider, and workspace context."
+ );
+ const [startupStatusSnapshot, setStartupStatusSnapshot] = useState(null);
+
+ // Repo + Session State Machine
+ const [repoStateByKey, setRepoStateByKey] = useState({});
+ const [toast, setToast] = useState(null);
+ const [settingsOpen, setSettingsOpen] = useState(false);
+ const [aboutOpen, setAboutOpen] = useState(false);
+ const [adminTab, setAdminTab] = useState("overview");
+ const [adminStatus, setAdminStatus] = useState(null);
+
+ // Fetch admin status when overview tab is active
+ useEffect(() => {
+ if (activePage === "admin" && adminTab === "overview") {
+ fetchStatus()
+ .then((data) => setAdminStatus(data))
+ .catch(() => setAdminStatus(null));
+ }
+ }, [activePage, adminTab]);
+
+ // Claude-Code-on-Web: Session sidebar + Environment state
+ const [activeSessionId, setActiveSessionId] = useState(null);
+ const [activeEnvId, setActiveEnvId] = useState("default");
+ const [sessionRefreshNonce, setSessionRefreshNonce] = useState(0);
+
+ // Sidebar collapse state (persisted in localStorage)
+ const [sidebarCollapsed, setSidebarCollapsed] = useState(() => {
+ try {
+ return localStorage.getItem("gitpilot_sidebar_collapsed") === "true";
+ } catch {
+ return false;
+ }
+ });
+
+ const toggleSidebar = useCallback(() => {
+ setSidebarCollapsed((prev) => {
+ const next = !prev;
+ try {
+ localStorage.setItem("gitpilot_sidebar_collapsed", String(next));
+ } catch {}
+ return next;
+ });
+ }, []);
+
+ // Keyboard shortcut: Cmd/Ctrl + B to toggle sidebar
+ useEffect(() => {
+ const handler = (e) => {
+ if ((e.metaKey || e.ctrlKey) && e.key === "b") {
+ e.preventDefault();
+ toggleSidebar();
+ }
+ };
+ window.addEventListener("keydown", handler);
+ return () => window.removeEventListener("keydown", handler);
+ }, [toggleSidebar]);
+
+ // ---- Derived `repo` — keeps all downstream consumers unchanged ----
+ const repo = useMemo(() => {
+ const entry = contextRepos.find((r) => r.repoKey === activeRepoKey);
+ return entry?.repo || null;
+ }, [contextRepos, activeRepoKey]);
+
+ const repoKey = activeRepoKey;
+
+ // Convenient selectors
+ const currentRepoState = repoKey ? repoStateByKey[repoKey] : null;
+
+ const defaultBranch = currentRepoState?.defaultBranch || repo?.default_branch || "main";
+ const currentBranch = currentRepoState?.currentBranch || defaultBranch;
+ const sessionBranches = currentRepoState?.sessionBranches || [];
+ const lastExecution = currentRepoState?.lastExecution || null;
+ const pulseNonce = currentRepoState?.pulseNonce || 0;
+ const chatByBranch = currentRepoState?.chatByBranch || {};
+
+ // ---------------------------------------------------------------------------
+ // Multi-repo context management
+ // ---------------------------------------------------------------------------
+ const addRepoToContext = useCallback((r) => {
+ const key = makeRepoKey(r);
+ if (!key) return;
+
+ setContextRepos((prev) => {
+ if (prev.some((e) => e.repoKey === key)) {
+ setActiveRepoKey(key);
+ return prev;
+ }
+ const entry = { repoKey: key, repo: r, branch: r.default_branch || "main" };
+ return [...prev, entry];
+ });
+
+ setActiveRepoKey(key);
+ setAddRepoOpen(false);
+ }, []);
+
+ const removeRepoFromContext = useCallback((key) => {
+ setContextRepos((prev) => {
+ const next = prev.filter((e) => e.repoKey !== key);
+ setActiveRepoKey((curActive) => {
+ if (curActive === key) {
+ return next.length > 0 ? next[0].repoKey : null;
+ }
+ return curActive;
+ });
+ return next;
+ });
+ }, []);
+
+ const clearAllContext = useCallback(() => {
+ setContextRepos([]);
+ setActiveRepoKey(null);
+ }, []);
+
+ const handleContextBranchChange = useCallback((targetRepoKey, newBranch) => {
+ setContextRepos((prev) =>
+ prev.map((e) =>
+ e.repoKey === targetRepoKey ? { ...e, branch: newBranch } : e
+ )
+ );
+
+ setRepoStateByKey((prev) => {
+ const cur = prev[targetRepoKey];
+ if (!cur) return prev;
+ return {
+ ...prev,
+ [targetRepoKey]: { ...cur, currentBranch: newBranch },
+ };
+ });
+ }, []);
+
+ // Init / reconcile repo state when active repo changes
+ useEffect(() => {
+ if (!repoKey || !repo) return;
+
+ setRepoStateByKey((prev) => {
+ const existing = prev[repoKey];
+ const d = repo.default_branch || "main";
+
+ if (!existing) {
+ return {
+ ...prev,
+ [repoKey]: {
+ defaultBranch: d,
+ currentBranch: d,
+ sessionBranches: [],
+ lastExecution: null,
+ pulseNonce: 0,
+ chatByBranch: {
+ [d]: { messages: [], plan: null },
+ },
+ },
+ };
+ }
+
+ const next = { ...existing };
+ next.defaultBranch = d;
+
+ if (!next.chatByBranch?.[d]) {
+ next.chatByBranch = {
+ ...(next.chatByBranch || {}),
+ [d]: { messages: [], plan: null },
+ };
+ }
+
+ if (!next.currentBranch) next.currentBranch = d;
+
+ return { ...prev, [repoKey]: next };
+ });
+ }, [repoKey, repo?.id, repo?.default_branch]);
+
+ const showToast = (title, message) => {
+ setToast({ title, message });
+ window.setTimeout(() => setToast(null), 5000);
+ };
+
+ // ---------------------------------------------------------------------------
+ // Session management — every chat is backed by a Session (Claude Code parity)
+ // ---------------------------------------------------------------------------
+
+ const _creatingSessionRef = useRef(false);
+
+ const [chatBySession, setChatBySession] = useState({});
+
+ const ensureSession = useCallback(
+ async (sessionName, seedMessages) => {
+ if (activeSessionId) return activeSessionId;
+ if (!repo) return null;
+ if (_creatingSessionRef.current) return null;
+ _creatingSessionRef.current = true;
+
+ try {
+ const token = localStorage.getItem("github_token");
+ const headers = {
+ "Content-Type": "application/json",
+ ...(token ? { Authorization: `Bearer ${token}` } : {}),
+ };
+
+ const res = await fetch("/api/sessions", {
+ method: "POST",
+ headers,
+ body: JSON.stringify({
+ repo_full_name: repoKey,
+ branch: currentBranch,
+ name: sessionName || undefined,
+ repos: contextRepos.map((e) => ({
+ full_name: e.repoKey,
+ branch: e.branch,
+ mode: e.repoKey === activeRepoKey ? "write" : "read",
+ })),
+ active_repo: activeRepoKey,
+ }),
+ });
+
+ if (!res.ok) return null;
+ const data = await res.json();
+ const newId = data.session_id;
+
+ if (seedMessages && seedMessages.length > 0) {
+ setChatBySession((prev) => ({
+ ...prev,
+ [newId]: { messages: seedMessages, plan: null },
+ }));
+ }
+
+ setActiveSessionId(newId);
+ setSessionRefreshNonce((n) => n + 1);
+ return newId;
+ } catch (err) {
+ console.warn("Failed to create session:", err);
+ return null;
+ } finally {
+ _creatingSessionRef.current = false;
+ }
+ },
+ [activeSessionId, repo, repoKey, currentBranch, contextRepos, activeRepoKey]
+ );
+
+ const handleNewSession = async () => {
+ setActiveSessionId(null);
+
+ try {
+ const token = localStorage.getItem("github_token");
+ const headers = {
+ "Content-Type": "application/json",
+ ...(token ? { Authorization: `Bearer ${token}` } : {}),
+ };
+
+ const res = await fetch("/api/sessions", {
+ method: "POST",
+ headers,
+ body: JSON.stringify({
+ repo_full_name: repoKey,
+ branch: currentBranch,
+ repos: contextRepos.map((e) => ({
+ full_name: e.repoKey,
+ branch: e.branch,
+ mode: e.repoKey === activeRepoKey ? "write" : "read",
+ })),
+ active_repo: activeRepoKey,
+ }),
+ });
+
+ if (!res.ok) return;
+ const data = await res.json();
+ setActiveSessionId(data.session_id);
+ setSessionRefreshNonce((n) => n + 1);
+ showToast("Session Created", "New session started.");
+ } catch (err) {
+ console.warn("Failed to create session:", err);
+ }
+ };
+
+ /**
+ * Convert a backend Message object to the frontend chat UI shape.
+ * Backend: { role: "user|assistant|system", content: "...", timestamp, metadata }
+ * Frontend: { from: "user|ai", role: "user|assistant|system", content, answer, ... }
+ */
+ const normalizeBackendMessage = (m) => {
+ const role = m.role || "assistant";
+ const content = m.content || "";
+ if (role === "user") {
+ return { from: "user", role: "user", content, text: content };
+ }
+ if (role === "system") {
+ return { from: "ai", role: "system", content };
+ }
+ // assistant
+ return {
+ from: "ai",
+ role: "assistant",
+ content,
+ answer: content,
+ // Preserve any structured metadata the backend stored (plan, diff, etc.)
+ ...(m.metadata && typeof m.metadata === "object" ? m.metadata : {}),
+ };
+ };
+
+ /**
+ * Fetch persisted messages for a session from the backend.
+ * Returns an array of normalized frontend messages (ready for ChatPanel),
+ * or an empty array on failure.
+ */
+ const fetchSessionMessages = useCallback(async (sessionId) => {
+ if (!sessionId) return [];
+ try {
+ const token = localStorage.getItem("github_token");
+ const headers = { "Content-Type": "application/json" };
+ if (token) headers["Authorization"] = `Bearer ${token}`;
+
+ const res = await fetch(apiUrl(`/api/sessions/${sessionId}/messages`), {
+ headers,
+ });
+ if (!res.ok) {
+ console.warn(`[fetchSessionMessages] ${res.status} for ${sessionId}`);
+ return [];
+ }
+ const data = await res.json();
+ const backendMessages = Array.isArray(data.messages) ? data.messages : [];
+ return backendMessages.map(normalizeBackendMessage);
+ } catch (err) {
+ console.warn(`[fetchSessionMessages] Failed to fetch ${sessionId}:`, err);
+ return [];
+ }
+ }, []);
+
+ /**
+ * Handle click on a session in the sidebar.
+ *
+ * Critical ordering: we must hydrate chatBySession BEFORE setting
+ * activeSessionId, because ChatPanel's session-sync useEffect reads
+ * sessionChatState only when sessionId changes (it does NOT depend on
+ * chatBySession to avoid prop/state loops). If we set activeSessionId
+ * first, ChatPanel would see an empty messages array, then our async
+ * hydration would complete but ChatPanel wouldn't re-sync.
+ */
+ const handleSelectSession = useCallback(async (session) => {
+ // 1. Fetch persisted messages first
+ const messages = await fetchSessionMessages(session.id);
+
+ // 2. Seed the chat cache (ChatPanel will read this via sessionChatState)
+ setChatBySession((prev) => ({
+ ...prev,
+ [session.id]: {
+ ...(prev[session.id] || { plan: null }),
+ messages,
+ },
+ }));
+
+ // 3. NOW activate the session — ChatPanel's sync effect will read
+ // the hydrated messages from chatBySession[session.id]
+ setActiveSessionId(session.id);
+ if (session.branch && session.branch !== currentBranch) {
+ handleBranchChange(session.branch);
+ }
+ // eslint-disable-next-line react-hooks/exhaustive-deps
+ }, [fetchSessionMessages, currentBranch]);
+
+ const handleDeleteSession = useCallback(
+ (deletedId) => {
+ if (deletedId === activeSessionId) {
+ setActiveSessionId(null);
+
+ setChatBySession((prev) => {
+ const next = { ...prev };
+ delete next[deletedId];
+ return next;
+ });
+
+ if (repoKey) {
+ setRepoStateByKey((prev) => {
+ const cur = prev[repoKey];
+ if (!cur) return prev;
+ const branchKey = cur.currentBranch || cur.defaultBranch || defaultBranch;
+ return {
+ ...prev,
+ [repoKey]: {
+ ...cur,
+ chatByBranch: {
+ ...(cur.chatByBranch || {}),
+ [branchKey]: { messages: [], plan: null },
+ },
+ },
+ };
+ });
+ }
+ }
+ },
+ [activeSessionId, repoKey, defaultBranch]
+ );
+
+ // ---------------------------------------------------------------------------
+ // Chat persistence helpers
+ // ---------------------------------------------------------------------------
+ const updateChatForCurrentBranch = (patch) => {
+ if (!repoKey) return;
+
+ setRepoStateByKey((prev) => {
+ const cur = prev[repoKey];
+ if (!cur) return prev;
+
+ const branchKey = cur.currentBranch || cur.defaultBranch || defaultBranch;
+
+ const existing = cur.chatByBranch?.[branchKey] || {
+ messages: [],
+ plan: null,
+ };
+
+ return {
+ ...prev,
+ [repoKey]: {
+ ...cur,
+ chatByBranch: {
+ ...(cur.chatByBranch || {}),
+ [branchKey]: { ...existing, ...patch },
+ },
+ },
+ };
+ });
+ };
+
+ const currentChatState = useMemo(() => {
+ const b = currentBranch || defaultBranch;
+ return chatByBranch[b] || { messages: [], plan: null };
+ }, [chatByBranch, currentBranch, defaultBranch]);
+
+ const sessionChatState = useMemo(() => {
+ if (!activeSessionId) {
+ return currentChatState;
+ }
+ return chatBySession[activeSessionId] || { messages: [], plan: null };
+ }, [activeSessionId, chatBySession, currentChatState]);
+
+ const updateSessionChat = (patch) => {
+ if (activeSessionId) {
+ setChatBySession((prev) => ({
+ ...prev,
+ [activeSessionId]: {
+ ...(prev[activeSessionId] || { messages: [], plan: null }),
+ ...patch,
+ },
+ }));
+ } else {
+ updateChatForCurrentBranch(patch);
+ }
+ };
+
+ // ---------------------------------------------------------------------------
+ // Branch change (manual — for active repo)
+ // ---------------------------------------------------------------------------
+ const handleBranchChange = (nextBranch) => {
+ if (!repoKey) return;
+ if (!nextBranch || nextBranch === currentBranch) return;
+
+ setRepoStateByKey((prev) => {
+ const cur = prev[repoKey];
+ if (!cur) return prev;
+
+ const nextState = { ...cur, currentBranch: nextBranch };
+
+ if (nextBranch === cur.defaultBranch) {
+ nextState.chatByBranch = {
+ ...nextState.chatByBranch,
+ [nextBranch]: { messages: [], plan: null },
+ };
+ }
+
+ return { ...prev, [repoKey]: nextState };
+ });
+
+ setContextRepos((prev) =>
+ prev.map((e) =>
+ e.repoKey === repoKey ? { ...e, branch: nextBranch } : e
+ )
+ );
+
+ if (nextBranch === defaultBranch) {
+ showToast("New Session", `Switched to ${defaultBranch}. Chat cleared.`);
+ } else {
+ showToast("Context Switched", `Now viewing ${nextBranch}.`);
+ }
+ };
+
+ // ---------------------------------------------------------------------------
+ // Execution complete
+ // ---------------------------------------------------------------------------
+ const handleExecutionComplete = ({
+ branch,
+ mode,
+ commit_url,
+ completionMsg,
+ sourceBranch,
+ }) => {
+ if (!repoKey || !branch) return;
+
+ // Clear the session-keyed chat cache's ``plan`` AND append the
+ // completion message synchronously, before any branch change can
+ // trigger ChatPanel's session-sync effect. Two bugs need to be
+ // fixed in the same write:
+ //
+ // 1. Stale plan: without clearing, the sync effect re-reads the
+ // old approved plan and restores the Approve & execute / Reject
+ // plan buttons, enabling accidental double-execution.
+ //
+ // 2. Wiped completion: in hard-switch mode the sync effect runs
+ // BEFORE the persistence effect (declared earlier in
+ // ChatPanel), so it overwrites local ``messages`` with
+ // ``sessionChatState.messages`` — which doesn't yet contain
+ // completionMsg. The user's "Answer / Execution Log" block
+ // then vanishes from the session view.
+ //
+ // By appending normalizedCompletion here, sessionChatState already
+ // carries the completion when the sync effect reads it. No
+ // duplicate is introduced: local ``messages`` already has the same
+ // entry, so the subsequent persistence pass is a no-op write.
+ if (activeSessionId) {
+ const normalizedCompletion =
+ completionMsg &&
+ (completionMsg.answer || completionMsg.content || completionMsg.executionLog)
+ ? {
+ from: completionMsg.from || "ai",
+ role: completionMsg.role || "assistant",
+ answer: completionMsg.answer,
+ content: completionMsg.content,
+ executionLog: completionMsg.executionLog,
+ diff: completionMsg.diff,
+ }
+ : null;
+ setChatBySession((prev) => {
+ const existing = prev[activeSessionId];
+ if (!existing) return prev;
+ const noPlanChange = existing.plan == null;
+ if (noPlanChange && !normalizedCompletion) return prev;
+ return {
+ ...prev,
+ [activeSessionId]: {
+ ...existing,
+ messages: normalizedCompletion
+ ? [...(existing.messages || []), normalizedCompletion]
+ : existing.messages,
+ plan: null,
+ },
+ };
+ });
+ }
+
+ setRepoStateByKey((prev) => {
+ const cur =
+ prev[repoKey] || {
+ defaultBranch,
+ currentBranch: defaultBranch,
+ sessionBranches: [],
+ lastExecution: null,
+ pulseNonce: 0,
+ chatByBranch: { [defaultBranch]: { messages: [], plan: null } },
+ };
+
+ const next = { ...cur };
+ next.lastExecution = { mode, branch, ts: Date.now() };
+
+ if (!next.chatByBranch) next.chatByBranch = {};
+
+ const prevBranchKey =
+ sourceBranch || cur.currentBranch || cur.defaultBranch || defaultBranch;
+
+ const successSystemMsg = {
+ role: "system",
+ isSuccess: true,
+ link: commit_url,
+ content:
+ mode === "hard-switch"
+ ? `🌱 **Session Started:** Created branch \`${branch}\`.`
+ : `✅ **Update Published:** Commits pushed to \`${branch}\`.`,
+ };
+
+ const normalizedCompletion =
+ completionMsg &&
+ (completionMsg.answer || completionMsg.content || completionMsg.executionLog)
+ ? {
+ from: completionMsg.from || "ai",
+ role: completionMsg.role || "assistant",
+ answer: completionMsg.answer,
+ content: completionMsg.content,
+ executionLog: completionMsg.executionLog,
+ }
+ : null;
+
+ if (mode === "hard-switch") {
+ next.sessionBranches = uniq([...(next.sessionBranches || []), branch]);
+ next.currentBranch = branch;
+ next.pulseNonce = (next.pulseNonce || 0) + 1;
+
+ const existingTargetChat = next.chatByBranch[branch];
+ const isExistingSession =
+ existingTargetChat && (existingTargetChat.messages || []).length > 0;
+
+ if (isExistingSession) {
+ const appended = [
+ ...(existingTargetChat.messages || []),
+ ...(normalizedCompletion ? [normalizedCompletion] : []),
+ successSystemMsg,
+ ];
+
+ next.chatByBranch[branch] = {
+ ...existingTargetChat,
+ messages: appended,
+ plan: null,
+ };
+ } else {
+ const prevChat =
+ (cur.chatByBranch && cur.chatByBranch[prevBranchKey]) || {
+ messages: [],
+ plan: null,
+ };
+
+ next.chatByBranch[branch] = {
+ messages: [
+ ...(prevChat.messages || []),
+ ...(normalizedCompletion ? [normalizedCompletion] : []),
+ successSystemMsg,
+ ],
+ plan: null,
+ };
+ }
+
+ if (!next.chatByBranch[next.defaultBranch]) {
+ next.chatByBranch[next.defaultBranch] = { messages: [], plan: null };
+ }
+ } else if (mode === "sticky") {
+ next.currentBranch = cur.currentBranch || branch;
+
+ const targetChat = next.chatByBranch[branch] || { messages: [], plan: null };
+
+ next.chatByBranch[branch] = {
+ messages: [
+ ...(targetChat.messages || []),
+ ...(normalizedCompletion ? [normalizedCompletion] : []),
+ successSystemMsg,
+ ],
+ plan: null,
+ };
+ }
+
+ return { ...prev, [repoKey]: next };
+ });
+
+ if (mode === "hard-switch") {
+ showToast("Context Switched", `Active on ${branch}.`);
+ } else {
+ showToast("Changes Committed", `Updated ${branch}.`);
+ }
+ };
+
+ // ---------------------------------------------------------------------------
+ // Auth & startup render
+ // ---------------------------------------------------------------------------
+ useEffect(() => {
+ checkAuthentication();
+ }, []);
+
+ const checkAuthentication = async () => {
+ setStartupPhase("booting");
+ setStartupStatusMessage("Starting application...");
+ setStartupDetailMessage(
+ "Initializing authentication, provider, and workspace context."
+ );
+
+ try {
+ setStartupPhase("checking-backend");
+ setStartupStatusMessage("Connecting to backend...");
+ setStartupDetailMessage(
+ "Waiting for the server to be ready. This may take a few seconds on first start."
+ );
+
+ // Single-source-of-truth init: combines /api/status + /api/auth/status
+ // in one request. Runs exactly once per page load (StrictMode-safe).
+ const initResult = await initApp();
+ const status = initResult.status;
+ if (status) {
+ setStartupStatusSnapshot(status);
+ setAdminStatus(status);
+ }
+
+ const token = localStorage.getItem("github_token");
+ const user = localStorage.getItem("github_user");
+
+ if (token && user) {
+ setStartupPhase("validating-auth");
+ setStartupStatusMessage("Validating authentication...");
+ setStartupDetailMessage(
+ "Restoring your GitHub session and confirming access."
+ );
+
+ try {
+ const data = await safeFetchJSON(apiUrl("/api/auth/validate"), {
+ method: "POST",
+ headers: { "Content-Type": "application/json" },
+ body: JSON.stringify({ access_token: token }),
+ timeout: 20000, // 20s — first-load GitHub API validation can be slow
+ });
+
+ if (data.authenticated) {
+ setStartupPhase("restoring-session");
+ setStartupStatusMessage("Restoring workspace...");
+ setStartupDetailMessage(
+ "Loading user profile, reconnecting provider state, and preparing the workspace."
+ );
+
+ setIsAuthenticated(true);
+ setUserInfo(JSON.parse(user));
+ setIsLoading(false);
+ return;
+ }
+ } catch (err) {
+ console.error(err);
+ }
+
+ localStorage.removeItem("github_token");
+ localStorage.removeItem("github_user");
+ }
+
+ setStartupPhase("ready");
+ setStartupStatusMessage("Preparing sign-in...");
+ setStartupDetailMessage(
+ "GitPilot is ready. Please authenticate to continue."
+ );
+
+ setIsAuthenticated(false);
+ setIsLoading(false);
+ } catch (err) {
+ console.error(err);
+ setStartupPhase("fallback");
+ setStartupStatusMessage("Starting application...");
+ setStartupDetailMessage(
+ "Continuing with basic startup while backend status is still loading."
+ );
+ setIsAuthenticated(false);
+ setIsLoading(false);
+ }
+ };
+
+ const handleAuthenticated = (session) => {
+ setIsAuthenticated(true);
+ setUserInfo(session.user);
+ };
+
+ const handleLogout = () => {
+ localStorage.removeItem("github_token");
+ localStorage.removeItem("github_user");
+ setIsAuthenticated(false);
+ setUserInfo(null);
+ clearAllContext();
+ };
+
+ if (isLoading) {
+ return (
+
+ );
+ }
+
+ if (!isAuthenticated) {
+ return (
+
+ );
+ }
+
+ const hasContext = contextRepos.length > 0;
+
+ return (
+
+
+
+
+
+ {activePage === "admin" && (
+
+
+ {["overview", "providers", "workspace-modes", "integrations", "mcp-servers", "sessions", "skills", "security", "advanced"].map((tab) => (
+ setAdminTab(tab)}
+ style={{
+ padding: "8px 16px",
+ borderRadius: "6px",
+ border: adminTab === tab ? "1px solid #3B82F6" : "1px solid #333",
+ background: adminTab === tab ? "#1e3a5f" : "#1a1b26",
+ color: adminTab === tab ? "#93c5fd" : "#a0a0b0",
+ cursor: "pointer",
+ fontSize: "13px",
+ textTransform: "capitalize",
+ }}
+ >
+ {tab.replace("-", " ")}
+
+ ))}
+
+
+ {adminTab === "overview" && (
+
+
+
Server
+
+ {adminStatus?.server_ready ? "Connected" : "Checking..."}
+
+
127.0.0.1:8000
+
+
+
+
Provider
+
+ {adminStatus?.provider?.name || "Loading..."}
+
+
+ {adminStatus?.provider?.configured
+ ? `${adminStatus.provider.model || "Ready"}`
+ : "Not configured"}
+
+
+
+
+
Workspace Modes
+
+ Folder: {adminStatus?.workspace?.folder_mode_available ? "Yes" : "—"}
+
+
+ Local Git: {adminStatus?.workspace?.local_git_available ? "Yes" : "—"}
+
+
+ GitHub: {adminStatus?.workspace?.github_mode_available ? "Yes" : "Optional"}
+
+
+
+
+
GitHub
+
+ {adminStatus?.github?.connected ? "Connected" : "Optional"}
+
+
+ {adminStatus?.github?.username || "Not linked"}
+
+
+
+
+
+
+
Get Started
+
setAdminTab("providers")}
+ style={{
+ padding: "6px 12px",
+ background: "#3B82F6",
+ color: "#fff",
+ border: "none",
+ borderRadius: "4px",
+ cursor: "pointer",
+ fontSize: "12px",
+ marginRight: "4px",
+ }}
+ >
+ Configure Provider
+
+
+
+ )}
+
+ {adminTab === "providers" && (
+
+
AI Providers
+
+
+ )}
+
+ {adminTab === "workspace-modes" && (
+
{
+ setActiveSessionId(result.session_id);
+ setSessionRefreshNonce((n) => n + 1);
+ setActivePage("workspace");
+ }}
+ />
+ )}
+
+ {adminTab === "integrations" && (
+
+ )}
+
+ {adminTab === "mcp-servers" && (
+
+ )}
+
+ {adminTab === "security" && (
+
+ )}
+
+ {adminTab === "sessions" && (
+ {
+ handleSelectSession(s);
+ setActivePage("workspace");
+ }}
+ />
+ )}
+
+ {adminTab === "skills" && }
+
+ {adminTab === "advanced" && (
+ setSettingsOpen(true)}
+ />
+ )}
+
+ )}
+
+ {activePage === "flow" && }
+
+ {activePage === "workspace" &&
+ (repo ? (
+
+
setAddRepoOpen(true)}
+ onBranchChange={handleContextBranchChange}
+ />
+
+
+
+ setSettingsOpen(true)}
+ />
+
+
+
+
+ GitPilot chat
+
+
+
+
+
+
+ ) : (
+
+
🤖
+
Select a repository
+
Select a repo to begin agentic workflow.
+
+ ))}
+
+
+
+
+
+ {repo && (
+
setSettingsOpen(false)}
+ activeEnvId={activeEnvId}
+ onEnvChange={setActiveEnvId}
+ />
+ )}
+
+ setAddRepoOpen(false)}
+ excludeKeys={contextRepos.map((e) => e.repoKey)}
+ />
+
+ setAboutOpen(false)}
+ />
+
+ {toast && (
+
+
{toast.title}
+
{toast.message}
+
+ )}
+
+
+
+ );
+}
\ No newline at end of file
diff --git a/frontend/components/AboutModal.jsx b/frontend/components/AboutModal.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..e80dd1a98dd3d6caa062310469b6135ec24800dc
--- /dev/null
+++ b/frontend/components/AboutModal.jsx
@@ -0,0 +1,488 @@
+// frontend/components/AboutModal.jsx
+import React, { useEffect, useCallback, useState } from "react";
+import { apiUrl, safeFetchJSON } from "../utils/api.js";
+
+/**
+ * AboutModal — "About GitPilot" dialog shown from the user menu.
+ *
+ * Enterprise design goals:
+ * - Prominent brand mark matching docs/logo.svg (orange ring + GP monogram)
+ * - Clear identity: name, tagline, version (frontend + backend)
+ * - Credits the creator (Ruslan Magana Vsevolodovna) as a link to GitHub
+ * - Open-source positioning: Apache 2.0 license + GitHub repo link
+ * - Action row: View on GitHub, Report Issue, Documentation
+ * - Accessible: role="dialog", aria-modal, aria-labelledby, Escape to close,
+ * focus trap via initial focus on close button
+ * - Brand palette: #D95C3D accent, #1C1C1F card, #27272A border, #EDEDED text
+ */
+
+const FRONTEND_VERSION =
+ typeof __APP_VERSION__ !== "undefined" ? __APP_VERSION__ : "0.1.5";
+
+export default function AboutModal({ isOpen, onClose }) {
+ const [backendVersion, setBackendVersion] = useState(null);
+
+ // Fetch backend version when opened
+ useEffect(() => {
+ if (!isOpen) return;
+ let cancelled = false;
+ (async () => {
+ try {
+ const data = await safeFetchJSON(apiUrl("/api/ping"), { timeout: 4000 });
+ if (!cancelled) {
+ setBackendVersion(data?.version || null);
+ }
+ } catch {
+ if (!cancelled) setBackendVersion(null);
+ }
+ })();
+ return () => {
+ cancelled = true;
+ };
+ }, [isOpen]);
+
+ // Escape to close
+ useEffect(() => {
+ if (!isOpen) return;
+ const handleKey = (e) => {
+ if (e.key === "Escape") onClose?.();
+ };
+ document.addEventListener("keydown", handleKey);
+ return () => document.removeEventListener("keydown", handleKey);
+ }, [isOpen, onClose]);
+
+ // Lock body scroll while open
+ useEffect(() => {
+ if (!isOpen) return;
+ const prev = document.body.style.overflow;
+ document.body.style.overflow = "hidden";
+ return () => {
+ document.body.style.overflow = prev;
+ };
+ }, [isOpen]);
+
+ const handleBackdropClick = useCallback(
+ (e) => {
+ if (e.target === e.currentTarget) onClose?.();
+ },
+ [onClose]
+ );
+
+ if (!isOpen) return null;
+
+ return (
+
+
+ {/* Close button */}
+
{
+ e.currentTarget.style.background = "#27272A";
+ e.currentTarget.style.color = "#EDEDED";
+ }}
+ onMouseLeave={(e) => {
+ e.currentTarget.style.background = "transparent";
+ e.currentTarget.style.color = "#A1A1AA";
+ }}
+ >
+
+
+
+
+
+ {/* Hero: brand mark + name */}
+
+
+
+
+ GitPilot
+
+
+ Enterprise Workspace Copilot
+
+
+
+
+ Open Source · Apache 2.0
+
+
+
+ {/* Body */}
+
+
+ An agentic AI coding companion for your repositories. Ask, plan,
+ code, and ship — with multi-LLM support, security scanning, and
+ VS Code integration.
+
+
+ {/* Meta table */}
+
+
+
+
+
+ (e.currentTarget.style.textDecoration = "underline")
+ }
+ onMouseLeave={(e) =>
+ (e.currentTarget.style.textDecoration = "none")
+ }
+ >
+ Ruslan Magana Vsevolodovna
+
+ }
+ isLast
+ />
+
+
+
+ {/* Action row */}
+
+
}
+ label="GitHub"
+ />
+
}
+ label="Docs"
+ />
+
}
+ label="Report"
+ />
+
+
+ {/* Footer */}
+
+ © {new Date().getFullYear()} GitPilot · Made with care for
+ developers everywhere
+
+
+
+
+
+ );
+}
+
+// ── Brand mark (mirrors docs/logo.svg) ──────────────────────────────
+function BrandMark() {
+ return (
+
+ {/* Outer subtle ring */}
+
+ {/* Active arc (top-right, uses conic gradient for smooth arc) */}
+
+ {/* Soft core glow */}
+
+ {/* GP monogram */}
+
+ GP
+
+
+ );
+}
+
+// ── Meta row ────────────────────────────────────────────────────────
+function MetaRow({ label, value, isLast = false }) {
+ return (
+
+ {label}
+
+ {value}
+
+
+ );
+}
+
+// ── Action button ───────────────────────────────────────────────────
+function ActionButton({ href, icon, label }) {
+ return (
+ {
+ e.currentTarget.style.borderColor = "#D95C3D";
+ e.currentTarget.style.background = "rgba(217, 92, 61, 0.08)";
+ }}
+ onMouseLeave={(e) => {
+ e.currentTarget.style.borderColor = "#27272A";
+ e.currentTarget.style.background = "#131316";
+ }}
+ >
+
+ {icon}
+
+ {label}
+
+ );
+}
+
+// ── Icons ───────────────────────────────────────────────────────────
+function GitHubIcon() {
+ return (
+
+
+
+ );
+}
+
+function DocsIcon() {
+ return (
+
+
+
+
+ );
+}
+
+function BugIcon() {
+ return (
+
+
+
+
+ );
+}
diff --git a/frontend/components/AddRepoModal.jsx b/frontend/components/AddRepoModal.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..7832877ed985bac5ec81f1b4c43978e525dd8bd2
--- /dev/null
+++ b/frontend/components/AddRepoModal.jsx
@@ -0,0 +1,256 @@
+import React, { useCallback, useEffect, useState } from "react";
+import { createPortal } from "react-dom";
+import { authFetch } from "../utils/api.js";
+
+/**
+ * AddRepoModal — lightweight portal modal for adding repos to context.
+ *
+ * Embeds a minimal repo search/list (not the full RepoSelector) to keep
+ * the modal focused. Filters out repos already in context.
+ */
+export default function AddRepoModal({ isOpen, onSelect, onClose, excludeKeys = [] }) {
+ const [query, setQuery] = useState("");
+ const [repos, setRepos] = useState([]);
+ const [loading, setLoading] = useState(false);
+
+ const fetchRepos = useCallback(
+ async (searchQuery) => {
+ setLoading(true);
+ try {
+ const params = new URLSearchParams({ per_page: "50" });
+ if (searchQuery) params.set("query", searchQuery);
+ const res = await authFetch(`/api/repos?${params}`);
+ if (!res.ok) return;
+ const data = await res.json();
+ setRepos(data.repositories || []);
+ } catch (err) {
+ console.warn("AddRepoModal: fetch failed:", err);
+ } finally {
+ setLoading(false);
+ }
+ },
+ []
+ );
+
+ useEffect(() => {
+ if (isOpen) {
+ setQuery("");
+ fetchRepos("");
+ }
+ }, [isOpen, fetchRepos]);
+
+ // Debounced search
+ useEffect(() => {
+ if (!isOpen) return;
+ const t = setTimeout(() => fetchRepos(query), 300);
+ return () => clearTimeout(t);
+ }, [query, isOpen, fetchRepos]);
+
+ const excludeSet = new Set(excludeKeys);
+ const filtered = repos.filter((r) => {
+ const key = r.full_name || `${r.owner}/${r.name}`;
+ return !excludeSet.has(key);
+ });
+
+ if (!isOpen) return null;
+
+ return createPortal(
+ {
+ if (e.target === e.currentTarget) onClose();
+ }}
+ >
+
e.stopPropagation()}>
+
+ Add Repository
+
+ ×
+
+
+
+
+ setQuery(e.target.value)}
+ style={styles.searchInput}
+ autoFocus
+ onKeyDown={(e) => {
+ if (e.key === "Escape") onClose();
+ }}
+ />
+
+
+
+ {loading && filtered.length === 0 && (
+
Loading...
+ )}
+ {!loading && filtered.length === 0 && (
+
+ {excludeKeys.length > 0 && repos.length > 0
+ ? "All matching repos are already in context"
+ : "No repositories found"}
+
+ )}
+ {filtered.map((r) => {
+ const key = r.full_name || `${r.owner}/${r.name}`;
+ return (
+
onSelect(r)}
+ >
+
+ {r.name}
+ {r.owner}
+
+
+ {r.private && Private }
+ {r.default_branch || "main"}
+
+
+ );
+ })}
+ {loading && filtered.length > 0 && (
+
Updating...
+ )}
+
+
+
,
+ document.body
+ );
+}
+
+const styles = {
+ overlay: {
+ position: "fixed",
+ top: 0,
+ left: 0,
+ right: 0,
+ bottom: 0,
+ backgroundColor: "rgba(0, 0, 0, 0.6)",
+ zIndex: 10000,
+ display: "flex",
+ alignItems: "center",
+ justifyContent: "center",
+ },
+ modal: {
+ width: 440,
+ maxHeight: "70vh",
+ backgroundColor: "#131316",
+ border: "1px solid #27272A",
+ borderRadius: 12,
+ display: "flex",
+ flexDirection: "column",
+ overflow: "hidden",
+ boxShadow: "0 12px 40px rgba(0,0,0,0.5)",
+ },
+ header: {
+ display: "flex",
+ justifyContent: "space-between",
+ alignItems: "center",
+ padding: "12px 14px",
+ borderBottom: "1px solid #27272A",
+ backgroundColor: "#18181B",
+ },
+ headerTitle: {
+ fontSize: 14,
+ fontWeight: 600,
+ color: "#E4E4E7",
+ },
+ closeBtn: {
+ width: 26,
+ height: 26,
+ borderRadius: 6,
+ border: "1px solid #3F3F46",
+ background: "transparent",
+ color: "#A1A1AA",
+ fontSize: 16,
+ cursor: "pointer",
+ display: "flex",
+ alignItems: "center",
+ justifyContent: "center",
+ },
+ searchBox: {
+ padding: "10px 12px",
+ borderBottom: "1px solid #27272A",
+ },
+ searchInput: {
+ width: "100%",
+ padding: "8px 10px",
+ borderRadius: 6,
+ border: "1px solid #3F3F46",
+ background: "#18181B",
+ color: "#E4E4E7",
+ fontSize: 13,
+ outline: "none",
+ fontFamily: "monospace",
+ boxSizing: "border-box",
+ },
+ list: {
+ flex: 1,
+ overflowY: "auto",
+ maxHeight: 360,
+ },
+ statusRow: {
+ padding: "16px 12px",
+ textAlign: "center",
+ fontSize: 12,
+ color: "#71717A",
+ },
+ repoRow: {
+ display: "flex",
+ alignItems: "center",
+ justifyContent: "space-between",
+ width: "100%",
+ padding: "10px 14px",
+ border: "none",
+ borderBottom: "1px solid rgba(39, 39, 42, 0.5)",
+ background: "transparent",
+ color: "#E4E4E7",
+ cursor: "pointer",
+ textAlign: "left",
+ transition: "background-color 0.1s",
+ },
+ repoInfo: {
+ display: "flex",
+ flexDirection: "column",
+ gap: 2,
+ minWidth: 0,
+ },
+ repoName: {
+ fontSize: 13,
+ fontWeight: 600,
+ fontFamily: "monospace",
+ overflow: "hidden",
+ textOverflow: "ellipsis",
+ whiteSpace: "nowrap",
+ },
+ repoOwner: {
+ fontSize: 11,
+ color: "#71717A",
+ },
+ repoMeta: {
+ display: "flex",
+ alignItems: "center",
+ gap: 8,
+ flexShrink: 0,
+ },
+ privateBadge: {
+ fontSize: 9,
+ padding: "1px 5px",
+ borderRadius: 8,
+ backgroundColor: "rgba(239, 68, 68, 0.12)",
+ color: "#F87171",
+ fontWeight: 600,
+ textTransform: "uppercase",
+ },
+ branchHint: {
+ fontSize: 10,
+ color: "#52525B",
+ fontFamily: "monospace",
+ },
+};
diff --git a/frontend/components/AdminTabs/AdvancedTab.jsx b/frontend/components/AdminTabs/AdvancedTab.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..3d0085dd2f5c794d98d18323d48c388e9855df32
--- /dev/null
+++ b/frontend/components/AdminTabs/AdvancedTab.jsx
@@ -0,0 +1,360 @@
+// frontend/components/AdminTabs/AdvancedTab.jsx
+import React, { useEffect, useState, useCallback } from "react";
+import { apiUrl, safeFetchJSON } from "../../utils/api.js";
+
+/**
+ * Advanced tab — inline toggles for:
+ * - Lite Mode (via /api/settings/topology — sets topology to "lite_mode")
+ * - Permission Mode (normal | auto | plan via /api/permissions/mode)
+ * - Link to full Settings modal for power users
+ *
+ * Best practices applied:
+ * - Optimistic UI with rollback on error
+ * - Each setting has its own loading indicator (no global lock)
+ * - Descriptions explain what each mode does
+ * - ARIA-labeled toggle switches for accessibility
+ */
+
+const PERMISSION_MODES = [
+ {
+ value: "normal",
+ label: "Normal",
+ description:
+ "Ask before writing files or running commands (recommended).",
+ },
+ {
+ value: "auto",
+ label: "Auto",
+ description:
+ "Approve all tool calls automatically. Use only when you trust the agent.",
+ },
+ {
+ value: "plan",
+ label: "Plan Only",
+ description:
+ "Read-only mode. Agent cannot write files or run commands.",
+ },
+];
+
+function ToggleSwitch({ checked, onChange, disabled, ariaLabel }) {
+ return (
+ !disabled && onChange(!checked)}
+ disabled={disabled}
+ style={{
+ position: "relative",
+ width: "44px",
+ height: "24px",
+ borderRadius: "12px",
+ background: checked ? "#3B82F6" : "#374151",
+ border: "none",
+ cursor: disabled ? "not-allowed" : "pointer",
+ transition: "background 150ms ease",
+ padding: 0,
+ opacity: disabled ? 0.5 : 1,
+ }}
+ >
+
+
+ );
+}
+
+export default function AdvancedTab({ showToast, onOpenFullSettings }) {
+ const [liteMode, setLiteMode] = useState(false);
+ const [permissionMode, setPermissionMode] = useState("normal");
+ const [loading, setLoading] = useState(true);
+ const [updatingLite, setUpdatingLite] = useState(false);
+ const [updatingPerm, setUpdatingPerm] = useState(false);
+ const [error, setError] = useState(null);
+
+ // Initial fetch: topology preference + permission mode
+ useEffect(() => {
+ let cancelled = false;
+ (async () => {
+ try {
+ const [topo, perms] = await Promise.all([
+ safeFetchJSON(apiUrl("/api/settings/topology"), { timeout: 5000 })
+ .catch(() => ({ topology: null })),
+ safeFetchJSON(apiUrl("/api/permissions"), { timeout: 5000 })
+ .catch(() => ({ mode: "normal" })),
+ ]);
+ if (cancelled) return;
+ setLiteMode(topo?.topology === "lite_mode");
+ setPermissionMode(perms?.mode || perms?.policy?.mode || "normal");
+ } catch (err) {
+ if (!cancelled) setError(err?.message || "Failed to load settings");
+ } finally {
+ if (!cancelled) setLoading(false);
+ }
+ })();
+ return () => {
+ cancelled = true;
+ };
+ }, []);
+
+ const handleLiteToggle = useCallback(async (next) => {
+ setUpdatingLite(true);
+ setError(null);
+ const previous = liteMode;
+ setLiteMode(next); // optimistic
+ try {
+ await safeFetchJSON(apiUrl("/api/settings/topology"), {
+ method: "POST",
+ headers: { "Content-Type": "application/json" },
+ body: JSON.stringify({ topology: next ? "lite_mode" : null }),
+ timeout: 5000,
+ });
+ showToast?.(
+ "Lite Mode " + (next ? "enabled" : "disabled"),
+ next
+ ? "Single-agent path — better for small local models."
+ : "Multi-agent path — uses full CrewAI orchestration."
+ );
+ } catch (err) {
+ setLiteMode(previous); // rollback
+ setError(err?.message || "Failed to update lite mode");
+ } finally {
+ setUpdatingLite(false);
+ }
+ }, [liteMode, showToast]);
+
+ const handlePermissionChange = useCallback(async (next) => {
+ setUpdatingPerm(true);
+ setError(null);
+ const previous = permissionMode;
+ setPermissionMode(next); // optimistic
+ try {
+ const res = await fetch(apiUrl("/api/permissions/mode"), {
+ method: "PUT",
+ headers: { "Content-Type": "application/json" },
+ body: JSON.stringify({ mode: next }),
+ });
+ if (!res.ok) {
+ const body = await res.json().catch(() => ({}));
+ throw new Error(body.detail || `HTTP ${res.status}`);
+ }
+ showToast?.(
+ "Permission mode updated",
+ `Set to ${next}.`
+ );
+ } catch (err) {
+ setPermissionMode(previous); // rollback
+ setError(err?.message || "Failed to update permission mode");
+ } finally {
+ setUpdatingPerm(false);
+ }
+ }, [permissionMode, showToast]);
+
+ if (loading) {
+ return (
+
+
Advanced
+
+ Loading advanced settings...
+
+
+ );
+ }
+
+ return (
+
+
Advanced
+
+ Fine-tune GitPilot's agent behavior and safety settings.
+
+
+ {error && (
+
+ {error}
+
+ )}
+
+ {/* Lite Mode toggle */}
+
+
+
+
Lite Mode
+
+ Use a simplified single-agent prompt instead of the multi-agent
+ CrewAI pipeline. Recommended for small local models
+ (qwen2.5:1.5b, deepseek-r1, phi3:mini) that struggle with the
+ ReAct format.
+
+
+
+
+
+
+ {/* Permission Mode selector */}
+
+
Permission Mode
+
+ Controls when the agent needs your approval before writing files or
+ running commands.
+
+
+
+ {PERMISSION_MODES.map((mode) => {
+ const selected = permissionMode === mode.value;
+ return (
+
+ handlePermissionChange(mode.value)}
+ disabled={updatingPerm}
+ style={{ marginTop: "2px", cursor: "inherit" }}
+ />
+
+
+ {mode.label}
+
+
+ {mode.description}
+
+
+
+ );
+ })}
+
+
+
+ {/* Link to full settings modal */}
+
+
+
+
+ Full Settings
+
+
+ Server URL, telemetry, debug logs, environment variables, and more.
+
+
+
+ Open Settings Modal
+
+
+
+
+ );
+}
diff --git a/frontend/components/AdminTabs/IntegrationsTab.jsx b/frontend/components/AdminTabs/IntegrationsTab.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..e6c4d252324fdb7ce1fc329f05eb5821b4af4078
--- /dev/null
+++ b/frontend/components/AdminTabs/IntegrationsTab.jsx
@@ -0,0 +1,238 @@
+// frontend/components/AdminTabs/IntegrationsTab.jsx
+import React, { useEffect, useState } from "react";
+import { apiUrl, safeFetchJSON } from "../../utils/api.js";
+
+/**
+ * Integrations tab — shows connection status for GitHub (and future
+ * third-party integrations) with Connect/Disconnect actions.
+ *
+ * Best practices applied:
+ * - Fetch current status on mount via /api/auth/status
+ * - Show connected user info if already authenticated
+ * - "Connect GitHub" button opens /api/auth/url in the same window
+ * (OAuth flow will redirect back with ?code=...)
+ * - Disconnect clears localStorage token and re-fetches status
+ * - Handles both Web OAuth and Device Flow modes
+ */
+
+export default function IntegrationsTab({ userInfo, onDisconnect, showToast }) {
+ const [authStatus, setAuthStatus] = useState(null);
+ const [loading, setLoading] = useState(true);
+ const [connecting, setConnecting] = useState(false);
+ const [error, setError] = useState(null);
+
+ useEffect(() => {
+ let cancelled = false;
+ (async () => {
+ try {
+ const data = await safeFetchJSON(apiUrl("/api/auth/status"), { timeout: 5000 });
+ if (!cancelled) setAuthStatus(data);
+ } catch (err) {
+ if (!cancelled) setError(err?.message || "Failed to check auth status");
+ } finally {
+ if (!cancelled) setLoading(false);
+ }
+ })();
+ return () => {
+ cancelled = true;
+ };
+ }, []);
+
+ const handleConnect = async () => {
+ setConnecting(true);
+ setError(null);
+ try {
+ if (authStatus?.mode === "web") {
+ // Web OAuth flow — redirect to GitHub authorization URL
+ const { authorization_url, state } = await safeFetchJSON(
+ apiUrl("/api/auth/url"),
+ { timeout: 5000 }
+ );
+ if (state) {
+ sessionStorage.setItem("gitpilot_oauth_state", state);
+ }
+ // Full page redirect (OAuth providers don't support iframes)
+ window.location.href = authorization_url;
+ } else {
+ // Device flow — the LoginPage already handles this.
+ showToast?.(
+ "Device flow",
+ "GitHub device flow is configured. Sign out and sign in again to reconnect."
+ );
+ }
+ } catch (err) {
+ setError(err?.message || "Failed to start OAuth flow");
+ setConnecting(false);
+ }
+ };
+
+ const handleDisconnect = () => {
+ if (!window.confirm("Disconnect GitHub? You will be signed out.")) return;
+ localStorage.removeItem("github_token");
+ localStorage.removeItem("github_user");
+ onDisconnect?.();
+ showToast?.("Disconnected", "GitHub token removed.");
+ };
+
+ const isConnected = !!(userInfo && userInfo.login);
+
+ return (
+
+
Integrations
+
+ Connect third-party services to unlock additional GitPilot features.
+
+
+ {/* GitHub integration card */}
+
+
+
+
GitHub
+
+ Pull requests, issues, and remote repository workflows.
+
+
+
+ {loading ? "CHECKING..." : isConnected ? "CONNECTED" : "NOT CONNECTED"}
+
+
+
+ {isConnected && userInfo && (
+
+ {userInfo.avatar_url && (
+
+ )}
+
+
+ {userInfo.name || userInfo.login}
+
+
@{userInfo.login}
+
+
+ )}
+
+ {error && (
+
+ {error}
+
+ )}
+
+
+ {isConnected ? (
+
+ Disconnect
+
+ ) : (
+
+ {connecting ? "Connecting..." : "Connect GitHub"}
+
+ )}
+
+
+ {authStatus && !isConnected && (
+
+ Auth mode: {authStatus.mode || "unknown"}
+ {authStatus.oauth_configured && " (Web OAuth)"}
+ {authStatus.pat_configured && " (Personal Access Token)"}
+
+ )}
+
+
+ {/* Placeholder for future integrations */}
+
+
+ More integrations coming soon (GitLab, Bitbucket, Jira, Slack)
+
+
+
+ );
+}
diff --git a/frontend/components/AdminTabs/MCPServersTab.jsx b/frontend/components/AdminTabs/MCPServersTab.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..da6b21c1600e741350c89da0142161b851970c34
--- /dev/null
+++ b/frontend/components/AdminTabs/MCPServersTab.jsx
@@ -0,0 +1,337 @@
+// frontend/components/AdminTabs/MCPServersTab.jsx
+//
+// Settings tab for managing MCP Context Forge servers.
+//
+// UX layout (mirrors industry-standard plugin/extension managers):
+//
+// ┌─ Header ─ gateway pill, totals, global "MCP enabled" toggle ─┐
+// ├─ Sub-tabs: Installed · Catalog · Custom ────────────────────┤
+// ├─ ServerCard list (Installed) │
+// │ ▸ status / description / tags / tool count │
+// │ ▸ Test · Configure · Disable · Uninstall │
+// │ ▸ Expandable per-tool list with risk badges + toggles │
+// └──────────────────────────────────────────────────────────────┘
+
+import React, { useCallback, useEffect, useMemo, useState } from "react";
+import { apiUrl, safeFetchJSON } from "../../utils/api.js";
+
+import ServerCard from "./mcp/ServerCard.jsx";
+import CatalogList from "./mcp/CatalogList.jsx";
+import CustomInstallForm from "./mcp/CustomInstallForm.jsx";
+import GatewayHeader from "./mcp/GatewayHeader.jsx";
+import SyncReport from "./mcp/SyncReport.jsx";
+
+// Identifiers for the three sub-tabs of the MCP Servers view; compared
+// against the `activeSubTab` state below to decide which panel renders.
+const TAB_INSTALLED = "installed";
+const TAB_CATALOG = "catalog";
+const TAB_CUSTOM = "custom";
+
+export default function MCPServersTab({ showToast }) {
+ const [activeSubTab, setActiveSubTab] = useState(TAB_INSTALLED);
+ const [status, setStatus] = useState(null);
+ const [servers, setServers] = useState([]);
+ const [catalog, setCatalog] = useState([]);
+ const [loading, setLoading] = useState(true);
+ const [error, setError] = useState(null);
+ const [syncing, setSyncing] = useState(false);
+ const [syncReport, setSyncReport] = useState(null);
+
+ const refresh = useCallback(async () => {
+ setLoading(true);
+ setError(null);
+ try {
+ const [statusData, serversData, catalogData] = await Promise.all([
+ safeFetchJSON(apiUrl("/api/mcp/status"), { timeout: 5000 }),
+ safeFetchJSON(apiUrl("/api/mcp/servers"), { timeout: 5000 }),
+ safeFetchJSON(apiUrl("/api/mcp/catalog"), { timeout: 5000 }),
+ ]);
+ setStatus(statusData);
+ setServers(serversData?.servers || []);
+ setCatalog(catalogData?.items || []);
+ } catch (err) {
+ setError(err?.message || "Failed to load MCP server state");
+ } finally {
+ setLoading(false);
+ }
+ }, []);
+
+ useEffect(() => {
+ refresh();
+ }, [refresh]);
+
+ const post = useCallback(
+ async (path, body) => {
+ try {
+ const res = await fetch(apiUrl(path), {
+ method: "POST",
+ headers: { "Content-Type": "application/json" },
+ body: body ? JSON.stringify(body) : undefined,
+ });
+ if (!res.ok) {
+ const detail = await res.json().catch(() => ({}));
+ throw new Error(detail?.detail || `HTTP ${res.status}`);
+ }
+ return await res.json();
+ } catch (err) {
+ showToast?.("MCP error", err?.message || String(err));
+ throw err;
+ }
+ },
+ [showToast]
+ );
+
+ // ---- Server actions -----------------------------------------------------
+ const onEnableServer = async (id) => {
+ await post(`/api/mcp/servers/${id}/enable`);
+ showToast?.("Server enabled", id);
+ await refresh();
+ };
+ const onDisableServer = async (id) => {
+ await post(`/api/mcp/servers/${id}/disable`);
+ showToast?.("Server disabled", id);
+ await refresh();
+ };
+ const onUninstallServer = async (id) => {
+ if (
+ !window.confirm(
+ `Uninstall ${id}? GitPilot will stop calling its tools immediately.`
+ )
+ )
+ return;
+ await post(`/api/mcp/servers/${id}/uninstall`);
+ showToast?.("Server uninstalled", id);
+ await refresh();
+ };
+ const onTestServer = async (id) => {
+ const result = await post(`/api/mcp/servers/${id}/test`);
+ if (result?.ok) {
+ showToast?.("Server healthy", id);
+ } else {
+ showToast?.(
+ "Server unreachable",
+ result?.reason || result?.error || "Unknown error"
+ );
+ }
+ return result;
+ };
+ const onSync = useCallback(async () => {
+ setSyncing(true);
+ setSyncReport(null);
+ try {
+ const report = await post("/api/mcp/sync", {});
+ setSyncReport(report);
+ const total =
+ (report.added?.length || 0) +
+ (report.kept?.length || 0) +
+ (report.orphaned?.length || 0);
+ showToast?.(
+ report.forge_unreachable ? "Sync failed" : "Sync complete",
+ report.forge_unreachable
+ ? report.error || "forge unreachable"
+ : `+${report.added?.length || 0} added · ${total} total`
+ );
+ await refresh();
+ } catch {
+ // post() already toasted; nothing more to do.
+ } finally {
+ setSyncing(false);
+ }
+ }, [post, refresh, showToast]);
+
+ const onForgetOrphan = async (id) => {
+ if (
+ !window.confirm(
+ `Forget ${id}? It will be removed from the local list.\n` +
+ "Re-attach it to MCP Context Forge then click Sync to bring it back."
+ )
+ )
+ return;
+ await post(`/api/mcp/servers/${id}/forget`);
+ showToast?.("Server forgotten", id);
+ await refresh();
+ };
+
+ const onToggleTool = async (serverId, toolName, enabled) => {
+ await post(
+ `/api/mcp/servers/${serverId}/tools/${encodeURIComponent(
+ toolName
+ )}/toggle`,
+ { enabled }
+ );
+ await refresh();
+ };
+
+ const onInstallFromCatalog = async (serverId) => {
+ await post("/api/mcp/servers/install", { server_id: serverId });
+ showToast?.("Installed", `${serverId} (disabled until you enable it)`);
+ await refresh();
+ setActiveSubTab(TAB_INSTALLED);
+ };
+
+ const onInstallCustom = async (registerJson) => {
+ await post("/api/mcp/servers/install-custom", {
+ register_json: registerJson,
+ });
+ showToast?.("Custom server added", registerJson.name);
+ await refresh();
+ setActiveSubTab(TAB_INSTALLED);
+ };
+
+ // ---- Derived totals -----------------------------------------------------
+ const installedCount = servers.filter((s) => s.installed).length;
+ const enabledCount = servers.filter((s) => s.installed && s.enabled).length;
+ const totalTools = useMemo(
+ () => servers.reduce((acc, s) => acc + (s.tool_count || 0), 0),
+ [servers]
+ );
+
+ return (
+
+
+
+ {syncReport && (
+
setSyncReport(null)}
+ />
+ )}
+
+ {/* Sub-tab strip */}
+
+ {[
+ { id: TAB_INSTALLED, label: `Installed (${installedCount})` },
+ { id: TAB_CATALOG, label: `Catalog (${catalog.length})` },
+ { id: TAB_CUSTOM, label: "Custom" },
+ ].map((t) => (
+ setActiveSubTab(t.id)}
+ style={{
+ padding: "10px 16px",
+ border: "none",
+ background: "transparent",
+ color: activeSubTab === t.id ? "#93c5fd" : "#a0a0b0",
+ borderBottom:
+ activeSubTab === t.id
+ ? "2px solid #3B82F6"
+ : "2px solid transparent",
+ cursor: "pointer",
+ fontSize: "13px",
+ fontWeight: activeSubTab === t.id ? 600 : 400,
+ }}
+ >
+ {t.label}
+
+ ))}
+
+
+ {error && (
+
+ {error}
+
+ )}
+
+ {loading && !servers.length && (
+ Loading…
+ )}
+
+ {activeSubTab === TAB_INSTALLED && !loading && (
+
+ {servers.length === 0 && (
+ setActiveSubTab(TAB_CATALOG)}
+ />
+ )}
+ {servers.map((s) => (
+ onEnableServer(s.id)}
+ onDisable={() => onDisableServer(s.id)}
+ onUninstall={() => onUninstallServer(s.id)}
+ onTest={() => onTestServer(s.id)}
+ onForget={s.orphan ? () => onForgetOrphan(s.id) : undefined}
+ onToggleTool={(tool, enabled) =>
+ onToggleTool(s.id, tool, enabled)
+ }
+ />
+ ))}
+
+ )}
+
+ {activeSubTab === TAB_CATALOG && !loading && (
+
+ )}
+
+ {activeSubTab === TAB_CUSTOM && (
+
+ )}
+
+ );
+}
+
+function EmptyState({ title, hint, actionLabel, onAction }) {
+ return (
+
+
{title}
+
+ {hint}
+
+ {actionLabel && (
+
+ {actionLabel}
+
+ )}
+
+ );
+}
diff --git a/frontend/components/AdminTabs/SecurityTab.jsx b/frontend/components/AdminTabs/SecurityTab.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..f39161c491b77076489b0fa0163abf5b8d4fe33f
--- /dev/null
+++ b/frontend/components/AdminTabs/SecurityTab.jsx
@@ -0,0 +1,341 @@
+// frontend/components/AdminTabs/SecurityTab.jsx
+import React, { useState } from "react";
+import { scanWorkspace } from "../../utils/api.js";
+
+/**
+ * Security tab — runs a workspace scan via /api/security/scan-workspace
+ * and renders findings grouped by severity.
+ *
+ * Best practices applied:
+ * - Custom path input (defaults to ".")
+ * - Loading spinner while scanning
+ * - Error state with retry
+ * - Empty state ("No findings") with green checkmark
+ * - Findings grouped by severity (critical → info)
+ * - Each finding shows file, line, CWE, recommendation
+ * - Color-coded severity badges
+ */
+
+// Display order for finding groups — most severe first; the results list
+// filters/maps over this array so groups always render in this order.
+const SEVERITY_ORDER = ["critical", "high", "medium", "low", "info"];
+
+// Badge palette per severity: background, text, and border colors.
+// SeverityBadge falls back to the "info" entry for unknown severities.
+const SEVERITY_COLORS = {
+ critical: { bg: "#7f1d1d", text: "#fecaca", border: "#991b1b" },
+ high: { bg: "#9a3412", text: "#fed7aa", border: "#c2410c" },
+ medium: { bg: "#78350f", text: "#fde68a", border: "#a16207" },
+ low: { bg: "#164e63", text: "#a5f3fc", border: "#0e7490" },
+ info: { bg: "#1e3a5f", text: "#93c5fd", border: "#3B82F6" },
+};
+
+function SeverityBadge({ severity }) {
+ const c = SEVERITY_COLORS[severity] || SEVERITY_COLORS.info;
+ return (
+
+ {severity}
+
+ );
+}
+
+export default function SecurityTab({ showToast }) {
+ const [path, setPath] = useState(".");
+ const [scanning, setScanning] = useState(false);
+ const [result, setResult] = useState(null);
+ const [error, setError] = useState(null);
+
+ const handleScan = async () => {
+ setScanning(true);
+ setError(null);
+ setResult(null);
+ try {
+ const data = await scanWorkspace(path.trim() || ".");
+ setResult(data);
+ const findingsCount = data.findings?.length || 0;
+ showToast?.(
+ "Scan complete",
+ findingsCount === 0
+ ? "No security findings."
+ : `Found ${findingsCount} issue${findingsCount !== 1 ? "s" : ""}.`
+ );
+ } catch (err) {
+ setError(err?.message || "Scan failed");
+ } finally {
+ setScanning(false);
+ }
+ };
+
+ // Group findings by severity
+ const grouped = React.useMemo(() => {
+ const out = {};
+ if (result?.findings) {
+ for (const f of result.findings) {
+ const sev = f.severity || "info";
+ if (!out[sev]) out[sev] = [];
+ out[sev].push(f);
+ }
+ }
+ return out;
+ }, [result]);
+
+ const totalFindings = result?.findings?.length || 0;
+
+ return (
+
+
Security Scanning
+
+ Scan your workspace for vulnerabilities, secrets, and insecure patterns (OWASP Top 10).
+
+
+ {/* Scan controls */}
+
+
+
+ Path to scan (relative or absolute)
+
+ setPath(e.target.value)}
+ disabled={scanning}
+ placeholder="."
+ style={{
+ width: "100%",
+ padding: "8px 10px",
+ background: "#0d0e15",
+ border: "1px solid #2a2b36",
+ borderRadius: "4px",
+ color: "#fff",
+ fontSize: "12px",
+ fontFamily: "monospace",
+ }}
+ />
+
+
+ {scanning ? "Scanning..." : "Scan Workspace"}
+
+
+
+ {/* Error state */}
+ {error && (
+
+ Scan failed:
+ {error}
+
+ )}
+
+ {/* Results summary */}
+ {result && (
+
+
+
+
Files Scanned
+
+ {result.files_scanned ?? 0}
+
+
+
+
Total Findings
+
+ {totalFindings}
+
+
+
+
Duration
+
+ {result.scan_duration_ms ?? 0}ms
+
+
+
+
+ )}
+
+ {/* Empty state — no findings */}
+ {result && totalFindings === 0 && (
+
+
✓
+
+ No security issues found
+
+
+ Your workspace passed all {result.files_scanned ?? 0} file checks.
+
+
+ )}
+
+ {/* Findings grouped by severity */}
+ {totalFindings > 0 &&
+ SEVERITY_ORDER.filter((sev) => grouped[sev]?.length > 0).map((sev) => (
+
+
+
+
+ {grouped[sev].length} {sev} issue{grouped[sev].length !== 1 ? "s" : ""}
+
+
+
+ {grouped[sev].map((f, idx) => (
+
+
+
{f.title}
+ {f.cwe_id && (
+
+ {f.cwe_id}
+
+ )}
+
+
+ {f.file_path}:{f.line_number}
+
+ {f.snippet && (
+
+ {f.snippet}
+
+ )}
+ {f.recommendation && (
+
+ Fix:
+ {f.recommendation}
+
+ )}
+
+ ))}
+
+
+ ))}
+
+ );
+}
diff --git a/frontend/components/AdminTabs/SessionsTab.jsx b/frontend/components/AdminTabs/SessionsTab.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..f55e9ef70ededefa2e89b63a7b9fff01ca00c5f6
--- /dev/null
+++ b/frontend/components/AdminTabs/SessionsTab.jsx
@@ -0,0 +1,362 @@
+// frontend/components/AdminTabs/SessionsTab.jsx
+import React, { useEffect, useMemo, useState, useCallback } from "react";
+import { apiUrl, safeFetchJSON } from "../../utils/api.js";
+
+/**
+ * Sessions tab — admin-level table view of all saved sessions with
+ * search, sort, and delete actions.
+ *
+ * Best practices applied:
+ * - Fetch all sessions on mount
+ * - Client-side search (useMemo for filtered list)
+ * - Confirmation dialog before delete
+ * - Row hover effect
+ * - Empty / loading / error states
+ * - Relative timestamps ("2 hours ago")
+ * - Click row to open in workspace view
+ */
+
/**
 * Render an ISO-8601 timestamp as a coarse relative label:
 * "just now" (< 1 min), "Nm ago", "Nh ago", "Nd ago", falling back to
 * the locale date string for anything older than ~30 days.
 *
 * Returns an em dash ("—") for missing or unparseable input.
 *
 * @param {string|null|undefined} iso - ISO-8601 timestamp (e.g. session.updated_at)
 * @returns {string} human-readable relative time label
 */
function formatRelativeTime(iso) {
  if (!iso) return "—";
  // new Date() never throws for bad input — it yields an "Invalid Date"
  // whose getTime() is NaN. The original try/catch was dead code and let
  // garbage input fall through to toLocaleDateString() ("Invalid Date"
  // in the UI). Guard explicitly instead.
  const t = new Date(iso).getTime();
  if (Number.isNaN(t)) return "—";
  const diff = Date.now() - t;
  if (diff < 60_000) return "just now";
  if (diff < 3_600_000) return `${Math.floor(diff / 60_000)}m ago`;
  if (diff < 86_400_000) return `${Math.floor(diff / 3_600_000)}h ago`;
  if (diff < 2_592_000_000) return `${Math.floor(diff / 86_400_000)}d ago`;
  return new Date(t).toLocaleDateString();
}
+
+export default function SessionsTab({ onSelectSession, showToast }) {
+ const [sessions, setSessions] = useState([]);
+ const [loading, setLoading] = useState(true);
+ const [error, setError] = useState(null);
+ const [query, setQuery] = useState("");
+ const [deletingId, setDeletingId] = useState(null);
+
+ const fetchSessions = useCallback(async () => {
+ setError(null);
+ try {
+ const data = await safeFetchJSON(apiUrl("/api/sessions"), { timeout: 10000 });
+ setSessions(Array.isArray(data.sessions) ? data.sessions : []);
+ } catch (err) {
+ setError(err?.message || "Failed to load sessions");
+ } finally {
+ setLoading(false);
+ }
+ }, []);
+
+ useEffect(() => {
+ fetchSessions();
+ }, [fetchSessions]);
+
+ const handleDelete = async (session) => {
+ if (
+ !window.confirm(
+ `Delete session "${session.name || session.id?.slice(0, 8)}"? This cannot be undone.`
+ )
+ ) {
+ return;
+ }
+
+ setDeletingId(session.id);
+ try {
+ const res = await fetch(apiUrl(`/api/sessions/${session.id}`), {
+ method: "DELETE",
+ });
+ if (!res.ok) {
+ throw new Error(`Delete failed (${res.status})`);
+ }
+ showToast?.("Session deleted", session.name || session.id);
+ // Optimistic removal
+ setSessions((prev) => prev.filter((s) => s.id !== session.id));
+ } catch (err) {
+ setError(err?.message || "Failed to delete session");
+ } finally {
+ setDeletingId(null);
+ }
+ };
+
+ const filtered = useMemo(() => {
+ if (!query.trim()) return sessions;
+ const q = query.toLowerCase();
+ return sessions.filter((s) => {
+ return (
+ (s.name || "").toLowerCase().includes(q) ||
+ (s.repo || "").toLowerCase().includes(q) ||
+ (s.branch || "").toLowerCase().includes(q) ||
+ (s.id || "").toLowerCase().includes(q)
+ );
+ });
+ }, [sessions, query]);
+
+ return (
+
+
+
+
Sessions
+
+ All saved chat sessions ({sessions.length} total
+ {query ? `, ${filtered.length} matching` : ""}).
+
+
+
+ setQuery(e.target.value)}
+ placeholder="Search sessions..."
+ style={{
+ padding: "6px 10px",
+ background: "#0d0e15",
+ border: "1px solid #2a2b36",
+ borderRadius: "4px",
+ color: "#fff",
+ fontSize: "12px",
+ width: "220px",
+ }}
+ />
+
+ Refresh
+
+
+
+
+ {/* Loading state */}
+ {loading && (
+
+ Loading sessions...
+
+ )}
+
+ {/* Error state */}
+ {error && !loading && (
+
+ Error:
+ {error}
+
+ )}
+
+ {/* Empty state */}
+ {!loading && !error && sessions.length === 0 && (
+
+
💬
+
+ No sessions yet
+
+
+ Start chatting with GitPilot to create your first session.
+
+
+ )}
+
+ {/* Table */}
+ {!loading && filtered.length > 0 && (
+
+
+
+
+ Name
+ Repository
+ Branch
+ Messages
+ Status
+ Updated
+ Actions
+
+
+
+ {filtered.map((s) => (
+
+ (e.currentTarget.style.background = "#22232e")
+ }
+ onMouseLeave={(e) =>
+ (e.currentTarget.style.background = "transparent")
+ }
+ onClick={() => onSelectSession?.(s)}
+ >
+
+
+ {s.name || (unnamed) }
+
+
+ {s.id?.slice(0, 12)}
+
+
+
+ {s.repo || — }
+
+
+ {s.branch || — }
+
+ {s.message_count ?? 0}
+
+
+ {s.status || "unknown"}
+
+
+
+ {formatRelativeTime(s.updated_at)}
+
+
+ {
+ e.stopPropagation();
+ handleDelete(s);
+ }}
+ disabled={deletingId === s.id}
+ style={{
+ padding: "4px 10px",
+ background: "transparent",
+ color: "#f87171",
+ border: "1px solid #991b1b",
+ borderRadius: "4px",
+ cursor: deletingId === s.id ? "not-allowed" : "pointer",
+ fontSize: "11px",
+ }}
+ >
+ {deletingId === s.id ? "..." : "Delete"}
+
+
+
+ ))}
+
+
+
+ )}
+
+ {/* No matches for search */}
+ {!loading && sessions.length > 0 && filtered.length === 0 && (
+
+ No sessions match "{query}"
+
+ )}
+
+ );
+}
+
+// Module-level inline-style objects for the sessions table, shared by every
+// row so the objects are created once rather than on each render.
+
+// Header (th) cells: small uppercase column labels.
+const thStyle = {
+ padding: "10px 12px",
+ textAlign: "left",
+ fontSize: "11px",
+ fontWeight: 600,
+ textTransform: "uppercase",
+ letterSpacing: "0.5px",
+ opacity: 0.7,
+};
+
+// Body (td) cells.
+const tdStyle = {
+ padding: "10px 12px",
+ verticalAlign: "middle",
+};
diff --git a/frontend/components/AdminTabs/SkillsTab.jsx b/frontend/components/AdminTabs/SkillsTab.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..6068f4af61f6d2933f546997260ecc97a65823f3
--- /dev/null
+++ b/frontend/components/AdminTabs/SkillsTab.jsx
@@ -0,0 +1,266 @@
+// frontend/components/AdminTabs/SkillsTab.jsx
+import React, { useEffect, useState, useCallback } from "react";
+import { apiUrl, safeFetchJSON } from "../../utils/api.js";
+
+/**
+ * Skills tab — lists all loaded skills from /api/skills and allows
+ * reloading them from disk via /api/skills/reload.
+ *
+ * Best practices applied:
+ * - Fetch on mount
+ * - Explicit reload button (skills are loaded from .md files on disk)
+ * - Loading / empty / error states
+ * - Auto-trigger indicator badge
+ * - Required tools list per skill
+ * - Source file path for debugging
+ */
+
+export default function SkillsTab({ showToast }) {
+ const [skills, setSkills] = useState([]);
+ const [loading, setLoading] = useState(true);
+ const [reloading, setReloading] = useState(false);
+ const [error, setError] = useState(null);
+
+ const fetchSkills = useCallback(async () => {
+ setError(null);
+ try {
+ const data = await safeFetchJSON(apiUrl("/api/skills"), { timeout: 10000 });
+ setSkills(Array.isArray(data.skills) ? data.skills : []);
+ } catch (err) {
+ setError(err?.message || "Failed to load skills");
+ } finally {
+ setLoading(false);
+ }
+ }, []);
+
+ useEffect(() => {
+ fetchSkills();
+ }, [fetchSkills]);
+
+ const handleReload = async () => {
+ setReloading(true);
+ setError(null);
+ try {
+ const data = await safeFetchJSON(apiUrl("/api/skills/reload"), {
+ method: "POST",
+ timeout: 10000,
+ });
+ showToast?.(
+ "Skills reloaded",
+ `${data.count ?? 0} skill${data.count !== 1 ? "s" : ""} loaded from disk.`
+ );
+ await fetchSkills();
+ } catch (err) {
+ setError(err?.message || "Failed to reload skills");
+ } finally {
+ setReloading(false);
+ }
+ };
+
+ return (
+
+
+
+
Skills
+
+ Reusable prompt templates loaded from{" "}
+ .gitpilot/skills/*.md files.
+
+
+
+ {reloading ? "Reloading..." : "Reload Skills"}
+
+
+
+ {/* Loading state */}
+ {loading && (
+
+ Loading skills...
+
+ )}
+
+ {/* Error state */}
+ {error && !loading && (
+
+ Error:
+ {error}
+
+ )}
+
+ {/* Empty state */}
+ {!loading && !error && skills.length === 0 && (
+
+
📚
+
+ No skills loaded
+
+
+ Create a .gitpilot/skills/my-skill.md file with YAML
+ frontmatter to add custom skills.
+
+
+ )}
+
+ {/* Skills grid */}
+ {!loading && skills.length > 0 && (
+
+ {skills.map((skill) => (
+
+
+
+ {skill.name}
+
+ {skill.auto_trigger && (
+
+ Auto
+
+ )}
+
+
+
+ {skill.description || "No description"}
+
+
+ {Array.isArray(skill.required_tools) && skill.required_tools.length > 0 && (
+
+ {skill.required_tools.map((t) => (
+
+ {t}
+
+ ))}
+
+ )}
+
+ {skill.source && (
+
+ {skill.source}
+
+ )}
+
+ ))}
+
+ )}
+
+ );
+}
diff --git a/frontend/components/AdminTabs/WorkspaceModesTab.jsx b/frontend/components/AdminTabs/WorkspaceModesTab.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..124373c175c7c6543493487627d48467405b4252
--- /dev/null
+++ b/frontend/components/AdminTabs/WorkspaceModesTab.jsx
@@ -0,0 +1,254 @@
+// frontend/components/AdminTabs/WorkspaceModesTab.jsx
+import React, { useState } from "react";
+import { startSession } from "../../utils/api.js";
+
+/**
+ * Workspace Modes tab — allows the user to start a session in one of
+ * three modes (folder, local_git, github). Calls POST /api/session/start.
+ *
+ * Best practices applied:
+ * - Loading state while the request is in flight
+ * - Per-mode error state (not a global error)
+ * - Disabled card during submission to prevent double-click
+ * - ARIA role="button" + aria-disabled for accessibility
+ * - Toast notification on success
+ * - Success callback so App.jsx can set activeSessionId and switch to workspace view
+ */
+
/**
 * Workspace-mode catalog. Each entry describes one way GitPilot can attach
 * to code, the single text input it prompts for, and how that input maps
 * onto the POST /api/session/start payload.
 */
function defineMode(id, payloadKey, meta) {
  return {
    id,
    promptKey: payloadKey,
    // Backend contract: { mode, <payloadKey>: <trimmed user input> }.
    buildPayload: (value) => ({ mode: id, [payloadKey]: value }),
    ...meta,
  };
}

const MODES = [
  defineMode("folder", "folder_path", {
    title: "Folder Mode",
    description: "Work with any local folder. No Git required.",
    requires: "A local folder path",
    enables: "Chat, explain, review",
    promptLabel: "Folder path (absolute)",
    promptPlaceholder: "/home/you/myproject",
  }),
  defineMode("local_git", "repo_root", {
    title: "Local Git Mode",
    description: "Full repo + branch context for AI assistance.",
    requires: "A local Git repository",
    enables: "All local features (branches, diff, commit)",
    promptLabel: "Repository root (absolute path)",
    promptPlaceholder: "/home/you/my-git-repo",
  }),
  defineMode("github", "repo_full_name", {
    title: "GitHub Mode",
    description: "PRs, issues, remote workflows via GitHub API.",
    requires: "GitHub token (already signed in)",
    enables: "Full platform features",
    promptLabel: "Repository (owner/repo)",
    promptPlaceholder: "octocat/hello-world",
  }),
];
+
+export default function WorkspaceModesTab({ onSessionStarted, showToast }) {
+ const [activeModeId, setActiveModeId] = useState(null);
+ const [inputValue, setInputValue] = useState("");
+ const [submittingId, setSubmittingId] = useState(null);
+ const [errorByMode, setErrorByMode] = useState({});
+
+ const handleCardClick = (mode) => {
+ if (submittingId) return;
+ setActiveModeId(mode.id);
+ setInputValue("");
+ setErrorByMode((prev) => ({ ...prev, [mode.id]: null }));
+ };
+
+ const handleStart = async (mode) => {
+ const trimmed = inputValue.trim();
+ if (!trimmed) {
+ setErrorByMode((prev) => ({
+ ...prev,
+ [mode.id]: `${mode.promptLabel} is required`,
+ }));
+ return;
+ }
+
+ setSubmittingId(mode.id);
+ setErrorByMode((prev) => ({ ...prev, [mode.id]: null }));
+
+ try {
+ const payload = mode.buildPayload(trimmed);
+ const result = await startSession(payload);
+
+ showToast?.(
+ `${mode.title} started`,
+ `Session ${result.session_id?.slice(0, 8) || ""} is now active.`
+ );
+
+ onSessionStarted?.(result);
+ setActiveModeId(null);
+ setInputValue("");
+ } catch (err) {
+ setErrorByMode((prev) => ({
+ ...prev,
+ [mode.id]: err?.message || "Failed to start session",
+ }));
+ } finally {
+ setSubmittingId(null);
+ }
+ };
+
+ const handleCancel = () => {
+ if (submittingId) return;
+ setActiveModeId(null);
+ setInputValue("");
+ };
+
+ return (
+
+
Workspace Modes
+
+ Choose how you want GitPilot to interact with your code. You can switch modes at any time.
+
+
+
+ {MODES.map((mode) => {
+ const isActive = activeModeId === mode.id;
+ const isSubmitting = submittingId === mode.id;
+ const error = errorByMode[mode.id];
+
+ return (
+
!isActive && handleCardClick(mode)}
+ onKeyDown={(e) => {
+ if ((e.key === "Enter" || e.key === " ") && !isActive) {
+ e.preventDefault();
+ handleCardClick(mode);
+ }
+ }}
+ style={{
+ background: isActive ? "#1e3a5f" : "#1a1b26",
+ borderRadius: "8px",
+ padding: "20px",
+ border: isActive ? "1px solid #3B82F6" : "1px solid #2a2b36",
+ cursor: submittingId && !isSubmitting ? "not-allowed" : "pointer",
+ opacity: submittingId && !isSubmitting ? 0.5 : 1,
+ transition: "all 150ms ease",
+ }}
+ >
+
+ {mode.title}
+
+
+ {mode.description}
+
+
+ Requires:
+ {mode.requires}
+
+
+ Enables:
+ {mode.enables}
+
+
+ {isActive && (
+
e.stopPropagation()} style={{ marginTop: "12px" }}>
+
+ {mode.promptLabel}
+
+
setInputValue(e.target.value)}
+ onKeyDown={(e) => {
+ if (e.key === "Enter") {
+ e.preventDefault();
+ handleStart(mode);
+ } else if (e.key === "Escape") {
+ handleCancel();
+ }
+ }}
+ placeholder={mode.promptPlaceholder}
+ disabled={isSubmitting}
+ autoFocus
+ style={{
+ width: "100%",
+ padding: "6px 8px",
+ background: "#0d0e15",
+ border: "1px solid #2a2b36",
+ borderRadius: "4px",
+ color: "#fff",
+ fontSize: "12px",
+ fontFamily: "monospace",
+ }}
+ />
+ {error && (
+
+ {error}
+
+ )}
+
+ handleStart(mode)}
+ disabled={isSubmitting || !inputValue.trim()}
+ style={{
+ padding: "6px 12px",
+ background: isSubmitting ? "#555" : "#3B82F6",
+ color: "#fff",
+ border: "none",
+ borderRadius: "4px",
+ cursor: isSubmitting || !inputValue.trim() ? "not-allowed" : "pointer",
+ fontSize: "12px",
+ fontWeight: 600,
+ }}
+ >
+ {isSubmitting ? "Starting..." : "Start Session"}
+
+
+ Cancel
+
+
+
+ )}
+
+ );
+ })}
+
+
+ );
+}
diff --git a/frontend/components/AdminTabs/index.js b/frontend/components/AdminTabs/index.js
new file mode 100644
index 0000000000000000000000000000000000000000..44e95474e41896b65e27b32ecfc2830d24aa7e2e
--- /dev/null
+++ b/frontend/components/AdminTabs/index.js
@@ -0,0 +1,9 @@
+// frontend/components/AdminTabs/index.js
+// Barrel export — re-exports every admin tab component from one module so
+// consumers can import several tabs in a single statement instead of
+// reaching into the individual files.
+export { default as WorkspaceModesTab } from "./WorkspaceModesTab.jsx";
+export { default as SecurityTab } from "./SecurityTab.jsx";
+export { default as IntegrationsTab } from "./IntegrationsTab.jsx";
+export { default as MCPServersTab } from "./MCPServersTab.jsx";
+export { default as SkillsTab } from "./SkillsTab.jsx";
+export { default as SessionsTab } from "./SessionsTab.jsx";
+export { default as AdvancedTab } from "./AdvancedTab.jsx";
diff --git a/frontend/components/AdminTabs/mcp/CatalogList.jsx b/frontend/components/AdminTabs/mcp/CatalogList.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..e1bf6c5930e5102fb65b7eec34a6537d1b22bb43
--- /dev/null
+++ b/frontend/components/AdminTabs/mcp/CatalogList.jsx
@@ -0,0 +1,101 @@
+// frontend/components/AdminTabs/mcp/CatalogList.jsx
+// Browse curated MCP servers shipped with GitPilot. Each item has a
+// one-click "Install" that lands the server in the Installed tab,
+// disabled by default.
+
+import React from "react";
+
+export default function CatalogList({ items, onInstall }) {
+ if (!items?.length) {
+ return (
+
+ No catalog entries shipped with this build.
+
+ );
+ }
+
+ return (
+
+ {items.map((item) => (
+
+
+
+
+ {item.id}
+
+ {item.installed && (
+
+ installed
+
+ )}
+
+
+ {item.description}
+
+
+ {(item.tags || []).map((t) => (
+
+ {t}
+
+ ))}
+
+
+
onInstall(item.id)}
+ disabled={item.installed}
+ style={{
+ padding: "6px 14px",
+ background: item.installed ? "#252634" : "#3B82F6",
+ color: item.installed ? "#7a7d8a" : "#fff",
+ border: "none",
+ borderRadius: "4px",
+ cursor: item.installed ? "not-allowed" : "pointer",
+ fontSize: "12px",
+ fontWeight: 600,
+ }}
+ >
+ {item.installed ? "Installed" : "Install"}
+
+
+ ))}
+
+ );
+}
diff --git a/frontend/components/AdminTabs/mcp/CustomInstallForm.jsx b/frontend/components/AdminTabs/mcp/CustomInstallForm.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..cef9a3fab26f9bb09fcc5ed92ed357c015d7a2ee
--- /dev/null
+++ b/frontend/components/AdminTabs/mcp/CustomInstallForm.jsx
@@ -0,0 +1,134 @@
+// frontend/components/AdminTabs/mcp/CustomInstallForm.jsx
+// Paste-a-register.json form for installing a custom MCP server.
+
+import React, { useState } from "react";
+
+// Example Context Forge register.json: the initial value of the form's
+// text state (useState(SAMPLE) below) and the value the field is reset to
+// after a successful install.
+const SAMPLE = `{
+ "name": "mcp-neo4j-server",
+ "endpoint": "http://mcp-neo4j-server:8083/mcp",
+ "description": "Neo4j MCP server for graph schema discovery",
+ "tags": ["graph", "neo4j"],
+ "auth": { "type": "bearer", "env": "MCP_NEO4J_SERVER_TOKEN" }
+}`;
+
+export default function CustomInstallForm({ onSubmit }) {
+ const [text, setText] = useState(SAMPLE);
+ const [err, setErr] = useState(null);
+ const [submitting, setSubmitting] = useState(false);
+
+ const handleSubmit = async () => {
+ setErr(null);
+ let parsed;
+ try {
+ parsed = JSON.parse(text);
+ } catch (e) {
+ setErr("Invalid JSON: " + (e?.message || ""));
+ return;
+ }
+ if (!parsed.name || !parsed.endpoint) {
+ setErr("register.json must include 'name' and 'endpoint'.");
+ return;
+ }
+ setSubmitting(true);
+ try {
+ await onSubmit(parsed);
+ setText(SAMPLE);
+ } catch (e) {
+ setErr(e?.message || "Install failed");
+ } finally {
+ setSubmitting(false);
+ }
+ };
+
+ return (
+
+
Install custom server
+
+ Paste a Context Forge register.json. The server lands
+ disabled — turn it on from the Installed tab once you have set its
+ auth token.
+
+
+ );
+}
diff --git a/frontend/components/AdminTabs/mcp/GatewayHeader.jsx b/frontend/components/AdminTabs/mcp/GatewayHeader.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..902dcfe4c2a7084a83f644d1b9045ae542e3cd9f
--- /dev/null
+++ b/frontend/components/AdminTabs/mcp/GatewayHeader.jsx
@@ -0,0 +1,145 @@
+// frontend/components/AdminTabs/mcp/GatewayHeader.jsx
+// Top strip of the MCP Servers tab: gateway pill + roll-up counters.
+
+import React from "react";
+
+export default function GatewayHeader({
+ status,
+ installedCount,
+ enabledCount,
+ totalTools,
+ onRefresh,
+ onSync,
+ syncing,
+}) {
+ const reachable = status?.gateway_reachable;
+ const dotColor = reachable ? "#10b981" : "#ef4444";
+ const dotLabel = reachable ? "Connected" : "Unreachable";
+ const gatewayUrl = status?.gateway_url || "—";
+
+ return (
+
+
+
+
+
+ MCP Context Forge — {dotLabel}
+
+ {status?.plugin_enabled === false && (
+
+ plugin disabled
+
+ )}
+
+
+ Gateway: {gatewayUrl}
+
+
+
+
+
+
+
+
+ Refresh
+
+ {onSync && (
+
+ {syncing ? "Syncing…" : "Sync"}
+
+ )}
+
+
+ );
+}
+
+function Counter({ label, value }) {
+ return (
+
+
+ {value ?? "—"}
+
+
+ {label}
+
+
+ );
+}
diff --git a/frontend/components/AdminTabs/mcp/ServerCard.jsx b/frontend/components/AdminTabs/mcp/ServerCard.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..6384105dad800883f23f9e843e9f13a35e289343
--- /dev/null
+++ b/frontend/components/AdminTabs/mcp/ServerCard.jsx
@@ -0,0 +1,334 @@
+// frontend/components/AdminTabs/mcp/ServerCard.jsx
+// One installed MCP server. Collapsed shows summary + actions; expanded
+// reveals the per-tool list with risk badges and individual toggles.
+
+import React, { useState } from "react";
+import ToolRow from "./ToolRow.jsx";
+
+const RISK_PALETTE = {
+ low: { bg: "#0f3a26", border: "#1f5a3e", text: "#86efac" },
+ medium: { bg: "#3a2e0f", border: "#5a4a1f", text: "#fcd34d" },
+ high: { bg: "#3a0f0f", border: "#5a1f1f", text: "#fca5a5" },
+};
+
+export default function ServerCard({
+ server,
+ onEnable,
+ onDisable,
+ onUninstall,
+ onTest,
+ onToggleTool,
+ onForget,
+}) {
+ const [expanded, setExpanded] = useState(false);
+ const [testResult, setTestResult] = useState(null);
+ const [testing, setTesting] = useState(false);
+
+ const handleTest = async () => {
+ setTesting(true);
+ try {
+ const result = await onTest();
+ setTestResult(result);
+ } finally {
+ setTesting(false);
+ }
+ };
+
+ const statusDot = server.enabled ? "#10b981" : "#6b7280";
+ const statusLabel = server.enabled ? "Enabled" : "Disabled";
+
+ // Risk roll-up shown next to the tool count.
+ const riskCounts = server.tools?.reduce(
+ (acc, t) => ({ ...acc, [t.risk]: (acc[t.risk] || 0) + 1 }),
+ {}
+ ) || {};
+
+ return (
+
+
+
+
+
+
+
+ {server.id}
+
+
+ {statusLabel}
+
+ {!server.is_known && (
+
+ custom
+
+ )}
+ {server.orphan && (
+
+ orphan
+
+ )}
+ {server.source === "forge-sync" && !server.orphan && (
+
+ via sync
+
+ )}
+
+
+ {server.description || "—"}
+
+
+ {server.endpoint || "—"}
+
+
+ {(server.tags || []).map((t) => (
+
+ {t}
+
+ ))}
+
+
+
+
+
+ {server.enabled ? (
+
+ Disable
+
+ ) : (
+
+ Enable
+
+ )}
+
+ {testing ? "Testing…" : "Test"}
+
+ {onForget ? (
+
+ Forget
+
+ ) : (
+
+ Uninstall
+
+ )}
+
+
+ {server.tool_count} tool{server.tool_count === 1 ? "" : "s"}
+ {Object.entries(riskCounts).map(([risk, count]) => (
+
+ {count} {risk}
+
+ ))}
+
+
setExpanded((v) => !v)}
+ style={{
+ padding: "4px 8px",
+ background: "transparent",
+ color: "#93c5fd",
+ border: "none",
+ cursor: "pointer",
+ fontSize: "12px",
+ }}
+ >
+ {expanded ? "Hide tools ▴" : "Show tools ▾"}
+
+
+
+
+ {testResult && (
+
+ {testResult.ok
+ ? "Healthy. Inspector confirmed the server is reachable and advertises its expected tools."
+ : `Failed: ${testResult.reason || testResult.error || "unknown error"}`}
+
+ )}
+
+
+ {expanded && (
+
+ {server.tools?.length ? (
+ server.tools.map((t) => (
+
onToggleTool(t.name, enabled)}
+ />
+ ))
+ ) : (
+
+ No tools advertised by this server.
+
+ )}
+
+ )}
+
+ );
+}
+
+function Btn({ children, variant = "default", ...props }) {
+ const palettes = {
+ default: { bg: "#252634", color: "#e0e0e7", border: "#3a3b4a" },
+ primary: { bg: "#3B82F6", color: "#fff", border: "#3B82F6" },
+ ghost: { bg: "transparent", color: "#cdd0d8", border: "#3a3b4a" },
+ danger: { bg: "transparent", color: "#fca5a5", border: "#5a1f1f" },
+ };
+ const p = palettes[variant];
+ return (
+
+ {children}
+
+ );
+}
diff --git a/frontend/components/AdminTabs/mcp/SyncReport.jsx b/frontend/components/AdminTabs/mcp/SyncReport.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..25f72e8c268a3cb51c769e0dc289516fd71b7e31
--- /dev/null
+++ b/frontend/components/AdminTabs/mcp/SyncReport.jsx
@@ -0,0 +1,152 @@
+// frontend/components/AdminTabs/mcp/SyncReport.jsx
+// Renders the SyncReport returned by POST /api/mcp/sync as a compact
+// dismissible banner. Best practices: structured + colour-coded counts,
+// truncated id lists with a "View" toggle, single primary "Dismiss"
+// action, ARIA "status" so a screen reader announces the result once.
+
+import React, { useState } from "react";
+
+export default function SyncReport({ report, onDismiss }) {
+ const [open, setOpen] = useState(false);
+ if (!report) return null;
+
+ const { added = [], kept = [], orphaned = [], forge_unreachable, error } = report;
+
+ if (forge_unreachable) {
+ return (
+
+ );
+ }
+
+ const noChanges = added.length === 0 && orphaned.length === 0;
+
+ return (
+ setOpen((v) => !v)}
+ style={{
+ background: "transparent",
+ border: "none",
+ color: "#93c5fd",
+ cursor: "pointer",
+ fontSize: 12,
+ padding: 0,
+ }}
+ >
+ {open ? "Hide details ▴" : "View details ▾"}
+
+ }
+ >
+ {open && (
+
+ {added.length > 0 && (
+
+ )}
+ {kept.length > 0 && (
+
+ )}
+ {orphaned.length > 0 && (
+
+ )}
+ {report.correlation_id && (
+
+ correlation_id: {report.correlation_id}
+
+ )}
+
+ )}
+
+ );
+}
+
+function Banner({ kind, title, body, footer, onDismiss, children }) {
+ const palette = {
+ ok: { bg: "#0f3a26", border: "#1f5a3e", text: "#86efac" },
+ info: { bg: "#1e3a5f", border: "#3B82F6", text: "#93c5fd" },
+ warn: { bg: "#3a2e0f", border: "#5a4a1f", text: "#fcd34d" },
+ bad: { bg: "#3a0f0f", border: "#5a1f1f", text: "#fca5a5" },
+ }[kind] || { bg: "#1a1b26", border: "#2a2b36", text: "#cdd0d8" };
+
+ return (
+
+
+ {title}
+
+ ×
+
+
+ {body &&
{body}
}
+ {children}
+ {footer &&
{footer}
}
+
+ );
+}
+
+function Detail({ label, items, colour, hint }) {
+ return (
+
+
{label}: {" "}
+
+ {items.slice(0, 6).join(", ")}
+ {items.length > 6 ? ` … (+${items.length - 6} more)` : ""}
+
+ {hint && (
+
{hint}
+ )}
+
+ );
+}
diff --git a/frontend/components/AdminTabs/mcp/ToolRow.jsx b/frontend/components/AdminTabs/mcp/ToolRow.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..82b020416391154cab3e2e73e4b04d105fbf4b0b
--- /dev/null
+++ b/frontend/components/AdminTabs/mcp/ToolRow.jsx
@@ -0,0 +1,149 @@
+// frontend/components/AdminTabs/mcp/ToolRow.jsx
+// One row inside an expanded ServerCard. Tool name, risk badge,
+// "used by" agent chips, and a per-tool enable toggle.
+
+import React from "react";
+
+const RISK_PALETTE = {
+ low: { bg: "#0f3a26", text: "#86efac", label: "low" },
+ medium: { bg: "#3a2e0f", text: "#fcd34d", label: "med" },
+ high: { bg: "#3a0f0f", text: "#fca5a5", label: "high" },
+};
+
+export default function ToolRow({ tool, disabled, onToggle }) {
+ const risk = RISK_PALETTE[tool.risk] || RISK_PALETTE.low;
+
+ // Destructive tools cannot be toggled on by the UI; the backend's
+ // PolicyEngine will reject a toggle PUT anyway, but disabling the
+ // control surfaces the constraint up front.
+ const lockEnable = tool.destructive;
+
+ return (
+
+
+
+
+ {tool.name}
+
+
+ {risk.label}
+
+
+
+ {(tool.used_by || []).length > 0 && used by }
+ {(tool.used_by || []).map((agent) => (
+
+ {agent}
+
+ ))}
+
+
+
+
onToggle(checked)}
+ />
+
+ );
+}
+
+function Toggle({ checked, disabled, title, onChange }) {
+ return (
+
+ onChange(e.target.checked)}
+ style={{ position: "absolute", opacity: 0, pointerEvents: "none" }}
+ aria-label={title}
+ />
+
+
+
+
+ );
+}
diff --git a/frontend/components/AssistantMessage.jsx b/frontend/components/AssistantMessage.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..9ec8c002c9ae6cabc8c673b3a228ff232bbad2a5
--- /dev/null
+++ b/frontend/components/AssistantMessage.jsx
@@ -0,0 +1,167 @@
+import React from "react";
+import PlanView from "./PlanView.jsx";
+
+export default function AssistantMessage({ answer, plan, executionLog, planStatus }) {
+ // ``planStatus`` is optional metadata about the lifecycle of the plan
+ // attached to this message: "executed" | "rejected" | null. It drives
+ // the badge next to the Action Plan header so the user can tell at a
+ // glance, in chat history, whether a previous plan was approved or
+ // dismissed. Defaults to null (no badge) to keep the legacy render
+ // path untouched.
+ const styles = {
+ container: {
+ marginBottom: "20px",
+ padding: "20px",
+ backgroundColor: "#18181B", // Zinc-900
+ borderRadius: "12px",
+ border: "1px solid #27272A", // Zinc-800
+ color: "#F4F4F5", // Zinc-100
+ fontFamily: '-apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, sans-serif',
+ boxShadow: "0 4px 6px -1px rgba(0, 0, 0, 0.1), 0 2px 4px -1px rgba(0, 0, 0, 0.06)",
+ },
+ section: {
+ marginBottom: "20px",
+ },
+ lastSection: {
+ marginBottom: "0",
+ },
+ header: {
+ display: "flex",
+ alignItems: "center",
+ marginBottom: "12px",
+ paddingBottom: "8px",
+ borderBottom: "1px solid #3F3F46", // Zinc-700
+ },
+ title: {
+ fontSize: "12px",
+ fontWeight: "600",
+ textTransform: "uppercase",
+ letterSpacing: "0.05em",
+ color: "#A1A1AA", // Zinc-400
+ margin: 0,
+ },
+ content: {
+ fontSize: "14px",
+ lineHeight: "1.6",
+ whiteSpace: "pre-wrap",
+ },
+ executionList: {
+ listStyle: "none",
+ padding: 0,
+ margin: 0,
+ display: "flex",
+ flexDirection: "column",
+ gap: "8px",
+ },
+ executionStep: {
+ display: "flex",
+ flexDirection: "column",
+ gap: "4px",
+ padding: "10px",
+ backgroundColor: "#09090B", // Zinc-950
+ borderRadius: "6px",
+ border: "1px solid #27272A",
+ fontSize: "13px",
+ },
+ stepNumber: {
+ fontSize: "11px",
+ fontWeight: "600",
+ color: "#10B981", // Emerald-500
+ textTransform: "uppercase",
+ },
+ stepSummary: {
+ color: "#D4D4D8", // Zinc-300
+ fontFamily: "ui-monospace, SFMono-Regular, Menlo, Monaco, Consolas, monospace",
+ },
+ };
+
+ // Only show Action Plan section when there are actual file actions.
+ // For Lite Mode Q&A responses (all steps have 0 files), the plan
+ // just duplicates the answer — hiding it avoids showing the same text 3x.
+ const hasFileActions = plan?.steps?.some(s => s.files?.length > 0);
+
+ return (
+
+ {/* Answer section */}
+
+
+ {/* Action Plan section — only when there are file changes */}
+ {plan && hasFileActions && (
+
+
+ Action Plan
+ {planStatus === "executed" && (
+
+ ✓ Executed
+
+ )}
+ {planStatus === "rejected" && (
+
+ ✕ Rejected
+
+ )}
+
+
+
+ )}
+
+ {/* Execution Log section (shown after execution) */}
+ {executionLog && (
+
+
+
+
+ {executionLog.steps.map((s) => (
+
+ Step {s.step_number}
+ {s.summary}
+
+ ))}
+
+
+
+ )}
+
+ );
+}
\ No newline at end of file
diff --git a/frontend/components/BranchPicker.jsx b/frontend/components/BranchPicker.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..e04e04d77dbda1a8532b26c9c2d3fc92fb7d2616
--- /dev/null
+++ b/frontend/components/BranchPicker.jsx
@@ -0,0 +1,398 @@
+import React, { useCallback, useEffect, useRef, useState } from "react";
+import { createPortal } from "react-dom";
+
+/**
+ * BranchPicker — Claude-Code-on-Web parity branch selector.
+ *
+ * Fetches branches from the new /api/repos/{owner}/{repo}/branches endpoint.
+ * Shows search, default branch badge, AI session branch highlighting.
+ *
+ * Fixes applied:
+ * - Dropdown portaled to document.body (avoids overflow:hidden clipping)
+ * - Branches cached per repo (no "No branches found" flash)
+ * - Shows "Loading..." only on first fetch, keeps stale data otherwise
+ */
+
+// Simple per-repo branch cache so reopening the dropdown is instant
+const branchCache = {};
+
+/**
+ * Props:
+ * repo, currentBranch, defaultBranch, sessionBranches, onBranchChange
+ * — standard branch-picker props
+ *
+ * externalAnchorRef (optional) — a React ref pointing to an external DOM
+ * element to anchor the dropdown to. When provided:
+ * - BranchPicker skips rendering its own trigger button
+ * - the dropdown opens immediately on mount
+ * - closing the dropdown calls onClose()
+ *
+ * onClose (optional) — called when the dropdown is dismissed (outside
+ * click or Escape). Only meaningful with externalAnchorRef.
+ */
+export default function BranchPicker({
+ repo,
+ currentBranch,
+ defaultBranch,
+ sessionBranches = [],
+ onBranchChange,
+ externalAnchorRef,
+ onClose,
+}) {
+ const isExternalMode = !!externalAnchorRef;
+ const [open, setOpen] = useState(isExternalMode);
+ const [query, setQuery] = useState("");
+ const [branches, setBranches] = useState([]);
+ const [loading, setLoading] = useState(false);
+ const [error, setError] = useState(null);
+ const triggerRef = useRef(null);
+ const dropdownRef = useRef(null);
+ const inputRef = useRef(null);
+
+ const branch = currentBranch || defaultBranch || "main";
+ const isAiSession = sessionBranches.includes(branch) && branch !== defaultBranch;
+
+ // The element used for dropdown positioning
+ const anchorRef = isExternalMode ? externalAnchorRef : triggerRef;
+
+ const cacheKey = repo ? `${repo.owner}/${repo.name}` : null;
+
+ // Seed from cache on mount / repo change
+ useEffect(() => {
+ if (cacheKey && branchCache[cacheKey]) {
+ setBranches(branchCache[cacheKey]);
+ }
+ }, [cacheKey]);
+
+ // Fetch branches from GitHub via backend
+ const fetchBranches = useCallback(async (searchQuery) => {
+ if (!repo) return;
+ setLoading(true);
+ setError(null);
+ try {
+ const token = localStorage.getItem("github_token");
+ const headers = token ? { Authorization: `Bearer ${token}` } : {};
+ const params = new URLSearchParams({ per_page: "100" });
+ if (searchQuery) params.set("query", searchQuery);
+
+ const res = await fetch(
+ `/api/repos/${repo.owner}/${repo.name}/branches?${params}`,
+ { headers, cache: "no-cache" }
+ );
+ if (!res.ok) {
+ const errData = await res.json().catch(() => ({}));
+ const detail = errData.detail || `HTTP ${res.status}`;
+ console.warn("BranchPicker: fetch failed:", detail);
+ setError(detail);
+ return;
+ }
+ const data = await res.json();
+ const fetched = data.branches || [];
+ setBranches(fetched);
+
+ // Only cache the unfiltered result
+ if (!searchQuery && cacheKey) {
+ branchCache[cacheKey] = fetched;
+ }
+ } catch (err) {
+ console.warn("Failed to fetch branches:", err);
+ } finally {
+ setLoading(false);
+ }
+ }, [repo, cacheKey]);
+
+ // Fetch + focus when opened
+ useEffect(() => {
+ if (open) {
+ fetchBranches(query);
+ setTimeout(() => inputRef.current?.focus(), 50);
+ }
+ }, [open]); // eslint-disable-line react-hooks/exhaustive-deps
+
+ // Debounced search
+ useEffect(() => {
+ if (!open) return;
+ const t = setTimeout(() => fetchBranches(query), 300);
+ return () => clearTimeout(t);
+ }, [query, open, fetchBranches]);
+
+ // Close on outside click
+ useEffect(() => {
+ if (!open) return;
+ const handler = (e) => {
+ const inAnchor = anchorRef.current && anchorRef.current.contains(e.target);
+ const inDropdown = dropdownRef.current && dropdownRef.current.contains(e.target);
+ if (!inAnchor && !inDropdown) {
+ handleClose();
+ }
+ };
+ document.addEventListener("mousedown", handler);
+ return () => document.removeEventListener("mousedown", handler);
+ }, [open]); // eslint-disable-line react-hooks/exhaustive-deps
+
+ const handleClose = useCallback(() => {
+ setOpen(false);
+ setQuery("");
+ onClose?.();
+ }, [onClose]);
+
+ const handleSelect = (branchName) => {
+ handleClose();
+ if (branchName !== branch) {
+ onBranchChange?.(branchName);
+ }
+ };
+
+ // Merge API branches with session branches (AI branches might not show in GitHub API)
+ const allBranches = [...branches];
+ for (const sb of sessionBranches) {
+ if (!allBranches.find((b) => b.name === sb)) {
+ allBranches.push({ name: sb, is_default: false, protected: false });
+ }
+ }
+
+ // Calculate portal position from anchor element
+ const getDropdownPosition = () => {
+ if (!anchorRef.current) return { top: 0, left: 0 };
+ const rect = anchorRef.current.getBoundingClientRect();
+ return {
+ top: rect.bottom + 4,
+ left: rect.left,
+ };
+ };
+
+ const pos = open ? getDropdownPosition() : { top: 0, left: 0 };
+
+ return (
+
+ {/* Trigger button — hidden when using external anchor */}
+ {!isExternalMode && (
+
setOpen((v) => !v)}
+ >
+
+
+
+
+
+
+ {branch}
+
+
+
+
+ )}
+
+ {/* Dropdown — portaled to document.body to escape overflow:hidden */}
+ {open && createPortal(
+
+ {/* Search input */}
+
+ setQuery(e.target.value)}
+ style={styles.searchInput}
+ onKeyDown={(e) => {
+ if (e.key === "Escape") {
+ handleClose();
+ }
+ }}
+ />
+
+
+ {/* Branch list */}
+
+ {loading && allBranches.length === 0 && (
+
Loading...
+ )}
+
+ {!loading && error && (
+
{error}
+ )}
+
+ {!loading && !error && allBranches.length === 0 && (
+
No branches found
+ )}
+
+ {allBranches.map((b) => {
+ const isDefault = b.is_default || b.name === defaultBranch;
+ const isAi = sessionBranches.includes(b.name);
+ const isCurrent = b.name === branch;
+
+ return (
+
handleSelect(b.name)}
+ >
+
+ ✓
+
+
+ {b.name}
+
+ {isDefault && (
+
default
+ )}
+ {isAi && !isDefault && (
+
AI
+ )}
+ {b.protected && (
+
+
+
+
+
+ )}
+
+ );
+ })}
+
+ {/* Subtle loading indicator when refreshing with cached data visible */}
+ {loading && allBranches.length > 0 && (
+
Updating...
+ )}
+
+
,
+ document.body
+ )}
+
+ );
+}
+
+const styles = {
+ container: {
+ position: "relative",
+ },
+ trigger: {
+ display: "flex",
+ alignItems: "center",
+ gap: 6,
+ padding: "4px 8px",
+ borderRadius: 4,
+ border: "1px solid #3F3F46",
+ background: "transparent",
+ fontSize: 13,
+ cursor: "pointer",
+ fontFamily: "monospace",
+ maxWidth: 200,
+ },
+ branchName: {
+ whiteSpace: "nowrap",
+ overflow: "hidden",
+ textOverflow: "ellipsis",
+ maxWidth: 140,
+ },
+ dropdown: {
+ position: "fixed",
+ width: 280,
+ backgroundColor: "#1F1F23",
+ border: "1px solid #27272A",
+ borderRadius: 8,
+ boxShadow: "0 8px 24px rgba(0,0,0,0.6)",
+ zIndex: 9999,
+ overflow: "hidden",
+ },
+ searchBox: {
+ padding: "8px 10px",
+ borderBottom: "1px solid #27272A",
+ },
+ searchInput: {
+ width: "100%",
+ padding: "6px 8px",
+ borderRadius: 4,
+ border: "1px solid #3F3F46",
+ background: "#131316",
+ color: "#E4E4E7",
+ fontSize: 12,
+ outline: "none",
+ fontFamily: "monospace",
+ boxSizing: "border-box",
+ },
+ branchList: {
+ maxHeight: 260,
+ overflowY: "auto",
+ },
+ branchRow: {
+ display: "flex",
+ alignItems: "center",
+ gap: 6,
+ padding: "7px 10px",
+ cursor: "pointer",
+ transition: "background-color 0.1s",
+ borderBottom: "1px solid rgba(39, 39, 42, 0.5)",
+ },
+ loadingRow: {
+ padding: "12px 10px",
+ textAlign: "center",
+ fontSize: 12,
+ color: "#71717A",
+ },
+ errorRow: {
+ padding: "12px 10px",
+ textAlign: "center",
+ fontSize: 11,
+ color: "#F59E0B",
+ },
+ defaultBadge: {
+ fontSize: 9,
+ padding: "1px 5px",
+ borderRadius: 8,
+ backgroundColor: "rgba(16, 185, 129, 0.15)",
+ color: "#10B981",
+ fontWeight: 600,
+ textTransform: "uppercase",
+ letterSpacing: "0.04em",
+ flexShrink: 0,
+ },
+ aiBadge: {
+ fontSize: 9,
+ padding: "1px 5px",
+ borderRadius: 8,
+ backgroundColor: "rgba(59, 130, 246, 0.15)",
+ color: "#60a5fa",
+ fontWeight: 700,
+ flexShrink: 0,
+ },
+ protectedBadge: {
+ color: "#F59E0B",
+ flexShrink: 0,
+ display: "flex",
+ alignItems: "center",
+ },
+};
diff --git a/frontend/components/ChatPanel.jsx b/frontend/components/ChatPanel.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..c66d274bf900e2b2a39c5a39edd8875522fb8f91
--- /dev/null
+++ b/frontend/components/ChatPanel.jsx
@@ -0,0 +1,940 @@
+// frontend/components/ChatPanel.jsx
+import React, { useEffect, useRef, useState } from "react";
+import AssistantMessage from "./AssistantMessage.jsx";
+import ThinkingIndicator from "./ThinkingIndicator.jsx";
+import ContextMeter from "./ContextMeter.jsx";
+import DiffStats from "./DiffStats.jsx";
+import DiffViewer from "./DiffViewer.jsx";
+import CreatePRButton from "./CreatePRButton.jsx";
+import StreamingMessage from "./StreamingMessage.jsx";
+import { SessionWebSocket } from "../utils/ws.js";
+
+// Helper to get headers (inline safety if utility is missing)
+const getHeaders = () => ({
+ "Content-Type": "application/json",
+ Authorization: `Bearer ${localStorage.getItem("github_token") || ""}`,
+});
+
+export default function ChatPanel({
+ repo,
+ defaultBranch = "main",
+ currentBranch, // do NOT default here; parent must pass the real one
+ onExecutionComplete,
+ sessionChatState,
+ onSessionChatStateChange,
+ sessionId,
+ onEnsureSession,
+ canChat = true, // readiness gate: false disables composer and shows blocker
+ chatBlocker = null, // { message: string, cta?: string, onCta?: () => void }
+}) {
+ // Initialize state from props or defaults
+ const [messages, setMessages] = useState(sessionChatState?.messages || []);
+ const [goal, setGoal] = useState("");
+ const [plan, setPlan] = useState(sessionChatState?.plan || null);
+
+ const [loadingPlan, setLoadingPlan] = useState(false);
+ const [executing, setExecuting] = useState(false);
+ const [status, setStatus] = useState("");
+
+ // Claude-Code-on-Web: WebSocket streaming + diff + PR
+ const [wsConnected, setWsConnected] = useState(false);
+ const [streamingEvents, setStreamingEvents] = useState([]);
+ const [diffData, setDiffData] = useState(null);
+ const [showDiffViewer, setShowDiffViewer] = useState(false);
+ const wsRef = useRef(null);
+
+ // Ref mirrors streamingEvents so WS callbacks avoid stale closures
+ const streamingEventsRef = useRef([]);
+ useEffect(() => { streamingEventsRef.current = streamingEvents; }, [streamingEvents]);
+
+ // Skip the session-sync useEffect reset when we just created a session
+ // (the parent already seeded the messages into chatBySession)
+ const skipNextSyncRef = useRef(false);
+
+ const messagesEndRef = useRef(null);
+ const prevMsgCountRef = useRef((sessionChatState?.messages || []).length);
+
+ // ---------------------------------------------------------------------------
+ // WebSocket connection management
+ // ---------------------------------------------------------------------------
+ useEffect(() => {
+ // Clean up previous connection
+ if (wsRef.current) {
+ wsRef.current.close();
+ wsRef.current = null;
+ setWsConnected(false);
+ }
+
+ if (!sessionId) return;
+
+ // Wait for backend to be reachable before opening WebSocket.
+ // Without this, the WS connects immediately on session creation
+ // and fails repeatedly with "closed before established" when the
+ // backend is still starting up (common on WSL cold start).
+ let cancelled = false;
+ const backendUrl = import.meta.env.VITE_BACKEND_URL || '';
+ const pingUrl = backendUrl ? `${backendUrl}/api/ping` : '/api/ping';
+ const waitForBackend = async () => {
+ for (let i = 0; i < 10 && !cancelled; i++) {
+ try {
+ const res = await fetch(pingUrl, { method: 'GET', signal: AbortSignal.timeout(2000) });
+ if (res.ok) return true;
+ } catch { /* retry */ }
+ await new Promise(r => setTimeout(r, 1500));
+ }
+ return false;
+ };
+
+ waitForBackend().then((ok) => {
+ if (cancelled || !ok) return;
+ connectWs();
+ });
+
+ function connectWs() {
+ const ws = new SessionWebSocket(sessionId, {
+ onConnect: () => setWsConnected(true),
+ onDisconnect: () => setWsConnected(false),
+ onMessage: (data) => {
+ if (data.type === "agent_message") {
+ setStreamingEvents((prev) => [...prev, data]);
+ } else if (data.type === "tool_use" || data.type === "tool_result") {
+ setStreamingEvents((prev) => [...prev, data]);
+ } else if (data.type === "diff_update") {
+ setDiffData(data.stats || data);
+ } else if (data.type === "session_restored") {
+ // Session loaded
+ }
+ },
+ onStatusChange: (newStatus) => {
+ if (newStatus === "waiting") {
+ // Always clear loading state when agent finishes
+ setLoadingPlan(false);
+
+ // Consolidate streaming events into a chat message (use ref to
+ // avoid stale closure — streamingEvents state would be stale here).
+ //
+ // We also commit the FINAL consolidated text to the backend session
+ // here. Previously this branch never called persistMessage, so the
+ // assistant turn looked correct in the live view but vanished on the
+ // next session reload — the canonical "streaming truncation" symptom.
+ const events = streamingEventsRef.current;
+ if (events.length > 0) {
+ const textParts = events
+ .filter((e) => e.type === "agent_message")
+ .map((e) => e.content);
+ if (textParts.length > 0) {
+ const consolidated = {
+ from: "ai",
+ role: "assistant",
+ answer: textParts.join(""),
+ content: textParts.join(""),
+ };
+ setMessages((prev) => [...prev, consolidated]);
+ persistMessage(sessionId, "assistant", consolidated.content);
+ }
+ setStreamingEvents([]);
+ }
+ }
+ },
+ onError: (err) => {
+ console.warn("[ws] Error:", err);
+ setLoadingPlan(false);
+ },
+ });
+
+ ws.connect();
+ wsRef.current = ws;
+ } // end connectWs
+
+ return () => {
+ cancelled = true;
+ if (wsRef.current) wsRef.current.close();
+ };
+ }, [sessionId]); // eslint-disable-line react-hooks/exhaustive-deps
+
+ // ---------------------------------------------------------------------------
+ // 1) SESSION SYNC: Restore chat when branch, repo, OR session changes
+ // IMPORTANT: Do NOT depend on sessionChatState here (prevents prop/state loop)
+ // ---------------------------------------------------------------------------
+ useEffect(() => {
+ // When send() just created a session, the parent seeded the messages
+ // into chatBySession already. Skip the reset so we don't wipe
+ // the optimistic user message that was already rendered.
+ if (skipNextSyncRef.current) {
+ skipNextSyncRef.current = false;
+ return;
+ }
+
+ const nextMessages = sessionChatState?.messages || [];
+ const nextPlan = sessionChatState?.plan || null;
+
+ setMessages(nextMessages);
+ setPlan(nextPlan);
+
+ // Reset transient UI state on branch/repo/session switch
+ setGoal("");
+ setStatus("");
+ setLoadingPlan(false);
+ setExecuting(false);
+ setStreamingEvents([]);
+ setDiffData(null);
+
+ // Update msg count tracker so auto-scroll doesn't "jump" on switch
+ prevMsgCountRef.current = nextMessages.length;
+ // eslint-disable-next-line react-hooks/exhaustive-deps
+ }, [currentBranch, repo?.full_name, sessionId]);
+
+ // ---------------------------------------------------------------------------
+ // 2) PERSISTENCE: Save chat to Parent (no loop now because sync only on branch)
+ // ---------------------------------------------------------------------------
+ useEffect(() => {
+ if (typeof onSessionChatStateChange === "function") {
+ // Avoid wiping parent state on mount
+ if (messages.length > 0 || plan) {
+ onSessionChatStateChange({ messages, plan });
+ }
+ }
+ // eslint-disable-next-line react-hooks/exhaustive-deps
+ }, [messages, plan]);
+
+ // ---------------------------------------------------------------------------
+ // 3) AUTO-SCROLL: Only scroll when a message is appended (reduces flicker)
+ // ---------------------------------------------------------------------------
+ useEffect(() => {
+ const curCount = messages.length + streamingEvents.length;
+ const prevCount = prevMsgCountRef.current;
+
+ // Only scroll when new messages are added
+ if (curCount > prevCount) {
+ prevMsgCountRef.current = curCount;
+ requestAnimationFrame(() => {
+ messagesEndRef.current?.scrollIntoView({ behavior: "smooth" });
+ });
+ } else {
+ prevMsgCountRef.current = curCount;
+ }
+ }, [messages.length, streamingEvents.length]);
+
+ // ---------------------------------------------------------------------------
+ // HANDLERS
+ // ---------------------------------------------------------------------------
+ // ---------------------------------------------------------------------------
+ // Persist a message to the backend session (fire-and-forget).
+ //
+ // The fourth argument carries the *structured* payload of the assistant
+ // response — the Action Plan, the Execution Log, diff stats, etc. The
+ // backend stores it on Message.metadata; on session reload App.jsx
+ // spreads metadata back into the local message via normalizeBackendMessage,
+ // so the same AssistantMessage renderer can re-draw the Plan / Steps /
+ // Create buttons identically to the live view.
+ //
+ // Before this fix the structured payload was dropped at persist time —
+ // the session reloaded as raw text, and the UI degraded to a plain
+ // paragraph. This is the canonical "state loss during hydration" bug.
+ // ---------------------------------------------------------------------------
+ const persistMessage = (sid, role, content, metadata = null) => {
+ if (!sid) return;
+ const body = { role, content };
+ if (metadata && typeof metadata === "object" && Object.keys(metadata).length > 0) {
+ body.metadata = metadata;
+ }
+ fetch(`/api/sessions/${sid}/message`, {
+ method: "POST",
+ headers: getHeaders(),
+ body: JSON.stringify(body),
+ }).catch(() => {}); // best-effort
+ };
+
+ // Pick the structured fields a message can carry across a reload.
+ // Keep this in one place so every call-site stores the same shape and
+ // the renderer never has to guess.
+ const pickAssistantMetadata = (m) => {
+ if (!m || typeof m !== "object") return null;
+ const meta = {};
+ if (m.plan) meta.plan = m.plan;
+ if (m.executionLog) meta.executionLog = m.executionLog;
+ if (m.diff) meta.diff = m.diff;
+ if (m.actions) meta.actions = m.actions;
+ return Object.keys(meta).length > 0 ? meta : null;
+ };
+
+ const send = async () => {
+ if (!repo || !goal.trim()) return;
+
+ const text = goal.trim();
+
+ // Clear input immediately (Claude Code behavior)
+ setGoal("");
+ // Reset textarea height
+ const ta = document.querySelector(".chat-input");
+ if (ta) ta.style.height = "40px";
+
+ // Optimistic update (user bubble appears immediately)
+ const userMsg = { from: "user", role: "user", text, content: text };
+ setMessages((prev) => [...prev, userMsg]);
+
+ setLoadingPlan(true);
+ setStatus("");
+ setPlan(null);
+ setStreamingEvents([]);
+
+ // ------- Implicit session creation (Claude Code parity) -------
+ // Every chat must be backed by a session. If none exists yet,
+ // create one on-demand before sending the plan request.
+ let sid = sessionId;
+ if (!sid && typeof onEnsureSession === "function") {
+ // Derive a short title from the first message
+ const sessionName = text.length > 60 ? text.slice(0, 57) + "..." : text;
+
+ // Tell the sync useEffect to skip the reset that would otherwise
+ // wipe the optimistic user message when activeSessionId changes.
+ skipNextSyncRef.current = true;
+
+ sid = await onEnsureSession(sessionName, [userMsg]);
+ if (!sid) {
+ // Session creation failed — continue without session
+ skipNextSyncRef.current = false;
+ }
+ }
+
+ // Persist user message to backend session
+ persistMessage(sid, "user", text);
+
+ // Always use HTTP for plan generation (the original reliable flow).
+ // WebSocket is only used for real-time streaming feedback display.
+ const effectiveBranch = currentBranch || defaultBranch || "HEAD";
+
+ try {
+ // Timeout after 5 minutes (CrewAI agent can be slow with small models)
+ const planController = new AbortController();
+ const planTimer = setTimeout(() => planController.abort(), 300000);
+
+ let res;
+ try {
+ res = await fetch("/api/chat/plan", {
+ method: "POST",
+ headers: getHeaders(),
+ body: JSON.stringify({
+ repo_owner: repo.owner,
+ repo_name: repo.name,
+ goal: text,
+ branch_name: effectiveBranch,
+ }),
+ signal: planController.signal,
+ });
+ } catch (fetchErr) {
+ if (fetchErr.name === "AbortError") {
+ throw new Error("Request timed out after 5 minutes. The LLM may be too slow. Try a faster model.");
+ }
+ throw fetchErr;
+ } finally {
+ clearTimeout(planTimer);
+ }
+
+ let data;
+ try {
+ data = await res.json();
+ } catch {
+ throw new Error(`Server error (${res.status}). The LLM may have returned an invalid response. Try a different model or enable Lite Mode in Settings.`);
+ }
+ if (!res.ok) {
+ const detail = data?.detail || data?.error || data?.message || "";
+ // Friendly message for common LLM failures
+ if (detail.includes("None or empty") || detail.includes("Invalid response from LLM")) {
+ throw new Error(
+ "The LLM returned an empty response. This often happens with small models (deepseek, qwen 0.5b). " +
+ "Try a larger model (llama3, qwen2.5:7b) or enable Lite Mode in Settings."
+ );
+ }
+ throw new Error(detail || "Failed to generate plan");
+ }
+
+ // Guard: a plan with no executable file actions is not a plan we
+ // can approve. This happens when the planner/explorer agents
+ // refused (tool-loop hallucination or a real safety refusal) and
+ // CrewAI returned a schema-valid but empty payload. Without
+ // this guard the Approve & execute / Reject plan buttons would
+ // render against a payload that can't actually be executed.
+ const planSteps = Array.isArray(data?.steps)
+ ? data.steps
+ : Array.isArray(data?.plan?.steps)
+ ? data.plan.steps
+ : [];
+ const hasExecutableFiles = planSteps.some(
+ (s) =>
+ Array.isArray(s?.files) &&
+ s.files.some((f) => ["CREATE", "MODIFY", "DELETE"].includes(f?.action)),
+ );
+
+ // Extract summary from nested plan structure or top-level
+ const summary =
+ data.plan?.summary || data.summary || data.message ||
+ "Here is the proposed plan for your request.";
+
+ if (hasExecutableFiles) {
+ setPlan(data);
+ const assistantMsg = {
+ from: "ai",
+ role: "assistant",
+ answer: summary,
+ content: summary,
+ plan: data,
+ };
+ setMessages((prev) => [...prev, assistantMsg]);
+ persistMessage(sid, "assistant", summary, pickAssistantMetadata(assistantMsg));
+ } else {
+ // No executable steps — surface a clear failure to the user
+ // instead of half-rendering a plan card and dangling buttons.
+ // The most common cause is the explorer/planner agent loop
+ // (CrewAI same-input limiter blocks repeat tool calls, the
+ // agent panics and "refuses"). Encourage a retry rather than
+ // letting the user click Approve on nothing.
+ setPlan(null);
+ const failureText =
+ "I couldn't produce a plan for that request. The agent may have " +
+ "got stuck reading the same file twice. Try rephrasing, or " +
+ "switch to a stronger model in Settings → Provider.";
+ const failureMsg = {
+ from: "ai",
+ role: "system",
+ content: failureText,
+ };
+ setMessages((prev) => [...prev, failureMsg]);
+ persistMessage(sid, "system", failureText);
+ setStatus("No executable plan produced.");
+ return;
+ }
+ } catch (err) {
+ const msg = String(err?.message || err);
+ console.error(err);
+ setStatus(msg);
+ setMessages((prev) => [
+ ...prev,
+ { from: "ai", role: "system", content: `Error: ${msg}` },
+ ]);
+ } finally {
+ setLoadingPlan(false);
+ }
+ };
+
+ // ---------------------------------------------------------------------------
+ // Reject the active plan — minimal first cut.
+ //
+ // Industry rule we follow from the start: never write to disk on a path the
+ // user did not approve. Rejecting is the cheapest expression of that —
+ // discard the proposed plan locally, leave the workspace untouched, record
+ // the rejection in chat history so the user sees it after a session reload.
+ //
+ // No backend endpoint is needed yet because plans are not persisted as
+ // first-class objects today; they ride along on the assistant message's
+ // metadata. When we later add per-plan state tracking, this handler will
+ // also POST /api/chat/plan/{id}/reject — leaving that for a follow-up.
+ // ---------------------------------------------------------------------------
+  // Discard the proposed plan without touching the workspace.
+  const rejectPlan = () => {
+    // No-op when there is nothing to reject or an execution is in flight.
+    if (!plan || executing) return;
+    setPlan(null);
+    setStatus("Plan rejected. No files were changed.");
+
+    // Record the rejection in the visible timeline. NOTE: the history view's
+    // planStatus scan matches the literal substring "Plan rejected" in system
+    // messages — keep this content string in sync with that check.
+    const rejectionMsg = {
+      from: "ai",
+      role: "system",
+      content: "Plan rejected. No files were changed.",
+    };
+    setMessages((prev) => [...prev, rejectionMsg]);
+
+    // Persist only when a backend session exists; rejection before any
+    // session was created stays local.
+    if (sessionId) {
+      persistMessage(sessionId, "system", rejectionMsg.content);
+    }
+  };
+
+ const execute = async () => {
+ if (!repo || !plan) return;
+
+ setExecuting(true);
+ setStatus("");
+
+ try {
+ // Guard: currentBranch might be missing if parent didn't pass it yet
+ const safeCurrent = currentBranch || defaultBranch || "HEAD";
+ const safeDefault = defaultBranch || "main";
+
+ // Sticky vs Hard Switch:
+ // - If on default branch -> undefined (backend creates new branch)
+ // - If already on AI branch -> currentBranch (backend updates existing)
+ const branch_name = safeCurrent === safeDefault ? undefined : safeCurrent;
+
+ const res = await fetch("/api/chat/execute", {
+ method: "POST",
+ headers: getHeaders(),
+ body: JSON.stringify({
+ repo_owner: repo.owner,
+ repo_name: repo.name,
+ plan,
+ branch_name,
+ }),
+ });
+
+ const data = await res.json();
+ if (!res.ok) throw new Error(data.detail || "Execution failed");
+
+ setStatus(data.message || "Execution completed.");
+
+ const completionMsg = {
+ from: "ai",
+ role: "assistant",
+ answer: data.message || "Execution completed.",
+ content: data.message || "Execution completed.",
+ executionLog: data.executionLog,
+ diff: data.diff,
+ };
+
+ // Show completion immediately (keeps old "Execution Log" section)
+ setMessages((prev) => [...prev, completionMsg]);
+
+ // Persist the execution log + diff alongside the message text so
+ // the History view re-renders the green "Execution Log" panel and
+ // the "View diff" affordance. Without this, reloading the session
+ // shows just the one-line "Execution completed." summary.
+ persistMessage(
+ sessionId,
+ "assistant",
+ completionMsg.content,
+ pickAssistantMetadata(completionMsg),
+ );
+
+ // Clear active plan UI
+ setPlan(null);
+
+ // Pass completionMsg upward for seeding branch history
+ if (typeof onExecutionComplete === "function") {
+ onExecutionComplete({
+ branch: data.branch || data.branch_name,
+ mode: data.mode,
+ commit_url: data.commit_url || data.html_url,
+ message: data.message,
+ completionMsg,
+ sourceBranch: safeCurrent,
+ });
+ }
+ } catch (err) {
+ console.error(err);
+ setStatus(String(err?.message || err));
+ } finally {
+ setExecuting(false);
+ }
+ };
+
+ // ---------------------------------------------------------------------------
+ // RENDER
+ // ---------------------------------------------------------------------------
+ const isOnSessionBranch = currentBranch && currentBranch !== defaultBranch;
+
+ return (
+
+
+
+
+ {messages.map((m, idx) => {
+ // Success message (App.jsx injected)
+ if (m.isSuccess) {
+ return (
+
+ );
+ }
+
+ // User message
+ if (m.from === "user" || m.role === "user") {
+ return (
+
+ {m.text || m.content}
+
+ );
+ }
+
+ // Assistant message (Answer / Plan / Execution Log).
+ //
+ // Lifecycle audit signal: if this message carries a plan, look
+ // ahead in the timeline for any subsequent message that
+ // records an execution log (=> the plan was approved+executed)
+ // or a system "Plan rejected" entry (=> the plan was
+ // rejected). The status is rendered as a small green/grey
+ // badge next to the Action Plan header so users can tell at a
+ // glance — in history — whether a previous plan was acted on.
+ let planStatus = null;
+ if (m.plan) {
+ const after = messages.slice(idx + 1);
+ if (after.some((later) => later.executionLog)) {
+ planStatus = "executed";
+ } else if (
+ after.some(
+ (later) =>
+ later.role === "system" &&
+ typeof later.content === "string" &&
+ later.content.includes("Plan rejected"),
+ )
+ ) {
+ planStatus = "rejected";
+ }
+ }
+
+ return (
+
+
+ {/* Diff stats indicator (Claude-Code-on-Web parity) */}
+ {m.diff && (
+
{
+ setDiffData(m.diff);
+ setShowDiffViewer(true);
+ }} />
+ )}
+
+ );
+ })}
+
+ {/* Streaming events (real-time agent output) */}
+ {streamingEvents.length > 0 && (
+
+
+
+ )}
+
+ {/* Enterprise Pulse — agentic thinking state shown after the user
+ hits Send and before the first streamed/planned chunk arrives.
+ Falls back gracefully to nothing once streamingEvents start
+ flowing in (StreamingMessage takes over the live feedback). */}
+ {loadingPlan && streamingEvents.length === 0 && (
+
+ )}
+
+ {/* Live execution status — visible in the chat timeline while
+ ``executing`` is true, sits between the Action Plan card and
+ where the Execution Log (green panel in AssistantMessage)
+ will land once the backend returns. Removes the "did the
+ app freeze?" feeling caused by only the bottom button
+ saying "Executing…".
+
+ Reuses the ThinkingIndicator with execution-specific labels.
+ When the executor finishes, ``setExecuting(false)`` removes
+ this bubble and the completionMsg lands in the timeline as
+ a normal assistant message with its green Execution Log
+ block — already rendered by AssistantMessage today. */}
+ {executing && (
+
+ )}
+
+ {!messages.length && !plan && !loadingPlan && streamingEvents.length === 0 && (
+
+
💬
+
Tell GitPilot what you want to do with this repository.
+
+ It will propose a safe step-by-step plan before any execution.
+
+
+ )}
+
+
+
+
+ {/* Diff stats bar (when agent has made changes) */}
+ {diffData && (
+
+ setShowDiffViewer(true)} />
+
+ )}
+
+
+ {/* Readiness blocker banner */}
+ {!canChat && chatBlocker && (
+
+ {chatBlocker.message || "Chat is not ready yet."}
+ {chatBlocker.cta && chatBlocker.onCta && (
+
+ {chatBlocker.cta}
+
+ )}
+
+ )}
+ {status && (
+
+ {status}
+
+ )}
+
+
+
+
+ {/* WebSocket connection indicator + context-window meter */}
+
+
+ {sessionId && (
+
+
+ {wsConnected ? "Live" : "Connecting..."}
+
+ )}
+
+
+
+
+
+ {/* Diff Viewer overlay */}
+ {showDiffViewer && (
+
setShowDiffViewer(false)}
+ />
+ )}
+
+ );
+}
diff --git a/frontend/components/ContextBar.jsx b/frontend/components/ContextBar.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..be13192a04dbea56e257044078db9e1398a44db8
--- /dev/null
+++ b/frontend/components/ContextBar.jsx
@@ -0,0 +1,156 @@
+import React, { useCallback, useRef, useState } from "react";
+import BranchPicker from "./BranchPicker.jsx";
+
+/**
+ * ContextBar — horizontal repo chip bar for multi-repo workspace context.
+ *
+ * Uses CSS classes for hover-reveal X (Claude-style: subtle by default,
+ * visible on chip hover, red on X hover). Each chip owns its own remove
+ * button — removing one repo never affects the others.
+ */
+export default function ContextBar({
+ contextRepos,
+ activeRepoKey,
+ repoStateByKey,
+ onActivate,
+ onRemove,
+ onAdd,
+ onBranchChange,
+ mode, // workspace mode: "github", "local-git", "folder" (optional)
+}) {
+ if (!contextRepos || contextRepos.length === 0) return null;
+
+ return (
+
+ {/* Workspace mode indicator */}
+ {mode && (
+
+ {mode === "github" ? "GH" : mode === "local-git" ? "Git" : "Dir"}
+
+ )}
+
+ {contextRepos.map((entry) => {
+ const isActive = entry.repoKey === activeRepoKey;
+ return (
+ onActivate(entry.repoKey)}
+ onRemove={() => onRemove(entry.repoKey)}
+ onBranchChange={(newBranch) =>
+ onBranchChange(entry.repoKey, newBranch)
+ }
+ />
+ );
+ })}
+
+
+
+
+
+
+
+
+
+
+ {contextRepos.length} {contextRepos.length === 1 ? "repo" : "repos"}
+
+
+ );
+}
+
+function RepoChip({ entry, isActive, repoState, onActivate, onRemove, onBranchChange }) {
+ const [branchOpen, setBranchOpen] = useState(false);
+ const [hovered, setHovered] = useState(false);
+ const branchBtnRef = useRef(null);
+ const repo = entry.repo;
+ const branch = repoState?.currentBranch || entry.branch || repo?.default_branch || "main";
+ const defaultBranch = repoState?.defaultBranch || repo?.default_branch || "main";
+ const sessionBranches = repoState?.sessionBranches || [];
+ const displayName = repo?.name || entry.repoKey?.split("/")[1] || entry.repoKey;
+
+ const handleChipClick = useCallback(
+ (e) => {
+ if (e.target.closest("[data-chip-action]")) return;
+ onActivate();
+ },
+ [onActivate]
+ );
+
+ return (
+ setHovered(true)}
+ onMouseLeave={() => setHovered(false)}
+ title={isActive ? `Active (write): ${entry.repoKey}` : `Click to activate ${entry.repoKey}`}
+ >
+ {/* Active indicator bar */}
+ {isActive &&
}
+
+ {/* Repo name */}
+
{displayName}
+
+ {/* Separator dot */}
+
+
+ {/* Branch name — single click opens GitHub branch list */}
+
{
+ e.stopPropagation();
+ setBranchOpen((v) => !v);
+ }}
+ >
+ {branch}
+
+
+ {/* Write badge for active repo */}
+ {isActive &&
write }
+
+ {/* Remove button: hidden by default, revealed on hover */}
+
{
+ e.stopPropagation();
+ onRemove();
+ }}
+ title={`Remove ${displayName} from context`}
+ >
+
+
+
+
+
+
+ {/* BranchPicker in external-anchor mode: dropdown opens immediately,
+ positioned from the branch button, fetches all branches from GitHub */}
+ {branchOpen && (
+
{
+ onBranchChange(newBranch);
+ setBranchOpen(false);
+ }}
+ onClose={() => setBranchOpen(false)}
+ />
+ )}
+
+ );
+}
diff --git a/frontend/components/ContextMeter.jsx b/frontend/components/ContextMeter.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..acd60eacba7cd48472354f9a60da54c2d7080fde
--- /dev/null
+++ b/frontend/components/ContextMeter.jsx
@@ -0,0 +1,410 @@
+// frontend/components/ContextMeter.jsx
+//
+// Small bottom-right control that shows the active LLM's context-window
+// utilisation. Collapsed: a single ⓘ icon (no number — keeps the UI
+// quiet during normal use). Expanded: a compact popover with the
+// breakdown, topology line, and a manual refresh button.
+//
+// Refresh model: lazy — fetched only when the popover opens, plus the
+// explicit ↻ button. Zero idle traffic.
+//
+// Token-count estimate flag: when the backend reports is_estimate=true
+// (Ollama / OllaBridge — no real tokenizer available) every number is
+// prefixed with ≈ so the imprecision is visible.
+//
+// Colours: GitPilot orange #D95C3D for ≥60% (warning), red #B91C1C for
+// ≥85% (saturated). No new dependencies; inline styles + a scoped
+// <style> block.
+
+ = 60 && percent < 85 ? "1" : "0"}
+ data-sat={data && percent >= 85 ? "1" : "0"}
+ onClick={() => setOpen((v) => !v)}
+ title="Context window usage"
+ >
+ {"ⓘ"}
+
+
+ {open && (
+
+
Context window
+
+ {loading && !data && (
+
Loading…
+ )}
+ {error && error !== "disabled" && (
+
+ Couldn't load: {error}
+
+ )}
+
+ {data && (
+ <>
+
+ Provider
+ {data.provider}
+ Model
+ {data.model || "—"}
+ Topology
+ {data.topology}
+
+
+
+
+
+ {prefix}
+ {fmt(data.used)} / {fmt(data.context_window)}{" "}
+ ({percent.toFixed(1)}%)
+
+
+
+
|
+
|
+
|
+
|
+
|
+
+
+
+
|
+
+ {percent >= 85 && (
+
= 95 ? "1" : "0"}>
+ Context near saturation. Consider:
+
+ Resetting the conversation
+ Switching to a larger-context model
+ Reducing repository scope
+
+
+ )}
+
+
+ {estimate ? "Token counts are estimated" : "Token counts via tiktoken"}
+
+ {loading ? "…" : "↻ refresh"}
+
+
+ >
+ )}
+
+ )}
+
+ );
+}
diff --git a/frontend/components/CreatePRButton.jsx b/frontend/components/CreatePRButton.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..559eb9c277ceed34cb8a455eea1347189f52cf56
--- /dev/null
+++ b/frontend/components/CreatePRButton.jsx
@@ -0,0 +1,159 @@
+import React, { useState } from "react";
+
+/**
+ * CreatePRButton — Claude-Code-on-Web parity PR creation action.
+ *
+ * When clicked, pushes session changes to a new branch and opens a PR.
+ * Shows loading state and links to the created PR on GitHub.
+ */
+export default function CreatePRButton({
+ repo,
+ sessionId,
+ branch,
+ defaultBranch,
+ disabled,
+ onPRCreated,
+}) {
+ const [creating, setCreating] = useState(false);
+ const [prUrl, setPrUrl] = useState(null);
+ const [error, setError] = useState(null);
+
+  // Open a PR from the session branch into the default branch via the
+  // backend proxy endpoint. No-op unless a repo and a non-default branch
+  // are present (there is nothing to merge otherwise).
+  const handleCreate = async () => {
+    if (!repo || !branch || branch === defaultBranch) return;
+
+    setCreating(true);
+    setError(null);
+
+    try {
+      // Forward the stored GitHub token (when present) as a Bearer header.
+      const token = localStorage.getItem("github_token");
+      const headers = {
+        "Content-Type": "application/json",
+        ...(token ? { Authorization: `Bearer ${token}` } : {}),
+      };
+
+      // Prefer full_name ("owner/name") when available; fall back to the
+      // separate owner/name fields.
+      const owner = repo.full_name?.split("/")[0] || repo.owner;
+      const name = repo.full_name?.split("/")[1] || repo.name;
+
+      const res = await fetch(`/api/repos/${owner}/${name}/pulls`, {
+        method: "POST",
+        headers,
+        body: JSON.stringify({
+          title: `[GitPilot] Changes from session ${sessionId ? sessionId.slice(0, 8) : branch}`,
+          head: branch,
+          base: defaultBranch || "main",
+          // PR body is assembled line-by-line; filter(Boolean) drops the
+          // session line when no sessionId exists.
+          body: [
+            "## Summary",
+            "",
+            `Changes created by GitPilot AI assistant on branch \`${branch}\`.`,
+            "",
+            sessionId ? `Session ID: \`${sessionId}\`` : "",
+            "",
+            "---",
+            "*This PR was generated by [GitPilot](https://github.com/ruslanmv/gitpilot).*",
+          ]
+            .filter(Boolean)
+            .join("\n"),
+        }),
+      });
+
+      const data = await res.json();
+      if (!res.ok) throw new Error(data.detail || "Failed to create PR");
+
+      // Switch the button into its "View PR" state and notify the parent.
+      const url = data.html_url || data.url;
+      setPrUrl(url);
+      onPRCreated?.({ pr_url: url, pr_number: data.number, branch });
+    } catch (err) {
+      setError(err.message);
+    } finally {
+      setCreating(false);
+    }
+  };
+
+ if (prUrl) {
+ return (
+
+
+
+
+
+
+
+ View PR on GitHub →
+
+ );
+ }
+
+ return (
+
+
+
+
+
+
+
+
+ {creating ? "Creating PR..." : "Create PR"}
+
+ {error && (
+
{error}
+ )}
+
+ );
+}
+
+// Inline style objects for CreatePRButton (green "success" accent palette).
+const styles = {
+  // Default action button state ("Create PR").
+  btn: {
+    display: "flex",
+    alignItems: "center",
+    gap: 6,
+    height: 38,
+    padding: "0 14px",
+    borderRadius: 8,
+    border: "1px solid rgba(16, 185, 129, 0.3)",
+    background: "rgba(16, 185, 129, 0.08)",
+    color: "#10B981",
+    fontSize: 13,
+    fontWeight: 600,
+    cursor: "pointer",
+    whiteSpace: "nowrap",
+    transition: "background-color 0.15s",
+  },
+  // Anchor shown after a PR was created ("View PR on GitHub →").
+  prLink: {
+    display: "flex",
+    alignItems: "center",
+    gap: 6,
+    height: 38,
+    padding: "0 14px",
+    borderRadius: 8,
+    background: "rgba(16, 185, 129, 0.10)",
+    color: "#10B981",
+    fontSize: 13,
+    fontWeight: 600,
+    textDecoration: "none",
+    whiteSpace: "nowrap",
+  },
+  // Small red error line rendered under the button.
+  error: {
+    fontSize: 11,
+    color: "#EF4444",
+    marginTop: 4,
+  },
+};
diff --git a/frontend/components/DiffStats.jsx b/frontend/components/DiffStats.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..2460e6ef496ccc5e95b2159c698afda556a99734
--- /dev/null
+++ b/frontend/components/DiffStats.jsx
@@ -0,0 +1,59 @@
+import React from "react";
+
+/**
+ * DiffStats — Claude-Code-on-Web parity inline diff indicator.
+ *
+ * Clickable "+N -N in M files" badge that appears in agent messages.
+ * Clicking opens the DiffViewer overlay.
+ */
+export default function DiffStats({ diff, onClick }) {
+ if (!diff || (!diff.additions && !diff.deletions && !diff.files_changed)) {
+ return null;
+ }
+
+ return (
+
+
+
+
+
+ +{diff.additions || 0}
+ -{diff.deletions || 0}
+
+ in {diff.files_changed || (diff.files || []).length} file{(diff.files_changed || (diff.files || []).length) !== 1 ? "s" : ""}
+
+
+
+
+
+ );
+}
+
+// Inline style objects for the DiffStats badge (monospace "+N -N in M files").
+const styles = {
+  // Clickable pill container for the whole badge.
+  container: {
+    display: "inline-flex",
+    alignItems: "center",
+    gap: 6,
+    padding: "5px 10px",
+    borderRadius: 6,
+    border: "1px solid #27272A",
+    backgroundColor: "rgba(24, 24, 27, 0.8)",
+    cursor: "pointer",
+    fontSize: 12,
+    fontFamily: "monospace",
+    color: "#A1A1AA",
+    transition: "border-color 0.15s, background-color 0.15s",
+    marginTop: 8,
+  },
+  // Green "+N" added-lines count.
+  additions: {
+    color: "#10B981",
+    fontWeight: 600,
+  },
+  // Red "-N" removed-lines count.
+  deletions: {
+    color: "#EF4444",
+    fontWeight: 600,
+  },
+  // Muted "in M files" suffix.
+  files: {
+    color: "#71717A",
+  },
+};
diff --git a/frontend/components/DiffViewer.jsx b/frontend/components/DiffViewer.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..b1a05fbc9d3f4c52b40ab4cd075d42db99522663
--- /dev/null
+++ b/frontend/components/DiffViewer.jsx
@@ -0,0 +1,263 @@
+import React, { useState } from "react";
+
+/**
+ * DiffViewer — Claude-Code-on-Web parity diff overlay.
+ *
+ * Shows a file list on the left and unified diff on the right.
+ * Green = additions, red = deletions. Additive component.
+ */
+export default function DiffViewer({ diff, onClose }) {
+ const [selectedFile, setSelectedFile] = useState(0);
+
+ if (!diff || !diff.files || diff.files.length === 0) {
+ return (
+
+
+
+ Diff Viewer
+
+ ×
+
+
+
No changes to display.
+
+
+ );
+ }
+
+ const files = diff.files || [];
+ const currentFile = files[selectedFile] || files[0];
+
+ return (
+
+
+ {/* Header */}
+
+
+ Diff Viewer
+
+ +{diff.additions || 0}
+ {" "}
+ -{diff.deletions || 0}
+ {" in "}
+ {diff.files_changed || files.length} files
+
+
+
+ ×
+
+
+
+ {/* Body */}
+
+ {/* File list */}
+
+ {files.map((f, idx) => (
+
setSelectedFile(idx)}
+ >
+ {f.path}
+
+ +{f.additions || 0}
+ {" "}
+ -{f.deletions || 0}
+
+
+ ))}
+
+
+ {/* Diff content */}
+
+
{currentFile.path}
+
+ {(currentFile.hunks || []).map((hunk, hi) => (
+
+
{hunk.header || `@@ hunk ${hi + 1} @@`}
+ {(hunk.lines || []).map((line, li) => {
+ let bg = "transparent";
+ let color = "#D4D4D8";
+ if (line.startsWith("+")) {
+ bg = "rgba(16, 185, 129, 0.10)";
+ color = "#6EE7B7";
+ } else if (line.startsWith("-")) {
+ bg = "rgba(239, 68, 68, 0.10)";
+ color = "#FCA5A5";
+ }
+ return (
+
+ {line}
+
+ );
+ })}
+
+ ))}
+
+ {(!currentFile.hunks || currentFile.hunks.length === 0) && (
+
+ Diff content will appear here when the agent modifies files.
+
+ )}
+
+
+
+
+
+ );
+}
+
+// Inline style objects for the DiffViewer overlay (full-screen modal with a
+// file list on the left and the unified diff on the right).
+const styles = {
+  // Full-viewport dimmed backdrop that centers the panel.
+  overlay: {
+    position: "fixed",
+    top: 0,
+    left: 0,
+    right: 0,
+    bottom: 0,
+    backgroundColor: "rgba(0, 0, 0, 0.7)",
+    zIndex: 200,
+    display: "flex",
+    alignItems: "center",
+    justifyContent: "center",
+  },
+  // The modal panel itself.
+  panel: {
+    width: "90vw",
+    maxWidth: 1100,
+    height: "80vh",
+    backgroundColor: "#131316",
+    border: "1px solid #27272A",
+    borderRadius: 12,
+    display: "flex",
+    flexDirection: "column",
+    overflow: "hidden",
+  },
+  // Title bar: title + aggregate stats on the left, close button on the right.
+  header: {
+    display: "flex",
+    justifyContent: "space-between",
+    alignItems: "center",
+    padding: "12px 16px",
+    borderBottom: "1px solid #27272A",
+    backgroundColor: "#18181B",
+  },
+  headerLeft: {
+    display: "flex",
+    alignItems: "center",
+    gap: 12,
+  },
+  headerTitle: {
+    fontSize: 14,
+    fontWeight: 600,
+    color: "#E4E4E7",
+  },
+  statBadge: {
+    fontSize: 12,
+    color: "#A1A1AA",
+  },
+  closeBtn: {
+    width: 28,
+    height: 28,
+    borderRadius: 6,
+    border: "1px solid #3F3F46",
+    background: "transparent",
+    color: "#A1A1AA",
+    fontSize: 18,
+    cursor: "pointer",
+    display: "flex",
+    alignItems: "center",
+    justifyContent: "center",
+  },
+  // Two-column body: fileList (fixed width) | diffContent (flexible).
+  body: {
+    flex: 1,
+    display: "flex",
+    overflow: "hidden",
+  },
+  fileList: {
+    width: 240,
+    borderRight: "1px solid #27272A",
+    overflowY: "auto",
+    flexShrink: 0,
+  },
+  fileItem: {
+    padding: "8px 10px",
+    cursor: "pointer",
+    borderBottom: "1px solid rgba(39, 39, 42, 0.5)",
+    transition: "background-color 0.1s",
+  },
+  // Truncates long paths with an ellipsis.
+  fileName: {
+    display: "block",
+    fontSize: 12,
+    fontFamily: "monospace",
+    color: "#E4E4E7",
+    whiteSpace: "nowrap",
+    overflow: "hidden",
+    textOverflow: "ellipsis",
+  },
+  fileStats: {
+    display: "block",
+    fontSize: 10,
+    marginTop: 2,
+  },
+  diffContent: {
+    flex: 1,
+    overflow: "auto",
+    display: "flex",
+    flexDirection: "column",
+  },
+  // Sticky path header so the file path stays visible while scrolling hunks.
+  diffPath: {
+    padding: "8px 12px",
+    fontSize: 12,
+    fontFamily: "monospace",
+    color: "#A1A1AA",
+    borderBottom: "1px solid #27272A",
+    backgroundColor: "#18181B",
+    position: "sticky",
+    top: 0,
+    zIndex: 1,
+  },
+  diffCode: {
+    padding: "4px 0",
+    fontFamily: "monospace",
+    fontSize: 12,
+    lineHeight: 1.6,
+  },
+  hunkHeader: {
+    padding: "4px 12px",
+    color: "#6B7280",
+    backgroundColor: "rgba(59, 130, 246, 0.05)",
+    fontSize: 11,
+    fontStyle: "italic",
+  },
+  // whiteSpace: "pre" preserves diff indentation inside each line.
+  diffLine: {
+    padding: "0 12px",
+    whiteSpace: "pre",
+  },
+  diffPlaceholder: {
+    padding: 20,
+    textAlign: "center",
+    color: "#52525B",
+    fontSize: 13,
+  },
+  // Centered "No changes to display." state.
+  emptyState: {
+    flex: 1,
+    display: "flex",
+    alignItems: "center",
+    justifyContent: "center",
+    color: "#52525B",
+    fontSize: 14,
+  },
+};
diff --git a/frontend/components/EnvironmentEditor.jsx b/frontend/components/EnvironmentEditor.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..eb0740eebff0280f3155f478ac7a2f437f251681
--- /dev/null
+++ b/frontend/components/EnvironmentEditor.jsx
@@ -0,0 +1,278 @@
+import React, { useState } from "react";
+import { createPortal } from "react-dom";
+
+/**
+ * EnvironmentEditor — Claude-Code-on-Web parity environment config modal.
+ *
+ * Allows setting name, network access level, and environment variables.
+ */
+export default function EnvironmentEditor({ environment, onSave, onDelete, onClose }) {
+ const [name, setName] = useState(environment?.name || "");
+ const [networkAccess, setNetworkAccess] = useState(environment?.network_access || "limited");
+ const [envVarsText, setEnvVarsText] = useState(
+ environment?.env_vars
+ ? Object.entries(environment.env_vars)
+ .map(([k, v]) => `${k}=${v}`)
+ .join("\n")
+ : ""
+ );
+
+  // Parse the KEY=VALUE textarea into an object and hand the assembled
+  // environment config to the parent. Only the FIRST "=" splits key from
+  // value (indexOf), so values may themselves contain "=". Lines without
+  // "=" or with an empty key are silently dropped.
+  const handleSave = () => {
+    const envVars = {};
+    envVarsText
+      .split("\n")
+      .map((line) => line.trim())
+      .filter((line) => line && line.includes("="))
+      .forEach((line) => {
+        const idx = line.indexOf("=");
+        const key = line.slice(0, idx).trim();
+        const val = line.slice(idx + 1).trim();
+        if (key) envVars[key] = val;
+      });
+
+    onSave({
+      // null id signals "create new" to the parent; an existing id means edit.
+      id: environment?.id || null,
+      name: name.trim() || "Default",
+      network_access: networkAccess,
+      env_vars: envVars,
+    });
+  };
+
+ return createPortal(
+ { if (e.target === e.currentTarget) onClose(); }}>
+
e.stopPropagation()}>
+
+
+ {environment?.id ? "Edit Environment" : "New Environment"}
+
+
+ ×
+
+
+
+
+ {/* Name */}
+
Environment Name
+
setName(e.target.value)}
+ placeholder="e.g. Development, Staging, Production"
+ style={styles.input}
+ />
+
+ {/* Network Access */}
+
Network Access
+
+ {[
+ { value: "limited", label: "Limited", desc: "Allowlisted domains only (package managers, APIs)" },
+ { value: "full", label: "Full", desc: "Unrestricted internet access" },
+ { value: "none", label: "None", desc: "Air-gapped — no external network" },
+ ].map((opt) => (
+
+ setNetworkAccess(e.target.value)}
+ style={{ display: "none" }}
+ />
+
+
+ {opt.label}
+
+
+ {opt.desc}
+
+
+
+ ))}
+
+
+ {/* Environment Variables */}
+
Environment Variables
+
+
+
+ {onDelete && (
+
+ Delete
+
+ )}
+
+
+ Cancel
+
+
+ Save
+
+
+
+
,
+ document.body
+ );
+}
+
+// Inline style objects for the EnvironmentEditor modal (rendered via a
+// portal on document.body, hence the very high zIndex).
+const styles = {
+  // Full-viewport dimmed backdrop; clicking it closes the modal.
+  overlay: {
+    position: "fixed",
+    top: 0,
+    left: 0,
+    right: 0,
+    bottom: 0,
+    backgroundColor: "rgba(0, 0, 0, 0.6)",
+    zIndex: 10000,
+    display: "flex",
+    alignItems: "center",
+    justifyContent: "center",
+  },
+  modal: {
+    width: 480,
+    maxHeight: "80vh",
+    backgroundColor: "#131316",
+    border: "1px solid #27272A",
+    borderRadius: 12,
+    display: "flex",
+    flexDirection: "column",
+    overflow: "hidden",
+  },
+  header: {
+    display: "flex",
+    justifyContent: "space-between",
+    alignItems: "center",
+    padding: "14px 16px",
+    borderBottom: "1px solid #27272A",
+    backgroundColor: "#18181B",
+  },
+  headerTitle: {
+    fontSize: 14,
+    fontWeight: 600,
+    color: "#E4E4E7",
+  },
+  closeBtn: {
+    width: 26,
+    height: 26,
+    borderRadius: 6,
+    border: "1px solid #3F3F46",
+    background: "transparent",
+    color: "#A1A1AA",
+    fontSize: 16,
+    cursor: "pointer",
+    display: "flex",
+    alignItems: "center",
+    justifyContent: "center",
+  },
+  // Scrollable form area between header and footer.
+  body: {
+    padding: "16px",
+    overflowY: "auto",
+    flex: 1,
+  },
+  label: {
+    display: "block",
+    fontSize: 12,
+    fontWeight: 600,
+    color: "#A1A1AA",
+    marginBottom: 6,
+    marginTop: 14,
+  },
+  input: {
+    width: "100%",
+    padding: "8px 10px",
+    borderRadius: 6,
+    border: "1px solid #3F3F46",
+    background: "#18181B",
+    color: "#E4E4E7",
+    fontSize: 13,
+    outline: "none",
+    boxSizing: "border-box",
+  },
+  // Network-access radio options stacked vertically.
+  radioGroup: {
+    display: "flex",
+    flexDirection: "column",
+    gap: 6,
+  },
+  radioItem: {
+    display: "flex",
+    alignItems: "flex-start",
+    gap: 10,
+    padding: "8px 10px",
+    borderRadius: 6,
+    border: "1px solid #27272A",
+    cursor: "pointer",
+    transition: "border-color 0.15s, background-color 0.15s",
+  },
+  // KEY=VALUE env-vars editor; monospace, vertically resizable.
+  textarea: {
+    width: "100%",
+    padding: "8px 10px",
+    borderRadius: 6,
+    border: "1px solid #3F3F46",
+    background: "#18181B",
+    color: "#E4E4E7",
+    fontSize: 12,
+    fontFamily: "monospace",
+    outline: "none",
+    resize: "vertical",
+    boxSizing: "border-box",
+  },
+  footer: {
+    display: "flex",
+    alignItems: "center",
+    gap: 8,
+    padding: "12px 16px",
+    borderTop: "1px solid #27272A",
+  },
+  cancelBtn: {
+    padding: "6px 14px",
+    borderRadius: 6,
+    border: "1px solid #3F3F46",
+    background: "transparent",
+    color: "#A1A1AA",
+    fontSize: 12,
+    cursor: "pointer",
+  },
+  saveBtn: {
+    padding: "6px 14px",
+    borderRadius: 6,
+    border: "none",
+    background: "#3B82F6",
+    color: "#fff",
+    fontSize: 12,
+    fontWeight: 600,
+    cursor: "pointer",
+  },
+  // Destructive action — red outline, only rendered when onDelete exists.
+  deleteBtn: {
+    padding: "6px 14px",
+    borderRadius: 6,
+    border: "1px solid rgba(239, 68, 68, 0.3)",
+    background: "transparent",
+    color: "#EF4444",
+    fontSize: 12,
+    cursor: "pointer",
+  },
+};
diff --git a/frontend/components/EnvironmentSelector.jsx b/frontend/components/EnvironmentSelector.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..c54e475a0c5715cc34ee1523aab9a2dc53bbe889
--- /dev/null
+++ b/frontend/components/EnvironmentSelector.jsx
@@ -0,0 +1,199 @@
+import React, { useEffect, useState } from "react";
+import EnvironmentEditor from "./EnvironmentEditor.jsx";
+
+/**
+ * EnvironmentSelector — Claude-Code-on-Web parity environment dropdown.
+ *
+ * Shows current environment name + gear icon. Gear opens the editor modal.
+ * Fetches environments from /api/environments.
+ */
+export default function EnvironmentSelector({ activeEnvId, onEnvChange }) {
+ const [envs, setEnvs] = useState([]);
+ const [editorOpen, setEditorOpen] = useState(false);
+ const [editingEnv, setEditingEnv] = useState(null);
+
+  // Load the environment list from the backend. Best-effort: non-OK
+  // responses and network failures leave the current list untouched
+  // (failures only warn to the console).
+  const fetchEnvs = async () => {
+    try {
+      const res = await fetch("/api/environments", { cache: "no-cache" });
+      if (!res.ok) return;
+      const data = await res.json();
+      setEnvs(data.environments || []);
+    } catch (err) {
+      console.warn("Failed to fetch environments:", err);
+    }
+  };
+
+  // Fetch once on mount.
+  useEffect(() => {
+    fetchEnvs();
+  }, []);
+
+  // Resolve the displayed environment: the explicitly selected one, else
+  // the first fetched one, else a synthetic "Default" placeholder so the
+  // selector always has something to render.
+  const activeEnv =
+    envs.find((e) => e.id === activeEnvId) || envs[0] || { name: "Default", id: "default" };
+
+  // Create (POST, no id) or update (PUT, with id) an environment, then
+  // re-fetch the list and close the editor. Best-effort error handling.
+  const handleSave = async (config) => {
+    try {
+      const method = config.id ? "PUT" : "POST";
+      const url = config.id ? `/api/environments/${config.id}` : "/api/environments";
+      await fetch(url, {
+        method,
+        headers: { "Content-Type": "application/json" },
+        body: JSON.stringify(config),
+      });
+      await fetchEnvs();
+      setEditorOpen(false);
+      setEditingEnv(null);
+    } catch (err) {
+      console.warn("Failed to save environment:", err);
+    }
+  };
+
+  // Delete an environment and refresh. If the deleted env was the active
+  // selection, clear it via onEnvChange(null).
+  const handleDelete = async (envId) => {
+    try {
+      await fetch(`/api/environments/${envId}`, { method: "DELETE" });
+      await fetchEnvs();
+      if (activeEnvId === envId) {
+        onEnvChange?.(null);
+      }
+    } catch (err) {
+      console.warn("Failed to delete environment:", err);
+    }
+  };
+
+ return (
+
+
ENVIRONMENT
+
+
+ {/* Env selector */}
+ onEnvChange?.(e.target.value)}
+ style={styles.select}
+ >
+ {envs.map((env) => (
+
+ {env.name}
+
+ ))}
+
+
+ {/* Network badge */}
+
+ {activeEnv.network_access || "limited"}
+
+
+
+ {/* Gear icon */}
+
{
+ setEditingEnv(activeEnv);
+ setEditorOpen(true);
+ }}
+ title="Configure environment"
+ >
+
+
+
+
+
+
+ {/* Add new */}
+
{
+ setEditingEnv(null);
+ setEditorOpen(true);
+ }}
+ title="Add environment"
+ >
+ +
+
+
+
+ {/* Editor modal */}
+ {editorOpen && (
+
handleDelete(editingEnv.id) : null}
+ onClose={() => {
+ setEditorOpen(false);
+ setEditingEnv(null);
+ }}
+ />
+ )}
+
+ );
+}
+
+// Inline style objects for the EnvironmentSelector sidebar widget.
+const styles = {
+  container: {
+    padding: "10px 14px",
+  },
+  // "ENVIRONMENT" section label above the row.
+  label: {
+    fontSize: 10,
+    fontWeight: 700,
+    letterSpacing: "0.08em",
+    color: "#71717A",
+    textTransform: "uppercase",
+    marginBottom: 6,
+  },
+  row: {
+    display: "flex",
+    alignItems: "center",
+    gap: 6,
+  },
+  // Card wrapping the <select> and the network badge.
+  envCard: {
+    flex: 1,
+    display: "flex",
+    alignItems: "center",
+    gap: 8,
+    padding: "4px 8px",
+    borderRadius: 6,
+    border: "1px solid #27272A",
+    backgroundColor: "#18181B",
+    minWidth: 0,
+  },
+  // Borderless native select blended into the card.
+  select: {
+    flex: 1,
+    background: "transparent",
+    border: "none",
+    color: "#E4E4E7",
+    fontSize: 12,
+    fontWeight: 500,
+    outline: "none",
+    cursor: "pointer",
+    minWidth: 0,
+  },
+  // Tiny uppercase network-access indicator (limited / full / none).
+  networkBadge: {
+    fontSize: 9,
+    fontWeight: 600,
+    textTransform: "uppercase",
+    letterSpacing: "0.04em",
+    flexShrink: 0,
+  },
+  // Shared square icon-button style for the gear and "+" buttons.
+  gearBtn: {
+    width: 28,
+    height: 28,
+    borderRadius: 6,
+    border: "1px solid #27272A",
+    background: "transparent",
+    color: "#71717A",
+    cursor: "pointer",
+    display: "flex",
+    alignItems: "center",
+    justifyContent: "center",
+    fontSize: 14,
+    flexShrink: 0,
+  },
+};
diff --git a/frontend/components/FileTree.jsx b/frontend/components/FileTree.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..74352c434ba2a7a26ec6c61c63b32be21504c04a
--- /dev/null
+++ b/frontend/components/FileTree.jsx
@@ -0,0 +1,307 @@
+import React, { useState, useEffect } from "react";
+
+/**
+ * Simple recursive file tree viewer with refresh support
+ * Fetches tree data directly using the API.
+ */
+export default function FileTree({ repo, refreshTrigger, branch }) {
+ const [tree, setTree] = useState([]);
+ const [loading, setLoading] = useState(false);
+ const [isSwitchingBranch, setIsSwitchingBranch] = useState(false);
+ const [error, setError] = useState(null);
+ const [localRefresh, setLocalRefresh] = useState(0);
+
+ useEffect(() => {
+ if (!repo) return;
+
+ // Determine if this is a branch switch (we already have data)
+ const hasExistingData = tree.length > 0;
+ if (hasExistingData) {
+ setIsSwitchingBranch(true);
+ } else {
+ setLoading(true);
+ }
+ setError(null);
+
+ // Construct headers manually
+ let headers = {};
+ try {
+ const token = localStorage.getItem("github_token");
+ if (token) {
+ headers = { Authorization: `Bearer ${token}` };
+ }
+ } catch (e) {
+ console.warn("Unable to read github_token", e);
+ }
+
+ // Add cache busting + selected branch ref
+ const refParam = branch ? `&ref=${encodeURIComponent(branch)}` : "";
+ const cacheBuster = `?_t=${Date.now()}${refParam}`;
+
+ let cancelled = false;
+
+ fetch(`/api/repos/${repo.owner}/${repo.name}/tree${cacheBuster}`, { headers })
+ .then(async (res) => {
+ if (!res.ok) {
+ const errData = await res.json().catch(() => ({}));
+ throw new Error(errData.detail || "Failed to load files");
+ }
+ return res.json();
+ })
+ .then((data) => {
+ if (cancelled) return;
+ if (data.files && Array.isArray(data.files)) {
+ setTree(buildTree(data.files));
+ setError(null);
+ } else {
+ setError("No files found in repository");
+ }
+ })
+ .catch((err) => {
+ if (cancelled) return;
+ setError(err.message);
+ console.error("FileTree error:", err);
+ })
+ .finally(() => {
+ if (cancelled) return;
+ setIsSwitchingBranch(false);
+ setLoading(false);
+ });
+
+ return () => { cancelled = true; };
+ }, [repo, branch, refreshTrigger, localRefresh]); // eslint-disable-line react-hooks/exhaustive-deps
+
+ const handleRefresh = () => {
+ setLocalRefresh(prev => prev + 1);
+ };
+
+ // Theme matching parent component
+ const theme = {
+ border: "#27272A",
+ textPrimary: "#EDEDED",
+ textSecondary: "#A1A1AA",
+ accent: "#D95C3D",
+ warningText: "#F59E0B",
+ warningBg: "rgba(245, 158, 11, 0.1)",
+ warningBorder: "rgba(245, 158, 11, 0.2)",
+ };
+
+ const styles = {
+ header: {
+ display: "flex",
+ alignItems: "center",
+ justifyContent: "space-between",
+ padding: "8px 20px 8px 10px",
+ marginBottom: "8px",
+ borderBottom: `1px solid ${theme.border}`,
+ },
+ headerTitle: {
+ fontSize: "12px",
+ fontWeight: "600",
+ color: theme.textSecondary,
+ textTransform: "uppercase",
+ letterSpacing: "0.5px",
+ },
+ refreshButton: {
+ backgroundColor: "transparent",
+ border: `1px solid ${theme.border}`,
+ color: theme.textSecondary,
+ padding: "4px 8px",
+ borderRadius: "4px",
+ fontSize: "11px",
+ cursor: loading ? "not-allowed" : "pointer",
+ display: "flex",
+ alignItems: "center",
+ gap: "4px",
+ transition: "all 0.2s",
+ opacity: loading ? 0.5 : 1,
+ },
+ switchingBar: {
+ padding: "6px 20px",
+ fontSize: "11px",
+ color: theme.textSecondary,
+ backgroundColor: "rgba(59, 130, 246, 0.06)",
+ borderBottom: `1px solid ${theme.border}`,
+ },
+ loadingText: {
+ padding: "0 20px",
+ color: theme.textSecondary,
+ fontSize: "13px",
+ },
+ errorBox: {
+ padding: "12px 20px",
+ color: theme.warningText,
+ fontSize: "12px",
+ backgroundColor: theme.warningBg,
+ border: `1px solid ${theme.warningBorder}`,
+ borderRadius: "6px",
+ margin: "0 10px",
+ },
+ emptyText: {
+ padding: "0 20px",
+ color: theme.textSecondary,
+ fontSize: "13px",
+ },
+ treeContainer: {
+ fontSize: "13px",
+ color: theme.textSecondary,
+ padding: "0 10px 20px 10px",
+ },
+ };
+
+ return (
+
+ {/* Header with Refresh Button */}
+
+
Files
+
{
+ if (!loading) {
+ e.currentTarget.style.backgroundColor = "rgba(255, 255, 255, 0.05)";
+ }
+ }}
+ onMouseOut={(e) => {
+ e.currentTarget.style.backgroundColor = "transparent";
+ }}
+ >
+
+
+
+ {loading ? "..." : "Refresh"}
+
+
+
+ {/* Branch switch indicator (shown above existing tree, doesn't clear it) */}
+ {isSwitchingBranch && (
+
Loading branch...
+ )}
+
+ {/* Content */}
+ {loading && tree.length === 0 && (
+
Loading files...
+ )}
+
+ {!loading && !isSwitchingBranch && error && (
+
{error}
+ )}
+
+ {!loading && !isSwitchingBranch && !error && tree.length === 0 && (
+
No files found
+ )}
+
+ {tree.length > 0 && (
+
+ {tree.map((node) => (
+
+ ))}
+
+ )}
+
+ );
+}
+
+// Recursive Node Component
+function TreeNode({ node, level }) {
+ const [expanded, setExpanded] = useState(false);
+ const isFolder = node.children && node.children.length > 0;
+
+ const icon = isFolder ? (expanded ? "📂" : "📁") : "📄";
+
+ return (
+
+
isFolder && setExpanded(!expanded)}
+ style={{
+ padding: "4px 0",
+ paddingLeft: `${level * 12}px`,
+ cursor: isFolder ? "pointer" : "default",
+ display: "flex",
+ alignItems: "center",
+ gap: "6px",
+ color: isFolder ? "#EDEDED" : "#A1A1AA",
+ whiteSpace: "nowrap"
+ }}
+ >
+ {icon}
+ {node.name}
+
+
+ {isFolder && expanded && (
+
+ {node.children.map(child => (
+
+ ))}
+
+ )}
+
+ );
+}
+
// Build a nested tree from a flat list of { path, type } records
// (e.g. the payload of a git "trees" style API).
//
// Each returned node is { name, path, type, children }:
//   - intermediate path segments become implicit folders (type "tree")
//   - the final segment of each path keeps the record's own type
// At every depth, folders sort before files, then siblings sort by
// locale-aware name comparison.
function buildTree(files) {
  const root = [];
  // Per-level name -> node index so sibling lookup is O(1) instead of an
  // Array.find linear scan per segment (quadratic for wide directories).
  const levelIndex = new Map(); // children array -> Map(name -> node)

  const indexFor = (level) => {
    let byName = levelIndex.get(level);
    if (!byName) {
      byName = new Map();
      levelIndex.set(level, byName);
    }
    return byName;
  };

  files.forEach((file) => {
    // Drop empty segments caused by leading/trailing/duplicate slashes,
    // which would otherwise create empty-named phantom nodes.
    const parts = file.path.split("/").filter(Boolean);
    let currentLevel = root;
    let currentPath = "";

    parts.forEach((part, idx) => {
      currentPath = currentPath ? `${currentPath}/${part}` : part;

      const byName = indexFor(currentLevel);
      let node = byName.get(part);

      if (!node) {
        node = {
          name: part,
          path: currentPath,
          // Only the final segment carries the record's own type;
          // intermediate segments are implicit folders.
          type: idx === parts.length - 1 ? file.type : "tree",
          children: [],
        };
        currentLevel.push(node);
        byName.set(part, node);
      }

      if (idx < parts.length - 1) {
        currentLevel = node.children;
      }
    });
  });

  // A node is a folder if it is explicitly typed "tree" (covers empty
  // directories the API reports directly) OR it accumulated children.
  // The previous children-only check misfiled childless "tree" entries.
  const isFolder = (n) => n.type === "tree" || n.children.length > 0;

  // Recursively sort: folders first, then locale-aware by name.
  const sortNodes = (nodes) => {
    nodes.sort((a, b) => {
      const aFolder = isFolder(a);
      const bFolder = isFolder(b);
      if (aFolder !== bFolder) return aFolder ? -1 : 1;
      return a.name.localeCompare(b.name);
    });
    nodes.forEach((n) => {
      if (n.children.length > 0) sortNodes(n.children);
    });
  };

  sortNodes(root);
  return root;
}
\ No newline at end of file
diff --git a/frontend/components/FlowViewer.jsx b/frontend/components/FlowViewer.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..71ef1e0b71603c85dd38ed37b2e930ebf6405bde
--- /dev/null
+++ b/frontend/components/FlowViewer.jsx
@@ -0,0 +1,659 @@
+import React, { useEffect, useState, useCallback, useRef } from "react";
+import ReactFlow, { Background, Controls, MiniMap } from "reactflow";
+import "reactflow/dist/style.css";
+
+/* ------------------------------------------------------------------ */
+/* Node type → colour mapping */
+/* ------------------------------------------------------------------ */
// Border/background colour pairs per flow-node type.
const NODE_COLOURS = {
  agent: { border: "#ff7a3c", bg: "#20141a" },
  router: { border: "#6c8cff", bg: "#141828" },
  tool: { border: "#3a3b4d", bg: "#141821" },
  tool_group: { border: "#3a3b4d", bg: "#141821" },
  user: { border: "#4caf88", bg: "#14211a" },
  output: { border: "#9c6cff", bg: "#1a1428" },
};
// Fallback palette for unknown node types.
const DEFAULT_COLOUR = { border: "#3a3b4d", bg: "#141821" };

// Resolve the colour pair for a node type, falling back to DEFAULT_COLOUR.
// Uses an own-property check rather than `NODE_COLOURS[type] || DEFAULT`:
// the bare lookup would resolve inherited Object.prototype keys (e.g. a
// type string of "constructor" or "toString") to truthy non-colour values.
function colourFor(type) {
  return Object.prototype.hasOwnProperty.call(NODE_COLOURS, type)
    ? NODE_COLOURS[type]
    : DEFAULT_COLOUR;
}
+
+const STYLE_COLOURS = {
+ single_task: "#6c8cff",
+ react_loop: "#ff7a3c",
+ crew_pipeline: "#4caf88",
+};
+
+const STYLE_LABELS = {
+ single_task: "Dispatch",
+ react_loop: "ReAct Loop",
+ crew_pipeline: "Pipeline",
+};
+
+/* ------------------------------------------------------------------ */
+/* TopologyCard — single clickable topology card */
+/* ------------------------------------------------------------------ */
+function TopologyCard({ topology, isActive, onClick }) {
+ const styleColor = STYLE_COLOURS[topology.execution_style] || "#9a9bb0";
+ const agentCount = topology.agents_used?.length || 0;
+
+ return (
+
+
+ {topology.icon}
+
+ {STYLE_LABELS[topology.execution_style] || topology.execution_style}
+
+
+
+ {topology.name}
+
+ {topology.description}
+
+ {agentCount} agent{agentCount !== 1 ? "s" : ""}
+
+
+ );
+}
+
+const cardStyles = {
+ card: {
+ display: "flex",
+ flexDirection: "column",
+ gap: 4,
+ padding: "10px 12px",
+ borderRadius: 8,
+ border: "1px solid #1e1f30",
+ cursor: "pointer",
+ textAlign: "left",
+ minWidth: 170,
+ maxWidth: 200,
+ flexShrink: 0,
+ transition: "border-color 0.2s, background-color 0.2s",
+ },
+ cardTop: {
+ display: "flex",
+ alignItems: "center",
+ justifyContent: "space-between",
+ gap: 6,
+ },
+ icon: {
+ fontSize: 18,
+ },
+ styleBadge: {
+ fontSize: 9,
+ fontWeight: 700,
+ textTransform: "uppercase",
+ letterSpacing: "0.05em",
+ padding: "1px 6px",
+ borderRadius: 4,
+ border: "1px solid",
+ },
+ name: {
+ fontSize: 12,
+ fontWeight: 600,
+ lineHeight: 1.3,
+ },
+ desc: {
+ fontSize: 10,
+ color: "#71717A",
+ lineHeight: 1.3,
+ overflow: "hidden",
+ display: "-webkit-box",
+ WebkitLineClamp: 2,
+ WebkitBoxOrient: "vertical",
+ },
+ agentCount: {
+ fontSize: 9,
+ color: "#52525B",
+ fontWeight: 600,
+ marginTop: 2,
+ },
+};
+
+/* ------------------------------------------------------------------ */
+/* TopologyPanel — card grid grouped by category */
+/* ------------------------------------------------------------------ */
+function TopologyPanel({
+ topologies,
+ activeTopology,
+ autoMode,
+ autoResult,
+ onSelect,
+ onToggleAuto,
+}) {
+ const systems = topologies.filter((t) => t.category === "system");
+ const pipelines = topologies.filter((t) => t.category === "pipeline");
+
+ return (
+
+ {/* Auto-detect toggle */}
+
+
+
+
+
+
+ Auto
+
+ {autoMode && autoResult && (
+
+ Detected: {autoResult.icon} {autoResult.name}
+ {autoResult.confidence != null && (
+
+ {" "}({Math.round(autoResult.confidence * 100)}%)
+
+ )}
+
+ )}
+
+
+ {/* System architectures */}
+
+
System Architectures
+
+ {systems.map((t) => (
+ onSelect(t.id)}
+ />
+ ))}
+
+
+
+ {/* Task pipelines */}
+
+
Task Pipelines
+
+ {pipelines.map((t) => (
+ onSelect(t.id)}
+ />
+ ))}
+
+
+
+ );
+}
+
+const panelStyles = {
+ root: {
+ padding: "8px 16px 12px",
+ borderBottom: "1px solid #1e1f30",
+ backgroundColor: "#08090e",
+ },
+ autoRow: {
+ display: "flex",
+ alignItems: "center",
+ gap: 10,
+ marginBottom: 10,
+ },
+ autoBtn: {
+ display: "flex",
+ alignItems: "center",
+ gap: 5,
+ padding: "4px 10px",
+ borderRadius: 6,
+ border: "1px solid #27272A",
+ background: "transparent",
+ fontSize: 11,
+ fontWeight: 600,
+ cursor: "pointer",
+ transition: "border-color 0.15s, color 0.15s",
+ },
+ autoHint: {
+ fontSize: 11,
+ color: "#9a9bb0",
+ },
+ section: {
+ marginBottom: 8,
+ },
+ sectionLabel: {
+ fontSize: 9,
+ fontWeight: 700,
+ textTransform: "uppercase",
+ letterSpacing: "0.08em",
+ color: "#52525B",
+ marginBottom: 6,
+ },
+ cardRow: {
+ display: "flex",
+ gap: 8,
+ overflowX: "auto",
+ scrollbarWidth: "none",
+ paddingBottom: 2,
+ },
+};
+
+/* ------------------------------------------------------------------ */
+/* Main FlowViewer component */
+/* ------------------------------------------------------------------ */
+export default function FlowViewer() {
+ const [nodes, setNodes] = useState([]);
+ const [edges, setEdges] = useState([]);
+ const [loading, setLoading] = useState(false);
+ const [error, setError] = useState("");
+
+ // Topology state
+ const [topologies, setTopologies] = useState([]);
+ const [activeTopology, setActiveTopology] = useState(null);
+ const [topologyMeta, setTopologyMeta] = useState(null);
+
+ // Auto-detection state
+ const [autoMode, setAutoMode] = useState(false);
+ const [autoResult, setAutoResult] = useState(null);
+ const [autoTestMessage, setAutoTestMessage] = useState("");
+
+ const initialLoadDone = useRef(false);
+
+ /* ---------- Load topology list on mount ---------- */
+ useEffect(() => {
+ (async () => {
+ try {
+ const [topoRes, prefRes] = await Promise.all([
+ fetch("/api/flow/topologies"),
+ fetch("/api/settings/topology"),
+ ]);
+ if (topoRes.ok) {
+ const data = await topoRes.json();
+ setTopologies(data);
+ }
+ if (prefRes.ok) {
+ const { topology } = await prefRes.json();
+ if (topology) {
+ setActiveTopology(topology);
+ }
+ }
+ } catch (e) {
+ console.warn("Failed to load topologies:", e);
+ }
+ initialLoadDone.current = true;
+ })();
+ }, []);
+
+ /* ---------- Load graph when topology changes ---------- */
+ const loadGraph = useCallback(async (topologyId) => {
+ setLoading(true);
+ setError("");
+ try {
+ const url = topologyId
+ ? `/api/flow/current?topology=${encodeURIComponent(topologyId)}`
+ : "/api/flow/current";
+ const res = await fetch(url);
+ const data = await res.json();
+ if (!res.ok) throw new Error(data.error || "Failed to load flow");
+
+ // Track topology metadata from response
+ if (data.topology_id) {
+ setTopologyMeta({
+ id: data.topology_id,
+ name: data.topology_name,
+ icon: data.topology_icon,
+ description: data.topology_description,
+ execution_style: data.execution_style,
+ agents_used: topologies.find((t) => t.id === data.topology_id)?.agents_used || [],
+ });
+ }
+
+ // Build ReactFlow nodes
+ const RFnodes = data.nodes.map((n, i) => {
+ const nodeType = n.type || "default";
+ const colour = colourFor(nodeType);
+ const d = n.data || {};
+
+ const label = d.label || n.label || n.id;
+ const description = d.description || n.description || "";
+ const model = d.model;
+ const mode = d.mode;
+
+ const pos = n.position || {
+ x: 50 + (i % 3) * 250,
+ y: 50 + Math.floor(i / 3) * 180,
+ };
+
+ return {
+ id: n.id,
+ data: {
+ label: (
+
+
+ {label}
+
+ {model && (
+
+ {model}
+
+ )}
+ {mode && (
+
+ {mode}
+
+ )}
+
+ {description}
+
+
+ ),
+ },
+ position: pos,
+ type: "default",
+ style: {
+ borderRadius: 12,
+ padding: "12px 16px",
+ border: `2px solid ${colour.border}`,
+ background: colour.bg,
+ color: "#f5f5f7",
+ fontSize: 13,
+ minWidth: 180,
+ maxWidth: 220,
+ },
+ };
+ });
+
+ // Build ReactFlow edges
+ const RFedges = data.edges.map((e) => ({
+ id: e.id,
+ source: e.source,
+ target: e.target,
+ label: e.label,
+ animated: e.animated !== false,
+ style: { stroke: "#7a7b8e", strokeWidth: 2 },
+ labelStyle: { fill: "#c3c5dd", fontSize: 11, fontWeight: 500 },
+ labelBgStyle: { fill: "#101117", fillOpacity: 0.9 },
+ ...(e.type === "bidirectional" && {
+ markerEnd: { type: "arrowclosed", color: "#7a7b8e" },
+ markerStart: { type: "arrowclosed", color: "#7a7b8e" },
+ animated: false,
+ style: { stroke: "#555670", strokeWidth: 1.5, strokeDasharray: "5 5" },
+ }),
+ }));
+
+ setNodes(RFnodes);
+ setEdges(RFedges);
+ } catch (e) {
+ console.error(e);
+ setError(e.message);
+ } finally {
+ setLoading(false);
+ }
+ }, [topologies]);
+
+ // Load graph whenever activeTopology changes
+ useEffect(() => {
+ loadGraph(activeTopology);
+ }, [activeTopology, loadGraph]);
+
+ /* ---------- Topology selection handler ---------- */
+ const handleTopologyChange = useCallback(
+ async (newTopologyId) => {
+ setActiveTopology(newTopologyId);
+ setAutoMode(false); // Manual selection disables auto
+ // Persist preference (fire-and-forget)
+ try {
+ await fetch("/api/settings/topology", {
+ method: "POST",
+ headers: { "Content-Type": "application/json" },
+ body: JSON.stringify({ topology: newTopologyId }),
+ });
+ } catch (e) {
+ console.warn("Failed to save topology preference:", e);
+ }
+ },
+ []
+ );
+
+ /* ---------- Auto-detection ---------- */
+ const handleToggleAuto = useCallback(() => {
+ setAutoMode((prev) => !prev);
+ if (!autoMode) {
+ setAutoResult(null);
+ }
+ }, [autoMode]);
+
+ const handleAutoClassify = useCallback(
+ async (message) => {
+ if (!message.trim()) return;
+ try {
+ const res = await fetch("/api/flow/classify", {
+ method: "POST",
+ headers: { "Content-Type": "application/json" },
+ body: JSON.stringify({ message }),
+ });
+ if (!res.ok) return;
+ const data = await res.json();
+ const recommendedId = data.recommended_topology;
+ const topo = topologies.find((t) => t.id === recommendedId);
+ setAutoResult({
+ id: recommendedId,
+ name: topo?.name || recommendedId,
+ icon: topo?.icon || "",
+ confidence: data.confidence,
+ alternatives: data.alternatives || [],
+ });
+ setActiveTopology(recommendedId);
+ } catch (e) {
+ console.warn("Auto-classify failed:", e);
+ }
+ },
+ [topologies]
+ );
+
+ // Debounced auto-classify when test message changes
+ useEffect(() => {
+ if (!autoMode || !autoTestMessage.trim()) return;
+ const t = setTimeout(() => handleAutoClassify(autoTestMessage), 500);
+ return () => clearTimeout(t);
+ }, [autoTestMessage, autoMode, handleAutoClassify]);
+
+ /* ---------- Render ---------- */
+ const activeStyleColor = STYLE_COLOURS[topologyMeta?.execution_style] || "#9a9bb0";
+
+ return (
+
+ {/* Header */}
+
+
+
Agent Workflow
+
+ Visual view of the multi-agent system that GitPilot uses to
+ plan and apply changes to your repositories.
+
+
+
+ {topologyMeta && (
+
+ {topologyMeta.icon}
+ {topologyMeta.name}
+
+ {STYLE_LABELS[topologyMeta.execution_style] || topologyMeta.execution_style}
+
+ {topologyMeta.agents_used?.length || 0} agents
+
+ )}
+ {loading &&
Loading... }
+
+
+
+ {/* Topology selector panel */}
+ {topologies.length > 0 && (
+
+ )}
+
+ {/* Auto-detection test input (shown when auto mode is on) */}
+ {autoMode && (
+
+
+ Test auto-detection: type a task description to see which topology is recommended
+
+
setAutoTestMessage(e.target.value)}
+ style={autoInputStyles.input}
+ />
+ {autoResult && autoResult.alternatives?.length > 0 && (
+
+ Alternatives:
+ {autoResult.alternatives.slice(0, 3).map((alt) => {
+ const altTopo = topologies.find((t) => t.id === alt.id);
+ return (
+ handleTopologyChange(alt.id)}
+ >
+ {altTopo?.icon} {altTopo?.name || alt.id}
+
+ {alt.confidence != null ? ` ${Math.round(alt.confidence * 100)}%` : ""}
+
+
+ );
+ })}
+
+ )}
+
+ )}
+
+ {/* Description bar */}
+ {topologyMeta && topologyMeta.description && !autoMode && (
+
+ {topologyMeta.icon} {topologyMeta.description}
+
+ )}
+
+ {/* ReactFlow canvas */}
+
+ {error ? (
+
+ ) : (
+
+
+ {
+ const border = node.style?.border || "";
+ if (border.includes("#ff7a3c")) return "#ff7a3c";
+ if (border.includes("#6c8cff")) return "#6c8cff";
+ if (border.includes("#4caf88")) return "#4caf88";
+ if (border.includes("#9c6cff")) return "#9c6cff";
+ return "#3a3b4d";
+ }}
+ maskColor="rgba(0, 0, 0, 0.6)"
+ />
+
+
+ )}
+
+
+ );
+}
+
+const autoInputStyles = {
+ wrap: {
+ padding: "8px 16px 10px",
+ borderBottom: "1px solid #1e1f30",
+ backgroundColor: "#0c0d14",
+ },
+ label: {
+ fontSize: 10,
+ color: "#71717A",
+ marginBottom: 6,
+ },
+ input: {
+ width: "100%",
+ padding: "8px 12px",
+ borderRadius: 6,
+ border: "1px solid #27272A",
+ background: "#08090e",
+ color: "#e0e1f0",
+ fontSize: 12,
+ fontFamily: "monospace",
+ outline: "none",
+ boxSizing: "border-box",
+ },
+ altRow: {
+ display: "flex",
+ alignItems: "center",
+ gap: 6,
+ marginTop: 6,
+ flexWrap: "wrap",
+ },
+ altBtn: {
+ padding: "2px 8px",
+ borderRadius: 4,
+ border: "1px solid #27272A",
+ background: "transparent",
+ color: "#9a9bb0",
+ fontSize: 10,
+ cursor: "pointer",
+ },
+};
diff --git a/frontend/components/Footer.jsx b/frontend/components/Footer.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..71e8a0bca926c639c885bde541d6b3f7c9c10fbd
--- /dev/null
+++ b/frontend/components/Footer.jsx
@@ -0,0 +1,48 @@
+import React from "react";
+
+export default function Footer() {
+ return (
+
+ );
+}
\ No newline at end of file
diff --git a/frontend/components/LlmSettings.jsx b/frontend/components/LlmSettings.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..e90b2180af222ad7fda08a9cd18852ed806eaf44
--- /dev/null
+++ b/frontend/components/LlmSettings.jsx
@@ -0,0 +1,623 @@
+import React, { useEffect, useMemo, useState } from "react";
+import { testProvider } from "../utils/api";
+
+const PROVIDERS = ["ollabridge", "openai", "claude", "watsonx", "ollama"];
+
+const PROVIDER_LABELS = {
+ ollabridge: "OllaBridge Cloud",
+ openai: "OpenAI",
+ claude: "Claude",
+ watsonx: "Watsonx",
+ ollama: "Ollama",
+};
+
+const AUTH_MODES = [
+ { id: "device", label: "Device Pairing", icon: "📱" },
+ { id: "apikey", label: "API Key", icon: "🔑" },
+ { id: "local", label: "Local Trust", icon: "🏠" },
+];
+
+function LoadingState({ loadingMessage, loadingSlow, onRetry }) {
+ return (
+
+
+
+
AI Providers
+
Admin / LLM Settings
+
{loadingMessage}
+
+ {loadingSlow && (
+
+
+ This is taking longer than expected. The backend may still be
+ starting or the settings endpoint may be slow.
+
+
+ Retry
+
+
+ )}
+
+
+ );
+}
+
+export default function LlmSettings() {
+ const [settings, setSettings] = useState(null);
+ const [initialLoading, setInitialLoading] = useState(true);
+ const [loadingSlow, setLoadingSlow] = useState(false);
+
+ const [saving, setSaving] = useState(false);
+ const [error, setError] = useState("");
+ const [savedMsg, setSavedMsg] = useState("");
+
+ const [modelsByProvider, setModelsByProvider] = useState({});
+ const [modelsError, setModelsError] = useState("");
+ const [loadingModelsFor, setLoadingModelsFor] = useState("");
+
+ const [testResult, setTestResult] = useState(null);
+ const [testing, setTesting] = useState(false);
+
+ const [authMode, setAuthMode] = useState("local");
+ const [pairCode, setPairCode] = useState("");
+ const [pairing, setPairing] = useState(false);
+ const [pairResult, setPairResult] = useState(null);
+
+ const loadingMessage = useMemo(() => {
+ if (loadingSlow) {
+ return "Still loading provider configuration…";
+ }
+ return "Loading current configuration…";
+ }, [loadingSlow]);
+
+ const loadSettings = async () => {
+ setInitialLoading(true);
+ setError("");
+ setLoadingSlow(false);
+
+ let slowTimer;
+ try {
+ slowTimer = window.setTimeout(() => {
+ setLoadingSlow(true);
+ }, 1500);
+
+ const res = await fetch("/api/settings");
+ const data = await res.json();
+
+ if (!res.ok) {
+ throw new Error(data.error || "Failed to load settings");
+ }
+
+ setSettings(data);
+ } catch (e) {
+ console.error(e);
+ setError(e.message || "Failed to load settings");
+ } finally {
+ window.clearTimeout(slowTimer);
+ setInitialLoading(false);
+ }
+ };
+
+ useEffect(() => {
+ loadSettings();
+ }, []);
+
+ const updateField = (section, field, value) => {
+ setSettings((prev) => ({
+ ...prev,
+ [section]: {
+ ...prev[section],
+ [field]: value,
+ },
+ }));
+ };
+
+ const handleSave = async () => {
+ setSaving(true);
+ setError("");
+ setSavedMsg("");
+
+ try {
+ const res = await fetch("/api/settings/llm", {
+ method: "PUT",
+ headers: { "Content-Type": "application/json" },
+ body: JSON.stringify(settings),
+ });
+
+ const data = await res.json();
+ if (!res.ok) throw new Error(data.error || "Failed to save settings");
+
+ setSettings(data);
+ setSavedMsg("Settings saved successfully!");
+ setTimeout(() => setSavedMsg(""), 3000);
+ } catch (e) {
+ console.error(e);
+ setError(e.message || "Failed to save settings");
+ } finally {
+ setSaving(false);
+ }
+ };
+
+ const loadModelsForProvider = async (provider) => {
+ setModelsError("");
+ setLoadingModelsFor(provider);
+
+ try {
+ const res = await fetch(`/api/settings/models?provider=${provider}`);
+ const data = await res.json();
+
+ if (!res.ok || data.error) {
+ throw new Error(data.error || "Failed to load models");
+ }
+
+ setModelsByProvider((prev) => ({
+ ...prev,
+ [provider]: data.models || [],
+ }));
+ } catch (e) {
+ console.error(e);
+ setModelsError(e.message || "Failed to load models");
+ } finally {
+ setLoadingModelsFor("");
+ }
+ };
+
+ const handlePair = async () => {
+ if (!pairCode.trim()) return;
+
+ setPairing(true);
+ setPairResult(null);
+
+ try {
+ const baseUrl =
+ settings?.ollabridge?.base_url || "https://ruslanmv-ollabridge.hf.space";
+
+ const res = await fetch("/api/ollabridge/pair", {
+ method: "POST",
+ headers: { "Content-Type": "application/json" },
+ body: JSON.stringify({ base_url: baseUrl, code: pairCode.trim() }),
+ });
+
+ const data = await res.json();
+
+ if (data.success) {
+ setPairResult({ ok: true, message: "Paired successfully!" });
+ if (data.token) {
+ updateField("ollabridge", "api_key", data.token);
+ }
+ } else {
+ setPairResult({
+ ok: false,
+ message: data.error || "Pairing failed",
+ });
+ }
+ } catch (e) {
+ setPairResult({ ok: false, message: e.message || "Pairing failed" });
+ } finally {
+ setPairing(false);
+ }
+ };
+
+ const handleTestConnection = async () => {
+ setTesting(true);
+ setTestResult(null);
+
+ try {
+ const activeProvider = settings?.provider || "ollama";
+ const config = { provider: activeProvider };
+
+ if (activeProvider === "openai" && settings?.openai) {
+ config.openai = {
+ api_key: settings.openai.api_key,
+ base_url: settings.openai.base_url,
+ model: settings.openai.model,
+ };
+ } else if (activeProvider === "claude" && settings?.claude) {
+ config.claude = {
+ api_key: settings.claude.api_key,
+ base_url: settings.claude.base_url,
+ model: settings.claude.model,
+ };
+ } else if (activeProvider === "watsonx" && settings?.watsonx) {
+ config.watsonx = {
+ api_key: settings.watsonx.api_key,
+ project_id: settings.watsonx.project_id,
+ base_url: settings.watsonx.base_url,
+ model_id: settings.watsonx.model_id,
+ };
+ } else if (activeProvider === "ollama" && settings?.ollama) {
+ config.ollama = {
+ base_url: settings.ollama.base_url,
+ model: settings.ollama.model,
+ };
+ } else if (activeProvider === "ollabridge" && settings?.ollabridge) {
+ config.ollabridge = {
+ base_url: settings.ollabridge.base_url,
+ model: settings.ollabridge.model,
+ api_key: settings.ollabridge.api_key,
+ };
+ }
+
+ const result = await testProvider(config);
+ setTestResult(result);
+ } catch (err) {
+ setTestResult({
+ health: "error",
+ warning: err.message || "Test failed",
+ });
+ } finally {
+ setTesting(false);
+ }
+ };
+
+ if (initialLoading) {
+ return (
+
+ );
+ }
+
+ if (!settings) {
+ return (
+
+
+
AI Providers
+
Admin / LLM Settings
+
+ {error || "Unable to load current configuration."}
+
+
+ Retry
+
+
+
+ );
+ }
+
+ const { provider } = settings;
+ const availableModels = modelsByProvider[provider] || [];
+
+ return (
+
+
AI Providers
+
+ Choose which LLM provider GitPilot should use for planning and agent
+ workflows. Provider settings are stored on the server.
+
+
+ {error &&
{error}
}
+ {savedMsg &&
{savedMsg}
}
+
+
+
Active provider
+
+ {PROVIDERS.map((p) => (
+ setSettings((prev) => ({ ...prev, provider: p }))}
+ >
+ {PROVIDER_LABELS[p] || p}
+
+ ))}
+
+
+
+ {provider === "ollabridge" && (
+
+
OllaBridge Cloud Configuration
+
+ Connect to OllaBridge Cloud or any OllaBridge instance for LLM
+ inference. No API key required for public endpoints.
+
+
+
Authentication Mode
+
+ {AUTH_MODES.map((m) => (
+ setAuthMode(m.id)}
+ >
+ {m.icon}
+ {m.label}
+
+ ))}
+
+
+ {authMode === "device" && (
+
+
+ Enter the pairing code from your OllaBridge console and click
+ Pair.
+
+
+ setPairCode(e.target.value.toUpperCase())}
+ onKeyDown={(e) => e.key === "Enter" && handlePair()}
+ />
+
+ {pairing ? "Pairing…" : "Pair"}
+
+
+ {pairResult && (
+
+ {pairResult.message}
+
+ )}
+
+ )}
+
+
Base URL
+
+ updateField("ollabridge", "base_url", e.target.value)
+ }
+ placeholder="https://your-ollabridge-endpoint"
+ />
+
+ {(authMode === "apikey" || authMode === "local") && (
+ <>
+
API Key
+
+ updateField("ollabridge", "api_key", e.target.value)
+ }
+ placeholder="Optional API key"
+ />
+ >
+ )}
+
+
Model
+
+
+ updateField("ollabridge", "model", e.target.value)
+ }
+ placeholder="qwen2.5:1.5b"
+ />
+ loadModelsForProvider("ollabridge")}
+ disabled={loadingModelsFor === "ollabridge"}
+ >
+ {loadingModelsFor === "ollabridge" ? "Loading…" : "Load Models"}
+
+
+
+ )}
+
+ {provider === "openai" && (
+
+
OpenAI Configuration
+
+
API Key
+
updateField("openai", "api_key", e.target.value)}
+ placeholder="sk-..."
+ />
+
+
Base URL
+
updateField("openai", "base_url", e.target.value)}
+ placeholder="Optional custom base URL"
+ />
+
+
Model
+
updateField("openai", "model", e.target.value)}
+ placeholder="gpt-4o-mini"
+ />
+
+ )}
+
+ {provider === "claude" && (
+
+
Claude Configuration
+
+
API Key
+
updateField("claude", "api_key", e.target.value)}
+ placeholder="Anthropic API key"
+ />
+
+
Base URL
+
updateField("claude", "base_url", e.target.value)}
+ placeholder="Optional custom base URL"
+ />
+
+
Model
+
updateField("claude", "model", e.target.value)}
+ placeholder="claude-sonnet-4-5"
+ />
+
+ )}
+
+ {provider === "watsonx" && (
+
+ )}
+
+ {provider === "ollama" && (
+
+
Ollama Configuration
+
+
Base URL
+
updateField("ollama", "base_url", e.target.value)}
+ placeholder="http://localhost:11434"
+ />
+
+
Model
+
+ updateField("ollama", "model", e.target.value)}
+ placeholder="llama3"
+ />
+ loadModelsForProvider("ollama")}
+ disabled={loadingModelsFor === "ollama"}
+ >
+ {loadingModelsFor === "ollama" ? "Loading…" : "Load Models"}
+
+
+
+ )}
+
+ {availableModels.length > 0 && (
+
+
Available Models
+
+ {availableModels.map((model) => (
+ updateField(provider, "model", model)}
+ >
+ {model}
+
+ ))}
+
+
+ )}
+
+ {modelsError &&
{modelsError}
}
+
+ {testResult && (
+
+ {testResult.health === "ok"
+ ? testResult.details || "Provider connection successful."
+ : testResult.warning || "Provider connection failed."}
+
+ )}
+
+
+
+ {saving ? "Saving…" : "Save Settings"}
+
+
+
+ {testing ? "Testing…" : "Test Connection"}
+
+
+
+ );
+}
\ No newline at end of file
diff --git a/frontend/components/LoginPage.jsx b/frontend/components/LoginPage.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..9d0fd67d1300c080de52c6ce91b0e03a0ce503dc
--- /dev/null
+++ b/frontend/components/LoginPage.jsx
@@ -0,0 +1,544 @@
+// frontend/components/LoginPage.jsx
+import React, { useState, useEffect, useRef } from "react";
+import { apiUrl, safeFetchJSON } from "../utils/api.js";
+import { initApp } from "../utils/appInit.js";
+
+/**
+ * GitPilot – Enterprise Agentic Login
+ * Theme: "Claude Code" / Anthropic Enterprise (Dark + Warm Orange)
+ */
+
+export default function LoginPage({ onAuthenticated, backendReady = false }) {
+ // Auth State
+ const [authProcessing, setAuthProcessing] = useState(false);
+ const [error, setError] = useState("");
+
+ // Mode State: 'loading' | 'web' (Has Secret) | 'device' (No Secret)
+ const [mode, setMode] = useState("loading");
+
+ // Device Flow State
+ const [deviceData, setDeviceData] = useState(null);
+ const pollTimer = useRef(null);
+ const stopPolling = useRef(false); // Flag to safely stop async polling
+
+ // Web Flow State
+ const [missingClientId, setMissingClientId] = useState(false);
+
+ // REF FIX: Prevents React StrictMode from running the auth exchange twice
+ const processingRef = useRef(false);
+ const authCheckDone = useRef(false);
+
  // 1. Initialization Effect — runs once on mount AND when backendReady changes.
  // Decides which auth mode to render:
  //   - "web"    when returning from a GitHub OAuth redirect (?code=...),
  //              or when the shared init reports web-flow config,
  //   - "device" when no client secret is configured (or backend is down),
  //   - "loading" until either is known.
  useEffect(() => {
    // Skip if already resolved
    if (authCheckDone.current && mode !== "loading") return;

    const params = new URLSearchParams(window.location.search);
    const code = params.get("code");
    const state = params.get("state");

    // A. If returning from GitHub (Web Flow Callback)
    if (code) {
      // processingRef guards against React StrictMode double-invoking this
      // effect and exchanging the single-use authorization code twice.
      if (!processingRef.current) {
        processingRef.current = true;
        setMode("web");
        consumeOAuthCallback(code, state);
      }
      // NOTE(review): this branch returns without registering the cleanup
      // below — harmless today since device polling hasn't started yet in
      // this path, but confirm if flows ever interleave.
      return;
    }

    // B. Use the shared singleton init — reuses App.jsx's result.
    // No duplicate /api/auth/status calls, no separate retry loops.
    initApp().then((result) => {
      authCheckDone.current = true;
      if (result.ready) {
        setError("");
        setMode(result.authMode === "web" ? "web" : "device");
      } else {
        // Backend unreachable — allow device flow as fallback
        setError(result.error || "Backend unavailable");
        setMode("device");
      }
    });

    // Cleanup polling on unmount
    return () => {
      stopPolling.current = true;
      if (pollTimer.current) clearTimeout(pollTimer.current);
    };
    // eslint-disable-next-line react-hooks/exhaustive-deps
  }, [backendReady]);
+
+ // ===========================================================================
+ // WEB FLOW LOGIC (Standard OAuth2)
+ // ===========================================================================
+
+ async function consumeOAuthCallback(code, state) {
+ const expectedState = sessionStorage.getItem("gitpilot_oauth_state");
+ if (state && expectedState && expectedState !== state) {
+ console.warn("OAuth state mismatch - proceeding with caution.");
+ }
+
+ setAuthProcessing(true);
+ setError("");
+ window.history.replaceState({}, document.title, window.location.pathname);
+
+ try {
+ const data = await safeFetchJSON(apiUrl("/api/auth/callback"), {
+ method: "POST",
+ headers: { "Content-Type": "application/json" },
+ body: JSON.stringify({ code, state: state || "" }),
+ });
+
+ handleSuccess(data);
+ } catch (err) {
+ console.error("Login Error:", err);
+ setError(err instanceof Error ? err.message : "Login failed.");
+ setAuthProcessing(false);
+ }
+ }
+
  /**
   * Kick off the OAuth Web Flow: ask the backend for a GitHub authorization
   * URL, stash the CSRF `state` token in sessionStorage, and redirect the
   * browser to GitHub. authProcessing stays true on success because the page
   * navigates away.
   */
  async function handleSignInWithGitHub() {
    setError("");
    setMissingClientId(false);
    setAuthProcessing(true);

    try {
      const data = await safeFetchJSON(apiUrl("/api/auth/url"));

      // Persist state so consumeOAuthCallback can verify it after the
      // round trip to GitHub.
      if (data.state) {
        sessionStorage.setItem("gitpilot_oauth_state", data.state);
      }

      window.location.href = data.authorization_url;
    } catch (err) {
      console.error("Auth Start Error:", err);
      // Check for missing client ID (404/500 errors)
      // NOTE(review): this sniffs the HTTP status out of the error message
      // string — assumes safeFetchJSON embeds the status code in
      // err.message. Fragile; confirm against utils/api.js.
      if (err.message && (err.message.includes('404') || err.message.includes('500'))) {
        setMissingClientId(true);
      } else {
        setError(err instanceof Error ? err.message : "Could not start sign-in.");
      }
      setAuthProcessing(false);
    }
  }
+
+ // ===========================================================================
+ // DEVICE FLOW LOGIC (No Client Secret Required)
+ // ===========================================================================
+
  /**
   * Start the GitHub Device Flow (works without a client secret).
   *
   * Requests a device/user code pair from the backend, stores it in
   * deviceData for display, then begins polling for the access token.
   */
  const startDeviceFlow = async () => {
    setError("");
    setAuthProcessing(true);
    stopPolling.current = false; // Reset stop flag

    try {
      const data = await safeFetchJSON(apiUrl("/api/auth/device/code"), { method: "POST" });

      // Handle Errors — the backend relays GitHub-side failures in data.error.
      if (data.error) {
        // A 400/Bad Request here usually means Device Flow is not enabled on
        // the GitHub App; surface actionable setup instructions.
        if (data.error.includes("400") || data.error.includes("Bad Request")) {
          throw new Error("Device Flow is disabled in GitHub. Please go to your GitHub App Settings > 'General' > 'Identifying and authorizing users' and check the box 'Enable Device Flow'.");
        }
        throw new Error(data.error);
      }

      if (!data.device_code) throw new Error("Invalid device code response");

      setDeviceData(data);
      setAuthProcessing(false);

      // Start Polling (Recursive Timeout Pattern) at the interval GitHub
      // requested (default 5 seconds).
      pollDeviceToken(data.device_code, data.interval || 5);

    } catch (err) {
      setError(err.message);
      setAuthProcessing(false);
    }
  };
+
  /**
   * Poll the backend for the Device Flow access token.
   *
   * Uses a recursive setTimeout (not setInterval) so the next poll is only
   * scheduled after the previous response arrives. Backend status contract
   * as handled below:
   *   200 → token issued, finish login
   *   202 → authorization still pending, poll again after `interval` seconds
   *   other → terminal error, except a "slow_down" payload which widens the
   *           interval by 5s and keeps polling
   *
   * @param {string} deviceCode Code identifying this authorization request.
   * @param {number} interval   Seconds to wait before the next poll.
   */
  const pollDeviceToken = async (deviceCode, interval) => {
    // Bail out if the user cancelled or the component unmounted.
    if (stopPolling.current) return;

    try {
      const response = await fetch(apiUrl("/api/auth/device/poll"), {
        method: "POST",
        headers: { "Content-Type": "application/json" },
        body: JSON.stringify({ device_code: deviceCode })
      });

      // 1. Success (200)
      if (response.status === 200) {
        const data = await response.json();
        handleSuccess(data);
        return;
      }

      // 2. Pending (202) -> Continue Polling
      if (response.status === 202) {
        // Schedule next poll
        pollTimer.current = setTimeout(
          () => pollDeviceToken(deviceCode, interval),
          interval * 1000
        );
        return;
      }

      // 3. Error (4xx/5xx) -> Stop Polling & Show Error
      const errData = await response.json().catch(() => ({ error: "Unknown polling error" }));

      // Special case: If it's just a 'slow_down' warning (sometimes 400), we just wait longer
      if (errData.error === "slow_down") {
        pollTimer.current = setTimeout(
          () => pollDeviceToken(deviceCode, interval + 5),
          (interval + 5) * 1000
        );
        return;
      }

      // Terminal errors
      throw new Error(errData.error || `Polling failed: ${response.status}`);

    } catch (e) {
      console.error("Poll error:", e);
      // Suppress errors raised after cancellation/unmount — nothing to show.
      if (!stopPolling.current) {
        setError(e.message || "Failed to connect to authentication server.");
        setDeviceData(null); // Return to initial state
      }
    }
  };
+
  /**
   * "Check Status" button handler: perform one immediate poll instead of
   * waiting for the next scheduled one. 200 completes the login; 202 flashes
   * a transient "Still Pending..." label on the button. Other statuses are
   * ignored — the background poll loop owns terminal error handling.
   */
  const handleManualCheck = async () => {
    if (!deviceData?.device_code) return;

    try {
      const response = await fetch(apiUrl("/api/auth/device/poll"), {
        method: "POST",
        headers: { "Content-Type": "application/json" },
        body: JSON.stringify({ device_code: deviceData.device_code })
      });

      if (response.status === 200) {
        const data = await response.json();
        handleSuccess(data);
      } else if (response.status === 202) {
        // Visual feedback for pending state
        // NOTE(review): mutating the DOM node directly bypasses React state.
        // It works because nothing else re-renders this button's text during
        // the 2s window, but a `pending` state flag would be more idiomatic.
        const btn = document.getElementById("manual-check-btn");
        if (btn) {
          const originalText = btn.innerText;
          btn.innerText = "Still Pending...";
          btn.disabled = true;
          setTimeout(() => {
            btn.innerText = originalText;
            btn.disabled = false;
          }, 2000);
        }
      }
    } catch (e) {
      console.error("Manual check failed", e);
    }
  };
+
+ const handleCancelDeviceFlow = () => {
+ stopPolling.current = true;
+ if (pollTimer.current) clearTimeout(pollTimer.current);
+ setDeviceData(null);
+ setError("");
+ };
+
+ // ===========================================================================
+ // SHARED HELPERS
+ // ===========================================================================
+
  /**
   * Finalize a successful login (shared by the Web and Device flows).
   *
   * Stops any in-flight device polling, validates the payload, persists the
   * session to localStorage (best effort — storage may be blocked in private
   * browsing), and hands the session to the parent via onAuthenticated.
   *
   * @param {{access_token?: string, user?: object}} data Backend session payload.
   */
  function handleSuccess(data) {
    stopPolling.current = true; // Ensure polling stops
    if (pollTimer.current) clearTimeout(pollTimer.current);

    if (!data.access_token || !data.user) {
      setError("Server returned incomplete session data.");
      return;
    }

    try {
      localStorage.setItem("github_token", data.access_token);
      localStorage.setItem("github_user", JSON.stringify(data.user));
    } catch (e) {
      // Non-fatal: the session still works for this page load.
      console.warn("LocalStorage access denied:", e);
    }

    if (typeof onAuthenticated === "function") {
      onAuthenticated({
        access_token: data.access_token,
        user: data.user,
      });
    }
  }
+
+ // --- Design Token System ---
+ const theme = {
+ bg: "#131316",
+ cardBg: "#1C1C1F",
+ border: "#27272A",
+ accent: "#D95C3D",
+ accentHover: "#C44F32",
+ textPrimary: "#EDEDED",
+ textSecondary: "#A1A1AA",
+ font: '"Söhne", "Inter", -apple-system, sans-serif',
+ };
+
+ const styles = {
+ container: {
+ minHeight: "100vh",
+ display: "flex",
+ alignItems: "center",
+ justifyContent: "center",
+ backgroundColor: theme.bg,
+ fontFamily: theme.font,
+ color: theme.textPrimary,
+ letterSpacing: "-0.01em",
+ },
+ card: {
+ backgroundColor: theme.cardBg,
+ width: "100%",
+ maxWidth: "440px",
+ borderRadius: "12px",
+ border: `1px solid ${theme.border}`,
+ boxShadow: "0 24px 48px -12px rgba(0, 0, 0, 0.6)",
+ padding: "48px 40px",
+ textAlign: "center",
+ position: "relative",
+ },
+ logoBadge: {
+ width: "48px",
+ height: "48px",
+ backgroundColor: "rgba(217, 92, 61, 0.15)",
+ color: theme.accent,
+ borderRadius: "10px",
+ display: "flex",
+ alignItems: "center",
+ justifyContent: "center",
+ fontSize: "22px",
+ fontWeight: "700",
+ margin: "0 auto 32px auto",
+ border: "1px solid rgba(217, 92, 61, 0.2)",
+ },
+ h1: {
+ fontSize: "24px",
+ fontWeight: "600",
+ marginBottom: "12px",
+ color: theme.textPrimary,
+ },
+ p: {
+ fontSize: "14px",
+ color: theme.textSecondary,
+ lineHeight: "1.6",
+ marginBottom: "40px",
+ },
+ button: {
+ width: "100%",
+ height: "48px",
+ backgroundColor: theme.accent,
+ color: "#FFFFFF",
+ border: "none",
+ borderRadius: "8px",
+ fontSize: "14px",
+ fontWeight: "500",
+ cursor: (authProcessing || (mode === 'loading')) ? "not-allowed" : "pointer",
+ opacity: (authProcessing || (mode === 'loading')) ? 0.7 : 1,
+ transition: "background-color 0.2s ease",
+ display: "flex",
+ alignItems: "center",
+ justifyContent: "center",
+ gap: "10px",
+ boxShadow: "0 4px 12px rgba(217, 92, 61, 0.25)",
+ },
+ secondaryButton: {
+ backgroundColor: "transparent",
+ color: "#A1A1AA",
+ border: "1px solid #3F3F46",
+ padding: "8px 16px",
+ borderRadius: "6px",
+ fontSize: "12px",
+ cursor: "pointer",
+ marginTop: "16px",
+ minWidth: "100px"
+ },
+ errorBox: {
+ backgroundColor: "rgba(185, 28, 28, 0.15)",
+ border: "1px solid rgba(185, 28, 28, 0.3)",
+ color: "#FCA5A5",
+ padding: "12px",
+ borderRadius: "8px",
+ fontSize: "13px",
+ marginBottom: "24px",
+ textAlign: "left",
+ },
+ configCard: {
+ textAlign: "left",
+ backgroundColor: "#111",
+ border: "1px solid #333",
+ padding: "24px",
+ borderRadius: "8px",
+ marginBottom: "24px",
+ },
+ codeDisplay: {
+ backgroundColor: "#27272A",
+ color: theme.accent,
+ fontSize: "20px",
+ fontWeight: "700",
+ padding: "12px",
+ borderRadius: "6px",
+ textAlign: "center",
+ letterSpacing: "2px",
+ margin: "12px 0",
+ border: `1px dashed ${theme.accent}`,
+ cursor: "pointer",
+ },
+ footer: {
+ marginTop: "48px",
+ fontSize: "12px",
+ color: "#52525B",
+ }
+ };
+
+ // --- RENDER: Device Flow UI ---
+ const renderDeviceFlow = () => {
+ if (!deviceData) {
+ return (
+ !authProcessing && (e.currentTarget.style.backgroundColor = theme.accentHover)}
+ onMouseOut={(e) => !authProcessing && (e.currentTarget.style.backgroundColor = theme.accent)}
+ >
+ {authProcessing ? "Connecting..." : "Sign in with GitHub"}
+
+ );
+ }
+
+ return (
+
+
Authorize Device
+
+ GitPilot needs authorization to access your repositories.
+
+
+
+
1. Copy code:
+
{
+ navigator.clipboard.writeText(deviceData.user_code);
+ }}
+ title="Click to copy"
+ >
+ {deviceData.user_code}
+
+
+
+
+
+
+ ↻
+ Waiting for authorization...
+
+
+
+
+
+ Check Status
+
+
+ Cancel
+
+
+
+ );
+ };
+
+ // --- RENDER: Config Error ---
+ if (missingClientId) {
+ return (
+
+
+
⚠️
+
Configuration Error
+
Could not connect to GitHub Authentication services.
+
setMissingClientId(false)} style={{...styles.button, backgroundColor: "#3F3F46"}}>Retry
+
+
+ );
+ }
+
+ // --- RENDER: Main ---
+ return (
+
+
+
GP
+
+
GitPilot Enterprise
+
+ Agentic AI workflow for your repositories.
+ Secure. Context-aware. Automated.
+
+
+ {error &&
{error}
}
+
+ {mode === "loading" && (
+
Initializing...
+ )}
+
+ {mode === "web" && (
+
!authProcessing && (e.currentTarget.style.backgroundColor = theme.accentHover)}
+ onMouseOut={(e) => !authProcessing && (e.currentTarget.style.backgroundColor = theme.accent)}
+ >
+ {authProcessing ? "Connecting..." : (
+ <>
+
+ Sign in with GitHub
+ >
+ )}
+
+ )}
+
+ {mode === "device" && renderDeviceFlow()}
+
+
+ © {new Date().getFullYear()} GitPilot Inc.
+
+
+
+ );
+}
\ No newline at end of file
diff --git a/frontend/components/PlanView.jsx b/frontend/components/PlanView.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..a67efb237c5204e69cd1cec98248e8702ade6e25
--- /dev/null
+++ b/frontend/components/PlanView.jsx
@@ -0,0 +1,231 @@
+import React from "react";
+
+export default function PlanView({ plan }) {
+ if (!plan) return null;
+
+ // Calculate totals for each action type
+ const totals = { CREATE: 0, MODIFY: 0, DELETE: 0 };
+ plan.steps.forEach((step) => {
+ step.files.forEach((file) => {
+ totals[file.action] = (totals[file.action] || 0) + 1;
+ });
+ });
+
+ const theme = {
+ bg: "#18181B",
+ border: "#27272A",
+ textPrimary: "#EDEDED",
+ textSecondary: "#A1A1AA",
+ successBg: "rgba(16, 185, 129, 0.1)",
+ successText: "#10B981",
+ warningBg: "rgba(245, 158, 11, 0.1)",
+ warningText: "#F59E0B",
+ dangerBg: "rgba(239, 68, 68, 0.1)",
+ dangerText: "#EF4444",
+ };
+
+ const styles = {
+ container: {
+ display: "flex",
+ flexDirection: "column",
+ gap: "20px",
+ fontFamily: '-apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, sans-serif',
+ },
+ header: {
+ display: "flex",
+ flexDirection: "column",
+ gap: "8px",
+ paddingBottom: "16px",
+ borderBottom: `1px solid ${theme.border}`,
+ },
+ goal: {
+ fontSize: "14px",
+ fontWeight: "600",
+ color: theme.textPrimary,
+ },
+ summary: {
+ fontSize: "13px",
+ color: theme.textSecondary,
+ lineHeight: "1.5",
+ },
+ totals: {
+ display: "flex",
+ gap: "12px",
+ flexWrap: "wrap",
+ },
+ totalBadge: {
+ fontSize: "11px",
+ fontWeight: "500",
+ padding: "4px 8px",
+ borderRadius: "4px",
+ border: "1px solid transparent",
+ },
+ totalCreate: {
+ backgroundColor: theme.successBg,
+ color: theme.successText,
+ borderColor: "rgba(16, 185, 129, 0.2)",
+ },
+ totalModify: {
+ backgroundColor: theme.warningBg,
+ color: theme.warningText,
+ borderColor: "rgba(245, 158, 11, 0.2)",
+ },
+ totalDelete: {
+ backgroundColor: theme.dangerBg,
+ color: theme.dangerText,
+ borderColor: "rgba(239, 68, 68, 0.2)",
+ },
+ stepsList: {
+ listStyle: "none",
+ padding: 0,
+ margin: 0,
+ display: "flex",
+ flexDirection: "column",
+ gap: "24px",
+ },
+ step: {
+ display: "flex",
+ flexDirection: "column",
+ gap: "8px",
+ position: "relative",
+ },
+ stepHeader: {
+ display: "flex",
+ alignItems: "baseline",
+ gap: "8px",
+ fontSize: "13px",
+ fontWeight: "600",
+ color: theme.textPrimary,
+ },
+ stepNumber: {
+ color: theme.textSecondary,
+ fontSize: "11px",
+ textTransform: "uppercase",
+ letterSpacing: "0.05em",
+ },
+ stepDescription: {
+ fontSize: "13px",
+ color: theme.textSecondary,
+ lineHeight: "1.5",
+ margin: 0,
+ },
+ fileList: {
+ marginTop: "8px",
+ display: "flex",
+ flexDirection: "column",
+ gap: "4px",
+ backgroundColor: "#131316",
+ padding: "8px 12px",
+ borderRadius: "6px",
+ border: `1px solid ${theme.border}`,
+ },
+ fileItem: {
+ display: "flex",
+ alignItems: "center",
+ gap: "10px",
+ fontSize: "12px",
+ fontFamily: "ui-monospace, SFMono-Regular, Menlo, Monaco, Consolas, monospace",
+ },
+ actionBadge: {
+ padding: "2px 6px",
+ borderRadius: "4px",
+ fontSize: "10px",
+ fontWeight: "bold",
+ textTransform: "uppercase",
+ minWidth: "55px",
+ textAlign: "center",
+ letterSpacing: "0.02em",
+ },
+ path: {
+ color: "#D4D4D8",
+ whiteSpace: "nowrap",
+ overflow: "hidden",
+ textOverflow: "ellipsis",
+ },
+ risks: {
+ marginTop: "8px",
+ fontSize: "12px",
+ color: theme.warningText,
+ backgroundColor: "rgba(245, 158, 11, 0.05)",
+ padding: "8px 12px",
+ borderRadius: "6px",
+ border: "1px solid rgba(245, 158, 11, 0.1)",
+ display: "flex",
+ gap: "6px",
+ alignItems: "flex-start",
+ },
+ };
+
  // Map a plan file action to its badge style. Unknown actions fall back to
  // an empty (unstyled) object rather than throwing.
  const getActionStyle = (action) => {
    switch (action) {
      case "CREATE": return styles.totalCreate;
      case "MODIFY": return styles.totalModify;
      case "DELETE": return styles.totalDelete;
      default: return {};
    }
  };
+
+ return (
+
+ {/* Header & Summary */}
+
+
Goal: {plan.goal}
+
{plan.summary}
+
+
+ {/* Totals Summary */}
+
+ {totals.CREATE > 0 && (
+
+ {totals.CREATE} to create
+
+ )}
+ {totals.MODIFY > 0 && (
+
+ {totals.MODIFY} to modify
+
+ )}
+ {totals.DELETE > 0 && (
+
+ {totals.DELETE} to delete
+
+ )}
+
+
+ {/* Steps List */}
+
+ {plan.steps.map((s) => (
+
+
+ Step {s.step_number}
+ {s.title}
+
+ {s.description}
+
+ {/* Files List */}
+ {s.files && s.files.length > 0 && (
+
+ {s.files.map((file, idx) => (
+
+
+ {file.action}
+
+ {file.path}
+
+ ))}
+
+ )}
+
+ {/* Risks */}
+ {s.risks && (
+
+ ⚠️
+ {s.risks}
+
+ )}
+
+ ))}
+
+
+ );
+}
\ No newline at end of file
diff --git a/frontend/components/ProjectContextPanel.jsx b/frontend/components/ProjectContextPanel.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..0a480e9b6003a3a9e1c4ffbc80c86564d6ccbe55
--- /dev/null
+++ b/frontend/components/ProjectContextPanel.jsx
@@ -0,0 +1,572 @@
+import React, { useEffect, useMemo, useRef, useState } from "react";
+import FileTree from "./FileTree.jsx";
+import BranchPicker from "./BranchPicker.jsx";
+
+// --- INJECTED STYLES FOR ANIMATIONS ---
+const animationStyles = `
+ @keyframes highlight-pulse {
+ 0% { background-color: rgba(59, 130, 246, 0.10); }
+ 50% { background-color: rgba(59, 130, 246, 0.22); }
+ 100% { background-color: transparent; }
+ }
+ .pulse-context {
+ animation: highlight-pulse 1.1s ease-out;
+ }
+`;
+
+/**
+ * ProjectContextPanel (Production-ready)
+ *
+ * Controlled component:
+ * - Branch source of truth is App.jsx:
+ * - defaultBranch (prod)
+ * - currentBranch (what user sees)
+ * - sessionBranches (list of all active AI session branches)
+ *
+ * Responsibilities:
+ * - Show project context + branch dropdown + AI badge/banner
+ * - Fetch access status + file count for the currentBranch
+ * - Trigger visual pulse on pulseNonce (Hard Switch)
+ */
+export default function ProjectContextPanel({
+ repo,
+ defaultBranch,
+ currentBranch,
+ sessionBranch, // Active session branch (optional, for specific highlighting)
+ sessionBranches = [], // List of all AI branches
+ onBranchChange,
+ pulseNonce,
+ onSettingsClick,
+}) {
+ const [appUrl, setAppUrl] = useState("");
+ const [fileCount, setFileCount] = useState(0);
+
+ const [isDropdownOpen, setIsDropdownOpen] = useState(false);
+
+ // Data Loading State
+ const [analyzing, setAnalyzing] = useState(false);
+ const [accessInfo, setAccessInfo] = useState(null);
+ const [treeError, setTreeError] = useState(null);
+
+ // Retry / Refresh Logic
+ const [refreshTrigger, setRefreshTrigger] = useState(0);
+ const [retryCount, setRetryCount] = useState(0);
+ const retryTimeoutRef = useRef(null);
+
+ // UX State
+ const [animateHeader, setAnimateHeader] = useState(false);
+ const [toast, setToast] = useState({ visible: false, title: "", msg: "" });
+
+ // Calculate effective default to prevent 'main' fallback errors
+ const effectiveDefaultBranch = defaultBranch || repo?.default_branch || "main";
+ const branch = currentBranch || effectiveDefaultBranch;
+
+ // Determine if we are currently viewing an AI Session branch
+ const isAiSession = (sessionBranches.includes(branch)) || (sessionBranch === branch && branch !== effectiveDefaultBranch);
+
  // Fetch the GitHub App's public URL once on mount; used to build the
  // "Install App" link. Errors are logged only — the install button simply
  // stays a no-op until appUrl is populated.
  useEffect(() => {
    fetch("/api/auth/app-url")
      .then((res) => res.json())
      .then((data) => {
        if (data.app_url) setAppUrl(data.app_url);
      })
      .catch((err) => console.error("Failed to fetch App URL:", err));
  }, []);
+
  // Hard Switch pulse: whenever App increments pulseNonce, flash the header
  // highlight for 1.1s (matches the injected CSS animation's duration).
  useEffect(() => {
    if (!pulseNonce) return;
    setAnimateHeader(true);
    const t = window.setTimeout(() => setAnimateHeader(false), 1100);
    // Clear the timer if the nonce changes again (or on unmount).
    return () => window.clearTimeout(t);
  }, [pulseNonce]);
+
  // Main data fetcher (Access + Tree stats) for currentBranch
  // Stale-while-revalidate: keep previous data visible during fetch
  //
  // Fires two independent requests per (repo, branch, refresh, retry) change:
  //   A) /api/auth/repo-access — can the user/app write to this repo?
  //   B) /api/repos/.../tree   — file count for the selected branch.
  // Both are guarded by a `cancelled` flag (set in cleanup) so stale
  // responses cannot clobber state after deps change or unmount.
  useEffect(() => {
    if (!repo) return;

    // Only show full "analyzing" spinner if we have no data yet
    if (!accessInfo) setAnalyzing(true);
    setTreeError(null);

    // Cancel any pending stale-cache retry left over from a previous run.
    if (retryTimeoutRef.current) {
      clearTimeout(retryTimeoutRef.current);
      retryTimeoutRef.current = null;
    }

    let headers = {};
    try {
      const token = localStorage.getItem("github_token");
      if (token) headers = { Authorization: `Bearer ${token}` };
    } catch (e) {
      // Storage may be blocked (private mode); proceed unauthenticated.
      console.warn("Unable to read github_token:", e);
    }

    let cancelled = false;
    // Defeats intermediary caches; `retry` also distinguishes the auto-retry.
    const cacheBuster = `&_t=${Date.now()}&retry=${retryCount}`;

    // A) Access Check (with Stale Cache Fix)
    fetch(`/api/auth/repo-access?owner=${repo.owner}&repo=${repo.name}${cacheBuster}`, {
      headers,
      cache: "no-cache",
    })
      .then(async (res) => {
        if (cancelled) return;
        const data = await res.json().catch(() => ({}));

        if (!res.ok) {
          // Degrade to "no access" rather than surfacing an error state.
          setAccessInfo({ can_write: false, app_installed: false, auth_type: "none" });
          return;
        }

        setAccessInfo(data);

        // Auto-retry if user has push access but App is not detected yet (Stale Cache)
        if (data.can_write && !data.app_installed && retryCount === 0) {
          retryTimeoutRef.current = setTimeout(() => {
            setRetryCount(1); // bumps an effect dep → exactly one refetch
          }, 1000);
        }
      })
      .catch(() => {
        if (!cancelled) setAccessInfo({ can_write: false, app_installed: false, auth_type: "none" });
      });

    // B) Tree count for the selected branch
    // Don't clear fileCount — keep stale value visible until new one arrives
    const hadFileCount = fileCount > 0;
    if (!hadFileCount) setAnalyzing(true);

    fetch(`/api/repos/${repo.owner}/${repo.name}/tree?ref=${encodeURIComponent(branch)}&_t=${Date.now()}`, {
      headers,
      cache: "no-cache",
    })
      .then(async (res) => {
        if (cancelled) return;
        const data = await res.json().catch(() => ({}));
        if (!res.ok) {
          setTreeError(data.detail || "Failed to load tree");
          setFileCount(0);
          return;
        }
        setFileCount(Array.isArray(data.files) ? data.files.length : 0);
      })
      .catch((err) => {
        if (cancelled) return;
        setTreeError(err.message);
        setFileCount(0);
      })
      // NOTE(review): only request B clears `analyzing`; if A is still in
      // flight the status row can briefly show stale access info — confirm
      // whether that's acceptable UX.
      .finally(() => { if (!cancelled) setAnalyzing(false); });

    return () => {
      cancelled = true;
      if (retryTimeoutRef.current) clearTimeout(retryTimeoutRef.current);
    };
    // eslint-disable-next-line react-hooks/exhaustive-deps
  }, [repo?.owner, repo?.name, branch, refreshTrigger, retryCount]);
+
  // Show a transient toast notification for 3 seconds.
  // NOTE(review): the timeout is never cleared on unmount, so a late
  // setToast can fire after the panel is gone — benign in React 18, but
  // worth moving into a ref + cleanup if it ever becomes noisy.
  const showToast = (title, msg) => {
    setToast({ visible: true, title, msg });
    setTimeout(() => setToast((prev) => ({ ...prev, visible: false })), 3000);
  };
+
  /**
   * Branch dropdown selection handler.
   *
   * Shows local toast feedback only — the actual branch switch is delegated
   * to the parent via onBranchChange (App.jsx owns the branch state).
   *
   * @param {string} targetBranch Branch name the user picked.
   */
  const handleManualSwitch = (targetBranch) => {
    // Selecting the already-active branch just closes the menu.
    if (!targetBranch || targetBranch === branch) {
      setIsDropdownOpen(false);
      return;
    }

    // Local UI feedback (App.jsx will handle the actual state change)
    const goingAi = sessionBranches.includes(targetBranch);
    showToast(
      goingAi ? "Context Switched" : "Switched to Production",
      goingAi ? `Viewing AI Session: ${targetBranch}` : `Viewing ${targetBranch}.`
    );

    setIsDropdownOpen(false);
    if (onBranchChange) onBranchChange(targetBranch);
  };
+
  // Force a refetch of access info + tree stats: reset the stale-cache retry
  // counter and bump refreshTrigger (a dependency of the data-fetch effect).
  const handleRefresh = () => {
    setAnalyzing(true);
    setRetryCount(0);
    setRefreshTrigger((prev) => prev + 1);
  };
+
+ const handleInstallClick = () => {
+ if (!appUrl) return;
+ const targetUrl = appUrl.endsWith("/") ? `${appUrl}installations/new` : `${appUrl}/installations/new`;
+ window.open(targetUrl, "_blank", "noopener,noreferrer");
+ };
+
+ // --- STYLES ---
+ const theme = useMemo(
+ () => ({
+ bg: "#131316",
+ border: "#27272A",
+ textPrimary: "#EDEDED",
+ textSecondary: "#A1A1AA",
+ accent: "#3b82f6",
+ warningBorder: "rgba(245, 158, 11, 0.2)",
+ warningText: "#F59E0B",
+ successColor: "#10B981",
+ cardBg: "#18181B",
+ aiBg: "rgba(59, 130, 246, 0.10)",
+ aiBorder: "rgba(59, 130, 246, 0.30)",
+ aiText: "#60a5fa",
+ }),
+ []
+ );
+
+ const styles = useMemo(
+ () => ({
+ container: {
+ height: "100%",
+ borderRight: `1px solid ${theme.border}`,
+ backgroundColor: theme.bg,
+ display: "flex",
+ flexDirection: "column",
+ fontFamily: '"Söhne", "Inter", sans-serif',
+ position: "relative",
+ overflow: "hidden",
+ },
+ header: {
+ padding: "16px 20px",
+ borderBottom: `1px solid ${theme.border}`,
+ display: "flex",
+ alignItems: "center",
+ justifyContent: "space-between",
+ transition: "background-color 0.3s ease",
+ },
+ titleGroup: { display: "flex", alignItems: "center", gap: "8px" },
+ title: { fontSize: "13px", fontWeight: "600", color: theme.textPrimary },
+ repoBadge: {
+ backgroundColor: "#27272A",
+ color: theme.textSecondary,
+ fontSize: "11px",
+ padding: "2px 8px",
+ borderRadius: "12px",
+ border: `1px solid ${theme.border}`,
+ fontFamily: "monospace",
+ },
+ aiBadge: {
+ display: "flex",
+ alignItems: "center",
+ gap: "6px",
+ backgroundColor: theme.aiBg,
+ color: theme.aiText,
+ fontSize: "10px",
+ fontWeight: "bold",
+ padding: "2px 8px",
+ borderRadius: "12px",
+ border: `1px solid ${theme.aiBorder}`,
+ textTransform: "uppercase",
+ letterSpacing: "0.5px",
+ },
+ content: {
+ padding: "16px 20px 12px 20px",
+ display: "flex",
+ flexDirection: "column",
+ gap: "12px",
+ },
+ statRow: { display: "flex", justifyContent: "space-between", fontSize: "13px", marginBottom: "4px" },
+ label: { color: theme.textSecondary },
+ value: { color: theme.textPrimary, fontWeight: "500" },
+ dropdownContainer: { position: "relative" },
+ branchButton: {
+ display: "flex",
+ alignItems: "center",
+ gap: "6px",
+ padding: "4px 8px",
+ borderRadius: "4px",
+ border: `1px solid ${isAiSession ? theme.aiBorder : theme.border}`,
+ backgroundColor: isAiSession ? "rgba(59, 130, 246, 0.05)" : "transparent",
+ color: isAiSession ? theme.aiText : theme.textPrimary,
+ fontSize: "13px",
+ cursor: "pointer",
+ fontFamily: "monospace",
+ },
+ dropdownMenu: {
+ position: "absolute",
+ top: "100%",
+ left: 0,
+ marginTop: "4px",
+ width: "240px",
+ backgroundColor: "#1F1F23",
+ border: `1px solid ${theme.border}`,
+ borderRadius: "6px",
+ boxShadow: "0 4px 12px rgba(0,0,0,0.5)",
+ zIndex: 50,
+ display: isDropdownOpen ? "block" : "none",
+ overflow: "hidden",
+ },
+ dropdownItem: {
+ padding: "8px 12px",
+ fontSize: "13px",
+ color: theme.textSecondary,
+ cursor: "pointer",
+ display: "flex",
+ alignItems: "center",
+ gap: "8px",
+ borderBottom: `1px solid ${theme.border}`,
+ },
+ contextBanner: {
+ backgroundColor: theme.aiBg,
+ borderTop: `1px solid ${theme.aiBorder}`,
+ padding: "8px 20px",
+ fontSize: "11px",
+ color: theme.aiText,
+ display: "flex",
+ justifyContent: "space-between",
+ alignItems: "center",
+ },
+ toast: {
+ position: "absolute",
+ top: "16px",
+ right: "16px",
+ backgroundColor: "#18181B",
+ border: `1px solid ${theme.border}`,
+ borderLeft: `3px solid ${theme.accent}`,
+ borderRadius: "6px",
+ padding: "12px",
+ boxShadow: "0 4px 12px rgba(0,0,0,0.5)",
+ zIndex: 100,
+ minWidth: "240px",
+ transition: "all 0.3s cubic-bezier(0.16, 1, 0.3, 1)",
+ transform: toast.visible ? "translateX(0)" : "translateX(120%)",
+ opacity: toast.visible ? 1 : 0,
+ },
+ toastTitle: { fontSize: "13px", fontWeight: "bold", color: theme.textPrimary, marginBottom: "2px" },
+ toastMsg: { fontSize: "11px", color: theme.textSecondary },
+ refreshButton: {
+ marginTop: "8px",
+ height: "32px",
+ padding: "0 12px",
+ backgroundColor: "transparent",
+ color: theme.textSecondary,
+ border: `1px solid ${theme.border}`,
+ borderRadius: "6px",
+ fontSize: "12px",
+ cursor: analyzing ? "not-allowed" : "pointer",
+ display: "flex",
+ alignItems: "center",
+ justifyContent: "center",
+ gap: "6px",
+ },
+ settingsBtn: {
+ display: "flex",
+ alignItems: "center",
+ justifyContent: "center",
+ width: "28px",
+ height: "28px",
+ borderRadius: "6px",
+ border: `1px solid ${theme.border}`,
+ backgroundColor: "transparent",
+ color: theme.textSecondary,
+ cursor: "pointer",
+ padding: 0,
+ transition: "color 0.15s, border-color 0.15s",
+ },
+ treeWrapper: { flex: 1, overflow: "auto", borderTop: `1px solid ${theme.border}` },
+ installCard: {
+ marginTop: "8px",
+ padding: "12px",
+ borderRadius: "8px",
+ backgroundColor: theme.cardBg,
+ border: `1px solid ${theme.warningBorder}`,
+ },
+ installHeader: {
+ display: "flex",
+ alignItems: "center",
+ gap: "10px",
+ fontSize: "14px",
+ fontWeight: "600",
+ color: theme.textPrimary,
+ },
+ installText: {
+ fontSize: "13px",
+ color: theme.textSecondary,
+ lineHeight: "1.5",
+ },
+ }),
+ [analyzing, isAiSession, isDropdownOpen, theme, toast.visible]
+ );
+
+ // Determine status text
+ let statusText = "Checking...";
+ let statusColor = theme.textSecondary;
+ let showInstallCard = false;
+
+ if (!analyzing && accessInfo) {
+ if (accessInfo.app_installed) {
+ statusText = "Write Access ✓";
+ statusColor = theme.successColor;
+ } else if (accessInfo.can_write && retryCount === 0) {
+ statusText = "Verifying...";
+ } else if (accessInfo.can_write) {
+ statusText = "Push Access (No App)";
+ statusColor = theme.warningText;
+ showInstallCard = true;
+ } else {
+ statusText = "Read Only";
+ statusColor = theme.warningText;
+ showInstallCard = true;
+ }
+ }
+
+ if (!repo) {
+ return (
+
+ );
+ }
+
+ return (
+
+
+
+ {/* TOAST */}
+
+
{toast.title}
+
{toast.msg}
+
+
+ {/* HEADER */}
+
+
+
Project context
+ {isAiSession && (
+
+
+
+
+ AI Session
+
+ )}
+
+
+ {!isAiSession &&
{repo.name} }
+ {onSettingsClick && (
+
+
+
+
+
+
+ )}
+
+
+
+ {/* CONTENT */}
+
+ {/* Branch selector (Claude-Code-on-Web parity — uses BranchPicker with search) */}
+
+ Branch:
+
+
+
+ {/* Stats */}
+
+ Files:
+ {analyzing ? "…" : fileCount}
+
+
+
+ Status:
+ {statusText}
+
+
+ {/* Tree error (optional display) */}
+ {treeError && (
+
+ {treeError}
+
+ )}
+
+ {/* Refresh */}
+
+
+
+
+ {analyzing ? "Refreshing..." : "Refresh"}
+
+
+ {/* Install card */}
+ {showInstallCard && (
+
+
+ ⚡
+ Enable Write Access
+
+
+ Install the GitPilot App to enable AI agent operations.
+
+
+ Alternatively, use Folder or Local Git mode for local-first workflows without GitHub.
+
+
+ Install App
+
+
+ )}
+
+
+ {/* Context banner */}
+ {isAiSession && (
+
+
+
+
+
+
+
+ You are viewing an AI Session branch.
+
+ handleManualSwitch(effectiveDefaultBranch)}>
+ Return to {effectiveDefaultBranch}
+
+
+ )}
+
+ {/* File tree (branch-aware) */}
+
+
+
+
+ );
+}
\ No newline at end of file
diff --git a/frontend/components/ProjectSettings/ContextTab.jsx b/frontend/components/ProjectSettings/ContextTab.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..5272c846b181fb9c17031215e4056b565a865f09
--- /dev/null
+++ b/frontend/components/ProjectSettings/ContextTab.jsx
@@ -0,0 +1,352 @@
+import React, { useEffect, useMemo, useRef, useState } from "react";
+
+export default function ContextTab({ owner, repo }) {
+ const [assets, setAssets] = useState([]);
+ const [busy, setBusy] = useState(false);
+ const [error, setError] = useState("");
+ const [uploadHint, setUploadHint] = useState("");
+ const inputRef = useRef(null);
+
+ const canUse = useMemo(() => Boolean(owner && repo), [owner, repo]);
+
+ // Fetch the context-asset list for the current owner/repo and store it in
+ // component state. No-op when owner/repo are missing (canUse is false).
+ // Failures are surfaced via the `error` state rather than thrown.
+ async function loadAssets() {
+ if (!canUse) return;
+ setError("");
+ try {
+ const res = await fetch(`/api/repos/${owner}/${repo}/context/assets`);
+ if (!res.ok) throw new Error(`Failed to list assets (${res.status})`);
+ const data = await res.json();
+ // Backend payload shape: { assets: [...] } — default to [] when absent.
+ setAssets(data.assets || []);
+ } catch (e) {
+ setError(e?.message || "Failed to load assets");
+ }
+ }
+
+ useEffect(() => {
+ loadAssets();
+ // eslint-disable-next-line react-hooks/exhaustive-deps
+ }, [owner, repo]);
+
+ // Upload one or more files (from an <input> or drag-and-drop FileList) to the
+ // context-assets endpoint, sequentially, then refresh the asset list.
+ // Progress text goes to `uploadHint`; failures abort the remaining uploads
+ // and are surfaced via `error`. The file input is always cleared afterwards
+ // so re-selecting the same file re-triggers onChange.
+ async function uploadFiles(fileList) {
+ if (!canUse) return;
+ const files = Array.from(fileList || []);
+ if (!files.length) return;
+
+ setBusy(true);
+ setError("");
+ setUploadHint(`Uploading ${files.length} file(s)...`);
+
+ try {
+ // Sequential (not parallel) uploads: simpler error attribution per file.
+ for (const f of files) {
+ const form = new FormData();
+ form.append("file", f);
+
+ const res = await fetch(
+ `/api/repos/${owner}/${repo}/context/assets/upload`,
+ { method: "POST", body: form }
+ );
+ if (!res.ok) {
+ // Include response body text (best-effort) for a more useful message.
+ const txt = await res.text().catch(() => "");
+ throw new Error(`Upload failed (${res.status}) ${txt}`);
+ }
+ }
+ setUploadHint("Upload complete. Refreshing list...");
+ await loadAssets();
+ setUploadHint("");
+ } catch (e) {
+ setError(e?.message || "Upload failed");
+ setUploadHint("");
+ } finally {
+ setBusy(false);
+ if (inputRef.current) inputRef.current.value = "";
+ }
+ }
+
+ // Delete a single context asset by id after a confirmation prompt,
+ // then refresh the asset list. Errors are surfaced via `error`.
+ async function deleteAsset(assetId) {
+ if (!canUse) return;
+ // Destructive action — require explicit confirmation from the user.
+ const ok = window.confirm("Delete this asset? This cannot be undone.");
+ if (!ok) return;
+
+ setBusy(true);
+ setError("");
+ try {
+ const res = await fetch(
+ `/api/repos/${owner}/${repo}/context/assets/${assetId}`,
+ { method: "DELETE" }
+ );
+ if (!res.ok) throw new Error(`Delete failed (${res.status})`);
+ await loadAssets();
+ } catch (e) {
+ setError(e?.message || "Delete failed");
+ } finally {
+ setBusy(false);
+ }
+ }
+
+ // Open the asset's download endpoint in a new browser tab; the server
+ // is expected to respond with the file content for the given asset id.
+ function downloadAsset(assetId) {
+ if (!canUse) return;
+ window.open(
+ `/api/repos/${owner}/${repo}/context/assets/${assetId}/download`,
+ "_blank"
+ );
+ }
+
+ const empty = !assets || assets.length === 0;
+
+ return (
+
+
+
+
Project Context
+
+ Upload documents, transcripts, screenshots, etc. (non-destructive,
+ additive).
+
+
+
+
+ uploadFiles(e.target.files)}
+ style={styles.fileInput}
+ />
+ inputRef.current?.click()}
+ >
+ Upload
+
+
+ Refresh
+
+
+
+
+
{
+ e.preventDefault();
+ e.stopPropagation();
+ }}
+ onDrop={(e) => {
+ e.preventDefault();
+ e.stopPropagation();
+ if (busy) return;
+ uploadFiles(e.dataTransfer.files);
+ }}
+ >
+
+ Drag & drop files here, or click Upload .
+
+
+ Tip: For audio/video, upload a transcript file too.
+
+
+
+ {uploadHint ?
{uploadHint}
: null}
+ {error ?
{error}
: null}
+
+
+
+
File
+
Type
+
Size
+
Indexed
+
Actions
+
+
+ {empty ? (
+
+ No context assets yet. Upload docs, transcripts, and screenshots to
+ improve planning quality.
+
+ ) : (
+ assets.map((a) => (
+
+
+
{a.filename}
+
+ Added: {a.created_at || "-"} | Extracted:{" "}
+ {Number(a.extracted_chars || 0).toLocaleString()} chars
+
+
+
+
+ {a.mime || "unknown"}
+
+
+
+ {formatBytes(a.size_bytes || 0)}
+
+
+
+ {a.indexed_chunks || 0} chunks
+
+
+
+ downloadAsset(a.asset_id)}
+ >
+ Download
+
+ deleteAsset(a.asset_id)}
+ >
+ Delete
+
+
+
+ ))
+ )}
+
+
+ );
+}
+
+ // Human-readable byte formatter: 0 -> "0 B", 1536 -> "1.5 KB", etc.
+ // Uses 1024-based units up to TB. Values >= 10 (or plain bytes) are shown
+ // with no decimals; smaller scaled values get one decimal place.
+ function formatBytes(bytes) {
+ const b = Number(bytes || 0);
+ if (!b) return "0 B";
+ const units = ["B", "KB", "MB", "GB", "TB"];
+ let i = 0;
+ let v = b;
+ while (v >= 1024 && i < units.length - 1) {
+ v /= 1024;
+ i += 1;
+ }
+ return `${v.toFixed(v >= 10 || i === 0 ? 0 : 1)} ${units[i]}`;
+ }
+
+const styles = {
+ wrap: { display: "flex", flexDirection: "column", gap: 12 },
+ topRow: {
+ display: "flex",
+ justifyContent: "space-between",
+ gap: 12,
+ alignItems: "flex-start",
+ flexWrap: "wrap",
+ },
+ left: { minWidth: 280 },
+ right: { display: "flex", gap: 8, alignItems: "center", flexWrap: "wrap" },
+ h1: { fontSize: 14, fontWeight: 800, color: "#fff" },
+ h2: { fontSize: 12, color: "rgba(255,255,255,0.65)", marginTop: 4 },
+ fileInput: { display: "none" },
+ btn: {
+ background: "rgba(255,255,255,0.10)",
+ border: "1px solid rgba(255,255,255,0.18)",
+ color: "#fff",
+ borderRadius: 10,
+ padding: "8px 10px",
+ cursor: "pointer",
+ fontSize: 13,
+ },
+ dropzone: {
+ border: "1px dashed rgba(255,255,255,0.22)",
+ borderRadius: 12,
+ padding: 16,
+ background: "rgba(255,255,255,0.03)",
+ },
+ dropText: { color: "rgba(255,255,255,0.85)", fontSize: 13 },
+ dropSub: { color: "rgba(255,255,255,0.55)", fontSize: 12, marginTop: 6 },
+ hint: {
+ color: "rgba(255,255,255,0.75)",
+ fontSize: 12,
+ padding: "8px 10px",
+ border: "1px solid rgba(255,255,255,0.12)",
+ borderRadius: 10,
+ background: "rgba(255,255,255,0.03)",
+ },
+ error: {
+ color: "#ffb3b3",
+ fontSize: 12,
+ padding: "8px 10px",
+ border: "1px solid rgba(255,120,120,0.25)",
+ borderRadius: 10,
+ background: "rgba(255,80,80,0.08)",
+ },
+ tableWrap: {
+ border: "1px solid rgba(255,255,255,0.12)",
+ borderRadius: 12,
+ overflow: "hidden",
+ },
+ tableHeader: {
+ display: "grid",
+ gridTemplateColumns: "1.6fr 1fr 0.6fr 0.6fr 0.8fr",
+ gap: 0,
+ padding: "10px 12px",
+ background: "rgba(255,255,255,0.03)",
+ borderBottom: "1px solid rgba(255,255,255,0.10)",
+ fontSize: 12,
+ color: "rgba(255,255,255,0.65)",
+ },
+ row: {
+ display: "grid",
+ gridTemplateColumns: "1.6fr 1fr 0.6fr 0.6fr 0.8fr",
+ padding: "10px 12px",
+ borderBottom: "1px solid rgba(255,255,255,0.08)",
+ alignItems: "center",
+ },
+ col: { minWidth: 0 },
+ colName: {},
+ colMeta: { color: "rgba(255,255,255,0.75)", fontSize: 12 },
+ colActions: { display: "flex", gap: 8, justifyContent: "flex-end" },
+ fileName: {
+ color: "#fff",
+ fontSize: 13,
+ fontWeight: 700,
+ overflow: "hidden",
+ textOverflow: "ellipsis",
+ whiteSpace: "nowrap",
+ },
+ small: {
+ color: "rgba(255,255,255,0.55)",
+ fontSize: 11,
+ marginTop: 4,
+ overflow: "hidden",
+ textOverflow: "ellipsis",
+ whiteSpace: "nowrap",
+ },
+ badge: {
+ display: "inline-flex",
+ alignItems: "center",
+ padding: "2px 8px",
+ borderRadius: 999,
+ border: "1px solid rgba(255,255,255,0.16)",
+ background: "rgba(255,255,255,0.04)",
+ fontSize: 11,
+ color: "rgba(255,255,255,0.80)",
+ maxWidth: "100%",
+ overflow: "hidden",
+ textOverflow: "ellipsis",
+ whiteSpace: "nowrap",
+ },
+ smallBtn: {
+ background: "rgba(255,255,255,0.08)",
+ border: "1px solid rgba(255,255,255,0.16)",
+ color: "#fff",
+ borderRadius: 10,
+ padding: "6px 8px",
+ cursor: "pointer",
+ fontSize: 12,
+ },
+ dangerBtn: {
+ border: "1px solid rgba(255,90,90,0.35)",
+ background: "rgba(255,90,90,0.10)",
+ },
+ empty: {
+ padding: 14,
+ color: "rgba(255,255,255,0.65)",
+ fontSize: 13,
+ },
+};
diff --git a/frontend/components/ProjectSettings/ConventionsTab.jsx b/frontend/components/ProjectSettings/ConventionsTab.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..d508ccf1a65817f1b5a44dbebcdfc1861c42b8b6
--- /dev/null
+++ b/frontend/components/ProjectSettings/ConventionsTab.jsx
@@ -0,0 +1,151 @@
+import React, { useEffect, useMemo, useState } from "react";
+
+export default function ConventionsTab({ owner, repo }) {
+ const [content, setContent] = useState("");
+ const [busy, setBusy] = useState(false);
+ const [error, setError] = useState("");
+
+ const canUse = useMemo(() => Boolean(owner && repo), [owner, repo]);
+
+ // Load the project's conventions/memory text from the context endpoint
+ // into `content`. Tries several response keys since the backend field
+ // name varies by implementation. Errors go to `error`, never thrown.
+ async function load() {
+ if (!canUse) return;
+ setError("");
+ setBusy(true);
+ try {
+ const res = await fetch(`/api/repos/${owner}/${repo}/context`);
+ if (!res.ok) throw new Error(`Failed to load conventions (${res.status})`);
+ const data = await res.json();
+ // backend may return { context: "..."} or { conventions: "..."} depending on implementation
+ setContent(data.context || data.conventions || data.memory || data.text || "");
+ } catch (e) {
+ setError(e?.message || "Failed to load conventions");
+ } finally {
+ setBusy(false);
+ }
+ }
+
+ // POST to the context/init endpoint to create default project memory
+ // (if the backend supports it), then reload the conventions text.
+ async function initialize() {
+ if (!canUse) return;
+ setError("");
+ setBusy(true);
+ try {
+ const res = await fetch(`/api/repos/${owner}/${repo}/context/init`, {
+ method: "POST",
+ });
+ if (!res.ok) {
+ // Include the response body (best-effort) to aid debugging.
+ const txt = await res.text().catch(() => "");
+ throw new Error(`Init failed (${res.status}) ${txt}`);
+ }
+ await load();
+ } catch (e) {
+ setError(e?.message || "Init failed");
+ } finally {
+ setBusy(false);
+ }
+ }
+
+ useEffect(() => {
+ load();
+ // eslint-disable-next-line react-hooks/exhaustive-deps
+ }, [owner, repo]);
+
+ return (
+
+
+
+
Project Conventions
+
+ This is the project memory/conventions file used by GitPilot.
+
+
+
+
+ Refresh
+
+
+ Initialize
+
+
+
+
+ {error ?
{error}
: null}
+
+
+ {content ? (
+
{content}
+ ) : (
+
+ No conventions found yet. Click Initialize to create default
+ project memory if supported.
+
+ )}
+
+
+
+ Editing conventions is intentionally not included here to keep this
+ feature additive/non-destructive. You can extend this later with an
+ explicit "Edit" mode.
+
+
+ );
+}
+
+const styles = {
+ wrap: { display: "flex", flexDirection: "column", gap: 12 },
+ topRow: {
+ display: "flex",
+ justifyContent: "space-between",
+ gap: 12,
+ alignItems: "flex-start",
+ flexWrap: "wrap",
+ },
+ actions: { display: "flex", gap: 8, flexWrap: "wrap" },
+ h1: { fontSize: 14, fontWeight: 800, color: "#fff" },
+ h2: { fontSize: 12, color: "rgba(255,255,255,0.65)", marginTop: 4 },
+ btn: {
+ background: "rgba(255,255,255,0.10)",
+ border: "1px solid rgba(255,255,255,0.18)",
+ color: "#fff",
+ borderRadius: 10,
+ padding: "8px 10px",
+ cursor: "pointer",
+ fontSize: 13,
+ },
+ error: {
+ color: "#ffb3b3",
+ fontSize: 12,
+ padding: "8px 10px",
+ border: "1px solid rgba(255,120,120,0.25)",
+ borderRadius: 10,
+ background: "rgba(255,80,80,0.08)",
+ },
+ box: {
+ border: "1px solid rgba(255,255,255,0.12)",
+ borderRadius: 12,
+ overflow: "hidden",
+ background: "rgba(0,0,0,0.22)",
+ },
+ pre: {
+ margin: 0,
+ padding: 12,
+ color: "rgba(255,255,255,0.85)",
+ fontSize: 12,
+ lineHeight: 1.35,
+ whiteSpace: "pre-wrap",
+ overflow: "auto",
+ maxHeight: 520,
+ },
+ empty: {
+ padding: 12,
+ color: "rgba(255,255,255,0.65)",
+ fontSize: 13,
+ },
+ note: {
+ color: "rgba(255,255,255,0.55)",
+ fontSize: 12,
+ },
+};
diff --git a/frontend/components/ProjectSettings/UseCaseTab.jsx b/frontend/components/ProjectSettings/UseCaseTab.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..b01e9a46d3ac15c42d16decf69b93e0cf7192db2
--- /dev/null
+++ b/frontend/components/ProjectSettings/UseCaseTab.jsx
@@ -0,0 +1,637 @@
+import React, { useEffect, useMemo, useRef, useState } from "react";
+
+export default function UseCaseTab({ owner, repo }) {
+ const [useCases, setUseCases] = useState([]);
+ const [selectedId, setSelectedId] = useState("");
+ const [useCase, setUseCase] = useState(null);
+ const [busy, setBusy] = useState(false);
+ const [error, setError] = useState("");
+ const [draftTitle, setDraftTitle] = useState("New Use Case");
+ const [message, setMessage] = useState("");
+ const messagesEndRef = useRef(null);
+
+ const canUse = useMemo(() => Boolean(owner && repo), [owner, repo]);
+ const spec = useCase?.spec || {};
+
+ // Smooth-scroll the chat panel to its sentinel element at the bottom.
+ // Deferred via requestAnimationFrame so the scroll happens after React
+ // has painted newly appended messages.
+ function scrollToBottom() {
+ requestAnimationFrame(() => {
+ messagesEndRef.current?.scrollIntoView({ behavior: "smooth" });
+ });
+ }
+
+ // Fetch the list of use cases for the repo and auto-select the active one
+ // (or the first) when nothing is selected yet.
+ // NOTE(review): `selectedId` here is the value captured when this closure
+ // was created; after deletions/refreshes it may be stale — confirm intended.
+ async function loadUseCases() {
+ if (!canUse) return;
+ setError("");
+ try {
+ const res = await fetch(`/api/repos/${owner}/${repo}/use-cases`);
+ if (!res.ok) throw new Error(`Failed to list use cases (${res.status})`);
+ const data = await res.json();
+ const list = data.use_cases || [];
+ setUseCases(list);
+
+ // auto select active or first
+ const active = list.find((x) => x.is_active);
+ const nextId = active?.use_case_id || list[0]?.use_case_id || "";
+ if (!selectedId && nextId) setSelectedId(nextId);
+ } catch (e) {
+ setError(e?.message || "Failed to load use cases");
+ }
+ }
+
+ // Fetch a single use case (messages + spec) by id into `useCase` state,
+ // then scroll the chat to the latest message. Errors go to `error`.
+ async function loadUseCase(id) {
+ if (!canUse || !id) return;
+ setError("");
+ try {
+ const res = await fetch(`/api/repos/${owner}/${repo}/use-cases/${id}`);
+ if (!res.ok) throw new Error(`Failed to load use case (${res.status})`);
+ const data = await res.json();
+ setUseCase(data.use_case || null);
+ scrollToBottom();
+ } catch (e) {
+ setError(e?.message || "Failed to load use case");
+ }
+ }
+
+ useEffect(() => {
+ loadUseCases();
+ // eslint-disable-next-line react-hooks/exhaustive-deps
+ }, [owner, repo]);
+
+ useEffect(() => {
+ if (!selectedId) return;
+ loadUseCase(selectedId);
+ // eslint-disable-next-line react-hooks/exhaustive-deps
+ }, [selectedId]);
+
+ // Create a new use case with the drafted title (falling back to a default),
+ // refresh the list, select the newly created one, and reset the draft title.
+ async function createUseCase() {
+ if (!canUse) return;
+ setBusy(true);
+ setError("");
+ try {
+ const res = await fetch(`/api/repos/${owner}/${repo}/use-cases`, {
+ method: "POST",
+ headers: { "Content-Type": "application/json" },
+ body: JSON.stringify({ title: draftTitle || "New Use Case" }),
+ });
+ if (!res.ok) {
+ const txt = await res.text().catch(() => "");
+ throw new Error(`Create failed (${res.status}) ${txt}`);
+ }
+ const data = await res.json();
+ const id = data?.use_case?.use_case_id;
+ await loadUseCases();
+ // Selecting the id triggers the effect that loads the full use case.
+ if (id) setSelectedId(id);
+ setDraftTitle("New Use Case");
+ } catch (e) {
+ setError(e?.message || "Create failed");
+ } finally {
+ setBusy(false);
+ }
+ }
+
+ // Send the composed chat message to the selected use case's chat endpoint.
+ // The user's message is appended optimistically before the request; on
+ // success the whole use case is replaced with the server's response state.
+ // NOTE(review): on failure the optimistic message remains in local state
+ // without a server round-trip — confirm this is the intended UX.
+ async function sendMessage() {
+ if (!canUse || !selectedId) return;
+ const msg = (message || "").trim();
+ if (!msg) return;
+
+ setBusy(true);
+ setError("");
+
+ // optimistic UI: append user message immediately
+ setUseCase((prev) => {
+ if (!prev) return prev;
+ const next = { ...prev };
+ next.messages = Array.isArray(next.messages) ? [...next.messages] : [];
+ next.messages.push({ role: "user", content: msg, ts: new Date().toISOString() });
+ return next;
+ });
+ setMessage("");
+ scrollToBottom();
+
+ try {
+ const res = await fetch(
+ `/api/repos/${owner}/${repo}/use-cases/${selectedId}/chat`,
+ {
+ method: "POST",
+ headers: { "Content-Type": "application/json" },
+ body: JSON.stringify({ message: msg }),
+ }
+ );
+ if (!res.ok) {
+ const txt = await res.text().catch(() => "");
+ throw new Error(`Chat failed (${res.status}) ${txt}`);
+ }
+ const data = await res.json();
+ // Replace local state with the authoritative server copy (includes the
+ // assistant reply), then refresh the sidebar list metadata.
+ setUseCase(data.use_case || null);
+ await loadUseCases();
+ scrollToBottom();
+ } catch (e) {
+ setError(e?.message || "Chat failed");
+ } finally {
+ setBusy(false);
+ }
+ }
+
+ // Finalize the selected use case: the backend marks it active and exports
+ // a Markdown spec. Local state and the sidebar list are refreshed, and the
+ // user is notified via a blocking alert().
+ async function finalizeUseCase() {
+ if (!canUse || !selectedId) return;
+ setBusy(true);
+ setError("");
+ try {
+ const res = await fetch(
+ `/api/repos/${owner}/${repo}/use-cases/${selectedId}/finalize`,
+ { method: "POST" }
+ );
+ if (!res.ok) {
+ const txt = await res.text().catch(() => "");
+ throw new Error(`Finalize failed (${res.status}) ${txt}`);
+ }
+ const data = await res.json();
+ setUseCase(data.use_case || null);
+ await loadUseCases();
+ alert(
+ "Use Case finalized and marked active.\n\nA Markdown export was saved in the repo workspace .gitpilot/context/use_cases/."
+ );
+ } catch (e) {
+ setError(e?.message || "Finalize failed");
+ } finally {
+ setBusy(false);
+ }
+ }
+
+ const activeId = useCases.find((x) => x.is_active)?.use_case_id;
+
+ return (
+
+
+
+
Use Case
+
+ Guided chat to clarify requirements and produce a versioned spec.
+
+
+
+
+ setDraftTitle(e.target.value)}
+ placeholder="New use case title..."
+ style={styles.titleInput}
+ disabled={!canUse || busy}
+ />
+
+ New
+
+
+ Finalize
+
+
+ Refresh
+
+
+
+
+ {error ?
{error}
: null}
+
+
+
+
Use Cases
+
+ {useCases.length === 0 ? (
+
+ No use cases yet. Create one with New .
+
+ ) : (
+ useCases.map((uc) => (
+
setSelectedId(uc.use_case_id)}
+ >
+
+
+ {uc.title || "(untitled)"}
+
+ {uc.use_case_id === activeId ? (
+
ACTIVE
+ ) : null}
+
+
+ Updated: {uc.updated_at || uc.created_at || "-"}
+
+
+ ))
+ )}
+
+
+
+
+
Guided Chat
+
+ {Array.isArray(useCase?.messages) && useCase.messages.length ? (
+ useCase.messages.map((m, idx) => (
+
+
+ {m.role === "user" ? "You" : "Assistant"}
+
+
{m.content}
+
+ ))
+ ) : (
+
+ Select a use case and start chatting. You can paste structured
+ info like:
+
+{`Summary: ...
+Problem: ...
+Users: ...
+Requirements:
+- ...
+Acceptance Criteria:
+- ...`}
+
+
+ )}
+
+
+
+
+
+
+
+
+
Spec Preview
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Finalize will save a Markdown spec and mark it ACTIVE for context.
+
+
+ Finalize Spec
+
+
+
+
+
+ );
+}
+
+function Section({ title, value }) {
+ return (
+
+
{title}
+
+ {String(value || "").trim() ? (
+
{value}
+ ) : (
+
(empty)
+ )}
+
+
+ );
+}
+
+function ListSection({ title, items }) {
+ const list = Array.isArray(items) ? items : [];
+ return (
+
+
{title}
+
+ {list.length ? (
+
+ {list.map((x, i) => (
+
+ {x}
+
+ ))}
+
+ ) : (
+
(empty)
+ )}
+
+
+ );
+}
+
+const styles = {
+ wrap: { display: "flex", flexDirection: "column", gap: 12 },
+ topRow: {
+ display: "flex",
+ justifyContent: "space-between",
+ gap: 12,
+ alignItems: "flex-start",
+ flexWrap: "wrap",
+ },
+ left: { minWidth: 280 },
+ right: { display: "flex", gap: 8, alignItems: "center", flexWrap: "wrap" },
+ h1: { fontSize: 14, fontWeight: 800, color: "#fff" },
+ h2: { fontSize: 12, color: "rgba(255,255,255,0.65)", marginTop: 4 },
+ titleInput: {
+ width: 260,
+ maxWidth: "70vw",
+ padding: "8px 10px",
+ borderRadius: 10,
+ border: "1px solid rgba(255,255,255,0.18)",
+ background: "rgba(0,0,0,0.25)",
+ color: "#fff",
+ fontSize: 13,
+ outline: "none",
+ },
+ btn: {
+ background: "rgba(255,255,255,0.10)",
+ border: "1px solid rgba(255,255,255,0.18)",
+ color: "#fff",
+ borderRadius: 10,
+ padding: "8px 10px",
+ cursor: "pointer",
+ fontSize: 13,
+ },
+ primaryBtn: {
+ background: "rgba(255,255,255,0.12)",
+ border: "1px solid rgba(255,255,255,0.22)",
+ color: "#fff",
+ borderRadius: 10,
+ padding: "8px 12px",
+ cursor: "pointer",
+ fontSize: 13,
+ fontWeight: 700,
+ },
+ error: {
+ color: "#ffb3b3",
+ fontSize: 12,
+ padding: "8px 10px",
+ border: "1px solid rgba(255,120,120,0.25)",
+ borderRadius: 10,
+ background: "rgba(255,80,80,0.08)",
+ },
+ grid: {
+ display: "grid",
+ gridTemplateColumns: "300px 1.2fr 0.9fr",
+ gap: 12,
+ alignItems: "stretch",
+ },
+ sidebar: {
+ border: "1px solid rgba(255,255,255,0.12)",
+ borderRadius: 12,
+ overflow: "hidden",
+ background: "rgba(255,255,255,0.02)",
+ display: "flex",
+ flexDirection: "column",
+ minHeight: 520,
+ },
+ sidebarTitle: {
+ padding: 10,
+ borderBottom: "1px solid rgba(255,255,255,0.10)",
+ fontSize: 12,
+ fontWeight: 800,
+ color: "rgba(255,255,255,0.85)",
+ },
+ sidebarList: {
+ padding: 8,
+ display: "flex",
+ flexDirection: "column",
+ gap: 8,
+ overflow: "auto",
+ },
+ sidebarEmpty: {
+ color: "rgba(255,255,255,0.65)",
+ fontSize: 12,
+ padding: 8,
+ },
+ ucItem: {
+ textAlign: "left",
+ background: "rgba(0,0,0,0.25)",
+ border: "1px solid rgba(255,255,255,0.12)",
+ color: "#fff",
+ borderRadius: 12,
+ padding: 10,
+ cursor: "pointer",
+ },
+ ucItemActive: {
+ border: "1px solid rgba(255,255,255,0.25)",
+ background: "rgba(255,255,255,0.06)",
+ },
+ ucTitleRow: { display: "flex", alignItems: "center", gap: 8 },
+ ucTitle: {
+ fontSize: 13,
+ fontWeight: 800,
+ overflow: "hidden",
+ textOverflow: "ellipsis",
+ whiteSpace: "nowrap",
+ flex: 1,
+ },
+ activePill: {
+ fontSize: 10,
+ fontWeight: 800,
+ padding: "2px 8px",
+ borderRadius: 999,
+ border: "1px solid rgba(120,255,180,0.30)",
+ background: "rgba(120,255,180,0.10)",
+ color: "rgba(200,255,220,0.95)",
+ },
+ ucMeta: {
+ marginTop: 6,
+ fontSize: 11,
+ color: "rgba(255,255,255,0.60)",
+ },
+ chatCol: {
+ border: "1px solid rgba(255,255,255,0.12)",
+ borderRadius: 12,
+ overflow: "hidden",
+ display: "flex",
+ flexDirection: "column",
+ background: "rgba(255,255,255,0.02)",
+ minHeight: 520,
+ },
+ specCol: {
+ border: "1px solid rgba(255,255,255,0.12)",
+ borderRadius: 12,
+ overflow: "hidden",
+ display: "flex",
+ flexDirection: "column",
+ background: "rgba(255,255,255,0.02)",
+ minHeight: 520,
+ },
+ panelTitle: {
+ padding: 10,
+ borderBottom: "1px solid rgba(255,255,255,0.10)",
+ fontSize: 12,
+ fontWeight: 800,
+ color: "rgba(255,255,255,0.85)",
+ },
+ chatBox: {
+ flex: 1,
+ overflow: "auto",
+ padding: 10,
+ display: "flex",
+ flexDirection: "column",
+ gap: 10,
+ },
+ chatEmpty: {
+ color: "rgba(255,255,255,0.65)",
+ fontSize: 12,
+ padding: 6,
+ },
+ pre: {
+ marginTop: 10,
+ padding: 10,
+ borderRadius: 10,
+ border: "1px solid rgba(255,255,255,0.12)",
+ background: "rgba(0,0,0,0.25)",
+ color: "rgba(255,255,255,0.8)",
+ overflow: "auto",
+ fontSize: 11,
+ },
+ msg: {
+ border: "1px solid rgba(255,255,255,0.12)",
+ borderRadius: 12,
+ padding: 10,
+ background: "rgba(0,0,0,0.25)",
+ },
+ msgUser: {
+ border: "1px solid rgba(255,255,255,0.18)",
+ background: "rgba(255,255,255,0.04)",
+ },
+ msgAsst: {},
+ msgRole: {
+ fontSize: 11,
+ fontWeight: 800,
+ color: "rgba(255,255,255,0.70)",
+ marginBottom: 6,
+ },
+ msgContent: {
+ whiteSpace: "pre-wrap",
+ fontSize: 13,
+ color: "rgba(255,255,255,0.90)",
+ lineHeight: 1.35,
+ },
+ composer: {
+ borderTop: "1px solid rgba(255,255,255,0.10)",
+ padding: 10,
+ display: "flex",
+ gap: 10,
+ alignItems: "flex-end",
+ },
+ textarea: {
+ flex: 1,
+ minHeight: 52,
+ maxHeight: 120,
+ resize: "vertical",
+ padding: 10,
+ borderRadius: 12,
+ border: "1px solid rgba(255,255,255,0.18)",
+ background: "rgba(0,0,0,0.25)",
+ color: "#fff",
+ fontSize: 13,
+ outline: "none",
+ },
+ sendBtn: {
+ background: "rgba(255,255,255,0.12)",
+ border: "1px solid rgba(255,255,255,0.22)",
+ color: "#fff",
+ borderRadius: 12,
+ padding: "10px 12px",
+ cursor: "pointer",
+ fontSize: 13,
+ fontWeight: 800,
+ },
+ specBox: {
+ flex: 1,
+ overflow: "auto",
+ padding: 10,
+ display: "flex",
+ flexDirection: "column",
+ gap: 10,
+ },
+ specFooter: {
+ borderTop: "1px solid rgba(255,255,255,0.10)",
+ padding: 10,
+ display: "flex",
+ gap: 10,
+ alignItems: "center",
+ justifyContent: "space-between",
+ },
+ specHint: { fontSize: 12, color: "rgba(255,255,255,0.60)" },
+ section: {
+ border: "1px solid rgba(255,255,255,0.10)",
+ borderRadius: 12,
+ background: "rgba(0,0,0,0.22)",
+ overflow: "hidden",
+ },
+ sectionTitle: {
+ padding: "8px 10px",
+ borderBottom: "1px solid rgba(255,255,255,0.08)",
+ fontSize: 12,
+ fontWeight: 800,
+ color: "rgba(255,255,255,0.80)",
+ background: "rgba(255,255,255,0.02)",
+ },
+ sectionBody: { padding: "8px 10px" },
+ sectionText: {
+ whiteSpace: "pre-wrap",
+ fontSize: 12,
+ color: "rgba(255,255,255,0.90)",
+ lineHeight: 1.35,
+ },
+ sectionEmpty: { fontSize: 12, color: "rgba(255,255,255,0.45)" },
+ ul: { margin: 0, paddingLeft: 18 },
+ li: { color: "rgba(255,255,255,0.90)", fontSize: 12, lineHeight: 1.35 },
+};
diff --git a/frontend/components/ProjectSettingsModal.jsx b/frontend/components/ProjectSettingsModal.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..ff6cc365af5a1c1226d544b425ffa7bbc88698b4
--- /dev/null
+++ b/frontend/components/ProjectSettingsModal.jsx
@@ -0,0 +1,230 @@
+import React, { useEffect, useMemo, useState } from "react";
+import ContextTab from "./ProjectSettings/ContextTab.jsx";
+import UseCaseTab from "./ProjectSettings/UseCaseTab.jsx";
+import ConventionsTab from "./ProjectSettings/ConventionsTab.jsx";
+import EnvironmentSelector from "./EnvironmentSelector.jsx";
+
+export default function ProjectSettingsModal({
+ owner,
+ repo,
+ isOpen,
+ onClose,
+ activeEnvId,
+ onEnvChange,
+}) {
+ const [activeTab, setActiveTab] = useState("context");
+
+ useEffect(() => {
+ if (!isOpen) return;
+ // reset to Context each time opened (safe default)
+ setActiveTab("context");
+ }, [isOpen]);
+
+ const title = useMemo(() => {
+ const repoLabel = owner && repo ? `${owner}/${repo}` : "Project";
+ return `Project Settings — ${repoLabel}`;
+ }, [owner, repo]);
+
+ if (!isOpen) return null;
+
+ return (
+ {
+ // click outside closes
+ if (e.target === e.currentTarget) onClose?.();
+ }}
+ >
+
e.stopPropagation()}>
+
+
+
{title}
+
+ Manage context, use cases, and project conventions (additive only).
+
+
+
+ ✕
+
+
+
+
+ setActiveTab("context")}
+ />
+ setActiveTab("usecase")}
+ />
+ setActiveTab("conventions")}
+ />
+ setActiveTab("environment")}
+ />
+
+
+
+ {activeTab === "context" &&
}
+ {activeTab === "usecase" &&
}
+ {activeTab === "conventions" && (
+
+ )}
+ {activeTab === "environment" && (
+
+
+ Select and configure the execution environment for agent operations.
+
+
+
+ )}
+
+
+
+
+ Tip: Upload meeting notes/transcripts in Context, then finalize a Use
+ Case spec.
+
+
+ Done
+
+
+
+
+ );
+}
+
+function TabButton({ label, isActive, onClick }) {
+ return (
+
+ {label}
+
+ );
+}
+
+const styles = {
+ backdrop: {
+ position: "fixed",
+ inset: 0,
+ background: "rgba(0,0,0,0.45)",
+ display: "flex",
+ justifyContent: "center",
+ alignItems: "center",
+ zIndex: 9999,
+ padding: 16,
+ },
+ modal: {
+ width: "min(1100px, 96vw)",
+ height: "min(760px, 90vh)",
+ background: "#111",
+ border: "1px solid rgba(255,255,255,0.12)",
+ borderRadius: 12,
+ overflow: "hidden",
+ display: "flex",
+ flexDirection: "column",
+ boxShadow: "0 12px 40px rgba(0,0,0,0.35)",
+ },
+ header: {
+ padding: "14px 14px 10px",
+ display: "flex",
+ gap: 12,
+ alignItems: "flex-start",
+ justifyContent: "space-between",
+ borderBottom: "1px solid rgba(255,255,255,0.10)",
+ background: "linear-gradient(180deg, rgba(255,255,255,0.04), transparent)",
+ },
+ headerLeft: {
+ display: "flex",
+ flexDirection: "column",
+ gap: 4,
+ minWidth: 0,
+ },
+ title: {
+ fontSize: 16,
+ fontWeight: 700,
+ color: "#fff",
+ lineHeight: 1.2,
+ whiteSpace: "nowrap",
+ overflow: "hidden",
+ textOverflow: "ellipsis",
+ maxWidth: "88vw",
+ },
+ subtitle: {
+ fontSize: 12,
+ color: "rgba(255,255,255,0.65)",
+ },
+ closeBtn: {
+ background: "transparent",
+ border: "1px solid rgba(255,255,255,0.18)",
+ color: "rgba(255,255,255,0.85)",
+ borderRadius: 10,
+ padding: "6px 10px",
+ cursor: "pointer",
+ },
+ tabsRow: {
+ display: "flex",
+ gap: 8,
+ padding: 10,
+ borderBottom: "1px solid rgba(255,255,255,0.10)",
+ background: "rgba(255,255,255,0.02)",
+ },
+ tabBtn: {
+ background: "transparent",
+ border: "1px solid rgba(255,255,255,0.14)",
+ color: "rgba(255,255,255,0.75)",
+ borderRadius: 999,
+ padding: "8px 12px",
+ cursor: "pointer",
+ fontSize: 13,
+ },
+ tabBtnActive: {
+ border: "1px solid rgba(255,255,255,0.28)",
+ color: "#fff",
+ background: "rgba(255,255,255,0.06)",
+ },
+ body: {
+ flex: 1,
+ overflow: "auto",
+ padding: 12,
+ },
+ footer: {
+ padding: 12,
+ borderTop: "1px solid rgba(255,255,255,0.10)",
+ display: "flex",
+ alignItems: "center",
+ justifyContent: "space-between",
+ gap: 12,
+ background: "rgba(255,255,255,0.02)",
+ },
+ footerHint: {
+ color: "rgba(255,255,255,0.6)",
+ fontSize: 12,
+ overflow: "hidden",
+ textOverflow: "ellipsis",
+ whiteSpace: "nowrap",
+ },
+ primaryBtn: {
+ background: "rgba(255,255,255,0.10)",
+ border: "1px solid rgba(255,255,255,0.20)",
+ color: "#fff",
+ borderRadius: 10,
+ padding: "8px 12px",
+ cursor: "pointer",
+ },
+};
diff --git a/frontend/components/RepoSelector.jsx b/frontend/components/RepoSelector.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..dd601f4f7cb9b8a19a7858280d5886607a33271c
--- /dev/null
+++ b/frontend/components/RepoSelector.jsx
@@ -0,0 +1,269 @@
+import React, { useEffect, useState, useCallback } from "react";
+import { authFetch } from "../utils/api.js";
+
+export default function RepoSelector({ onSelect }) {
+ const [query, setQuery] = useState("");
+ const [repos, setRepos] = useState([]);
+ const [loading, setLoading] = useState(false);
+ const [loadingMore, setLoadingMore] = useState(false);
+ const [status, setStatus] = useState("");
+ const [page, setPage] = useState(1);
+ const [hasMore, setHasMore] = useState(false);
+ const [totalCount, setTotalCount] = useState(null);
+
+ /**
+ * Fetch repositories with pagination and optional search.
+ * Memoized with useCallback (recreated when `query` changes) so effects and
+ * handlers can safely reference it.
+ * @param {number} pageNum - Page number to fetch (1-based; 1 replaces the list)
+ * @param {boolean} append - Whether to append or replace results
+ * @param {string} searchQuery - Search query (uses current query if not provided)
+ */
+ const fetchRepos = useCallback(async (pageNum = 1, append = false, searchQuery = query) => {
+ // Set appropriate loading state: full-list spinner for page 1,
+ // "load more" spinner for subsequent pages.
+ if (pageNum === 1) {
+ setLoading(true);
+ setStatus("");
+ } else {
+ setLoadingMore(true);
+ }
+
+ try {
+ // Build URL with query parameters
+ const params = new URLSearchParams();
+ params.append("page", pageNum);
+ params.append("per_page", "100");
+ if (searchQuery) {
+ params.append("query", searchQuery);
+ }
+
+ const url = `/api/repos?${params.toString()}`;
+ const res = await authFetch(url);
+ const data = await res.json();
+
+ if (!res.ok) {
+ throw new Error(data.detail || data.error || "Failed to load repositories");
+ }
+
+ // Update repositories - append or replace
+ if (append) {
+ setRepos((prev) => [...prev, ...data.repositories]);
+ } else {
+ setRepos(data.repositories);
+ }
+
+ // Update pagination state
+ setPage(pageNum);
+ setHasMore(data.has_more);
+ setTotalCount(data.total_count);
+
+ // Show status if no results
+ if (!append && data.repositories.length === 0) {
+ if (searchQuery) {
+ setStatus(`No repositories matching "${searchQuery}"`);
+ } else {
+ setStatus("No repositories found");
+ }
+ } else {
+ setStatus("");
+ }
+ } catch (err) {
+ console.error("Error fetching repositories:", err);
+ setStatus(err.message || "Failed to load repositories");
+ } finally {
+ setLoading(false);
+ setLoadingMore(false);
+ }
+ }, [query]);
+
+ /**
+ * Load more repositories (next page); results are appended to the list.
+ */
+ const loadMore = () => {
+ fetchRepos(page + 1, true);
+ };
+
+ /**
+ * Handle search - resets to page 1 and replaces the current results
+ * with matches for the current `query`.
+ */
+ const handleSearch = () => {
+ setPage(1);
+ fetchRepos(1, false, query);
+ };
+
+ /**
+ * Handle input change - trigger search on Enter key.
+ */
+ const handleKeyDown = (e) => {
+ if (e.key === "Enter") {
+ handleSearch();
+ }
+ };
+
+ /**
+ * Clear search and show all repos (explicit empty query is passed so the
+ * fetch does not reuse the just-cleared state value).
+ */
+ const clearSearch = () => {
+ setQuery("");
+ setPage(1);
+ fetchRepos(1, false, "");
+ };
+
+ // Initial load on mount
+ useEffect(() => {
+ fetchRepos(1, false, "");
+ // eslint-disable-next-line react-hooks/exhaustive-deps
+ }, []);
+
+ /**
+ * Format repository count for display.
+ * When `totalCount` is known (non-null) shows "X of Y repositories";
+ * otherwise shows the loaded count with a trailing "+" while more pages
+ * remain.
+ */
+ const getCountText = () => {
+ if (totalCount !== null) {
+ // Search mode - show filtered count
+ return `${repos.length} of ${totalCount} repositories`;
+ } else {
+ // Pagination mode - show loaded count
+ return `${repos.length} ${repos.length === 1 ? "repository" : "repositories"}${hasMore ? "+" : ""}`;
+ }
+ };
+
+ return (
+
+
+ GitHub repos are optional. Use Folder or Local Git mode for local-first workflows.
+
+ {/* Search Header */}
+
+
+ setQuery(e.target.value)}
+ onKeyDown={handleKeyDown}
+ disabled={loading}
+ />
+
+ {loading ? "..." : "Search"}
+
+
+
+ {/* Search Info Bar */}
+ {(query || repos.length > 0) && (
+
+ {getCountText()}
+ {query && (
+
+ Clear search
+
+ )}
+
+ )}
+
+
+ {/* Status Message */}
+ {status && !loading && (
+
+ {status}
+
+ )}
+
+ {/* Repository List */}
+
+ {repos.map((r) => (
+
onSelect(r)}
+ >
+
+ {r.name}
+ {r.owner}
+
+ {r.private && (
+ Private
+ )}
+
+ ))}
+
+ {/* Loading Indicator */}
+ {loading && repos.length === 0 && (
+
+
+
Loading repositories...
+
+ )}
+
+ {/* Load More Button */}
+ {hasMore && !loading && repos.length > 0 && (
+
+ {loadingMore ? (
+ <>
+
+ Loading more...
+ >
+ ) : (
+ <>
+ Load more repositories
+ ({repos.length} loaded)
+ >
+ )}
+
+ )}
+
+ {/* All Loaded Message */}
+ {!hasMore && !loading && repos.length > 0 && (
+
+ ✓ All repositories loaded ({repos.length} total)
+
+ )}
+
+
+ {/* GitHub App Installation Notice */}
+
+
+
+
+
+
+
+ Repository missing?
+
+
+
+
+
+ );
+}
\ No newline at end of file
diff --git a/frontend/components/SessionItem.jsx b/frontend/components/SessionItem.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..acf2ce3fc670698f9198362a5c81d9d871e99027
--- /dev/null
+++ b/frontend/components/SessionItem.jsx
@@ -0,0 +1,183 @@
+import React, { useState } from "react";
+
+/**
+ * SessionItem — a single row in the sessions sidebar.
+ *
+ * Shows status dot (pulsing/static), title, timestamp, message count.
+ * Claude-Code-on-Web parity: active=amber pulse, completed=green,
+ * failed=red, waiting=blue.
+ */
+export default function SessionItem({ session, isActive, onSelect, onDelete }) {
+ const [hovering, setHovering] = useState(false);
+
  // Session status drives the dot color/animation; default to "active".
  const status = session.status || "active";

  // Claude-Code-on-Web palette (see component header): active=amber,
  // completed=green, failed=red, waiting=blue, paused=gray.
  // Unknown statuses also fall back to gray.
  const dotColor = {
    active: "#F59E0B",
    completed: "#10B981",
    failed: "#EF4444",
    waiting: "#3B82F6",
    paused: "#6B7280",
  }[status] || "#6B7280";

  // Only actively-running sessions get the pulsing dot.
  const isPulsing = status === "active";

  // Relative timestamp ("5m ago", …) from the session's last update.
  const timeAgo = formatTimeAgo(session.updated_at);

  // Prefer name (set from first user prompt) over generic fallback
  const title =
    session.name ||
    (session.branch ? `${session.branch}` : `Session ${session.id?.slice(0, 8)}`);
+
+ return (
+ setHovering(true)}
+ onMouseLeave={() => setHovering(false)}
+ >
+
+
+ {/* Status dot */}
+
+
+ {/* Content */}
+
+
{title}
+
+ {timeAgo}
+ {session.mode && (
+
+ {session.mode === "github" ? "GH" : session.mode === "local-git" ? "Git" : "Dir"}
+
+ )}
+ {session.message_count > 0 && (
+ {session.message_count} msgs
+ )}
+
+
+
+ {/* Delete button (on hover) */}
+ {hovering && (
+
{
+ e.stopPropagation();
+ onDelete?.();
+ }}
+ title="Delete session"
+ >
+ ×
+
+ )}
+
+ );
+}
+
/**
 * Format an ISO-8601 timestamp as a short relative-time label.
 *
 * @param {string} isoStr - ISO date string (e.g. session.updated_at).
 * @returns {string} "just now", "Nm ago", "Nh ago", "Nd ago", or ""
 *   when the input is missing or unparseable.
 */
function formatTimeAgo(isoStr) {
  if (!isoStr) return "";
  const date = new Date(isoStr);
  // Bug fix: new Date(bad) never throws — it yields an Invalid Date whose
  // getTime() is NaN, so the original try/catch never fired and bad input
  // rendered as "NaNd ago". Guard explicitly instead.
  if (Number.isNaN(date.getTime())) return "";
  const diffMs = Date.now() - date.getTime();
  const diffMin = Math.floor(diffMs / 60000);
  if (diffMin < 1) return "just now";
  if (diffMin < 60) return `${diffMin}m ago`;
  const diffHr = Math.floor(diffMin / 60);
  if (diffHr < 24) return `${diffHr}h ago`;
  return `${Math.floor(diffHr / 24)}d ago`;
}
+
// Inline style objects (this codebase uses no CSS framework).
const styles = {
  // Whole row; the fade-in keyframes are injected by SessionSidebar.
  row: {
    display: "flex",
    alignItems: "center",
    gap: 8,
    padding: "8px 10px",
    borderRadius: 6,
    cursor: "pointer",
    transition: "background-color 0.15s",
    position: "relative",
    marginBottom: 2,
    animation: "session-fade-in 0.25s ease-out",
  },
  // Status dot; its color (and pulse) are set per-session at render time.
  dot: {
    width: 8,
    height: 8,
    borderRadius: "50%",
    flexShrink: 0,
  },
  // Title + meta column; minWidth 0 lets the title ellipsize inside flex.
  content: {
    flex: 1,
    minWidth: 0,
    overflow: "hidden",
  },
  // Single-line, ellipsized session title.
  title: {
    fontSize: 12,
    fontWeight: 500,
    color: "#E4E4E7",
    whiteSpace: "nowrap",
    overflow: "hidden",
    textOverflow: "ellipsis",
  },
  // Timestamp / mode badge / message-count row under the title.
  meta: {
    fontSize: 10,
    color: "#71717A",
    marginTop: 2,
    display: "flex",
    alignItems: "center",
    gap: 6,
  },
  // Small pill used for the mode badge (GH / Git / Dir).
  badge: {
    fontSize: 9,
    background: "#27272A",
    padding: "1px 5px",
    borderRadius: 8,
    color: "#A1A1AA",
  },
  // Red "×" button shown only while the row is hovered.
  deleteBtn: {
    position: "absolute",
    right: 6,
    top: 6,
    width: 18,
    height: 18,
    borderRadius: 3,
    border: "none",
    background: "rgba(239, 68, 68, 0.15)",
    color: "#EF4444",
    fontSize: 14,
    cursor: "pointer",
    display: "flex",
    alignItems: "center",
    justifyContent: "center",
    lineHeight: 1,
  },
};
diff --git a/frontend/components/SessionSidebar.jsx b/frontend/components/SessionSidebar.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..fb63c7526850dba78ce9a5e37a4b0efac32d0e8a
--- /dev/null
+++ b/frontend/components/SessionSidebar.jsx
@@ -0,0 +1,181 @@
+import React, { useEffect, useRef, useState } from "react";
+import SessionItem from "./SessionItem.jsx";
+
+/**
+ * SessionSidebar — Claude-Code-on-Web parity.
+ *
+ * Shows a scrollable list of coding sessions with status indicators,
+ * timestamps, and a "New Session" button. Additive — does not modify
+ * any existing component.
+ */
+export default function SessionSidebar({
+ repo,
+ activeSessionId,
+ onSelectSession,
+ onNewSession,
+ onDeleteSession,
+ refreshNonce = 0,
+}) {
+ const [sessions, setSessions] = useState([]);
+ const [loading, setLoading] = useState(false);
+ const pollRef = useRef(null);
+
+ const repoFullName = repo?.full_name || (repo ? `${repo.owner}/${repo.name}` : null);
+
+ // Fetch sessions
  // Fetch sessions for the current repo, then poll every 15s so the
  // status dots stay fresh. Re-runs when the repo changes or the parent
  // bumps refreshNonce; `cancelled` guards against setState after the
  // effect has been cleaned up (unmount or dependency change).
  useEffect(() => {
    if (!repoFullName) {
      // No repo selected — clear the list and skip polling entirely.
      setSessions([]);
      return;
    }

    let cancelled = false;

    const fetchSessions = async () => {
      setLoading(true);
      try {
        // GitHub token (if present) authorizes the sessions endpoint.
        const token = localStorage.getItem("github_token");
        const headers = token ? { Authorization: `Bearer ${token}` } : {};
        const res = await fetch(`/api/sessions`, { headers, cache: "no-cache" });
        if (!res.ok) return;
        const data = await res.json();
        if (cancelled) return;

        // Filter to current repo
        const filtered = (data.sessions || []).filter(
          (s) => s.repo === repoFullName
        );
        setSessions(filtered);
      } catch (err) {
        console.warn("Failed to fetch sessions:", err);
      } finally {
        if (!cancelled) setLoading(false);
      }
    };

    fetchSessions();

    // Poll every 15s for status updates
    pollRef.current = setInterval(fetchSessions, 15000);

    return () => {
      cancelled = true;
      if (pollRef.current) clearInterval(pollRef.current);
    };
  }, [repoFullName, refreshNonce]);
+
+ const handleDelete = async (sessionId) => {
+ try {
+ const token = localStorage.getItem("github_token");
+ const headers = token ? { Authorization: `Bearer ${token}` } : {};
+ await fetch(`/api/sessions/${sessionId}`, { method: "DELETE", headers });
+ setSessions((prev) => prev.filter((s) => s.id !== sessionId));
+ // Notify parent so it can clear the chat if this was the active session
+ onDeleteSession?.(sessionId);
+ } catch (err) {
+ console.warn("Failed to delete session:", err);
+ }
+ };
+
+ return (
+
+
+
+ {/* Header */}
+
+ SESSIONS
+
+ +
+
+
+
+ {/* Session list */}
+
+ {loading && sessions.length === 0 && (
+
Loading...
+ )}
+
+ {!loading && sessions.length === 0 && (
+
+ No sessions yet.
+
+
+ Your first message will create one automatically.
+
+
+ )}
+
+ {sessions.map((s) => (
+
onSelectSession?.(s)}
+ onDelete={() => handleDelete(s.id)}
+ />
+ ))}
+
+
+ );
+}
+
// Keyframes for the session-row fade-in (consumed by SessionItem's
// `animation: "session-fade-in …"`); injected via a <style> tag so no
// external stylesheet edit is needed.
const animStyles = `
  @keyframes session-fade-in {
    from { opacity: 0; transform: translateY(4px); }
    to { opacity: 1; transform: translateY(0); }
  }
`;

// Inline style objects for the sidebar shell.
const styles = {
  // Column layout; minHeight 0 lets the inner list scroll inside flex.
  container: {
    display: "flex",
    flexDirection: "column",
    borderTop: "1px solid #27272A",
    flex: 1,
    minHeight: 0,
  },
  // "SESSIONS" label row with the "+" button on the right.
  header: {
    display: "flex",
    justifyContent: "space-between",
    alignItems: "center",
    padding: "10px 14px 6px",
  },
  label: {
    fontSize: 10,
    fontWeight: 700,
    letterSpacing: "0.08em",
    color: "#71717A",
    textTransform: "uppercase",
  },
  // Dashed "+" new-session button.
  newBtn: {
    width: 22,
    height: 22,
    borderRadius: 4,
    border: "1px dashed #3F3F46",
    background: "transparent",
    color: "#A1A1AA",
    fontSize: 14,
    cursor: "pointer",
    display: "flex",
    alignItems: "center",
    justifyContent: "center",
    lineHeight: 1,
  },
  // Scrollable session list.
  list: {
    flex: 1,
    overflowY: "auto",
    padding: "0 6px 8px",
  },
  // "No sessions yet" placeholder.
  empty: {
    textAlign: "center",
    color: "#52525B",
    fontSize: 12,
    padding: "20px 8px",
    lineHeight: 1.5,
  },
};
diff --git a/frontend/components/SettingsModal.jsx b/frontend/components/SettingsModal.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..24d43b1dd25831da589d35e7a386b0a7a1aa37a4
--- /dev/null
+++ b/frontend/components/SettingsModal.jsx
@@ -0,0 +1,333 @@
+import React, { useEffect, useState } from "react";
+
+export default function SettingsModal({ onClose }) {
+ const [settings, setSettings] = useState(null);
+ const [models, setModels] = useState([]);
+ const [modelsError, setModelsError] = useState(null);
+ const [loadingModels, setLoadingModels] = useState(false);
+ const [testResult, setTestResult] = useState(null); // { ok: bool, message: string }
+ const [testing, setTesting] = useState(false);
+
+ const loadSettings = async () => {
+ const res = await fetch("/api/settings");
+ const data = await res.json();
+ setSettings(data);
+ };
+
+ useEffect(() => {
+ loadSettings();
+ }, []);
+
+ const changeProvider = async (provider) => {
+ const res = await fetch("/api/settings/provider", {
+ method: "POST",
+ headers: { "Content-Type": "application/json" },
+ body: JSON.stringify({ provider }),
+ });
+ const data = await res.json();
+ setSettings(data);
+
+ // Reset models state when provider changes
+ setModels([]);
+ setModelsError(null);
+ };
+
+ const loadModels = async () => {
+ if (!settings) return;
+ setLoadingModels(true);
+ setModelsError(null);
+ try {
+ const res = await fetch(
+ `/api/settings/models?provider=${settings.provider}`
+ );
+ const data = await res.json();
+ if (data.error) {
+ setModelsError(data.error);
+ setModels([]);
+ } else {
+ setModels(data.models || []);
+ }
+ } catch (err) {
+ console.error(err);
+ setModelsError("Failed to load models");
+ setModels([]);
+ } finally {
+ setLoadingModels(false);
+ }
+ };
+
+ const currentModelForActiveProvider = () => {
+ if (!settings) return "";
+ const p = settings.provider;
+ if (p === "openai") return settings.openai?.model || "";
+ if (p === "claude") return settings.claude?.model || "";
+ if (p === "watsonx") return settings.watsonx?.model_id || "";
+ if (p === "ollama") return settings.ollama?.model || "";
+ return "";
+ };
+
+ const changeModel = async (model) => {
+ if (!settings) return;
+ const provider = settings.provider;
+
+ let payload = {};
+ if (provider === "openai") {
+ payload = {
+ openai: {
+ ...settings.openai,
+ model,
+ },
+ };
+ } else if (provider === "claude") {
+ payload = {
+ claude: {
+ ...settings.claude,
+ model,
+ },
+ };
+ } else if (provider === "watsonx") {
+ payload = {
+ watsonx: {
+ ...settings.watsonx,
+ model_id: model,
+ },
+ };
+ } else if (provider === "ollama") {
+ payload = {
+ ollama: {
+ ...settings.ollama,
+ model,
+ },
+ };
+ }
+
+ const res = await fetch("/api/settings/llm", {
+ method: "PUT",
+ headers: { "Content-Type": "application/json" },
+ body: JSON.stringify(payload),
+ });
+ const data = await res.json();
+ setSettings(data);
+ };
+
+ const testConnection = async () => {
+ if (!settings) return;
+ setTesting(true);
+ setTestResult(null);
+ try {
+ const res = await fetch(`/api/settings/test?provider=${settings.provider}`);
+ const data = await res.json();
+ if (!res.ok || data.error) {
+ setTestResult({ ok: false, message: data.error || data.detail || "Connection failed" });
+ } else {
+ setTestResult({ ok: true, message: data.message || "Connection successful" });
+ }
+ } catch (err) {
+ setTestResult({ ok: false, message: err.message || "Connection test failed" });
+ } finally {
+ setTesting(false);
+ }
+ };
+
+ const toggleLiteMode = async () => {
+ if (!settings) return;
+ const newValue = !settings.lite_mode;
+ try {
+ const res = await fetch("/api/settings/lite-mode", {
+ method: "POST",
+ headers: { "Content-Type": "application/json" },
+ body: JSON.stringify({ lite_mode: newValue }),
+ });
+ if (res.ok) {
+ setSettings((prev) => ({ ...prev, lite_mode: newValue }));
+ }
+ } catch (err) {
+ console.error("Failed to toggle lite mode:", err);
+ }
+ };
+
+ if (!settings) return null;
+
+ const activeModel = currentModelForActiveProvider();
+
+ return (
+
+
e.stopPropagation()}>
+
+
+
+ Select which LLM provider GitPilot should use for planning and chat.
+
+
+
+ {settings.providers.map((p) => (
+
+
{p}
+
changeProvider(p)}
+ disabled={settings.provider === p}
+ >
+ {settings.provider === p ? "Active" : "Use"}
+
+
+ ))}
+
+
+ {/* Models section */}
+
+
+ Active provider: {settings.provider}
+
+
+
+
+ {testing ? "Testing…" : "Test Connection"}
+
+
+ {loadingModels ? "Loading…" : "Display models"}
+
+
+ {activeModel && (
+
+ Current model: {activeModel}
+
+ )}
+
+
+ {modelsError && (
+
+ {modelsError}
+
+ )}
+
+ {testResult && (
+
+ {testResult.ok ? "✓ " : "✗ "}{testResult.message}
+
+ )}
+
+ {models.length > 0 && (
+
+
+ Select model for {settings.provider}:
+
+ changeModel(e.target.value)}
+ >
+ -- select a model --
+ {models.map((m) => (
+
+ {m}
+
+ ))}
+
+
+ )}
+
+
+ {/* Lite Mode section */}
+
+
+
+ Lite Mode
+
+
+ {settings.lite_mode ? "ON" : "OFF"}
+
+
+
+ Optimized for small models (under 7B parameters).
+ Uses simplified prompts and single-agent execution instead
+ of multi-agent pipelines. Recommended for: qwen2.5:1.5b,
+ phi-3-mini, gemma-2b, tinyllama, etc.
+
+
+
+
+ );
+}
diff --git a/frontend/components/StartupScreen.jsx b/frontend/components/StartupScreen.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..a9c19e897a1b143761b111e4f2a5f46ed9643c61
--- /dev/null
+++ b/frontend/components/StartupScreen.jsx
@@ -0,0 +1,92 @@
+import React from "react";
+
/**
 * Derive a display label from a provider value that may be a string,
 * an object carrying one of several label fields, or absent entirely.
 * Strings are upper-cased; object labels are returned verbatim; every
 * other case falls back to the "Checking..." placeholder.
 */
function normalizeProvider(provider) {
  if (typeof provider === "string" && provider) {
    return provider.toUpperCase();
  }
  if (provider && typeof provider === "object") {
    const label =
      provider.name || provider.provider || provider.type || provider.label;
    if (label) return label;
  }
  return "Checking...";
}
+
// Coerce a version value to a display string; null/undefined/"" (and
// other falsy values) render as the "Checking..." placeholder.
function normalizeVersion(version) {
  return version ? String(version) : "Checking...";
}
+
+export default function StartupScreen({
+ appName = "GitPilot",
+ subtitle = "Enterprise Workspace Copilot",
+ frontendVersion = "Checking...",
+ backendVersion = "Checking...",
+ provider = "Checking...",
+ statusMessage = "Starting application...",
+ detailMessage = "Initializing authentication, provider, and workspace context.",
+ phase = "booting",
+}) {
+ const providerLabel = normalizeProvider(provider);
+ const frontendLabel = normalizeVersion(frontendVersion);
+ const backendLabel = normalizeVersion(backendVersion);
+
+ return (
+
+
+
+
+
+
+
{appName}
+
{subtitle}
+
+
+
+
+
+
+
{statusMessage}
+
{detailMessage}
+
+
+
+ {phase}
+
+
+
+
+
Frontend
+
v{frontendLabel}
+
+
+
+
Backend
+
v{backendLabel}
+
+
+
+
Provider
+
{providerLabel}
+
+
+
+
+ Preparing workspace services, restoring session state, and checking
+ platform readiness.
+
+
+
+ );
+}
\ No newline at end of file
diff --git a/frontend/components/StreamingMessage.jsx b/frontend/components/StreamingMessage.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..71aaf5918c26d7ae6b097505381767d990357823
--- /dev/null
+++ b/frontend/components/StreamingMessage.jsx
@@ -0,0 +1,182 @@
+import React from "react";
+
+/**
+ * StreamingMessage — Claude-Code-on-Web parity streaming renderer.
+ *
+ * Renders agent messages incrementally as they arrive via WebSocket.
+ * Shows tool use blocks (bash commands + output), explanatory text,
+ * and status indicators.
+ */
+export default function StreamingMessage({ events }) {
+ if (!events || events.length === 0) return null;
+
+ return (
+
+ {events.map((evt, idx) => (
+
+ ))}
+
+ );
+}
+
+function StreamingEvent({ event, isLast }) {
+ const { type } = event;
+
+ if (type === "agent_message") {
+ return (
+
+ {event.content}
+ {isLast && | }
+
+ );
+ }
+
+ if (type === "tool_use") {
+ return (
+
+
+
+
+
+
+
{event.tool || "terminal"}
+
+
+ $ {event.input}
+
+
+ );
+ }
+
+ if (type === "tool_result") {
+ return (
+
+ );
+ }
+
+ if (type === "status_change") {
+ const statusLabels = {
+ active: "Working...",
+ waiting: "Waiting for input",
+ completed: "Completed",
+ failed: "Failed",
+ };
+ return (
+
+
+
{statusLabels[event.status] || event.status}
+
+ );
+ }
+
+ if (type === "diff_update") {
+ return null; // Handled by DiffStats in parent
+ }
+
+ if (type === "error") {
+ return (
+
+ {event.message}
+
+ );
+ }
+
+ return null;
+}
+
// Inline style objects for the streaming renderer, keyed by event type
// (see StreamingEvent above: agent_message, tool_use, tool_result,
// status_change, error).
const styles = {
  container: {
    display: "flex",
    flexDirection: "column",
    gap: 4,
  },
  // Plain agent prose (agent_message events).
  textBlock: {
    fontSize: 14,
    lineHeight: 1.6,
    color: "#D4D4D8",
    whiteSpace: "pre-wrap",
    wordBreak: "break-word",
  },
  // Blinking "|" caret shown on the last streaming text block.
  cursor: {
    display: "inline-block",
    animation: "blink 1s step-end infinite",
    color: "#3B82F6",
    fontWeight: 700,
  },
  // tool_use container: header (tool name) + command + output.
  toolBlock: {
    margin: "4px 0",
    borderRadius: 6,
    border: "1px solid #27272A",
    overflow: "hidden",
  },
  toolHeader: {
    display: "flex",
    alignItems: "center",
    gap: 6,
    padding: "6px 10px",
    backgroundColor: "#18181B",
    fontSize: 11,
    color: "#71717A",
    fontFamily: "monospace",
  },
  toolName: {
    fontWeight: 600,
  },
  // "$ <command>" line, terminal-green on near-black.
  toolInput: {
    padding: "8px 10px",
    backgroundColor: "#0D0D0F",
    fontFamily: "monospace",
    fontSize: 12,
    color: "#10B981",
    whiteSpace: "pre-wrap",
    wordBreak: "break-all",
  },
  // Height-capped, scrollable pane for tool_result output.
  toolOutput: {
    padding: "8px 10px",
    backgroundColor: "#0D0D0F",
    maxHeight: 300,
    overflowY: "auto",
  },
  toolOutputPre: {
    margin: 0,
    fontFamily: "monospace",
    fontSize: 11,
    color: "#A1A1AA",
    whiteSpace: "pre-wrap",
    wordBreak: "break-all",
  },
  // status_change events: italic dot + label line.
  statusLine: {
    display: "flex",
    alignItems: "center",
    gap: 6,
    padding: "4px 0",
    fontSize: 12,
    color: "#71717A",
    fontStyle: "italic",
  },
  statusDot: {
    width: 6,
    height: 6,
    borderRadius: "50%",
  },
  // error events: red-tinted callout.
  errorBlock: {
    padding: "8px 12px",
    borderRadius: 6,
    backgroundColor: "rgba(239, 68, 68, 0.08)",
    border: "1px solid rgba(239, 68, 68, 0.2)",
    color: "#FCA5A5",
    fontSize: 13,
  },
};
diff --git a/frontend/components/ThinkingIndicator.jsx b/frontend/components/ThinkingIndicator.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..92b6e3d563739c16bc64c88144f0478c06c79d4f
--- /dev/null
+++ b/frontend/components/ThinkingIndicator.jsx
@@ -0,0 +1,151 @@
+// frontend/components/ThinkingIndicator.jsx
+//
+// Compact, enterprise-grade thinking state. Sits inline in the chat
+// timeline as a small assistant-style bubble:
+//
+// ● Reading repository... · · ·
+//
+// Design goals (from the bug report):
+// * Calm, precise, technical — no large card, no big glow, no
+// all-caps "THINKING" label.
+// * Sits inline next to other chat messages; ~36 px tall, auto width.
+// * Tiny pulsing brand-orange dot as the only accent (no rings,
+// no progress sweep, no nested animated panels).
+// * Muted text, sentence case, task-specific labels that rotate
+// ("Reading repository", "Building plan", "Checking context",
+// "Preparing response").
+// * Three tiny fading dots on the right as a generic "still working"
+// signal.
+//
+// Implementation constraints (this codebase, not the proposal's):
+// * No Tailwind — uses plain inline-style objects.
+// * No framer-motion — uses CSS @keyframes in one scoped
+
+ {/* keyed on the label so the fade-in plays each rotation */}
+
+ {currentLabel}
+
+
+
+
+
+
+
+ );
+}
diff --git a/frontend/components/UserMenu.jsx b/frontend/components/UserMenu.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..34849df04c66d62e71a434147a6dcaa5e5cf7fac
--- /dev/null
+++ b/frontend/components/UserMenu.jsx
@@ -0,0 +1,424 @@
+// frontend/components/UserMenu.jsx
+import React, { useEffect, useRef, useState, useCallback } from "react";
+
+/**
+ * UserMenu — account dropdown attached to the profile avatar in the
+ * bottom-left of the sidebar. Follows the Claude Code / ChatGPT pattern:
+ * click avatar → popover with Settings, About, Logout.
+ *
+ * Best practices applied:
+ * - Click outside to close (mousedown listener on document)
+ * - Escape key closes
+ * - ARIA: role="menu" + aria-haspopup + aria-expanded on trigger
+ * - Keyboard navigation (Tab / Shift+Tab cycles items, Enter activates)
+ * - Position: absolute popover anchored to trigger, opens upward
+ * - Brand palette: #D95C3D accent, #1C1C1F card, #27272A border
+ * - Respects sidebarCollapsed: when collapsed, only avatar is shown
+ * - Animation: subtle fade+translate for polish
+ */
+
+export default function UserMenu({
+ userInfo,
+ sidebarCollapsed = false,
+ onOpenSettings,
+ onOpenAbout,
+ onLogout,
+}) {
+ const [open, setOpen] = useState(false);
+ const [fixedPos, setFixedPos] = useState(null);
+ const containerRef = useRef(null);
+ const triggerRef = useRef(null);
+ const menuRef = useRef(null);
+
+ // When the sidebar is collapsed, the parent .sidebar has overflow-x:hidden
+ // which clips an absolutely-positioned popover. Escape the clip by using
+ // position:fixed with coordinates measured from the trigger's bounding
+ // rect. Recompute on open, window resize, and scroll.
+ useEffect(() => {
+ if (!open || !sidebarCollapsed) {
+ setFixedPos(null);
+ return;
+ }
+ const compute = () => {
+ const el = triggerRef.current;
+ if (!el) return;
+ const rect = el.getBoundingClientRect();
+ setFixedPos({
+ left: Math.round(rect.right + 8),
+ bottom: Math.round(window.innerHeight - rect.bottom),
+ });
+ };
+ compute();
+ window.addEventListener("resize", compute);
+ window.addEventListener("scroll", compute, true);
+ return () => {
+ window.removeEventListener("resize", compute);
+ window.removeEventListener("scroll", compute, true);
+ };
+ }, [open, sidebarCollapsed]);
+
  // Close on click outside: any mousedown not inside the container
  // (trigger + popover) dismisses the menu.
  useEffect(() => {
    if (!open) return;
    const handleDocMouseDown = (e) => {
      if (containerRef.current && !containerRef.current.contains(e.target)) {
        setOpen(false);
      }
    };
    document.addEventListener("mousedown", handleDocMouseDown);
    return () => document.removeEventListener("mousedown", handleDocMouseDown);
  }, [open]);

  // Close on Escape, returning focus to the trigger for keyboard users.
  useEffect(() => {
    if (!open) return;
    const handleKey = (e) => {
      if (e.key === "Escape") {
        setOpen(false);
        triggerRef.current?.focus();
      }
    };
    document.addEventListener("keydown", handleKey);
    return () => document.removeEventListener("keydown", handleKey);
  }, [open]);

  // Focus the first menu item when opened (standard ARIA menu behavior).
  useEffect(() => {
    if (open && menuRef.current) {
      const firstItem = menuRef.current.querySelector('[role="menuitem"]');
      firstItem?.focus();
    }
  }, [open]);

  // Close the menu first, then run the chosen item's action.
  const handleItemClick = useCallback((action) => {
    setOpen(false);
    // Defer to next tick so the dropdown close animation doesn't jitter
    // against the modal open animation.
    window.setTimeout(() => action?.(), 0);
  }, []);
+
+ if (!userInfo) return null;
+
+ const displayName = userInfo.name || userInfo.login;
+ const login = userInfo.login || "";
+
+ return (
+
+ {/* Trigger: avatar + optional name */}
+
setOpen((v) => !v)}
+ aria-haspopup="menu"
+ aria-expanded={open}
+ aria-label={`Account menu for ${displayName}`}
+ className="user-menu-trigger"
+ style={{
+ display: "flex",
+ alignItems: "center",
+ gap: sidebarCollapsed ? 0 : 10,
+ width: "100%",
+ padding: sidebarCollapsed ? "6px" : "8px 10px",
+ background: open ? "#27272A" : "transparent",
+ border: "1px solid",
+ borderColor: open ? "#D95C3D" : "transparent",
+ borderRadius: 10,
+ cursor: "pointer",
+ color: "#EDEDED",
+ textAlign: "left",
+ transition: "background 120ms ease, border-color 120ms ease",
+ fontFamily: "inherit",
+ }}
+ onMouseEnter={(e) => {
+ if (!open) e.currentTarget.style.background = "#1C1C1F";
+ }}
+ onMouseLeave={(e) => {
+ if (!open) e.currentTarget.style.background = "transparent";
+ }}
+ >
+ {userInfo.avatar_url ? (
+
+ ) : (
+
+ {(displayName || "?").slice(0, 2).toUpperCase()}
+
+ )}
+
+ {!sidebarCollapsed && (
+
+
+ {displayName}
+
+ {login && (
+
+ @{login}
+
+ )}
+
+ )}
+
+ {!sidebarCollapsed && (
+
+
+
+ )}
+
+
+ {/* Dropdown popover */}
+ {open && (
+
+ {/* Header: show full email/username for context */}
+
+
+ Signed in as
+
+
+ {displayName}
+
+
+
+
}
+ label="Settings"
+ onClick={() => handleItemClick(onOpenSettings)}
+ />
+
}
+ label="About GitPilot"
+ onClick={() => handleItemClick(onOpenAbout)}
+ />
+
+
+
+
}
+ label="Log out"
+ onClick={() => handleItemClick(onLogout)}
+ danger
+ />
+
+ )}
+
+ {/* Scoped keyframe animation */}
+
+
+ );
+}
+
+// ── Menu item primitive ────────────────────────────────────────────
+function MenuItem({ icon, label, onClick, danger = false }) {
+ const [hover, setHover] = useState(false);
+ const color = danger ? "#f87171" : "#EDEDED";
+ return (
+ setHover(true)}
+ onMouseLeave={() => setHover(false)}
+ style={{
+ display: "flex",
+ alignItems: "center",
+ gap: 12,
+ width: "100%",
+ padding: "9px 12px",
+ background: hover ? "#27272A" : "transparent",
+ border: "none",
+ borderRadius: 8,
+ cursor: "pointer",
+ color: color,
+ fontSize: 13,
+ fontWeight: 500,
+ textAlign: "left",
+ fontFamily: "inherit",
+ transition: "background 80ms ease",
+ }}
+ >
+
+ {icon}
+
+ {label}
+
+ );
+}
+
+// ── Inline icons (no extra asset loads) ────────────────────────────
+function SettingsIcon() {
+ return (
+
+
+
+
+ );
+}
+
+function InfoIcon() {
+ return (
+
+
+
+
+
+ );
+}
+
+function LogoutIcon() {
+ return (
+
+
+
+
+
+ );
+}
diff --git a/frontend/index.html b/frontend/index.html
new file mode 100644
index 0000000000000000000000000000000000000000..497850b3f10fa590f7e06d95843e28e5ce3fd4b5
--- /dev/null
+++ b/frontend/index.html
@@ -0,0 +1,12 @@
+
+
+
+
+ GitPilot
+
+
+
+
+
+
+
diff --git a/frontend/main.jsx b/frontend/main.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..2c2017cf902ef50f84816577bd83e21af501f43e
--- /dev/null
+++ b/frontend/main.jsx
@@ -0,0 +1,11 @@
+import React from "react";
+import ReactDOM from "react-dom/client";
+import App from "./App.jsx";
+import "./styles.css";
+import "./ollabridge.css";
+
+ReactDOM.createRoot(document.getElementById("root")).render(
+
+
+
+);
diff --git a/frontend/nginx.conf b/frontend/nginx.conf
new file mode 100644
index 0000000000000000000000000000000000000000..455bb91c50c5c97affbe57cf37fe1f7e07572f1d
--- /dev/null
+++ b/frontend/nginx.conf
@@ -0,0 +1,58 @@
server {
    listen 80;
    server_name _;
    root /usr/share/nginx/html;
    index index.html;

    # DNS resolver for dynamic upstream resolution.
    # This allows nginx to start even if backend doesn't exist yet.
    resolver 127.0.0.11 valid=30s ipv6=off;

    # Gzip compression
    gzip on;
    gzip_vary on;
    gzip_min_length 1024;
    gzip_types text/plain text/css text/xml text/javascript application/x-javascript application/xml+rss application/json application/javascript;

    # Security headers ("always" = emitted on error responses too).
    # NOTE(review): X-XSS-Protection is deprecated in modern browsers;
    # kept for legacy clients — consider a Content-Security-Policy.
    add_header X-Frame-Options "SAMEORIGIN" always;
    add_header X-Content-Type-Options "nosniff" always;
    add_header X-XSS-Protection "1; mode=block" always;

    # Handle API requests - proxy to backend (docker-compose only).
    # Uses a variable to force runtime DNS resolution instead of startup.
    location /api/ {
        set $backend "backend:8000";
        proxy_pass http://$backend;
        proxy_http_version 1.1;
        # WebSocket upgrade support
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection 'upgrade';
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_cache_bypass $http_upgrade;

        # Handle backend connection errors gracefully
        proxy_intercept_errors on;
        error_page 502 503 504 = @backend_unavailable;
    }

    # Fallback for when backend is unavailable.
    # Bug fix: the original used `add_header Content-Type ...` without
    # "always" — add_header only applies to 2xx/3xx responses, so the 503
    # body shipped without a JSON Content-Type. `default_type` is the
    # correct way to set the body type for a `return` directive.
    location @backend_unavailable {
        default_type application/json;
        return 503 '{"error": "Backend service unavailable. Configure VITE_BACKEND_URL in frontend or ensure backend container is running."}';
    }

    # Serve static files (SPA fallback to index.html)
    location / {
        try_files $uri $uri/ /index.html;
    }

    # Cache static assets
    location ~* \.(js|css|png|jpg|jpeg|gif|ico|svg|woff|woff2|ttf|eot)$ {
        expires 1y;
        add_header Cache-Control "public, immutable";
    }
}
diff --git a/frontend/ollabridge.css b/frontend/ollabridge.css
new file mode 100644
index 0000000000000000000000000000000000000000..26fc57504d5b22bf2e5384bff838314ab94115db
--- /dev/null
+++ b/frontend/ollabridge.css
@@ -0,0 +1,222 @@
+/* ============================================================================
+ OLLABRIDGE CLOUD - Provider Tabs & Pairing UI
+ ============================================================================ */
+
+/* Provider selection tabs (replaces dropdown) */
+.settings-provider-tabs {
+ display: flex;
+ gap: 4px;
+ flex-wrap: wrap;
+ margin-top: 4px;
+}
+
+.settings-provider-tab {
+ border: 1px solid #272832;
+ outline: none;
+ background: #0a0b0f;
+ color: #9a9bb0;
+ border-radius: 8px;
+ padding: 8px 14px;
+ font-size: 13px;
+ font-weight: 500;
+ cursor: pointer;
+ transition: all 0.2s ease;
+ font-family: inherit;
+}
+
+.settings-provider-tab:hover {
+ background: #1a1b26;
+ color: #c3c5dd;
+ border-color: #3a3b4d;
+}
+
+.settings-provider-tab-active {
+ background: rgba(255, 122, 60, 0.12);
+ color: #ff7a3c;
+ border-color: #ff7a3c;
+ font-weight: 600;
+}
+
+.settings-provider-tab-active:hover {
+ background: rgba(255, 122, 60, 0.18);
+ color: #ff8b52;
+}
+
+/* Auth mode tabs (Device Pairing / API Key / Local Trust) */
+.ob-auth-tabs {
+ display: flex;
+ gap: 4px;
+ margin-top: 4px;
+ margin-bottom: 8px;
+}
+
+.ob-auth-tab {
+ display: flex;
+ align-items: center;
+ gap: 6px;
+ border: 1px solid #272832;
+ outline: none;
+ background: #0a0b0f;
+ color: #9a9bb0;
+ border-radius: 8px;
+ padding: 7px 12px;
+ font-size: 12px;
+ font-weight: 500;
+ cursor: pointer;
+ transition: all 0.2s ease;
+ font-family: inherit;
+ white-space: nowrap;
+}
+
+.ob-auth-tab:hover {
+ background: #1a1b26;
+ color: #c3c5dd;
+ border-color: #3a3b4d;
+}
+
+.ob-auth-tab-active {
+ background: rgba(59, 130, 246, 0.1);
+ color: #60a5fa;
+ border-color: #3B82F6;
+ font-weight: 600;
+}
+
+.ob-auth-tab-active:hover {
+ background: rgba(59, 130, 246, 0.15);
+}
+
+.ob-auth-tab-icon {
+ font-size: 14px;
+ line-height: 1;
+}
+
+/* Auth panel (content below tabs) */
+.ob-auth-panel {
+ padding: 12px;
+ background: #0a0b0f;
+ border: 1px solid #1e1f30;
+ border-radius: 8px;
+ margin-bottom: 4px;
+}
+
+.ob-auth-desc {
+ font-size: 12px;
+ color: #9a9bb0;
+ line-height: 1.5;
+ margin-bottom: 10px;
+}
+
+/* Pairing row */
+.ob-pair-row {
+ display: flex;
+ gap: 8px;
+ align-items: center;
+}
+
+.ob-pair-input {
+ flex: 1;
+ font-family: "SF Mono", Monaco, "Cascadia Code", monospace;
+ font-size: 16px !important;
+ letter-spacing: 2px;
+ text-align: center;
+ text-transform: uppercase;
+}
+
+.ob-pair-btn {
+ display: flex;
+ align-items: center;
+ gap: 6px;
+ border: none;
+ outline: none;
+ background: #3B82F6;
+ color: #fff;
+ border-radius: 8px;
+ padding: 9px 16px;
+ font-size: 13px;
+ font-weight: 600;
+ cursor: pointer;
+ transition: all 0.2s ease;
+ white-space: nowrap;
+ font-family: inherit;
+}
+
+.ob-pair-btn:hover:not(:disabled) {
+ background: #4d93f7;
+ transform: translateY(-1px);
+ box-shadow: 0 4px 12px rgba(59, 130, 246, 0.3);
+}
+
+.ob-pair-btn:disabled {
+ opacity: 0.5;
+ cursor: not-allowed;
+}
+
+/* Pair spinner */
+.ob-pair-spinner {
+ display: inline-block;
+ width: 14px;
+ height: 14px;
+ border: 2px solid rgba(255, 255, 255, 0.3);
+ border-top-color: #fff;
+ border-radius: 50%;
+ animation: spin 0.6s linear infinite;
+}
+
+/* Pair result feedback */
+.ob-pair-result {
+ margin-top: 8px;
+ padding: 8px 12px;
+ border-radius: 6px;
+ font-size: 12px;
+ font-weight: 500;
+ animation: fadeIn 0.3s ease;
+}
+
+.ob-pair-result-ok {
+ background: rgba(76, 175, 136, 0.12);
+ border: 1px solid rgba(76, 175, 136, 0.3);
+ color: #7cffb3;
+}
+
+.ob-pair-result-err {
+ background: rgba(255, 82, 82, 0.1);
+ border: 1px solid rgba(255, 82, 82, 0.3);
+ color: #ff8a8a;
+}
+
+/* Model row (input + fetch button) */
+.ob-model-row {
+ display: flex;
+ gap: 8px;
+ align-items: center;
+}
+
+.ob-fetch-btn {
+ display: flex;
+ align-items: center;
+ gap: 5px;
+ border: 1px solid #272832;
+ outline: none;
+ background: #1a1b26;
+ color: #c3c5dd;
+ border-radius: 8px;
+ padding: 8px 12px;
+ font-size: 12px;
+ font-weight: 500;
+ cursor: pointer;
+ transition: all 0.2s ease;
+ white-space: nowrap;
+ font-family: inherit;
+}
+
+.ob-fetch-btn:hover:not(:disabled) {
+ background: #222335;
+ border-color: #3a3b4d;
+ color: #f5f5f7;
+ transform: translateY(-1px);
+}
+
+.ob-fetch-btn:disabled {
+ opacity: 0.5;
+ cursor: not-allowed;
+}
diff --git a/frontend/package-lock.json b/frontend/package-lock.json
new file mode 100644
index 0000000000000000000000000000000000000000..a59f1ef312e2433e2b84476beb3fec107102aaf7
--- /dev/null
+++ b/frontend/package-lock.json
@@ -0,0 +1,3346 @@
+{
+ "name": "gitpilot-frontend",
+ "version": "0.2.6",
+ "lockfileVersion": 3,
+ "requires": true,
+ "packages": {
+ "": {
+ "name": "gitpilot-frontend",
+ "version": "0.2.6",
+ "dependencies": {
+ "react": "^18.2.0",
+ "react-dom": "^18.2.0",
+ "react-markdown": "^10.1.0",
+ "reactflow": "^11.11.4"
+ },
+ "devDependencies": {
+ "@vitejs/plugin-react": "^4.0.0",
+ "vite": "^5.0.0"
+ }
+ },
+ "node_modules/@babel/code-frame": {
+ "version": "7.27.1",
+ "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.27.1.tgz",
+ "integrity": "sha512-cjQ7ZlQ0Mv3b47hABuTevyTuYN4i+loJKGeV9flcCgIK37cCXRh+L1bd3iBHlynerhQ7BhCkn2BPbQUL+rGqFg==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/helper-validator-identifier": "^7.27.1",
+ "js-tokens": "^4.0.0",
+ "picocolors": "^1.1.1"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/compat-data": {
+ "version": "7.28.5",
+ "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.28.5.tgz",
+ "integrity": "sha512-6uFXyCayocRbqhZOB+6XcuZbkMNimwfVGFji8CTZnCzOHVGvDqzvitu1re2AU5LROliz7eQPhB8CpAMvnx9EjA==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/core": {
+ "version": "7.28.5",
+ "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.28.5.tgz",
+ "integrity": "sha512-e7jT4DxYvIDLk1ZHmU/m/mB19rex9sv0c2ftBtjSBv+kVM/902eh0fINUzD7UwLLNR+jU585GxUJ8/EBfAM5fw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/code-frame": "^7.27.1",
+ "@babel/generator": "^7.28.5",
+ "@babel/helper-compilation-targets": "^7.27.2",
+ "@babel/helper-module-transforms": "^7.28.3",
+ "@babel/helpers": "^7.28.4",
+ "@babel/parser": "^7.28.5",
+ "@babel/template": "^7.27.2",
+ "@babel/traverse": "^7.28.5",
+ "@babel/types": "^7.28.5",
+ "@jridgewell/remapping": "^2.3.5",
+ "convert-source-map": "^2.0.0",
+ "debug": "^4.1.0",
+ "gensync": "^1.0.0-beta.2",
+ "json5": "^2.2.3",
+ "semver": "^6.3.1"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/babel"
+ }
+ },
+ "node_modules/@babel/generator": {
+ "version": "7.28.5",
+ "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.28.5.tgz",
+ "integrity": "sha512-3EwLFhZ38J4VyIP6WNtt2kUdW9dokXA9Cr4IVIFHuCpZ3H8/YFOl5JjZHisrn1fATPBmKKqXzDFvh9fUwHz6CQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/parser": "^7.28.5",
+ "@babel/types": "^7.28.5",
+ "@jridgewell/gen-mapping": "^0.3.12",
+ "@jridgewell/trace-mapping": "^0.3.28",
+ "jsesc": "^3.0.2"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-compilation-targets": {
+ "version": "7.27.2",
+ "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.27.2.tgz",
+ "integrity": "sha512-2+1thGUUWWjLTYTHZWK1n8Yga0ijBz1XAhUXcKy81rd5g6yh7hGqMp45v7cadSbEHc9G3OTv45SyneRN3ps4DQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/compat-data": "^7.27.2",
+ "@babel/helper-validator-option": "^7.27.1",
+ "browserslist": "^4.24.0",
+ "lru-cache": "^5.1.1",
+ "semver": "^6.3.1"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-globals": {
+ "version": "7.28.0",
+ "resolved": "https://registry.npmjs.org/@babel/helper-globals/-/helper-globals-7.28.0.tgz",
+ "integrity": "sha512-+W6cISkXFa1jXsDEdYA8HeevQT/FULhxzR99pxphltZcVaugps53THCeiWA8SguxxpSp3gKPiuYfSWopkLQ4hw==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-module-imports": {
+ "version": "7.27.1",
+ "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.27.1.tgz",
+ "integrity": "sha512-0gSFWUPNXNopqtIPQvlD5WgXYI5GY2kP2cCvoT8kczjbfcfuIljTbcWrulD1CIPIX2gt1wghbDy08yE1p+/r3w==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/traverse": "^7.27.1",
+ "@babel/types": "^7.27.1"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-module-transforms": {
+ "version": "7.28.3",
+ "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.28.3.tgz",
+ "integrity": "sha512-gytXUbs8k2sXS9PnQptz5o0QnpLL51SwASIORY6XaBKF88nsOT0Zw9szLqlSGQDP/4TljBAD5y98p2U1fqkdsw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/helper-module-imports": "^7.27.1",
+ "@babel/helper-validator-identifier": "^7.27.1",
+ "@babel/traverse": "^7.28.3"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0"
+ }
+ },
+ "node_modules/@babel/helper-plugin-utils": {
+ "version": "7.27.1",
+ "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.27.1.tgz",
+ "integrity": "sha512-1gn1Up5YXka3YYAHGKpbideQ5Yjf1tDa9qYcgysz+cNCXukyLl6DjPXhD3VRwSb8c0J9tA4b2+rHEZtc6R0tlw==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-string-parser": {
+ "version": "7.27.1",
+ "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.27.1.tgz",
+ "integrity": "sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-validator-identifier": {
+ "version": "7.28.5",
+ "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.28.5.tgz",
+ "integrity": "sha512-qSs4ifwzKJSV39ucNjsvc6WVHs6b7S03sOh2OcHF9UHfVPqWWALUsNUVzhSBiItjRZoLHx7nIarVjqKVusUZ1Q==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-validator-option": {
+ "version": "7.27.1",
+ "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.27.1.tgz",
+ "integrity": "sha512-YvjJow9FxbhFFKDSuFnVCe2WxXk1zWc22fFePVNEaWJEu8IrZVlda6N0uHwzZrUM1il7NC9Mlp4MaJYbYd9JSg==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helpers": {
+ "version": "7.28.4",
+ "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.28.4.tgz",
+ "integrity": "sha512-HFN59MmQXGHVyYadKLVumYsA9dBFun/ldYxipEjzA4196jpLZd8UjEEBLkbEkvfYreDqJhZxYAWFPtrfhNpj4w==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/template": "^7.27.2",
+ "@babel/types": "^7.28.4"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/parser": {
+ "version": "7.28.5",
+ "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.28.5.tgz",
+ "integrity": "sha512-KKBU1VGYR7ORr3At5HAtUQ+TV3SzRCXmA/8OdDZiLDBIZxVyzXuztPjfLd3BV1PRAQGCMWWSHYhL0F8d5uHBDQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/types": "^7.28.5"
+ },
+ "bin": {
+ "parser": "bin/babel-parser.js"
+ },
+ "engines": {
+ "node": ">=6.0.0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-react-jsx-self": {
+ "version": "7.27.1",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-self/-/plugin-transform-react-jsx-self-7.27.1.tgz",
+ "integrity": "sha512-6UzkCs+ejGdZ5mFFC/OCUrv028ab2fp1znZmCZjAOBKiBK2jXD1O+BPSfX8X2qjJ75fZBMSnQn3Rq2mrBJK2mw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.27.1"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-react-jsx-source": {
+ "version": "7.27.1",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-source/-/plugin-transform-react-jsx-source-7.27.1.tgz",
+ "integrity": "sha512-zbwoTsBruTeKB9hSq73ha66iFeJHuaFkUbwvqElnygoNbj/jHRsSeokowZFN3CZ64IvEqcmmkVe89OPXc7ldAw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.27.1"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/template": {
+ "version": "7.27.2",
+ "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.27.2.tgz",
+ "integrity": "sha512-LPDZ85aEJyYSd18/DkjNh4/y1ntkE5KwUHWTiqgRxruuZL2F1yuHligVHLvcHY2vMHXttKFpJn6LwfI7cw7ODw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/code-frame": "^7.27.1",
+ "@babel/parser": "^7.27.2",
+ "@babel/types": "^7.27.1"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/traverse": {
+ "version": "7.28.5",
+ "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.28.5.tgz",
+ "integrity": "sha512-TCCj4t55U90khlYkVV/0TfkJkAkUg3jZFA3Neb7unZT8CPok7iiRfaX0F+WnqWqt7OxhOn0uBKXCw4lbL8W0aQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/code-frame": "^7.27.1",
+ "@babel/generator": "^7.28.5",
+ "@babel/helper-globals": "^7.28.0",
+ "@babel/parser": "^7.28.5",
+ "@babel/template": "^7.27.2",
+ "@babel/types": "^7.28.5",
+ "debug": "^4.3.1"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/types": {
+ "version": "7.28.5",
+ "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.28.5.tgz",
+ "integrity": "sha512-qQ5m48eI/MFLQ5PxQj4PFaprjyCTLI37ElWMmNs0K8Lk3dVeOdNpB3ks8jc7yM5CDmVC73eMVk/trk3fgmrUpA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/helper-string-parser": "^7.27.1",
+ "@babel/helper-validator-identifier": "^7.28.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@esbuild/aix-ppc64": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.21.5.tgz",
+ "integrity": "sha512-1SDgH6ZSPTlggy1yI6+Dbkiz8xzpHJEVAlF/AM1tHPLsf5STom9rwtjE4hKAF20FfXXNTFqEYXyJNWh1GiZedQ==",
+ "cpu": [
+ "ppc64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "aix"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/android-arm": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.21.5.tgz",
+ "integrity": "sha512-vCPvzSjpPHEi1siZdlvAlsPxXl7WbOVUBBAowWug4rJHb68Ox8KualB+1ocNvT5fjv6wpkX6o/iEpbDrf68zcg==",
+ "cpu": [
+ "arm"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "android"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/android-arm64": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.21.5.tgz",
+ "integrity": "sha512-c0uX9VAUBQ7dTDCjq+wdyGLowMdtR/GoC2U5IYk/7D1H1JYC0qseD7+11iMP2mRLN9RcCMRcjC4YMclCzGwS/A==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "android"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/android-x64": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.21.5.tgz",
+ "integrity": "sha512-D7aPRUUNHRBwHxzxRvp856rjUHRFW1SdQATKXH2hqA0kAZb1hKmi02OpYRacl0TxIGz/ZmXWlbZgjwWYaCakTA==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "android"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/darwin-arm64": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.21.5.tgz",
+ "integrity": "sha512-DwqXqZyuk5AiWWf3UfLiRDJ5EDd49zg6O9wclZ7kUMv2WRFr4HKjXp/5t8JZ11QbQfUS6/cRCKGwYhtNAY88kQ==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/darwin-x64": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.21.5.tgz",
+ "integrity": "sha512-se/JjF8NlmKVG4kNIuyWMV/22ZaerB+qaSi5MdrXtd6R08kvs2qCN4C09miupktDitvh8jRFflwGFBQcxZRjbw==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/freebsd-arm64": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.21.5.tgz",
+ "integrity": "sha512-5JcRxxRDUJLX8JXp/wcBCy3pENnCgBR9bN6JsY4OmhfUtIHe3ZW0mawA7+RDAcMLrMIZaf03NlQiX9DGyB8h4g==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "freebsd"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/freebsd-x64": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.21.5.tgz",
+ "integrity": "sha512-J95kNBj1zkbMXtHVH29bBriQygMXqoVQOQYA+ISs0/2l3T9/kj42ow2mpqerRBxDJnmkUDCaQT/dfNXWX/ZZCQ==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "freebsd"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/linux-arm": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.21.5.tgz",
+ "integrity": "sha512-bPb5AHZtbeNGjCKVZ9UGqGwo8EUu4cLq68E95A53KlxAPRmUyYv2D6F0uUI65XisGOL1hBP5mTronbgo+0bFcA==",
+ "cpu": [
+ "arm"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/linux-arm64": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.21.5.tgz",
+ "integrity": "sha512-ibKvmyYzKsBeX8d8I7MH/TMfWDXBF3db4qM6sy+7re0YXya+K1cem3on9XgdT2EQGMu4hQyZhan7TeQ8XkGp4Q==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/linux-ia32": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.21.5.tgz",
+ "integrity": "sha512-YvjXDqLRqPDl2dvRODYmmhz4rPeVKYvppfGYKSNGdyZkA01046pLWyRKKI3ax8fbJoK5QbxblURkwK/MWY18Tg==",
+ "cpu": [
+ "ia32"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/linux-loong64": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.21.5.tgz",
+ "integrity": "sha512-uHf1BmMG8qEvzdrzAqg2SIG/02+4/DHB6a9Kbya0XDvwDEKCoC8ZRWI5JJvNdUjtciBGFQ5PuBlpEOXQj+JQSg==",
+ "cpu": [
+ "loong64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/linux-mips64el": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.21.5.tgz",
+ "integrity": "sha512-IajOmO+KJK23bj52dFSNCMsz1QP1DqM6cwLUv3W1QwyxkyIWecfafnI555fvSGqEKwjMXVLokcV5ygHW5b3Jbg==",
+ "cpu": [
+ "mips64el"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/linux-ppc64": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.21.5.tgz",
+ "integrity": "sha512-1hHV/Z4OEfMwpLO8rp7CvlhBDnjsC3CttJXIhBi+5Aj5r+MBvy4egg7wCbe//hSsT+RvDAG7s81tAvpL2XAE4w==",
+ "cpu": [
+ "ppc64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/linux-riscv64": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.21.5.tgz",
+ "integrity": "sha512-2HdXDMd9GMgTGrPWnJzP2ALSokE/0O5HhTUvWIbD3YdjME8JwvSCnNGBnTThKGEB91OZhzrJ4qIIxk/SBmyDDA==",
+ "cpu": [
+ "riscv64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/linux-s390x": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.21.5.tgz",
+ "integrity": "sha512-zus5sxzqBJD3eXxwvjN1yQkRepANgxE9lgOW2qLnmr8ikMTphkjgXu1HR01K4FJg8h1kEEDAqDcZQtbrRnB41A==",
+ "cpu": [
+ "s390x"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/linux-x64": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.21.5.tgz",
+ "integrity": "sha512-1rYdTpyv03iycF1+BhzrzQJCdOuAOtaqHTWJZCWvijKD2N5Xu0TtVC8/+1faWqcP9iBCWOmjmhoH94dH82BxPQ==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/netbsd-x64": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.21.5.tgz",
+ "integrity": "sha512-Woi2MXzXjMULccIwMnLciyZH4nCIMpWQAs049KEeMvOcNADVxo0UBIQPfSmxB3CWKedngg7sWZdLvLczpe0tLg==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "netbsd"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/openbsd-x64": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.21.5.tgz",
+ "integrity": "sha512-HLNNw99xsvx12lFBUwoT8EVCsSvRNDVxNpjZ7bPn947b8gJPzeHWyNVhFsaerc0n3TsbOINvRP2byTZ5LKezow==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "openbsd"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/sunos-x64": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.21.5.tgz",
+ "integrity": "sha512-6+gjmFpfy0BHU5Tpptkuh8+uw3mnrvgs+dSPQXQOv3ekbordwnzTVEb4qnIvQcYXq6gzkyTnoZ9dZG+D4garKg==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "sunos"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/win32-arm64": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.21.5.tgz",
+ "integrity": "sha512-Z0gOTd75VvXqyq7nsl93zwahcTROgqvuAcYDUr+vOv8uHhNSKROyU961kgtCD1e95IqPKSQKH7tBTslnS3tA8A==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "win32"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/win32-ia32": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.21.5.tgz",
+ "integrity": "sha512-SWXFF1CL2RVNMaVs+BBClwtfZSvDgtL//G/smwAc5oVK/UPu2Gu9tIaRgFmYFFKrmg3SyAjSrElf0TiJ1v8fYA==",
+ "cpu": [
+ "ia32"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "win32"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/win32-x64": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.21.5.tgz",
+ "integrity": "sha512-tQd/1efJuzPC6rCFwEvLtci/xNFcTZknmXs98FYDfGE4wP9ClFV98nyKrzJKVPMhdDnjzLhdUyMX4PsQAPjwIw==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "win32"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@jridgewell/gen-mapping": {
+ "version": "0.3.13",
+ "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.13.tgz",
+ "integrity": "sha512-2kkt/7niJ6MgEPxF0bYdQ6etZaA+fQvDcLKckhy1yIQOzaoKjBBjSj63/aLVjYE3qhRt5dvM+uUyfCg6UKCBbA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jridgewell/sourcemap-codec": "^1.5.0",
+ "@jridgewell/trace-mapping": "^0.3.24"
+ }
+ },
+ "node_modules/@jridgewell/remapping": {
+ "version": "2.3.5",
+ "resolved": "https://registry.npmjs.org/@jridgewell/remapping/-/remapping-2.3.5.tgz",
+ "integrity": "sha512-LI9u/+laYG4Ds1TDKSJW2YPrIlcVYOwi2fUC6xB43lueCjgxV4lffOCZCtYFiH6TNOX+tQKXx97T4IKHbhyHEQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jridgewell/gen-mapping": "^0.3.5",
+ "@jridgewell/trace-mapping": "^0.3.24"
+ }
+ },
+ "node_modules/@jridgewell/resolve-uri": {
+ "version": "3.1.2",
+ "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz",
+ "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=6.0.0"
+ }
+ },
+ "node_modules/@jridgewell/sourcemap-codec": {
+ "version": "1.5.5",
+ "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz",
+ "integrity": "sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/@jridgewell/trace-mapping": {
+ "version": "0.3.31",
+ "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.31.tgz",
+ "integrity": "sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jridgewell/resolve-uri": "^3.1.0",
+ "@jridgewell/sourcemap-codec": "^1.4.14"
+ }
+ },
+ "node_modules/@reactflow/background": {
+ "version": "11.3.14",
+ "resolved": "https://registry.npmjs.org/@reactflow/background/-/background-11.3.14.tgz",
+ "integrity": "sha512-Gewd7blEVT5Lh6jqrvOgd4G6Qk17eGKQfsDXgyRSqM+CTwDqRldG2LsWN4sNeno6sbqVIC2fZ+rAUBFA9ZEUDA==",
+ "license": "MIT",
+ "dependencies": {
+ "@reactflow/core": "11.11.4",
+ "classcat": "^5.0.3",
+ "zustand": "^4.4.1"
+ },
+ "peerDependencies": {
+ "react": ">=17",
+ "react-dom": ">=17"
+ }
+ },
+ "node_modules/@reactflow/controls": {
+ "version": "11.2.14",
+ "resolved": "https://registry.npmjs.org/@reactflow/controls/-/controls-11.2.14.tgz",
+ "integrity": "sha512-MiJp5VldFD7FrqaBNIrQ85dxChrG6ivuZ+dcFhPQUwOK3HfYgX2RHdBua+gx+40p5Vw5It3dVNp/my4Z3jF0dw==",
+ "license": "MIT",
+ "dependencies": {
+ "@reactflow/core": "11.11.4",
+ "classcat": "^5.0.3",
+ "zustand": "^4.4.1"
+ },
+ "peerDependencies": {
+ "react": ">=17",
+ "react-dom": ">=17"
+ }
+ },
+ "node_modules/@reactflow/core": {
+ "version": "11.11.4",
+ "resolved": "https://registry.npmjs.org/@reactflow/core/-/core-11.11.4.tgz",
+ "integrity": "sha512-H4vODklsjAq3AMq6Np4LE12i1I4Ta9PrDHuBR9GmL8uzTt2l2jh4CiQbEMpvMDcp7xi4be0hgXj+Ysodde/i7Q==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/d3": "^7.4.0",
+ "@types/d3-drag": "^3.0.1",
+ "@types/d3-selection": "^3.0.3",
+ "@types/d3-zoom": "^3.0.1",
+ "classcat": "^5.0.3",
+ "d3-drag": "^3.0.0",
+ "d3-selection": "^3.0.0",
+ "d3-zoom": "^3.0.0",
+ "zustand": "^4.4.1"
+ },
+ "peerDependencies": {
+ "react": ">=17",
+ "react-dom": ">=17"
+ }
+ },
+ "node_modules/@reactflow/minimap": {
+ "version": "11.7.14",
+ "resolved": "https://registry.npmjs.org/@reactflow/minimap/-/minimap-11.7.14.tgz",
+ "integrity": "sha512-mpwLKKrEAofgFJdkhwR5UQ1JYWlcAAL/ZU/bctBkuNTT1yqV+y0buoNVImsRehVYhJwffSWeSHaBR5/GJjlCSQ==",
+ "license": "MIT",
+ "dependencies": {
+ "@reactflow/core": "11.11.4",
+ "@types/d3-selection": "^3.0.3",
+ "@types/d3-zoom": "^3.0.1",
+ "classcat": "^5.0.3",
+ "d3-selection": "^3.0.0",
+ "d3-zoom": "^3.0.0",
+ "zustand": "^4.4.1"
+ },
+ "peerDependencies": {
+ "react": ">=17",
+ "react-dom": ">=17"
+ }
+ },
+ "node_modules/@reactflow/node-resizer": {
+ "version": "2.2.14",
+ "resolved": "https://registry.npmjs.org/@reactflow/node-resizer/-/node-resizer-2.2.14.tgz",
+ "integrity": "sha512-fwqnks83jUlYr6OHcdFEedumWKChTHRGw/kbCxj0oqBd+ekfs+SIp4ddyNU0pdx96JIm5iNFS0oNrmEiJbbSaA==",
+ "license": "MIT",
+ "dependencies": {
+ "@reactflow/core": "11.11.4",
+ "classcat": "^5.0.4",
+ "d3-drag": "^3.0.0",
+ "d3-selection": "^3.0.0",
+ "zustand": "^4.4.1"
+ },
+ "peerDependencies": {
+ "react": ">=17",
+ "react-dom": ">=17"
+ }
+ },
+ "node_modules/@reactflow/node-toolbar": {
+ "version": "1.3.14",
+ "resolved": "https://registry.npmjs.org/@reactflow/node-toolbar/-/node-toolbar-1.3.14.tgz",
+ "integrity": "sha512-rbynXQnH/xFNu4P9H+hVqlEUafDCkEoCy0Dg9mG22Sg+rY/0ck6KkrAQrYrTgXusd+cEJOMK0uOOFCK2/5rSGQ==",
+ "license": "MIT",
+ "dependencies": {
+ "@reactflow/core": "11.11.4",
+ "classcat": "^5.0.3",
+ "zustand": "^4.4.1"
+ },
+ "peerDependencies": {
+ "react": ">=17",
+ "react-dom": ">=17"
+ }
+ },
+ "node_modules/@rolldown/pluginutils": {
+ "version": "1.0.0-beta.27",
+ "resolved": "https://registry.npmjs.org/@rolldown/pluginutils/-/pluginutils-1.0.0-beta.27.tgz",
+ "integrity": "sha512-+d0F4MKMCbeVUJwG96uQ4SgAznZNSq93I3V+9NHA4OpvqG8mRCpGdKmK8l/dl02h2CCDHwW2FqilnTyDcAnqjA==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/@rollup/rollup-android-arm-eabi": {
+ "version": "4.53.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.53.2.tgz",
+ "integrity": "sha512-yDPzwsgiFO26RJA4nZo8I+xqzh7sJTZIWQOxn+/XOdPE31lAvLIYCKqjV+lNH/vxE2L2iH3plKxDCRK6i+CwhA==",
+ "cpu": [
+ "arm"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "android"
+ ]
+ },
+ "node_modules/@rollup/rollup-android-arm64": {
+ "version": "4.53.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.53.2.tgz",
+ "integrity": "sha512-k8FontTxIE7b0/OGKeSN5B6j25EuppBcWM33Z19JoVT7UTXFSo3D9CdU39wGTeb29NO3XxpMNauh09B+Ibw+9g==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "android"
+ ]
+ },
+ "node_modules/@rollup/rollup-darwin-arm64": {
+ "version": "4.53.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.53.2.tgz",
+ "integrity": "sha512-A6s4gJpomNBtJ2yioj8bflM2oogDwzUiMl2yNJ2v9E7++sHrSrsQ29fOfn5DM/iCzpWcebNYEdXpaK4tr2RhfQ==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "darwin"
+ ]
+ },
+ "node_modules/@rollup/rollup-darwin-x64": {
+ "version": "4.53.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.53.2.tgz",
+ "integrity": "sha512-e6XqVmXlHrBlG56obu9gDRPW3O3hLxpwHpLsBJvuI8qqnsrtSZ9ERoWUXtPOkY8c78WghyPHZdmPhHLWNdAGEw==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "darwin"
+ ]
+ },
+ "node_modules/@rollup/rollup-freebsd-arm64": {
+ "version": "4.53.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.53.2.tgz",
+ "integrity": "sha512-v0E9lJW8VsrwPux5Qe5CwmH/CF/2mQs6xU1MF3nmUxmZUCHazCjLgYvToOk+YuuUqLQBio1qkkREhxhc656ViA==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "freebsd"
+ ]
+ },
+ "node_modules/@rollup/rollup-freebsd-x64": {
+ "version": "4.53.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.53.2.tgz",
+ "integrity": "sha512-ClAmAPx3ZCHtp6ysl4XEhWU69GUB1D+s7G9YjHGhIGCSrsg00nEGRRZHmINYxkdoJehde8VIsDC5t9C0gb6yqA==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "freebsd"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-arm-gnueabihf": {
+ "version": "4.53.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.53.2.tgz",
+ "integrity": "sha512-EPlb95nUsz6Dd9Qy13fI5kUPXNSljaG9FiJ4YUGU1O/Q77i5DYFW5KR8g1OzTcdZUqQQ1KdDqsTohdFVwCwjqg==",
+ "cpu": [
+ "arm"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-arm-musleabihf": {
+ "version": "4.53.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.53.2.tgz",
+ "integrity": "sha512-BOmnVW+khAUX+YZvNfa0tGTEMVVEerOxN0pDk2E6N6DsEIa2Ctj48FOMfNDdrwinocKaC7YXUZ1pHlKpnkja/Q==",
+ "cpu": [
+ "arm"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-arm64-gnu": {
+ "version": "4.53.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.53.2.tgz",
+ "integrity": "sha512-Xt2byDZ+6OVNuREgBXr4+CZDJtrVso5woFtpKdGPhpTPHcNG7D8YXeQzpNbFRxzTVqJf7kvPMCub/pcGUWgBjA==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-arm64-musl": {
+ "version": "4.53.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.53.2.tgz",
+ "integrity": "sha512-+LdZSldy/I9N8+klim/Y1HsKbJ3BbInHav5qE9Iy77dtHC/pibw1SR/fXlWyAk0ThnpRKoODwnAuSjqxFRDHUQ==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-loong64-gnu": {
+ "version": "4.53.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-gnu/-/rollup-linux-loong64-gnu-4.53.2.tgz",
+ "integrity": "sha512-8ms8sjmyc1jWJS6WdNSA23rEfdjWB30LH8Wqj0Cqvv7qSHnvw6kgMMXRdop6hkmGPlyYBdRPkjJnj3KCUHV/uQ==",
+ "cpu": [
+ "loong64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-ppc64-gnu": {
+ "version": "4.53.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-gnu/-/rollup-linux-ppc64-gnu-4.53.2.tgz",
+ "integrity": "sha512-3HRQLUQbpBDMmzoxPJYd3W6vrVHOo2cVW8RUo87Xz0JPJcBLBr5kZ1pGcQAhdZgX9VV7NbGNipah1omKKe23/g==",
+ "cpu": [
+ "ppc64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-riscv64-gnu": {
+ "version": "4.53.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.53.2.tgz",
+ "integrity": "sha512-fMjKi+ojnmIvhk34gZP94vjogXNNUKMEYs+EDaB/5TG/wUkoeua7p7VCHnE6T2Tx+iaghAqQX8teQzcvrYpaQA==",
+ "cpu": [
+ "riscv64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-riscv64-musl": {
+ "version": "4.53.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.53.2.tgz",
+ "integrity": "sha512-XuGFGU+VwUUV5kLvoAdi0Wz5Xbh2SrjIxCtZj6Wq8MDp4bflb/+ThZsVxokM7n0pcbkEr2h5/pzqzDYI7cCgLQ==",
+ "cpu": [
+ "riscv64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-s390x-gnu": {
+ "version": "4.53.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.53.2.tgz",
+ "integrity": "sha512-w6yjZF0P+NGzWR3AXWX9zc0DNEGdtvykB03uhonSHMRa+oWA6novflo2WaJr6JZakG2ucsyb+rvhrKac6NIy+w==",
+ "cpu": [
+ "s390x"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-x64-gnu": {
+ "version": "4.53.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.53.2.tgz",
+ "integrity": "sha512-yo8d6tdfdeBArzC7T/PnHd7OypfI9cbuZzPnzLJIyKYFhAQ8SvlkKtKBMbXDxe1h03Rcr7u++nFS7tqXz87Gtw==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-x64-musl": {
+ "version": "4.53.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.53.2.tgz",
+ "integrity": "sha512-ah59c1YkCxKExPP8O9PwOvs+XRLKwh/mV+3YdKqQ5AMQ0r4M4ZDuOrpWkUaqO7fzAHdINzV9tEVu8vNw48z0lA==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-openharmony-arm64": {
+ "version": "4.53.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-openharmony-arm64/-/rollup-openharmony-arm64-4.53.2.tgz",
+ "integrity": "sha512-4VEd19Wmhr+Zy7hbUsFZ6YXEiP48hE//KPLCSVNY5RMGX2/7HZ+QkN55a3atM1C/BZCGIgqN+xrVgtdak2S9+A==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "openharmony"
+ ]
+ },
+ "node_modules/@rollup/rollup-win32-arm64-msvc": {
+ "version": "4.53.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.53.2.tgz",
+ "integrity": "sha512-IlbHFYc/pQCgew/d5fslcy1KEaYVCJ44G8pajugd8VoOEI8ODhtb/j8XMhLpwHCMB3yk2J07ctup10gpw2nyMA==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "win32"
+ ]
+ },
+ "node_modules/@rollup/rollup-win32-ia32-msvc": {
+ "version": "4.53.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.53.2.tgz",
+ "integrity": "sha512-lNlPEGgdUfSzdCWU176ku/dQRnA7W+Gp8d+cWv73jYrb8uT7HTVVxq62DUYxjbaByuf1Yk0RIIAbDzp+CnOTFg==",
+ "cpu": [
+ "ia32"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "win32"
+ ]
+ },
+ "node_modules/@rollup/rollup-win32-x64-gnu": {
+ "version": "4.53.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-gnu/-/rollup-win32-x64-gnu-4.53.2.tgz",
+ "integrity": "sha512-S6YojNVrHybQis2lYov1sd+uj7K0Q05NxHcGktuMMdIQ2VixGwAfbJ23NnlvvVV1bdpR2m5MsNBViHJKcA4ADw==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "win32"
+ ]
+ },
+ "node_modules/@rollup/rollup-win32-x64-msvc": {
+ "version": "4.53.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.53.2.tgz",
+ "integrity": "sha512-k+/Rkcyx//P6fetPoLMb8pBeqJBNGx81uuf7iljX9++yNBVRDQgD04L+SVXmXmh5ZP4/WOp4mWF0kmi06PW2tA==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "win32"
+ ]
+ },
+ "node_modules/@types/babel__core": {
+ "version": "7.20.5",
+ "resolved": "https://registry.npmjs.org/@types/babel__core/-/babel__core-7.20.5.tgz",
+ "integrity": "sha512-qoQprZvz5wQFJwMDqeseRXWv3rqMvhgpbXFfVyWhbx9X47POIA6i/+dXefEmZKoAgOaTdaIgNSMqMIU61yRyzA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/parser": "^7.20.7",
+ "@babel/types": "^7.20.7",
+ "@types/babel__generator": "*",
+ "@types/babel__template": "*",
+ "@types/babel__traverse": "*"
+ }
+ },
+ "node_modules/@types/babel__generator": {
+ "version": "7.27.0",
+ "resolved": "https://registry.npmjs.org/@types/babel__generator/-/babel__generator-7.27.0.tgz",
+ "integrity": "sha512-ufFd2Xi92OAVPYsy+P4n7/U7e68fex0+Ee8gSG9KX7eo084CWiQ4sdxktvdl0bOPupXtVJPY19zk6EwWqUQ8lg==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/types": "^7.0.0"
+ }
+ },
+ "node_modules/@types/babel__template": {
+ "version": "7.4.4",
+ "resolved": "https://registry.npmjs.org/@types/babel__template/-/babel__template-7.4.4.tgz",
+ "integrity": "sha512-h/NUaSyG5EyxBIp8YRxo4RMe2/qQgvyowRwVMzhYhBCONbW8PUsg4lkFMrhgZhUe5z3L3MiLDuvyJ/CaPa2A8A==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/parser": "^7.1.0",
+ "@babel/types": "^7.0.0"
+ }
+ },
+ "node_modules/@types/babel__traverse": {
+ "version": "7.28.0",
+ "resolved": "https://registry.npmjs.org/@types/babel__traverse/-/babel__traverse-7.28.0.tgz",
+ "integrity": "sha512-8PvcXf70gTDZBgt9ptxJ8elBeBjcLOAcOtoO/mPJjtji1+CdGbHgm77om1GrsPxsiE+uXIpNSK64UYaIwQXd4Q==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/types": "^7.28.2"
+ }
+ },
+ "node_modules/@types/d3": {
+ "version": "7.4.3",
+ "resolved": "https://registry.npmjs.org/@types/d3/-/d3-7.4.3.tgz",
+ "integrity": "sha512-lZXZ9ckh5R8uiFVt8ogUNf+pIrK4EsWrx2Np75WvF/eTpJ0FMHNhjXk8CKEx/+gpHbNQyJWehbFaTvqmHWB3ww==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/d3-array": "*",
+ "@types/d3-axis": "*",
+ "@types/d3-brush": "*",
+ "@types/d3-chord": "*",
+ "@types/d3-color": "*",
+ "@types/d3-contour": "*",
+ "@types/d3-delaunay": "*",
+ "@types/d3-dispatch": "*",
+ "@types/d3-drag": "*",
+ "@types/d3-dsv": "*",
+ "@types/d3-ease": "*",
+ "@types/d3-fetch": "*",
+ "@types/d3-force": "*",
+ "@types/d3-format": "*",
+ "@types/d3-geo": "*",
+ "@types/d3-hierarchy": "*",
+ "@types/d3-interpolate": "*",
+ "@types/d3-path": "*",
+ "@types/d3-polygon": "*",
+ "@types/d3-quadtree": "*",
+ "@types/d3-random": "*",
+ "@types/d3-scale": "*",
+ "@types/d3-scale-chromatic": "*",
+ "@types/d3-selection": "*",
+ "@types/d3-shape": "*",
+ "@types/d3-time": "*",
+ "@types/d3-time-format": "*",
+ "@types/d3-timer": "*",
+ "@types/d3-transition": "*",
+ "@types/d3-zoom": "*"
+ }
+ },
+ "node_modules/@types/d3-array": {
+ "version": "3.2.2",
+ "resolved": "https://registry.npmjs.org/@types/d3-array/-/d3-array-3.2.2.tgz",
+ "integrity": "sha512-hOLWVbm7uRza0BYXpIIW5pxfrKe0W+D5lrFiAEYR+pb6w3N2SwSMaJbXdUfSEv+dT4MfHBLtn5js0LAWaO6otw==",
+ "license": "MIT"
+ },
+ "node_modules/@types/d3-axis": {
+ "version": "3.0.6",
+ "resolved": "https://registry.npmjs.org/@types/d3-axis/-/d3-axis-3.0.6.tgz",
+ "integrity": "sha512-pYeijfZuBd87T0hGn0FO1vQ/cgLk6E1ALJjfkC0oJ8cbwkZl3TpgS8bVBLZN+2jjGgg38epgxb2zmoGtSfvgMw==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/d3-selection": "*"
+ }
+ },
+ "node_modules/@types/d3-brush": {
+ "version": "3.0.6",
+ "resolved": "https://registry.npmjs.org/@types/d3-brush/-/d3-brush-3.0.6.tgz",
+ "integrity": "sha512-nH60IZNNxEcrh6L1ZSMNA28rj27ut/2ZmI3r96Zd+1jrZD++zD3LsMIjWlvg4AYrHn/Pqz4CF3veCxGjtbqt7A==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/d3-selection": "*"
+ }
+ },
+ "node_modules/@types/d3-chord": {
+ "version": "3.0.6",
+ "resolved": "https://registry.npmjs.org/@types/d3-chord/-/d3-chord-3.0.6.tgz",
+ "integrity": "sha512-LFYWWd8nwfwEmTZG9PfQxd17HbNPksHBiJHaKuY1XeqscXacsS2tyoo6OdRsjf+NQYeB6XrNL3a25E3gH69lcg==",
+ "license": "MIT"
+ },
+ "node_modules/@types/d3-color": {
+ "version": "3.1.3",
+ "resolved": "https://registry.npmjs.org/@types/d3-color/-/d3-color-3.1.3.tgz",
+ "integrity": "sha512-iO90scth9WAbmgv7ogoq57O9YpKmFBbmoEoCHDB2xMBY0+/KVrqAaCDyCE16dUspeOvIxFFRI+0sEtqDqy2b4A==",
+ "license": "MIT"
+ },
+ "node_modules/@types/d3-contour": {
+ "version": "3.0.6",
+ "resolved": "https://registry.npmjs.org/@types/d3-contour/-/d3-contour-3.0.6.tgz",
+ "integrity": "sha512-BjzLgXGnCWjUSYGfH1cpdo41/hgdWETu4YxpezoztawmqsvCeep+8QGfiY6YbDvfgHz/DkjeIkkZVJavB4a3rg==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/d3-array": "*",
+ "@types/geojson": "*"
+ }
+ },
+ "node_modules/@types/d3-delaunay": {
+ "version": "6.0.4",
+ "resolved": "https://registry.npmjs.org/@types/d3-delaunay/-/d3-delaunay-6.0.4.tgz",
+ "integrity": "sha512-ZMaSKu4THYCU6sV64Lhg6qjf1orxBthaC161plr5KuPHo3CNm8DTHiLw/5Eq2b6TsNP0W0iJrUOFscY6Q450Hw==",
+ "license": "MIT"
+ },
+ "node_modules/@types/d3-dispatch": {
+ "version": "3.0.7",
+ "resolved": "https://registry.npmjs.org/@types/d3-dispatch/-/d3-dispatch-3.0.7.tgz",
+ "integrity": "sha512-5o9OIAdKkhN1QItV2oqaE5KMIiXAvDWBDPrD85e58Qlz1c1kI/J0NcqbEG88CoTwJrYe7ntUCVfeUl2UJKbWgA==",
+ "license": "MIT"
+ },
+ "node_modules/@types/d3-drag": {
+ "version": "3.0.7",
+ "resolved": "https://registry.npmjs.org/@types/d3-drag/-/d3-drag-3.0.7.tgz",
+ "integrity": "sha512-HE3jVKlzU9AaMazNufooRJ5ZpWmLIoc90A37WU2JMmeq28w1FQqCZswHZ3xR+SuxYftzHq6WU6KJHvqxKzTxxQ==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/d3-selection": "*"
+ }
+ },
+ "node_modules/@types/d3-dsv": {
+ "version": "3.0.7",
+ "resolved": "https://registry.npmjs.org/@types/d3-dsv/-/d3-dsv-3.0.7.tgz",
+ "integrity": "sha512-n6QBF9/+XASqcKK6waudgL0pf/S5XHPPI8APyMLLUHd8NqouBGLsU8MgtO7NINGtPBtk9Kko/W4ea0oAspwh9g==",
+ "license": "MIT"
+ },
+ "node_modules/@types/d3-ease": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/@types/d3-ease/-/d3-ease-3.0.2.tgz",
+ "integrity": "sha512-NcV1JjO5oDzoK26oMzbILE6HW7uVXOHLQvHshBUW4UMdZGfiY6v5BeQwh9a9tCzv+CeefZQHJt5SRgK154RtiA==",
+ "license": "MIT"
+ },
+ "node_modules/@types/d3-fetch": {
+ "version": "3.0.7",
+ "resolved": "https://registry.npmjs.org/@types/d3-fetch/-/d3-fetch-3.0.7.tgz",
+ "integrity": "sha512-fTAfNmxSb9SOWNB9IoG5c8Hg6R+AzUHDRlsXsDZsNp6sxAEOP0tkP3gKkNSO/qmHPoBFTxNrjDprVHDQDvo5aA==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/d3-dsv": "*"
+ }
+ },
+ "node_modules/@types/d3-force": {
+ "version": "3.0.10",
+ "resolved": "https://registry.npmjs.org/@types/d3-force/-/d3-force-3.0.10.tgz",
+ "integrity": "sha512-ZYeSaCF3p73RdOKcjj+swRlZfnYpK1EbaDiYICEEp5Q6sUiqFaFQ9qgoshp5CzIyyb/yD09kD9o2zEltCexlgw==",
+ "license": "MIT"
+ },
+ "node_modules/@types/d3-format": {
+ "version": "3.0.4",
+ "resolved": "https://registry.npmjs.org/@types/d3-format/-/d3-format-3.0.4.tgz",
+ "integrity": "sha512-fALi2aI6shfg7vM5KiR1wNJnZ7r6UuggVqtDA+xiEdPZQwy/trcQaHnwShLuLdta2rTymCNpxYTiMZX/e09F4g==",
+ "license": "MIT"
+ },
+ "node_modules/@types/d3-geo": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/@types/d3-geo/-/d3-geo-3.1.0.tgz",
+ "integrity": "sha512-856sckF0oP/diXtS4jNsiQw/UuK5fQG8l/a9VVLeSouf1/PPbBE1i1W852zVwKwYCBkFJJB7nCFTbk6UMEXBOQ==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/geojson": "*"
+ }
+ },
+ "node_modules/@types/d3-hierarchy": {
+ "version": "3.1.7",
+ "resolved": "https://registry.npmjs.org/@types/d3-hierarchy/-/d3-hierarchy-3.1.7.tgz",
+ "integrity": "sha512-tJFtNoYBtRtkNysX1Xq4sxtjK8YgoWUNpIiUee0/jHGRwqvzYxkq0hGVbbOGSz+JgFxxRu4K8nb3YpG3CMARtg==",
+ "license": "MIT"
+ },
+ "node_modules/@types/d3-interpolate": {
+ "version": "3.0.4",
+ "resolved": "https://registry.npmjs.org/@types/d3-interpolate/-/d3-interpolate-3.0.4.tgz",
+ "integrity": "sha512-mgLPETlrpVV1YRJIglr4Ez47g7Yxjl1lj7YKsiMCb27VJH9W8NVM6Bb9d8kkpG/uAQS5AmbA48q2IAolKKo1MA==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/d3-color": "*"
+ }
+ },
+ "node_modules/@types/d3-path": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/@types/d3-path/-/d3-path-3.1.1.tgz",
+ "integrity": "sha512-VMZBYyQvbGmWyWVea0EHs/BwLgxc+MKi1zLDCONksozI4YJMcTt8ZEuIR4Sb1MMTE8MMW49v0IwI5+b7RmfWlg==",
+ "license": "MIT"
+ },
+ "node_modules/@types/d3-polygon": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/@types/d3-polygon/-/d3-polygon-3.0.2.tgz",
+ "integrity": "sha512-ZuWOtMaHCkN9xoeEMr1ubW2nGWsp4nIql+OPQRstu4ypeZ+zk3YKqQT0CXVe/PYqrKpZAi+J9mTs05TKwjXSRA==",
+ "license": "MIT"
+ },
+ "node_modules/@types/d3-quadtree": {
+ "version": "3.0.6",
+ "resolved": "https://registry.npmjs.org/@types/d3-quadtree/-/d3-quadtree-3.0.6.tgz",
+ "integrity": "sha512-oUzyO1/Zm6rsxKRHA1vH0NEDG58HrT5icx/azi9MF1TWdtttWl0UIUsjEQBBh+SIkrpd21ZjEv7ptxWys1ncsg==",
+ "license": "MIT"
+ },
+ "node_modules/@types/d3-random": {
+ "version": "3.0.3",
+ "resolved": "https://registry.npmjs.org/@types/d3-random/-/d3-random-3.0.3.tgz",
+ "integrity": "sha512-Imagg1vJ3y76Y2ea0871wpabqp613+8/r0mCLEBfdtqC7xMSfj9idOnmBYyMoULfHePJyxMAw3nWhJxzc+LFwQ==",
+ "license": "MIT"
+ },
+ "node_modules/@types/d3-scale": {
+ "version": "4.0.9",
+ "resolved": "https://registry.npmjs.org/@types/d3-scale/-/d3-scale-4.0.9.tgz",
+ "integrity": "sha512-dLmtwB8zkAeO/juAMfnV+sItKjlsw2lKdZVVy6LRr0cBmegxSABiLEpGVmSJJ8O08i4+sGR6qQtb6WtuwJdvVw==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/d3-time": "*"
+ }
+ },
+ "node_modules/@types/d3-scale-chromatic": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/@types/d3-scale-chromatic/-/d3-scale-chromatic-3.1.0.tgz",
+ "integrity": "sha512-iWMJgwkK7yTRmWqRB5plb1kadXyQ5Sj8V/zYlFGMUBbIPKQScw+Dku9cAAMgJG+z5GYDoMjWGLVOvjghDEFnKQ==",
+ "license": "MIT"
+ },
+ "node_modules/@types/d3-selection": {
+ "version": "3.0.11",
+ "resolved": "https://registry.npmjs.org/@types/d3-selection/-/d3-selection-3.0.11.tgz",
+ "integrity": "sha512-bhAXu23DJWsrI45xafYpkQ4NtcKMwWnAC/vKrd2l+nxMFuvOT3XMYTIj2opv8vq8AO5Yh7Qac/nSeP/3zjTK0w==",
+ "license": "MIT"
+ },
+ "node_modules/@types/d3-shape": {
+ "version": "3.1.7",
+ "resolved": "https://registry.npmjs.org/@types/d3-shape/-/d3-shape-3.1.7.tgz",
+ "integrity": "sha512-VLvUQ33C+3J+8p+Daf+nYSOsjB4GXp19/S/aGo60m9h1v6XaxjiT82lKVWJCfzhtuZ3yD7i/TPeC/fuKLLOSmg==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/d3-path": "*"
+ }
+ },
+ "node_modules/@types/d3-time": {
+ "version": "3.0.4",
+ "resolved": "https://registry.npmjs.org/@types/d3-time/-/d3-time-3.0.4.tgz",
+ "integrity": "sha512-yuzZug1nkAAaBlBBikKZTgzCeA+k1uy4ZFwWANOfKw5z5LRhV0gNA7gNkKm7HoK+HRN0wX3EkxGk0fpbWhmB7g==",
+ "license": "MIT"
+ },
+ "node_modules/@types/d3-time-format": {
+ "version": "4.0.3",
+ "resolved": "https://registry.npmjs.org/@types/d3-time-format/-/d3-time-format-4.0.3.tgz",
+ "integrity": "sha512-5xg9rC+wWL8kdDj153qZcsJ0FWiFt0J5RB6LYUNZjwSnesfblqrI/bJ1wBdJ8OQfncgbJG5+2F+qfqnqyzYxyg==",
+ "license": "MIT"
+ },
+ "node_modules/@types/d3-timer": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/@types/d3-timer/-/d3-timer-3.0.2.tgz",
+ "integrity": "sha512-Ps3T8E8dZDam6fUyNiMkekK3XUsaUEik+idO9/YjPtfj2qruF8tFBXS7XhtE4iIXBLxhmLjP3SXpLhVf21I9Lw==",
+ "license": "MIT"
+ },
+ "node_modules/@types/d3-transition": {
+ "version": "3.0.9",
+ "resolved": "https://registry.npmjs.org/@types/d3-transition/-/d3-transition-3.0.9.tgz",
+ "integrity": "sha512-uZS5shfxzO3rGlu0cC3bjmMFKsXv+SmZZcgp0KD22ts4uGXp5EVYGzu/0YdwZeKmddhcAccYtREJKkPfXkZuCg==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/d3-selection": "*"
+ }
+ },
+ "node_modules/@types/d3-zoom": {
+ "version": "3.0.8",
+ "resolved": "https://registry.npmjs.org/@types/d3-zoom/-/d3-zoom-3.0.8.tgz",
+ "integrity": "sha512-iqMC4/YlFCSlO8+2Ii1GGGliCAY4XdeG748w5vQUbevlbDu0zSjH/+jojorQVBK/se0j6DUFNPBGSqD3YWYnDw==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/d3-interpolate": "*",
+ "@types/d3-selection": "*"
+ }
+ },
+ "node_modules/@types/debug": {
+ "version": "4.1.12",
+ "resolved": "https://registry.npmjs.org/@types/debug/-/debug-4.1.12.tgz",
+ "integrity": "sha512-vIChWdVG3LG1SMxEvI/AK+FWJthlrqlTu7fbrlywTkkaONwk/UAGaULXRlf8vkzFBLVm0zkMdCquhL5aOjhXPQ==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/ms": "*"
+ }
+ },
+ "node_modules/@types/estree": {
+ "version": "1.0.8",
+ "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.8.tgz",
+ "integrity": "sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==",
+ "license": "MIT"
+ },
+ "node_modules/@types/estree-jsx": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/@types/estree-jsx/-/estree-jsx-1.0.5.tgz",
+ "integrity": "sha512-52CcUVNFyfb1A2ALocQw/Dd1BQFNmSdkuC3BkZ6iqhdMfQz7JWOFRuJFloOzjk+6WijU56m9oKXFAXc7o3Towg==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/estree": "*"
+ }
+ },
+ "node_modules/@types/geojson": {
+ "version": "7946.0.16",
+ "resolved": "https://registry.npmjs.org/@types/geojson/-/geojson-7946.0.16.tgz",
+ "integrity": "sha512-6C8nqWur3j98U6+lXDfTUWIfgvZU+EumvpHKcYjujKH7woYyLj2sUmff0tRhrqM7BohUw7Pz3ZB1jj2gW9Fvmg==",
+ "license": "MIT"
+ },
+ "node_modules/@types/hast": {
+ "version": "3.0.4",
+ "resolved": "https://registry.npmjs.org/@types/hast/-/hast-3.0.4.tgz",
+ "integrity": "sha512-WPs+bbQw5aCj+x6laNGWLH3wviHtoCv/P3+otBhbOhJgG8qtpdAMlTCxLtsTWA7LH1Oh/bFCHsBn0TPS5m30EQ==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "*"
+ }
+ },
+ "node_modules/@types/mdast": {
+ "version": "4.0.4",
+ "resolved": "https://registry.npmjs.org/@types/mdast/-/mdast-4.0.4.tgz",
+ "integrity": "sha512-kGaNbPh1k7AFzgpud/gMdvIm5xuECykRR+JnWKQno9TAXVa6WIVCGTPvYGekIDL4uwCZQSYbUxNBSb1aUo79oA==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "*"
+ }
+ },
+ "node_modules/@types/ms": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/@types/ms/-/ms-2.1.0.tgz",
+ "integrity": "sha512-GsCCIZDE/p3i96vtEqx+7dBUGXrc7zeSK3wwPHIaRThS+9OhWIXRqzs4d6k1SVU8g91DrNRWxWUGhp5KXQb2VA==",
+ "license": "MIT"
+ },
+ "node_modules/@types/react": {
+ "version": "19.2.7",
+ "resolved": "https://registry.npmjs.org/@types/react/-/react-19.2.7.tgz",
+ "integrity": "sha512-MWtvHrGZLFttgeEj28VXHxpmwYbor/ATPYbBfSFZEIRK0ecCFLl2Qo55z52Hss+UV9CRN7trSeq1zbgx7YDWWg==",
+ "license": "MIT",
+ "peer": true,
+ "dependencies": {
+ "csstype": "^3.2.2"
+ }
+ },
+ "node_modules/@types/unist": {
+ "version": "3.0.3",
+ "resolved": "https://registry.npmjs.org/@types/unist/-/unist-3.0.3.tgz",
+ "integrity": "sha512-ko/gIFJRv177XgZsZcBwnqJN5x/Gien8qNOn0D5bQU/zAzVf9Zt3BlcUiLqhV9y4ARk0GbT3tnUiPNgnTXzc/Q==",
+ "license": "MIT"
+ },
+ "node_modules/@ungap/structured-clone": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/@ungap/structured-clone/-/structured-clone-1.3.0.tgz",
+ "integrity": "sha512-WmoN8qaIAo7WTYWbAZuG8PYEhn5fkz7dZrqTBZ7dtt//lL2Gwms1IcnQ5yHqjDfX8Ft5j4YzDM23f87zBfDe9g==",
+ "license": "ISC"
+ },
+ "node_modules/@vitejs/plugin-react": {
+ "version": "4.7.0",
+ "resolved": "https://registry.npmjs.org/@vitejs/plugin-react/-/plugin-react-4.7.0.tgz",
+ "integrity": "sha512-gUu9hwfWvvEDBBmgtAowQCojwZmJ5mcLn3aufeCsitijs3+f2NsrPtlAWIR6OPiqljl96GVCUbLe0HyqIpVaoA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/core": "^7.28.0",
+ "@babel/plugin-transform-react-jsx-self": "^7.27.1",
+ "@babel/plugin-transform-react-jsx-source": "^7.27.1",
+ "@rolldown/pluginutils": "1.0.0-beta.27",
+ "@types/babel__core": "^7.20.5",
+ "react-refresh": "^0.17.0"
+ },
+ "engines": {
+ "node": "^14.18.0 || >=16.0.0"
+ },
+ "peerDependencies": {
+ "vite": "^4.2.0 || ^5.0.0 || ^6.0.0 || ^7.0.0"
+ }
+ },
+ "node_modules/bail": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/bail/-/bail-2.0.2.tgz",
+ "integrity": "sha512-0xO6mYd7JB2YesxDKplafRpsiOzPt9V02ddPCLbY1xYGPOX24NTyN50qnUxgCPcSoYMhKpAuBTjQoRZCAkUDRw==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/baseline-browser-mapping": {
+ "version": "2.8.28",
+ "resolved": "https://registry.npmjs.org/baseline-browser-mapping/-/baseline-browser-mapping-2.8.28.tgz",
+ "integrity": "sha512-gYjt7OIqdM0PcttNYP2aVrr2G0bMALkBaoehD4BuRGjAOtipg0b6wHg1yNL+s5zSnLZZrGHOw4IrND8CD+3oIQ==",
+ "dev": true,
+ "license": "Apache-2.0",
+ "bin": {
+ "baseline-browser-mapping": "dist/cli.js"
+ }
+ },
+ "node_modules/browserslist": {
+ "version": "4.28.0",
+ "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.28.0.tgz",
+ "integrity": "sha512-tbydkR/CxfMwelN0vwdP/pLkDwyAASZ+VfWm4EOwlB6SWhx1sYnWLqo8N5j0rAzPfzfRaxt0mM/4wPU/Su84RQ==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "opencollective",
+ "url": "https://opencollective.com/browserslist"
+ },
+ {
+ "type": "tidelift",
+ "url": "https://tidelift.com/funding/github/npm/browserslist"
+ },
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/ai"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "baseline-browser-mapping": "^2.8.25",
+ "caniuse-lite": "^1.0.30001754",
+ "electron-to-chromium": "^1.5.249",
+ "node-releases": "^2.0.27",
+ "update-browserslist-db": "^1.1.4"
+ },
+ "bin": {
+ "browserslist": "cli.js"
+ },
+ "engines": {
+ "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7"
+ }
+ },
+ "node_modules/caniuse-lite": {
+ "version": "1.0.30001754",
+ "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001754.tgz",
+ "integrity": "sha512-x6OeBXueoAceOmotzx3PO4Zpt4rzpeIFsSr6AAePTZxSkXiYDUmpypEl7e2+8NCd9bD7bXjqyef8CJYPC1jfxg==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "opencollective",
+ "url": "https://opencollective.com/browserslist"
+ },
+ {
+ "type": "tidelift",
+ "url": "https://tidelift.com/funding/github/npm/caniuse-lite"
+ },
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/ai"
+ }
+ ],
+ "license": "CC-BY-4.0"
+ },
+ "node_modules/ccount": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/ccount/-/ccount-2.0.1.tgz",
+ "integrity": "sha512-eyrF0jiFpY+3drT6383f1qhkbGsLSifNAjA61IUjZjmLCWjItY6LB9ft9YhoDgwfmclB2zhu51Lc7+95b8NRAg==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/character-entities": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/character-entities/-/character-entities-2.0.2.tgz",
+ "integrity": "sha512-shx7oQ0Awen/BRIdkjkvz54PnEEI/EjwXDSIZp86/KKdbafHh1Df/RYGBhn4hbe2+uKC9FnT5UCEdyPz3ai9hQ==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/character-entities-html4": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/character-entities-html4/-/character-entities-html4-2.1.0.tgz",
+ "integrity": "sha512-1v7fgQRj6hnSwFpq1Eu0ynr/CDEw0rXo2B61qXrLNdHZmPKgb7fqS1a2JwF0rISo9q77jDI8VMEHoApn8qDoZA==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/character-entities-legacy": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/character-entities-legacy/-/character-entities-legacy-3.0.0.tgz",
+ "integrity": "sha512-RpPp0asT/6ufRm//AJVwpViZbGM/MkjQFxJccQRHmISF/22NBtsHqAWmL+/pmkPWoIUJdWyeVleTl1wydHATVQ==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/character-reference-invalid": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/character-reference-invalid/-/character-reference-invalid-2.0.1.tgz",
+ "integrity": "sha512-iBZ4F4wRbyORVsu0jPV7gXkOsGYjGHPmAyv+HiHG8gi5PtC9KI2j1+v8/tlibRvjoWX027ypmG/n0HtO5t7unw==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/classcat": {
+ "version": "5.0.5",
+ "resolved": "https://registry.npmjs.org/classcat/-/classcat-5.0.5.tgz",
+ "integrity": "sha512-JhZUT7JFcQy/EzW605k/ktHtncoo9vnyW/2GspNYwFlN1C/WmjuV/xtS04e9SOkL2sTdw0VAZ2UGCcQ9lR6p6w==",
+ "license": "MIT"
+ },
+ "node_modules/comma-separated-tokens": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/comma-separated-tokens/-/comma-separated-tokens-2.0.3.tgz",
+ "integrity": "sha512-Fu4hJdvzeylCfQPp9SGWidpzrMs7tTrlu6Vb8XGaRGck8QSNZJJp538Wrb60Lax4fPwR64ViY468OIUTbRlGZg==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/convert-source-map": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-2.0.0.tgz",
+ "integrity": "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/csstype": {
+ "version": "3.2.3",
+ "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.2.3.tgz",
+ "integrity": "sha512-z1HGKcYy2xA8AGQfwrn0PAy+PB7X/GSj3UVJW9qKyn43xWa+gl5nXmU4qqLMRzWVLFC8KusUX8T/0kCiOYpAIQ==",
+ "license": "MIT",
+ "peer": true
+ },
+ "node_modules/d3-color": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/d3-color/-/d3-color-3.1.0.tgz",
+ "integrity": "sha512-zg/chbXyeBtMQ1LbD/WSoW2DpC3I0mpmPdW+ynRTj/x2DAWYrIY7qeZIHidozwV24m4iavr15lNwIwLxRmOxhA==",
+ "license": "ISC",
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-dispatch": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/d3-dispatch/-/d3-dispatch-3.0.1.tgz",
+ "integrity": "sha512-rzUyPU/S7rwUflMyLc1ETDeBj0NRuHKKAcvukozwhshr6g6c5d8zh4c2gQjY2bZ0dXeGLWc1PF174P2tVvKhfg==",
+ "license": "ISC",
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-drag": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/d3-drag/-/d3-drag-3.0.0.tgz",
+ "integrity": "sha512-pWbUJLdETVA8lQNJecMxoXfH6x+mO2UQo8rSmZ+QqxcbyA3hfeprFgIT//HW2nlHChWeIIMwS2Fq+gEARkhTkg==",
+ "license": "ISC",
+ "dependencies": {
+ "d3-dispatch": "1 - 3",
+ "d3-selection": "3"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-ease": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/d3-ease/-/d3-ease-3.0.1.tgz",
+ "integrity": "sha512-wR/XK3D3XcLIZwpbvQwQ5fK+8Ykds1ip7A2Txe0yxncXSdq1L9skcG7blcedkOX+ZcgxGAmLX1FrRGbADwzi0w==",
+ "license": "BSD-3-Clause",
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-interpolate": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/d3-interpolate/-/d3-interpolate-3.0.1.tgz",
+ "integrity": "sha512-3bYs1rOD33uo8aqJfKP3JWPAibgw8Zm2+L9vBKEHJ2Rg+viTR7o5Mmv5mZcieN+FRYaAOWX5SJATX6k1PWz72g==",
+ "license": "ISC",
+ "dependencies": {
+ "d3-color": "1 - 3"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-selection": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/d3-selection/-/d3-selection-3.0.0.tgz",
+ "integrity": "sha512-fmTRWbNMmsmWq6xJV8D19U/gw/bwrHfNXxrIN+HfZgnzqTHp9jOmKMhsTUjXOJnZOdZY9Q28y4yebKzqDKlxlQ==",
+ "license": "ISC",
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-timer": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/d3-timer/-/d3-timer-3.0.1.tgz",
+ "integrity": "sha512-ndfJ/JxxMd3nw31uyKoY2naivF+r29V+Lc0svZxe1JvvIRmi8hUsrMvdOwgS1o6uBHmiz91geQ0ylPP0aj1VUA==",
+ "license": "ISC",
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/d3-transition": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/d3-transition/-/d3-transition-3.0.1.tgz",
+ "integrity": "sha512-ApKvfjsSR6tg06xrL434C0WydLr7JewBB3V+/39RMHsaXTOG0zmt/OAXeng5M5LBm0ojmxJrpomQVZ1aPvBL4w==",
+ "license": "ISC",
+ "dependencies": {
+ "d3-color": "1 - 3",
+ "d3-dispatch": "1 - 3",
+ "d3-ease": "1 - 3",
+ "d3-interpolate": "1 - 3",
+ "d3-timer": "1 - 3"
+ },
+ "engines": {
+ "node": ">=12"
+ },
+ "peerDependencies": {
+ "d3-selection": "2 - 3"
+ }
+ },
+ "node_modules/d3-zoom": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/d3-zoom/-/d3-zoom-3.0.0.tgz",
+ "integrity": "sha512-b8AmV3kfQaqWAuacbPuNbL6vahnOJflOhexLzMMNLga62+/nh0JzvJ0aO/5a5MVgUFGS7Hu1P9P03o3fJkDCyw==",
+ "license": "ISC",
+ "dependencies": {
+ "d3-dispatch": "1 - 3",
+ "d3-drag": "2 - 3",
+ "d3-interpolate": "1 - 3",
+ "d3-selection": "2 - 3",
+ "d3-transition": "2 - 3"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/debug": {
+ "version": "4.4.3",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz",
+ "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==",
+ "license": "MIT",
+ "dependencies": {
+ "ms": "^2.1.3"
+ },
+ "engines": {
+ "node": ">=6.0"
+ },
+ "peerDependenciesMeta": {
+ "supports-color": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/decode-named-character-reference": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/decode-named-character-reference/-/decode-named-character-reference-1.2.0.tgz",
+ "integrity": "sha512-c6fcElNV6ShtZXmsgNgFFV5tVX2PaV4g+MOAkb8eXHvn6sryJBrZa9r0zV6+dtTyoCKxtDy5tyQ5ZwQuidtd+Q==",
+ "license": "MIT",
+ "dependencies": {
+ "character-entities": "^2.0.0"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/dequal": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/dequal/-/dequal-2.0.3.tgz",
+ "integrity": "sha512-0je+qPKHEMohvfRTCEo3CrPG6cAzAYgmzKyxRiYSSDkS6eGJdyVJm7WaYA5ECaAD9wLB2T4EEeymA5aFVcYXCA==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/devlop": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/devlop/-/devlop-1.1.0.tgz",
+ "integrity": "sha512-RWmIqhcFf1lRYBvNmr7qTNuyCt/7/ns2jbpp1+PalgE/rDQcBT0fioSMUpJ93irlUhC5hrg4cYqe6U+0ImW0rA==",
+ "license": "MIT",
+ "dependencies": {
+ "dequal": "^2.0.0"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/electron-to-chromium": {
+ "version": "1.5.252",
+ "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.252.tgz",
+ "integrity": "sha512-53uTpjtRgS7gjIxZ4qCgFdNO2q+wJt/Z8+xAvxbCqXPJrY6h7ighUkadQmNMXH96crtpa6gPFNP7BF4UBGDuaA==",
+ "dev": true,
+ "license": "ISC"
+ },
+ "node_modules/esbuild": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.21.5.tgz",
+ "integrity": "sha512-mg3OPMV4hXywwpoDxu3Qda5xCKQi+vCTZq8S9J/EpkhB2HzKXq4SNFZE3+NK93JYxc8VMSep+lOUSC/RVKaBqw==",
+ "dev": true,
+ "hasInstallScript": true,
+ "license": "MIT",
+ "bin": {
+ "esbuild": "bin/esbuild"
+ },
+ "engines": {
+ "node": ">=12"
+ },
+ "optionalDependencies": {
+ "@esbuild/aix-ppc64": "0.21.5",
+ "@esbuild/android-arm": "0.21.5",
+ "@esbuild/android-arm64": "0.21.5",
+ "@esbuild/android-x64": "0.21.5",
+ "@esbuild/darwin-arm64": "0.21.5",
+ "@esbuild/darwin-x64": "0.21.5",
+ "@esbuild/freebsd-arm64": "0.21.5",
+ "@esbuild/freebsd-x64": "0.21.5",
+ "@esbuild/linux-arm": "0.21.5",
+ "@esbuild/linux-arm64": "0.21.5",
+ "@esbuild/linux-ia32": "0.21.5",
+ "@esbuild/linux-loong64": "0.21.5",
+ "@esbuild/linux-mips64el": "0.21.5",
+ "@esbuild/linux-ppc64": "0.21.5",
+ "@esbuild/linux-riscv64": "0.21.5",
+ "@esbuild/linux-s390x": "0.21.5",
+ "@esbuild/linux-x64": "0.21.5",
+ "@esbuild/netbsd-x64": "0.21.5",
+ "@esbuild/openbsd-x64": "0.21.5",
+ "@esbuild/sunos-x64": "0.21.5",
+ "@esbuild/win32-arm64": "0.21.5",
+ "@esbuild/win32-ia32": "0.21.5",
+ "@esbuild/win32-x64": "0.21.5"
+ }
+ },
+ "node_modules/escalade": {
+ "version": "3.2.0",
+ "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz",
+ "integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/estree-util-is-identifier-name": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/estree-util-is-identifier-name/-/estree-util-is-identifier-name-3.0.0.tgz",
+ "integrity": "sha512-hFtqIDZTIUZ9BXLb8y4pYGyk6+wekIivNVTcmvk8NoOh+VeRn5y6cEHzbURrWbfp1fIqdVipilzj+lfaadNZmg==",
+ "license": "MIT",
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/extend": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz",
+ "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==",
+ "license": "MIT"
+ },
+ "node_modules/fsevents": {
+ "version": "2.3.3",
+ "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz",
+ "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==",
+ "dev": true,
+ "hasInstallScript": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
+ "engines": {
+ "node": "^8.16.0 || ^10.6.0 || >=11.0.0"
+ }
+ },
+ "node_modules/gensync": {
+ "version": "1.0.0-beta.2",
+ "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz",
+ "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/hast-util-to-jsx-runtime": {
+ "version": "2.3.6",
+ "resolved": "https://registry.npmjs.org/hast-util-to-jsx-runtime/-/hast-util-to-jsx-runtime-2.3.6.tgz",
+ "integrity": "sha512-zl6s8LwNyo1P9uw+XJGvZtdFF1GdAkOg8ujOw+4Pyb76874fLps4ueHXDhXWdk6YHQ6OgUtinliG7RsYvCbbBg==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/estree": "^1.0.0",
+ "@types/hast": "^3.0.0",
+ "@types/unist": "^3.0.0",
+ "comma-separated-tokens": "^2.0.0",
+ "devlop": "^1.0.0",
+ "estree-util-is-identifier-name": "^3.0.0",
+ "hast-util-whitespace": "^3.0.0",
+ "mdast-util-mdx-expression": "^2.0.0",
+ "mdast-util-mdx-jsx": "^3.0.0",
+ "mdast-util-mdxjs-esm": "^2.0.0",
+ "property-information": "^7.0.0",
+ "space-separated-tokens": "^2.0.0",
+ "style-to-js": "^1.0.0",
+ "unist-util-position": "^5.0.0",
+ "vfile-message": "^4.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/hast-util-whitespace": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/hast-util-whitespace/-/hast-util-whitespace-3.0.0.tgz",
+ "integrity": "sha512-88JUN06ipLwsnv+dVn+OIYOvAuvBMy/Qoi6O7mQHxdPXpjy+Cd6xRkWwux7DKO+4sYILtLBRIKgsdpS2gQc7qw==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/hast": "^3.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/html-url-attributes": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/html-url-attributes/-/html-url-attributes-3.0.1.tgz",
+ "integrity": "sha512-ol6UPyBWqsrO6EJySPz2O7ZSr856WDrEzM5zMqp+FJJLGMW35cLYmmZnl0vztAZxRUoNZJFTCohfjuIJ8I4QBQ==",
+ "license": "MIT",
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/inline-style-parser": {
+ "version": "0.2.7",
+ "resolved": "https://registry.npmjs.org/inline-style-parser/-/inline-style-parser-0.2.7.tgz",
+ "integrity": "sha512-Nb2ctOyNR8DqQoR0OwRG95uNWIC0C1lCgf5Naz5H6Ji72KZ8OcFZLz2P5sNgwlyoJ8Yif11oMuYs5pBQa86csA==",
+ "license": "MIT"
+ },
+ "node_modules/is-alphabetical": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/is-alphabetical/-/is-alphabetical-2.0.1.tgz",
+ "integrity": "sha512-FWyyY60MeTNyeSRpkM2Iry0G9hpr7/9kD40mD/cGQEuilcZYS4okz8SN2Q6rLCJ8gbCt6fN+rC+6tMGS99LaxQ==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/is-alphanumerical": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/is-alphanumerical/-/is-alphanumerical-2.0.1.tgz",
+ "integrity": "sha512-hmbYhX/9MUMF5uh7tOXyK/n0ZvWpad5caBA17GsC6vyuCqaWliRG5K1qS9inmUhEMaOBIW7/whAnSwveW/LtZw==",
+ "license": "MIT",
+ "dependencies": {
+ "is-alphabetical": "^2.0.0",
+ "is-decimal": "^2.0.0"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/is-decimal": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/is-decimal/-/is-decimal-2.0.1.tgz",
+ "integrity": "sha512-AAB9hiomQs5DXWcRB1rqsxGUstbRroFOPPVAomNk/3XHR5JyEZChOyTWe2oayKnsSsr/kcGqF+z6yuH6HHpN0A==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/is-hexadecimal": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/is-hexadecimal/-/is-hexadecimal-2.0.1.tgz",
+ "integrity": "sha512-DgZQp241c8oO6cA1SbTEWiXeoxV42vlcJxgH+B3hi1AiqqKruZR3ZGF8In3fj4+/y/7rHvlOZLZtgJ/4ttYGZg==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/is-plain-obj": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-4.1.0.tgz",
+ "integrity": "sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/js-tokens": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz",
+ "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==",
+ "license": "MIT"
+ },
+ "node_modules/jsesc": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-3.1.0.tgz",
+ "integrity": "sha512-/sM3dO2FOzXjKQhJuo0Q173wf2KOo8t4I8vHy6lF9poUp7bKT0/NHE8fPX23PwfhnykfqnC2xRxOnVw5XuGIaA==",
+ "dev": true,
+ "license": "MIT",
+ "bin": {
+ "jsesc": "bin/jsesc"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/json5": {
+ "version": "2.2.3",
+ "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz",
+ "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==",
+ "dev": true,
+ "license": "MIT",
+ "bin": {
+ "json5": "lib/cli.js"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/longest-streak": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/longest-streak/-/longest-streak-3.1.0.tgz",
+ "integrity": "sha512-9Ri+o0JYgehTaVBBDoMqIl8GXtbWg711O3srftcHhZ0dqnETqLaoIK0x17fUw9rFSlK/0NlsKe0Ahhyl5pXE2g==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/loose-envify": {
+ "version": "1.4.0",
+ "resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz",
+ "integrity": "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==",
+ "license": "MIT",
+ "dependencies": {
+ "js-tokens": "^3.0.0 || ^4.0.0"
+ },
+ "bin": {
+ "loose-envify": "cli.js"
+ }
+ },
+ "node_modules/lru-cache": {
+ "version": "5.1.1",
+ "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz",
+ "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==",
+ "dev": true,
+ "license": "ISC",
+ "dependencies": {
+ "yallist": "^3.0.2"
+ }
+ },
+ "node_modules/mdast-util-from-markdown": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/mdast-util-from-markdown/-/mdast-util-from-markdown-2.0.2.tgz",
+ "integrity": "sha512-uZhTV/8NBuw0WHkPTrCqDOl0zVe1BIng5ZtHoDk49ME1qqcjYmmLmOf0gELgcRMxN4w2iuIeVso5/6QymSrgmA==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/mdast": "^4.0.0",
+ "@types/unist": "^3.0.0",
+ "decode-named-character-reference": "^1.0.0",
+ "devlop": "^1.0.0",
+ "mdast-util-to-string": "^4.0.0",
+ "micromark": "^4.0.0",
+ "micromark-util-decode-numeric-character-reference": "^2.0.0",
+ "micromark-util-decode-string": "^2.0.0",
+ "micromark-util-normalize-identifier": "^2.0.0",
+ "micromark-util-symbol": "^2.0.0",
+ "micromark-util-types": "^2.0.0",
+ "unist-util-stringify-position": "^4.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-mdx-expression": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/mdast-util-mdx-expression/-/mdast-util-mdx-expression-2.0.1.tgz",
+ "integrity": "sha512-J6f+9hUp+ldTZqKRSg7Vw5V6MqjATc+3E4gf3CFNcuZNWD8XdyI6zQ8GqH7f8169MM6P7hMBRDVGnn7oHB9kXQ==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/estree-jsx": "^1.0.0",
+ "@types/hast": "^3.0.0",
+ "@types/mdast": "^4.0.0",
+ "devlop": "^1.0.0",
+ "mdast-util-from-markdown": "^2.0.0",
+ "mdast-util-to-markdown": "^2.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-mdx-jsx": {
+ "version": "3.2.0",
+ "resolved": "https://registry.npmjs.org/mdast-util-mdx-jsx/-/mdast-util-mdx-jsx-3.2.0.tgz",
+ "integrity": "sha512-lj/z8v0r6ZtsN/cGNNtemmmfoLAFZnjMbNyLzBafjzikOM+glrjNHPlf6lQDOTccj9n5b0PPihEBbhneMyGs1Q==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/estree-jsx": "^1.0.0",
+ "@types/hast": "^3.0.0",
+ "@types/mdast": "^4.0.0",
+ "@types/unist": "^3.0.0",
+ "ccount": "^2.0.0",
+ "devlop": "^1.1.0",
+ "mdast-util-from-markdown": "^2.0.0",
+ "mdast-util-to-markdown": "^2.0.0",
+ "parse-entities": "^4.0.0",
+ "stringify-entities": "^4.0.0",
+ "unist-util-stringify-position": "^4.0.0",
+ "vfile-message": "^4.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-mdxjs-esm": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/mdast-util-mdxjs-esm/-/mdast-util-mdxjs-esm-2.0.1.tgz",
+ "integrity": "sha512-EcmOpxsZ96CvlP03NghtH1EsLtr0n9Tm4lPUJUBccV9RwUOneqSycg19n5HGzCf+10LozMRSObtVr3ee1WoHtg==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/estree-jsx": "^1.0.0",
+ "@types/hast": "^3.0.0",
+ "@types/mdast": "^4.0.0",
+ "devlop": "^1.0.0",
+ "mdast-util-from-markdown": "^2.0.0",
+ "mdast-util-to-markdown": "^2.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-phrasing": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/mdast-util-phrasing/-/mdast-util-phrasing-4.1.0.tgz",
+ "integrity": "sha512-TqICwyvJJpBwvGAMZjj4J2n0X8QWp21b9l0o7eXyVJ25YNWYbJDVIyD1bZXE6WtV6RmKJVYmQAKWa0zWOABz2w==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/mdast": "^4.0.0",
+ "unist-util-is": "^6.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-to-hast": {
+ "version": "13.2.1",
+ "resolved": "https://registry.npmjs.org/mdast-util-to-hast/-/mdast-util-to-hast-13.2.1.tgz",
+ "integrity": "sha512-cctsq2wp5vTsLIcaymblUriiTcZd0CwWtCbLvrOzYCDZoWyMNV8sZ7krj09FSnsiJi3WVsHLM4k6Dq/yaPyCXA==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/hast": "^3.0.0",
+ "@types/mdast": "^4.0.0",
+ "@ungap/structured-clone": "^1.0.0",
+ "devlop": "^1.0.0",
+ "micromark-util-sanitize-uri": "^2.0.0",
+ "trim-lines": "^3.0.0",
+ "unist-util-position": "^5.0.0",
+ "unist-util-visit": "^5.0.0",
+ "vfile": "^6.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-to-markdown": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/mdast-util-to-markdown/-/mdast-util-to-markdown-2.1.2.tgz",
+ "integrity": "sha512-xj68wMTvGXVOKonmog6LwyJKrYXZPvlwabaryTjLh9LuvovB/KAH+kvi8Gjj+7rJjsFi23nkUxRQv1KqSroMqA==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/mdast": "^4.0.0",
+ "@types/unist": "^3.0.0",
+ "longest-streak": "^3.0.0",
+ "mdast-util-phrasing": "^4.0.0",
+ "mdast-util-to-string": "^4.0.0",
+ "micromark-util-classify-character": "^2.0.0",
+ "micromark-util-decode-string": "^2.0.0",
+ "unist-util-visit": "^5.0.0",
+ "zwitch": "^2.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-to-string": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/mdast-util-to-string/-/mdast-util-to-string-4.0.0.tgz",
+ "integrity": "sha512-0H44vDimn51F0YwvxSJSm0eCDOJTRlmN0R1yBh4HLj9wiV1Dn0QoXGbvFAWj2hSItVTlCmBF1hqKlIyUBVFLPg==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/mdast": "^4.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/micromark": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/micromark/-/micromark-4.0.2.tgz",
+ "integrity": "sha512-zpe98Q6kvavpCr1NPVSCMebCKfD7CA2NqZ+rykeNhONIJBpc1tFKt9hucLGwha3jNTNI8lHpctWJWoimVF4PfA==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "@types/debug": "^4.0.0",
+ "debug": "^4.0.0",
+ "decode-named-character-reference": "^1.0.0",
+ "devlop": "^1.0.0",
+ "micromark-core-commonmark": "^2.0.0",
+ "micromark-factory-space": "^2.0.0",
+ "micromark-util-character": "^2.0.0",
+ "micromark-util-chunked": "^2.0.0",
+ "micromark-util-combine-extensions": "^2.0.0",
+ "micromark-util-decode-numeric-character-reference": "^2.0.0",
+ "micromark-util-encode": "^2.0.0",
+ "micromark-util-normalize-identifier": "^2.0.0",
+ "micromark-util-resolve-all": "^2.0.0",
+ "micromark-util-sanitize-uri": "^2.0.0",
+ "micromark-util-subtokenize": "^2.0.0",
+ "micromark-util-symbol": "^2.0.0",
+ "micromark-util-types": "^2.0.0"
+ }
+ },
+ "node_modules/micromark-core-commonmark": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/micromark-core-commonmark/-/micromark-core-commonmark-2.0.3.tgz",
+ "integrity": "sha512-RDBrHEMSxVFLg6xvnXmb1Ayr2WzLAWjeSATAoxwKYJV94TeNavgoIdA0a9ytzDSVzBy2YKFK+emCPOEibLeCrg==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "decode-named-character-reference": "^1.0.0",
+ "devlop": "^1.0.0",
+ "micromark-factory-destination": "^2.0.0",
+ "micromark-factory-label": "^2.0.0",
+ "micromark-factory-space": "^2.0.0",
+ "micromark-factory-title": "^2.0.0",
+ "micromark-factory-whitespace": "^2.0.0",
+ "micromark-util-character": "^2.0.0",
+ "micromark-util-chunked": "^2.0.0",
+ "micromark-util-classify-character": "^2.0.0",
+ "micromark-util-html-tag-name": "^2.0.0",
+ "micromark-util-normalize-identifier": "^2.0.0",
+ "micromark-util-resolve-all": "^2.0.0",
+ "micromark-util-subtokenize": "^2.0.0",
+ "micromark-util-symbol": "^2.0.0",
+ "micromark-util-types": "^2.0.0"
+ }
+ },
+ "node_modules/micromark-factory-destination": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/micromark-factory-destination/-/micromark-factory-destination-2.0.1.tgz",
+ "integrity": "sha512-Xe6rDdJlkmbFRExpTOmRj9N3MaWmbAgdpSrBQvCFqhezUn4AHqJHbaEnfbVYYiexVSs//tqOdY/DxhjdCiJnIA==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "micromark-util-character": "^2.0.0",
+ "micromark-util-symbol": "^2.0.0",
+ "micromark-util-types": "^2.0.0"
+ }
+ },
+ "node_modules/micromark-factory-label": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/micromark-factory-label/-/micromark-factory-label-2.0.1.tgz",
+ "integrity": "sha512-VFMekyQExqIW7xIChcXn4ok29YE3rnuyveW3wZQWWqF4Nv9Wk5rgJ99KzPvHjkmPXF93FXIbBp6YdW3t71/7Vg==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "devlop": "^1.0.0",
+ "micromark-util-character": "^2.0.0",
+ "micromark-util-symbol": "^2.0.0",
+ "micromark-util-types": "^2.0.0"
+ }
+ },
+ "node_modules/micromark-factory-space": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.1.tgz",
+ "integrity": "sha512-zRkxjtBxxLd2Sc0d+fbnEunsTj46SWXgXciZmHq0kDYGnck/ZSGj9/wULTV95uoeYiK5hRXP2mJ98Uo4cq/LQg==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "micromark-util-character": "^2.0.0",
+ "micromark-util-types": "^2.0.0"
+ }
+ },
+ "node_modules/micromark-factory-title": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/micromark-factory-title/-/micromark-factory-title-2.0.1.tgz",
+ "integrity": "sha512-5bZ+3CjhAd9eChYTHsjy6TGxpOFSKgKKJPJxr293jTbfry2KDoWkhBb6TcPVB4NmzaPhMs1Frm9AZH7OD4Cjzw==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "micromark-factory-space": "^2.0.0",
+ "micromark-util-character": "^2.0.0",
+ "micromark-util-symbol": "^2.0.0",
+ "micromark-util-types": "^2.0.0"
+ }
+ },
+ "node_modules/micromark-factory-whitespace": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/micromark-factory-whitespace/-/micromark-factory-whitespace-2.0.1.tgz",
+ "integrity": "sha512-Ob0nuZ3PKt/n0hORHyvoD9uZhr+Za8sFoP+OnMcnWK5lngSzALgQYKMr9RJVOWLqQYuyn6ulqGWSXdwf6F80lQ==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "micromark-factory-space": "^2.0.0",
+ "micromark-util-character": "^2.0.0",
+ "micromark-util-symbol": "^2.0.0",
+ "micromark-util-types": "^2.0.0"
+ }
+ },
+ "node_modules/micromark-util-character": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.1.tgz",
+ "integrity": "sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "micromark-util-symbol": "^2.0.0",
+ "micromark-util-types": "^2.0.0"
+ }
+ },
+ "node_modules/micromark-util-chunked": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/micromark-util-chunked/-/micromark-util-chunked-2.0.1.tgz",
+ "integrity": "sha512-QUNFEOPELfmvv+4xiNg2sRYeS/P84pTW0TCgP5zc9FpXetHY0ab7SxKyAQCNCc1eK0459uoLI1y5oO5Vc1dbhA==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "micromark-util-symbol": "^2.0.0"
+ }
+ },
+ "node_modules/micromark-util-classify-character": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/micromark-util-classify-character/-/micromark-util-classify-character-2.0.1.tgz",
+ "integrity": "sha512-K0kHzM6afW/MbeWYWLjoHQv1sgg2Q9EccHEDzSkxiP/EaagNzCm7T/WMKZ3rjMbvIpvBiZgwR3dKMygtA4mG1Q==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "micromark-util-character": "^2.0.0",
+ "micromark-util-symbol": "^2.0.0",
+ "micromark-util-types": "^2.0.0"
+ }
+ },
+ "node_modules/micromark-util-combine-extensions": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/micromark-util-combine-extensions/-/micromark-util-combine-extensions-2.0.1.tgz",
+ "integrity": "sha512-OnAnH8Ujmy59JcyZw8JSbK9cGpdVY44NKgSM7E9Eh7DiLS2E9RNQf0dONaGDzEG9yjEl5hcqeIsj4hfRkLH/Bg==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "micromark-util-chunked": "^2.0.0",
+ "micromark-util-types": "^2.0.0"
+ }
+ },
+ "node_modules/micromark-util-decode-numeric-character-reference": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/micromark-util-decode-numeric-character-reference/-/micromark-util-decode-numeric-character-reference-2.0.2.tgz",
+ "integrity": "sha512-ccUbYk6CwVdkmCQMyr64dXz42EfHGkPQlBj5p7YVGzq8I7CtjXZJrubAYezf7Rp+bjPseiROqe7G6foFd+lEuw==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "micromark-util-symbol": "^2.0.0"
+ }
+ },
+ "node_modules/micromark-util-decode-string": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/micromark-util-decode-string/-/micromark-util-decode-string-2.0.1.tgz",
+ "integrity": "sha512-nDV/77Fj6eH1ynwscYTOsbK7rR//Uj0bZXBwJZRfaLEJ1iGBR6kIfNmlNqaqJf649EP0F3NWNdeJi03elllNUQ==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "decode-named-character-reference": "^1.0.0",
+ "micromark-util-character": "^2.0.0",
+ "micromark-util-decode-numeric-character-reference": "^2.0.0",
+ "micromark-util-symbol": "^2.0.0"
+ }
+ },
+ "node_modules/micromark-util-encode": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/micromark-util-encode/-/micromark-util-encode-2.0.1.tgz",
+ "integrity": "sha512-c3cVx2y4KqUnwopcO9b/SCdo2O67LwJJ/UyqGfbigahfegL9myoEFoDYZgkT7f36T0bLrM9hZTAaAyH+PCAXjw==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT"
+ },
+ "node_modules/micromark-util-html-tag-name": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/micromark-util-html-tag-name/-/micromark-util-html-tag-name-2.0.1.tgz",
+ "integrity": "sha512-2cNEiYDhCWKI+Gs9T0Tiysk136SnR13hhO8yW6BGNyhOC4qYFnwF1nKfD3HFAIXA5c45RrIG1ub11GiXeYd1xA==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT"
+ },
+ "node_modules/micromark-util-normalize-identifier": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/micromark-util-normalize-identifier/-/micromark-util-normalize-identifier-2.0.1.tgz",
+ "integrity": "sha512-sxPqmo70LyARJs0w2UclACPUUEqltCkJ6PhKdMIDuJ3gSf/Q+/GIe3WKl0Ijb/GyH9lOpUkRAO2wp0GVkLvS9Q==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "micromark-util-symbol": "^2.0.0"
+ }
+ },
+ "node_modules/micromark-util-resolve-all": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/micromark-util-resolve-all/-/micromark-util-resolve-all-2.0.1.tgz",
+ "integrity": "sha512-VdQyxFWFT2/FGJgwQnJYbe1jjQoNTS4RjglmSjTUlpUMa95Htx9NHeYW4rGDJzbjvCsl9eLjMQwGeElsqmzcHg==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "micromark-util-types": "^2.0.0"
+ }
+ },
+ "node_modules/micromark-util-sanitize-uri": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/micromark-util-sanitize-uri/-/micromark-util-sanitize-uri-2.0.1.tgz",
+ "integrity": "sha512-9N9IomZ/YuGGZZmQec1MbgxtlgougxTodVwDzzEouPKo3qFWvymFHWcnDi2vzV1ff6kas9ucW+o3yzJK9YB1AQ==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "micromark-util-character": "^2.0.0",
+ "micromark-util-encode": "^2.0.0",
+ "micromark-util-symbol": "^2.0.0"
+ }
+ },
+ "node_modules/micromark-util-subtokenize": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/micromark-util-subtokenize/-/micromark-util-subtokenize-2.1.0.tgz",
+ "integrity": "sha512-XQLu552iSctvnEcgXw6+Sx75GflAPNED1qx7eBJ+wydBb2KCbRZe+NwvIEEMM83uml1+2WSXpBAcp9IUCgCYWA==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "devlop": "^1.0.0",
+ "micromark-util-chunked": "^2.0.0",
+ "micromark-util-symbol": "^2.0.0",
+ "micromark-util-types": "^2.0.0"
+ }
+ },
+ "node_modules/micromark-util-symbol": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz",
+ "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT"
+ },
+ "node_modules/micromark-util-types": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/micromark-util-types/-/micromark-util-types-2.0.2.tgz",
+ "integrity": "sha512-Yw0ECSpJoViF1qTU4DC6NwtC4aWGt1EkzaQB8KPPyCRR8z9TWeV0HbEFGTO+ZY1wB22zmxnJqhPyTpOVCpeHTA==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "license": "MIT"
+ },
+ "node_modules/ms": {
+ "version": "2.1.3",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz",
+ "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==",
+ "license": "MIT"
+ },
+ "node_modules/nanoid": {
+ "version": "3.3.11",
+ "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.11.tgz",
+ "integrity": "sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/ai"
+ }
+ ],
+ "license": "MIT",
+ "bin": {
+ "nanoid": "bin/nanoid.cjs"
+ },
+ "engines": {
+ "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1"
+ }
+ },
+ "node_modules/node-releases": {
+ "version": "2.0.27",
+ "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.27.tgz",
+ "integrity": "sha512-nmh3lCkYZ3grZvqcCH+fjmQ7X+H0OeZgP40OierEaAptX4XofMh5kwNbWh7lBduUzCcV/8kZ+NDLCwm2iorIlA==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/parse-entities": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/parse-entities/-/parse-entities-4.0.2.tgz",
+ "integrity": "sha512-GG2AQYWoLgL877gQIKeRPGO1xF9+eG1ujIb5soS5gPvLQ1y2o8FL90w2QWNdf9I361Mpp7726c+lj3U0qK1uGw==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^2.0.0",
+ "character-entities-legacy": "^3.0.0",
+ "character-reference-invalid": "^2.0.0",
+ "decode-named-character-reference": "^1.0.0",
+ "is-alphanumerical": "^2.0.0",
+ "is-decimal": "^2.0.0",
+ "is-hexadecimal": "^2.0.0"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/parse-entities/node_modules/@types/unist": {
+ "version": "2.0.11",
+ "resolved": "https://registry.npmjs.org/@types/unist/-/unist-2.0.11.tgz",
+ "integrity": "sha512-CmBKiL6NNo/OqgmMn95Fk9Whlp2mtvIv+KNpQKN2F4SjvrEesubTRWGYSg+BnWZOnlCaSTU1sMpsBOzgbYhnsA==",
+ "license": "MIT"
+ },
+ "node_modules/picocolors": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz",
+ "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==",
+ "dev": true,
+ "license": "ISC"
+ },
+ "node_modules/postcss": {
+ "version": "8.5.6",
+ "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.5.6.tgz",
+ "integrity": "sha512-3Ybi1tAuwAP9s0r1UQ2J4n5Y0G05bJkpUIO0/bI9MhwmD70S5aTWbXGBwxHrelT+XM1k6dM0pk+SwNkpTRN7Pg==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "opencollective",
+ "url": "https://opencollective.com/postcss/"
+ },
+ {
+ "type": "tidelift",
+ "url": "https://tidelift.com/funding/github/npm/postcss"
+ },
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/ai"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "nanoid": "^3.3.11",
+ "picocolors": "^1.1.1",
+ "source-map-js": "^1.2.1"
+ },
+ "engines": {
+ "node": "^10 || ^12 || >=14"
+ }
+ },
+ "node_modules/property-information": {
+ "version": "7.1.0",
+ "resolved": "https://registry.npmjs.org/property-information/-/property-information-7.1.0.tgz",
+ "integrity": "sha512-TwEZ+X+yCJmYfL7TPUOcvBZ4QfoT5YenQiJuX//0th53DE6w0xxLEtfK3iyryQFddXuvkIk51EEgrJQ0WJkOmQ==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/react": {
+ "version": "18.3.1",
+ "resolved": "https://registry.npmjs.org/react/-/react-18.3.1.tgz",
+ "integrity": "sha512-wS+hAgJShR0KhEvPJArfuPVN1+Hz1t0Y6n5jLrGQbkb4urgPE/0Rve+1kMB1v/oWgHgm4WIcV+i7F2pTVj+2iQ==",
+ "license": "MIT",
+ "dependencies": {
+ "loose-envify": "^1.1.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/react-dom": {
+ "version": "18.3.1",
+ "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-18.3.1.tgz",
+ "integrity": "sha512-5m4nQKp+rZRb09LNH59GM4BxTh9251/ylbKIbpe7TpGxfJ+9kv6BLkLBXIjjspbgbnIBNqlI23tRnTWT0snUIw==",
+ "license": "MIT",
+ "dependencies": {
+ "loose-envify": "^1.1.0",
+ "scheduler": "^0.23.2"
+ },
+ "peerDependencies": {
+ "react": "^18.3.1"
+ }
+ },
+ "node_modules/react-markdown": {
+ "version": "10.1.0",
+ "resolved": "https://registry.npmjs.org/react-markdown/-/react-markdown-10.1.0.tgz",
+ "integrity": "sha512-qKxVopLT/TyA6BX3Ue5NwabOsAzm0Q7kAPwq6L+wWDwisYs7R8vZ0nRXqq6rkueboxpkjvLGU9fWifiX/ZZFxQ==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/hast": "^3.0.0",
+ "@types/mdast": "^4.0.0",
+ "devlop": "^1.0.0",
+ "hast-util-to-jsx-runtime": "^2.0.0",
+ "html-url-attributes": "^3.0.0",
+ "mdast-util-to-hast": "^13.0.0",
+ "remark-parse": "^11.0.0",
+ "remark-rehype": "^11.0.0",
+ "unified": "^11.0.0",
+ "unist-util-visit": "^5.0.0",
+ "vfile": "^6.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ },
+ "peerDependencies": {
+ "@types/react": ">=18",
+ "react": ">=18"
+ }
+ },
+ "node_modules/react-refresh": {
+ "version": "0.17.0",
+ "resolved": "https://registry.npmjs.org/react-refresh/-/react-refresh-0.17.0.tgz",
+ "integrity": "sha512-z6F7K9bV85EfseRCp2bzrpyQ0Gkw1uLoCel9XBVWPg/TjRj94SkJzUTGfOa4bs7iJvBWtQG0Wq7wnI0syw3EBQ==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/reactflow": {
+ "version": "11.11.4",
+ "resolved": "https://registry.npmjs.org/reactflow/-/reactflow-11.11.4.tgz",
+ "integrity": "sha512-70FOtJkUWH3BAOsN+LU9lCrKoKbtOPnz2uq0CV2PLdNSwxTXOhCbsZr50GmZ+Rtw3jx8Uv7/vBFtCGixLfd4Og==",
+ "license": "MIT",
+ "dependencies": {
+ "@reactflow/background": "11.3.14",
+ "@reactflow/controls": "11.2.14",
+ "@reactflow/core": "11.11.4",
+ "@reactflow/minimap": "11.7.14",
+ "@reactflow/node-resizer": "2.2.14",
+ "@reactflow/node-toolbar": "1.3.14"
+ },
+ "peerDependencies": {
+ "react": ">=17",
+ "react-dom": ">=17"
+ }
+ },
+ "node_modules/remark-parse": {
+ "version": "11.0.0",
+ "resolved": "https://registry.npmjs.org/remark-parse/-/remark-parse-11.0.0.tgz",
+ "integrity": "sha512-FCxlKLNGknS5ba/1lmpYijMUzX2esxW5xQqjWxw2eHFfS2MSdaHVINFmhjo+qN1WhZhNimq0dZATN9pH0IDrpA==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/mdast": "^4.0.0",
+ "mdast-util-from-markdown": "^2.0.0",
+ "micromark-util-types": "^2.0.0",
+ "unified": "^11.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/remark-rehype": {
+ "version": "11.1.2",
+ "resolved": "https://registry.npmjs.org/remark-rehype/-/remark-rehype-11.1.2.tgz",
+ "integrity": "sha512-Dh7l57ianaEoIpzbp0PC9UKAdCSVklD8E5Rpw7ETfbTl3FqcOOgq5q2LVDhgGCkaBv7p24JXikPdvhhmHvKMsw==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/hast": "^3.0.0",
+ "@types/mdast": "^4.0.0",
+ "mdast-util-to-hast": "^13.0.0",
+ "unified": "^11.0.0",
+ "vfile": "^6.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/rollup": {
+ "version": "4.53.2",
+ "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.53.2.tgz",
+ "integrity": "sha512-MHngMYwGJVi6Fmnk6ISmnk7JAHRNF0UkuucA0CUW3N3a4KnONPEZz+vUanQP/ZC/iY1Qkf3bwPWzyY84wEks1g==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@types/estree": "1.0.8"
+ },
+ "bin": {
+ "rollup": "dist/bin/rollup"
+ },
+ "engines": {
+ "node": ">=18.0.0",
+ "npm": ">=8.0.0"
+ },
+ "optionalDependencies": {
+ "@rollup/rollup-android-arm-eabi": "4.53.2",
+ "@rollup/rollup-android-arm64": "4.53.2",
+ "@rollup/rollup-darwin-arm64": "4.53.2",
+ "@rollup/rollup-darwin-x64": "4.53.2",
+ "@rollup/rollup-freebsd-arm64": "4.53.2",
+ "@rollup/rollup-freebsd-x64": "4.53.2",
+ "@rollup/rollup-linux-arm-gnueabihf": "4.53.2",
+ "@rollup/rollup-linux-arm-musleabihf": "4.53.2",
+ "@rollup/rollup-linux-arm64-gnu": "4.53.2",
+ "@rollup/rollup-linux-arm64-musl": "4.53.2",
+ "@rollup/rollup-linux-loong64-gnu": "4.53.2",
+ "@rollup/rollup-linux-ppc64-gnu": "4.53.2",
+ "@rollup/rollup-linux-riscv64-gnu": "4.53.2",
+ "@rollup/rollup-linux-riscv64-musl": "4.53.2",
+ "@rollup/rollup-linux-s390x-gnu": "4.53.2",
+ "@rollup/rollup-linux-x64-gnu": "4.53.2",
+ "@rollup/rollup-linux-x64-musl": "4.53.2",
+ "@rollup/rollup-openharmony-arm64": "4.53.2",
+ "@rollup/rollup-win32-arm64-msvc": "4.53.2",
+ "@rollup/rollup-win32-ia32-msvc": "4.53.2",
+ "@rollup/rollup-win32-x64-gnu": "4.53.2",
+ "@rollup/rollup-win32-x64-msvc": "4.53.2",
+ "fsevents": "~2.3.2"
+ }
+ },
+ "node_modules/scheduler": {
+ "version": "0.23.2",
+ "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.23.2.tgz",
+ "integrity": "sha512-UOShsPwz7NrMUqhR6t0hWjFduvOzbtv7toDH1/hIrfRNIDBnnBWd0CwJTGvTpngVlmwGCdP9/Zl/tVrDqcuYzQ==",
+ "license": "MIT",
+ "dependencies": {
+ "loose-envify": "^1.1.0"
+ }
+ },
+ "node_modules/semver": {
+ "version": "6.3.1",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz",
+ "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==",
+ "dev": true,
+ "license": "ISC",
+ "bin": {
+ "semver": "bin/semver.js"
+ }
+ },
+ "node_modules/source-map-js": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz",
+ "integrity": "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==",
+ "dev": true,
+ "license": "BSD-3-Clause",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/space-separated-tokens": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/space-separated-tokens/-/space-separated-tokens-2.0.2.tgz",
+ "integrity": "sha512-PEGlAwrG8yXGXRjW32fGbg66JAlOAwbObuqVoJpv/mRgoWDQfgH1wDPvtzWyUSNAXBGSk8h755YDbbcEy3SH2Q==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/stringify-entities": {
+ "version": "4.0.4",
+ "resolved": "https://registry.npmjs.org/stringify-entities/-/stringify-entities-4.0.4.tgz",
+ "integrity": "sha512-IwfBptatlO+QCJUo19AqvrPNqlVMpW9YEL2LIVY+Rpv2qsjCGxaDLNRgeGsQWJhfItebuJhsGSLjaBbNSQ+ieg==",
+ "license": "MIT",
+ "dependencies": {
+ "character-entities-html4": "^2.0.0",
+ "character-entities-legacy": "^3.0.0"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/style-to-js": {
+ "version": "1.1.21",
+ "resolved": "https://registry.npmjs.org/style-to-js/-/style-to-js-1.1.21.tgz",
+ "integrity": "sha512-RjQetxJrrUJLQPHbLku6U/ocGtzyjbJMP9lCNK7Ag0CNh690nSH8woqWH9u16nMjYBAok+i7JO1NP2pOy8IsPQ==",
+ "license": "MIT",
+ "dependencies": {
+ "style-to-object": "1.0.14"
+ }
+ },
+ "node_modules/style-to-object": {
+ "version": "1.0.14",
+ "resolved": "https://registry.npmjs.org/style-to-object/-/style-to-object-1.0.14.tgz",
+ "integrity": "sha512-LIN7rULI0jBscWQYaSswptyderlarFkjQ+t79nzty8tcIAceVomEVlLzH5VP4Cmsv6MtKhs7qaAiwlcp+Mgaxw==",
+ "license": "MIT",
+ "dependencies": {
+ "inline-style-parser": "0.2.7"
+ }
+ },
+ "node_modules/trim-lines": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/trim-lines/-/trim-lines-3.0.1.tgz",
+ "integrity": "sha512-kRj8B+YHZCc9kQYdWfJB2/oUl9rA99qbowYYBtr4ui4mZyAQ2JpvVBd/6U2YloATfqBhBTSMhTpgBHtU0Mf3Rg==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/trough": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/trough/-/trough-2.2.0.tgz",
+ "integrity": "sha512-tmMpK00BjZiUyVyvrBK7knerNgmgvcV/KLVyuma/SC+TQN167GrMRciANTz09+k3zW8L8t60jWO1GpfkZdjTaw==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/unified": {
+ "version": "11.0.5",
+ "resolved": "https://registry.npmjs.org/unified/-/unified-11.0.5.tgz",
+ "integrity": "sha512-xKvGhPWw3k84Qjh8bI3ZeJjqnyadK+GEFtazSfZv/rKeTkTjOJho6mFqh2SM96iIcZokxiOpg78GazTSg8+KHA==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^3.0.0",
+ "bail": "^2.0.0",
+ "devlop": "^1.0.0",
+ "extend": "^3.0.0",
+ "is-plain-obj": "^4.0.0",
+ "trough": "^2.0.0",
+ "vfile": "^6.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/unist-util-is": {
+ "version": "6.0.1",
+ "resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-6.0.1.tgz",
+ "integrity": "sha512-LsiILbtBETkDz8I9p1dQ0uyRUWuaQzd/cuEeS1hoRSyW5E5XGmTzlwY1OrNzzakGowI9Dr/I8HVaw4hTtnxy8g==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^3.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/unist-util-position": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/unist-util-position/-/unist-util-position-5.0.0.tgz",
+ "integrity": "sha512-fucsC7HjXvkB5R3kTCO7kUjRdrS0BJt3M/FPxmHMBOm8JQi2BsHAHFsy27E0EolP8rp0NzXsJ+jNPyDWvOJZPA==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^3.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/unist-util-stringify-position": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/unist-util-stringify-position/-/unist-util-stringify-position-4.0.0.tgz",
+ "integrity": "sha512-0ASV06AAoKCDkS2+xw5RXJywruurpbC4JZSm7nr7MOt1ojAzvyyaO+UxZf18j8FCF6kmzCZKcAgN/yu2gm2XgQ==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^3.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/unist-util-visit": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-5.0.0.tgz",
+ "integrity": "sha512-MR04uvD+07cwl/yhVuVWAtw+3GOR/knlL55Nd/wAdblk27GCVt3lqpTivy/tkJcZoNPzTwS1Y+KMojlLDhoTzg==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^3.0.0",
+ "unist-util-is": "^6.0.0",
+ "unist-util-visit-parents": "^6.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/unist-util-visit-parents": {
+ "version": "6.0.2",
+ "resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-6.0.2.tgz",
+ "integrity": "sha512-goh1s1TBrqSqukSc8wrjwWhL0hiJxgA8m4kFxGlQ+8FYQ3C/m11FcTs4YYem7V664AhHVvgoQLk890Ssdsr2IQ==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^3.0.0",
+ "unist-util-is": "^6.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/update-browserslist-db": {
+ "version": "1.1.4",
+ "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.1.4.tgz",
+ "integrity": "sha512-q0SPT4xyU84saUX+tomz1WLkxUbuaJnR1xWt17M7fJtEJigJeWUNGUqrauFXsHnqev9y9JTRGwk13tFBuKby4A==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "opencollective",
+ "url": "https://opencollective.com/browserslist"
+ },
+ {
+ "type": "tidelift",
+ "url": "https://tidelift.com/funding/github/npm/browserslist"
+ },
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/ai"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "escalade": "^3.2.0",
+ "picocolors": "^1.1.1"
+ },
+ "bin": {
+ "update-browserslist-db": "cli.js"
+ },
+ "peerDependencies": {
+ "browserslist": ">= 4.21.0"
+ }
+ },
+ "node_modules/use-sync-external-store": {
+ "version": "1.6.0",
+ "resolved": "https://registry.npmjs.org/use-sync-external-store/-/use-sync-external-store-1.6.0.tgz",
+ "integrity": "sha512-Pp6GSwGP/NrPIrxVFAIkOQeyw8lFenOHijQWkUTrDvrF4ALqylP2C/KCkeS9dpUM3KvYRQhna5vt7IL95+ZQ9w==",
+ "license": "MIT",
+ "peerDependencies": {
+ "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0"
+ }
+ },
+ "node_modules/vfile": {
+ "version": "6.0.3",
+ "resolved": "https://registry.npmjs.org/vfile/-/vfile-6.0.3.tgz",
+ "integrity": "sha512-KzIbH/9tXat2u30jf+smMwFCsno4wHVdNmzFyL+T/L3UGqqk6JKfVqOFOZEpZSHADH1k40ab6NUIXZq422ov3Q==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^3.0.0",
+ "vfile-message": "^4.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/vfile-message": {
+ "version": "4.0.3",
+ "resolved": "https://registry.npmjs.org/vfile-message/-/vfile-message-4.0.3.tgz",
+ "integrity": "sha512-QTHzsGd1EhbZs4AsQ20JX1rC3cOlt/IWJruk893DfLRr57lcnOeMaWG4K0JrRta4mIJZKth2Au3mM3u03/JWKw==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^3.0.0",
+ "unist-util-stringify-position": "^4.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/vite": {
+ "version": "5.4.21",
+ "resolved": "https://registry.npmjs.org/vite/-/vite-5.4.21.tgz",
+ "integrity": "sha512-o5a9xKjbtuhY6Bi5S3+HvbRERmouabWbyUcpXXUA1u+GNUKoROi9byOJ8M0nHbHYHkYICiMlqxkg1KkYmm25Sw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "esbuild": "^0.21.3",
+ "postcss": "^8.4.43",
+ "rollup": "^4.20.0"
+ },
+ "bin": {
+ "vite": "bin/vite.js"
+ },
+ "engines": {
+ "node": "^18.0.0 || >=20.0.0"
+ },
+ "funding": {
+ "url": "https://github.com/vitejs/vite?sponsor=1"
+ },
+ "optionalDependencies": {
+ "fsevents": "~2.3.3"
+ },
+ "peerDependencies": {
+ "@types/node": "^18.0.0 || >=20.0.0",
+ "less": "*",
+ "lightningcss": "^1.21.0",
+ "sass": "*",
+ "sass-embedded": "*",
+ "stylus": "*",
+ "sugarss": "*",
+ "terser": "^5.4.0"
+ },
+ "peerDependenciesMeta": {
+ "@types/node": {
+ "optional": true
+ },
+ "less": {
+ "optional": true
+ },
+ "lightningcss": {
+ "optional": true
+ },
+ "sass": {
+ "optional": true
+ },
+ "sass-embedded": {
+ "optional": true
+ },
+ "stylus": {
+ "optional": true
+ },
+ "sugarss": {
+ "optional": true
+ },
+ "terser": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/yallist": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz",
+ "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==",
+ "dev": true,
+ "license": "ISC"
+ },
+ "node_modules/zustand": {
+ "version": "4.5.7",
+ "resolved": "https://registry.npmjs.org/zustand/-/zustand-4.5.7.tgz",
+ "integrity": "sha512-CHOUy7mu3lbD6o6LJLfllpjkzhHXSBlX8B9+qPddUsIfeF5S/UZ5q0kmCsnRqT1UHFQZchNFDDzMbQsuesHWlw==",
+ "license": "MIT",
+ "dependencies": {
+ "use-sync-external-store": "^1.2.2"
+ },
+ "engines": {
+ "node": ">=12.7.0"
+ },
+ "peerDependencies": {
+ "@types/react": ">=16.8",
+ "immer": ">=9.0.6",
+ "react": ">=16.8"
+ },
+ "peerDependenciesMeta": {
+ "@types/react": {
+ "optional": true
+ },
+ "immer": {
+ "optional": true
+ },
+ "react": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/zwitch": {
+ "version": "2.0.4",
+ "resolved": "https://registry.npmjs.org/zwitch/-/zwitch-2.0.4.tgz",
+ "integrity": "sha512-bXE4cR/kVZhKZX/RjPEflHaKVhUVl85noU3v6b8apfQEc1x4A+zBxjZ4lN8LqGd6WZ3dl98pY4o717VFmoPp+A==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ }
+ }
+}
diff --git a/frontend/package.json b/frontend/package.json
new file mode 100644
index 0000000000000000000000000000000000000000..5ffdd0f207fd4f5ecabbcb943c1610e75dde4f59
--- /dev/null
+++ b/frontend/package.json
@@ -0,0 +1,21 @@
+{
+ "name": "gitpilot-frontend",
+ "version": "0.2.6",
+ "private": true,
+ "scripts": {
+ "dev": "vite --host",
+ "build": "vite build",
+ "vercel-build": "vite build",
+ "preview": "vite preview"
+ },
+ "dependencies": {
+ "react": "^18.2.0",
+ "react-dom": "^18.2.0",
+ "react-markdown": "^10.1.0",
+ "reactflow": "^11.11.4"
+ },
+ "devDependencies": {
+ "@vitejs/plugin-react": "^4.0.0",
+ "vite": "^5.0.0"
+ }
+}
diff --git a/frontend/styles.css b/frontend/styles.css
new file mode 100644
index 0000000000000000000000000000000000000000..03c11e9bbc0b2582514768240e5b69ca176ae051
--- /dev/null
+++ b/frontend/styles.css
@@ -0,0 +1,3301 @@
+:root {
+ color-scheme: dark;
+ font-family: system-ui, -apple-system, BlinkMacSystemFont, "SF Pro Text",
+ sans-serif;
+ background: #050608;
+ color: #f5f5f7;
+}
+
+*,
+*::before,
+*::after {
+ box-sizing: border-box;
+}
+
+body {
+ margin: 0;
+ overflow: hidden;
+}
+
+/* Custom scrollbar styling - Claude Code style */
+::-webkit-scrollbar {
+ width: 8px;
+ height: 8px;
+}
+
+::-webkit-scrollbar-track {
+ background: transparent;
+}
+
+::-webkit-scrollbar-thumb {
+ background: #272832;
+ border-radius: 4px;
+ transition: background 0.2s ease;
+}
+
+::-webkit-scrollbar-thumb:hover {
+ background: #3a3b4d;
+}
+
+/* App Root - Fixed height with footer accommodation */
+.app-root {
+ display: flex;
+ flex-direction: column;
+ height: 100vh;
+ background: radial-gradient(circle at top, #171823 0, #050608 55%);
+ color: #f5f5f7;
+ overflow: hidden;
+}
+
+/* Main content wrapper (sidebar + workspace) */
+.main-wrapper {
+ display: flex;
+ flex: 1;
+ min-height: 0;
+ overflow: hidden;
+}
+
+/* Sidebar */
+.sidebar {
+ width: 320px;
+ min-width: 320px;
+ padding: 16px 14px;
+ border-right: 1px solid #272832;
+ background: linear-gradient(180deg, #101117 0, #050608 100%);
+ display: flex;
+ flex-direction: column;
+ gap: 16px;
+ overflow-y: auto;
+ overflow-x: hidden;
+ transition: width 0.25s cubic-bezier(0.4, 0, 0.2, 1),
+ min-width 0.25s cubic-bezier(0.4, 0, 0.2, 1),
+ padding 0.25s cubic-bezier(0.4, 0, 0.2, 1);
+}
+
+.sidebar--collapsed {
+ width: 52px;
+ min-width: 52px;
+ padding: 16px 8px;
+ gap: 8px;
+}
+
+/* ---- Sidebar top row: logo + toggle ---- */
+.sidebar-top-row {
+ display: flex;
+ align-items: center;
+ justify-content: space-between;
+ gap: 6px;
+ min-height: 32px;
+}
+
+.sidebar--collapsed .sidebar-top-row {
+ justify-content: center;
+}
+
+.sidebar-toggle-btn {
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ width: 28px;
+ height: 28px;
+ border-radius: 6px;
+ border: none;
+ background: transparent;
+ color: #6b6d82;
+ cursor: pointer;
+ transition: background 0.15s, color 0.15s;
+ flex-shrink: 0;
+}
+
+.sidebar-toggle-btn:hover {
+ background: #1e1f2e;
+ color: #e0e1eb;
+}
+
+.sidebar--collapsed .sidebar-toggle-btn {
+ display: none;
+}
+
+/* ---- Nav buttons: icon + label layout ---- */
+.sidebar .nav-btn {
+ display: flex;
+ align-items: center;
+ gap: 10px;
+}
+
+.sidebar .nav-btn svg {
+ flex-shrink: 0;
+}
+
+.sidebar--collapsed .nav-btn {
+ justify-content: center;
+ padding: 8px;
+}
+
+/* ---- User profile in collapsed state ---- */
+.sidebar--collapsed .user-profile {
+ align-items: center;
+}
+
+.sidebar--collapsed .user-avatar {
+ width: 28px;
+ height: 28px;
+}
+
+/* User Profile Section */
+.user-profile {
+ margin-top: auto;
+ padding-top: 16px;
+ border-top: 1px solid #272832;
+ display: flex;
+ flex-direction: column;
+ gap: 12px;
+ animation: fadeIn 0.3s ease;
+}
+
+.user-profile-header {
+ display: flex;
+ align-items: center;
+ gap: 10px;
+}
+
+.user-avatar {
+ width: 40px;
+ height: 40px;
+ border-radius: 10px;
+ border: 2px solid #272832;
+ transition: all 0.2s ease;
+}
+
+.user-avatar:hover {
+ border-color: #ff7a3c;
+ transform: scale(1.05);
+}
+
+.user-info {
+ flex: 1;
+ min-width: 0;
+}
+
+.user-name {
+ font-size: 13px;
+ font-weight: 600;
+ color: #f5f5f7;
+ white-space: nowrap;
+ overflow: hidden;
+ text-overflow: ellipsis;
+}
+
+.user-login {
+ font-size: 11px;
+ color: #9a9bb0;
+ white-space: nowrap;
+ overflow: hidden;
+ text-overflow: ellipsis;
+}
+
+.btn-logout {
+ /* `border` was declared twice (none, then solid); keep the winning declaration */
+ border: 1px solid #272832;
+ outline: none;
+ background: #1a1b26;
+ color: #c3c5dd;
+ border-radius: 8px;
+ padding: 8px 12px;
+ font-size: 12px;
+ font-weight: 500;
+ cursor: pointer;
+ transition: all 0.2s ease;
+}
+
+.btn-logout:hover {
+ background: #2a2b3c;
+ border-color: #ff7a3c;
+ color: #ff7a3c;
+ transform: translateY(-1px);
+}
+
+.logo-row {
+ display: flex;
+ align-items: center;
+ gap: 10px;
+ animation: fadeIn 0.3s ease;
+}
+
+@keyframes fadeIn {
+ from {
+ opacity: 0;
+ transform: translateY(-10px);
+ }
+ to {
+ opacity: 1;
+ transform: translateY(0);
+ }
+}
+
+.logo-square {
+ width: 32px;
+ height: 32px;
+ border-radius: 8px;
+ background: #ff7a3c;
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ font-weight: 700;
+ color: #050608;
+ transition: transform 0.2s ease;
+}
+
+.logo-square:hover {
+ transform: scale(1.05);
+}
+
+.logo-title {
+ font-size: 16px;
+ font-weight: 600;
+}
+
+.logo-subtitle {
+ font-size: 12px;
+ color: #a1a2b3;
+}
+
+/* Active context card */
+.sidebar-context-card {
+ padding: 10px 12px;
+ border-radius: 10px;
+ background: #151622;
+ border: 1px solid #272832;
+ display: flex;
+ flex-direction: column;
+ gap: 6px;
+ animation: slideIn 0.3s ease;
+}
+
+.sidebar-context-header {
+ display: flex;
+ align-items: center;
+ justify-content: space-between;
+}
+
+.sidebar-context-close {
+ width: 22px;
+ height: 22px;
+ border-radius: 4px;
+ border: none;
+ background: transparent;
+ color: #71717a;
+ cursor: pointer;
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ padding: 0;
+ transition: all 0.15s ease;
+}
+
+.sidebar-context-close:hover {
+ background: #272832;
+ color: #f5f5f7;
+}
+
+.sidebar-section-label {
+ font-size: 10px;
+ font-weight: 700;
+ letter-spacing: 0.08em;
+ color: #71717a;
+ text-transform: uppercase;
+}
+
+.sidebar-context-body {
+ display: flex;
+ flex-direction: column;
+ gap: 2px;
+}
+
+.sidebar-context-repo {
+ font-size: 13px;
+ font-weight: 600;
+ color: #f5f5f7;
+ white-space: nowrap;
+ overflow: hidden;
+ text-overflow: ellipsis;
+}
+
+.sidebar-context-meta {
+ font-size: 11px;
+ color: #9a9bb0;
+ display: flex;
+ align-items: center;
+ gap: 6px;
+}
+
+.sidebar-context-dot {
+ width: 3px;
+ height: 3px;
+ border-radius: 50%;
+ background: #4a4b5e;
+ display: inline-block;
+}
+
+.sidebar-context-actions {
+ display: flex;
+ gap: 6px;
+ margin-top: 2px;
+}
+
+.sidebar-context-btn {
+ /* `border` was declared twice (none, then solid); keep the winning declaration */
+ border: 1px solid #272832;
+ outline: none;
+ background: #1a1b26;
+ color: #9a9bb0;
+ border-radius: 6px;
+ padding: 4px 10px;
+ font-size: 11px;
+ font-weight: 500;
+ cursor: pointer;
+ transition: all 0.15s ease;
+}
+
+.sidebar-context-btn:hover {
+ background: #222335;
+ color: #c3c5dd;
+ border-color: #3a3b4d;
+}
+
+/* Per-repo chip list in sidebar context card */
+.sidebar-repo-chips {
+ display: flex;
+ flex-direction: column;
+ gap: 3px;
+}
+
+.sidebar-repo-chip {
+ display: flex;
+ align-items: center;
+ gap: 5px;
+ padding: 5px 6px 5px 8px;
+ border-radius: 6px;
+ border: 1px solid #272832;
+ background: #111220;
+ cursor: pointer;
+ white-space: nowrap;
+ overflow: hidden;
+ transition: border-color 0.15s, background-color 0.15s;
+}
+
+.sidebar-repo-chip:hover {
+ border-color: #3a3b4d;
+ background: #1a1b2e;
+}
+
+.sidebar-repo-chip-active {
+ border-color: #3B82F6;
+ background: rgba(59, 130, 246, 0.06);
+}
+
+.sidebar-chip-name {
+ font-size: 12px;
+ font-weight: 600;
+ color: #c3c5dd;
+ font-family: monospace;
+ overflow: hidden;
+ text-overflow: ellipsis;
+ flex: 1;
+ min-width: 0;
+}
+
+.sidebar-repo-chip-active .sidebar-chip-name {
+ color: #f5f5f7;
+}
+
+.sidebar-chip-dot {
+ width: 2px;
+ height: 2px;
+ border-radius: 50%;
+ background: #4a4b5e;
+ flex-shrink: 0;
+}
+
+.sidebar-chip-branch {
+ font-size: 10px;
+ color: #71717a;
+ font-family: monospace;
+ flex-shrink: 0;
+}
+
+.sidebar-repo-chip-active .sidebar-chip-branch {
+ color: #60a5fa;
+}
+
+.sidebar-chip-write-badge {
+ font-size: 8px;
+ font-weight: 700;
+ text-transform: uppercase;
+ letter-spacing: 0.06em;
+ color: #4caf88;
+ padding: 0 4px;
+ border-radius: 3px;
+ border: 1px solid rgba(76, 175, 136, 0.25);
+ flex-shrink: 0;
+}
+
+/* Per-chip remove button: subtle by default, visible on hover */
+.sidebar-chip-remove {
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ width: 16px;
+ height: 16px;
+ border-radius: 3px;
+ border: none;
+ background: transparent;
+ color: #52525B;
+ cursor: pointer;
+ flex-shrink: 0;
+ padding: 0;
+ opacity: 0;
+ transition: opacity 0.15s, color 0.15s, background 0.15s;
+}
+
+.sidebar-repo-chip:hover .sidebar-chip-remove {
+ opacity: 1;
+}
+
+.sidebar-chip-remove:hover {
+ color: #f87171;
+ background: rgba(248, 113, 113, 0.1);
+}
+
+/* "clear all" link-style button */
+.sidebar-clear-all {
+ font-size: 9px;
+ color: #52525B;
+ width: auto;
+ height: auto;
+ padding: 2px 6px;
+ font-weight: 600;
+ text-transform: uppercase;
+ letter-spacing: 0.04em;
+}
+
+.sidebar-clear-all:hover {
+ color: #f87171;
+ background: rgba(248, 113, 113, 0.08);
+}
+
+@keyframes slideIn {
+ from {
+ opacity: 0;
+ transform: translateX(-10px);
+ }
+ to {
+ opacity: 1;
+ transform: translateX(0);
+ }
+}
+
+/* ContextBar — horizontal chip bar above workspace */
+.ctxbar {
+ display: flex;
+ align-items: center;
+ gap: 8px;
+ padding: 6px 12px;
+ border-bottom: 1px solid #1E1F23;
+ background-color: #0D0D10;
+ min-height: 40px;
+}
+
+.ctxbar-scroll {
+ display: flex;
+ align-items: center;
+ gap: 6px;
+ flex: 1;
+ overflow-x: auto;
+ scrollbar-width: none;
+}
+
+.ctxbar-scroll::-webkit-scrollbar {
+ display: none;
+}
+
+.ctxbar-chip {
+ display: flex;
+ align-items: center;
+ gap: 5px;
+ padding: 4px 6px 4px 8px;
+ border-radius: 6px;
+ border: 1px solid #27272A;
+ background: #18181B;
+ cursor: pointer;
+ white-space: nowrap;
+ position: relative;
+ flex-shrink: 0;
+ transition: border-color 0.15s, background-color 0.15s;
+}
+
+.ctxbar-chip:hover {
+ border-color: #3a3b4d;
+ background: #1e1f30;
+}
+
+.ctxbar-chip-active {
+ border-color: #3B82F6;
+ background: rgba(59, 130, 246, 0.08);
+}
+
+.ctxbar-chip-indicator {
+ position: absolute;
+ left: 0;
+ top: 25%;
+ bottom: 25%;
+ width: 2px;
+ border-radius: 1px;
+ background-color: #3B82F6;
+}
+
+.ctxbar-chip-name {
+ font-size: 12px;
+ font-weight: 600;
+ font-family: monospace;
+ color: #A1A1AA;
+ max-width: 120px;
+ overflow: hidden;
+ text-overflow: ellipsis;
+}
+
+.ctxbar-chip-active .ctxbar-chip-name {
+ color: #E4E4E7;
+}
+
+.ctxbar-chip-dot {
+ width: 2px;
+ height: 2px;
+ border-radius: 50%;
+ background: #4a4b5e;
+ flex-shrink: 0;
+}
+
+.ctxbar-chip-branch {
+ font-size: 10px;
+ font-family: monospace;
+ background: none;
+ border: 1px solid transparent;
+ border-radius: 3px;
+ padding: 1px 4px;
+ cursor: pointer;
+ color: #71717A;
+ transition: border-color 0.15s, color 0.15s;
+}
+
+.ctxbar-chip-branch:hover {
+ border-color: #3a3b4d;
+}
+
+.ctxbar-chip-branch-active {
+ color: #60a5fa;
+}
+
+.ctxbar-chip-write {
+ font-size: 8px;
+ font-weight: 700;
+ text-transform: uppercase;
+ letter-spacing: 0.06em;
+ color: #4caf88;
+ padding: 0 4px;
+ border-radius: 3px;
+ border: 1px solid rgba(76, 175, 136, 0.25);
+ flex-shrink: 0;
+}
+
+/* Hover-reveal remove button (Claude-style: hidden → visible on chip hover → red on X hover) */
+.ctxbar-chip-remove {
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ width: 16px;
+ height: 16px;
+ border-radius: 3px;
+ border: none;
+ background: transparent;
+ color: #52525B;
+ cursor: pointer;
+ flex-shrink: 0;
+ padding: 0;
+ opacity: 0;
+ transition: opacity 0.15s, color 0.15s, background 0.15s;
+}
+
+.ctxbar-chip-remove-visible,
+.ctxbar-chip:hover .ctxbar-chip-remove {
+ opacity: 1;
+}
+
+.ctxbar-chip-remove:hover {
+ color: #f87171;
+ background: rgba(248, 113, 113, 0.1);
+}
+
+.ctxbar-add {
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ width: 28px;
+ height: 28px;
+ border-radius: 6px;
+ border: 1px dashed #3F3F46;
+ background: transparent;
+ color: #71717A;
+ cursor: pointer;
+ flex-shrink: 0;
+ transition: border-color 0.15s, color 0.15s;
+}
+
+.ctxbar-add:hover {
+ border-color: #60a5fa;
+ color: #60a5fa;
+}
+
+.ctxbar-meta {
+ font-size: 10px;
+ color: #52525B;
+ white-space: nowrap;
+ flex-shrink: 0;
+ font-weight: 600;
+ text-transform: uppercase;
+ letter-spacing: 0.04em;
+}
+
+.ctxbar-branch-picker {
+ position: absolute;
+ top: 100%;
+ left: 0;
+ z-index: 100;
+ margin-top: 4px;
+}
+
+/* Legacy compat — kept for other uses */
+.sidebar-repo-info {
+ padding: 10px 12px;
+ border-radius: 10px;
+ background: #151622;
+ border: 1px solid #272832;
+ animation: slideIn 0.3s ease;
+}
+
+.sidebar-repo-name {
+ font-size: 13px;
+ font-weight: 500;
+}
+
+.sidebar-repo-meta {
+ font-size: 11px;
+ color: #9a9bb0;
+ margin-top: 2px;
+}
+
+.settings-button {
+ border: none;
+ outline: none;
+ background: #1a1b26;
+ color: #f5f5f7;
+ border-radius: 8px;
+ padding: 8px 10px;
+ cursor: pointer;
+ font-size: 13px;
+ transition: all 0.2s ease;
+}
+
+.settings-button:hover {
+ background: #222335;
+ transform: translateY(-1px);
+}
+
+/* Repo search */
+.repo-search-box {
+ border-radius: 12px;
+ background: #101117;
+ border: 1px solid #272832;
+ padding: 8px;
+ display: flex;
+ flex-direction: column;
+ gap: 8px;
+}
+
+/* Search header wrapper */
+.repo-search-header {
+ display: flex;
+ flex-direction: column;
+ gap: 8px;
+}
+
+/* Search row with input and button */
+.repo-search-row {
+ display: flex;
+ gap: 6px;
+ align-items: center;
+}
+
+/* Search input */
+.repo-search-input {
+ flex: 1;
+ border-radius: 7px;
+ padding: 8px 10px;
+ border: 1px solid #272832;
+ background: #050608;
+ color: #f5f5f7;
+ font-size: 13px;
+ transition: all 0.2s ease;
+}
+
+.repo-search-input:focus {
+ outline: none;
+ border-color: #ff7a3c;
+ background: #0a0b0f;
+ box-shadow: 0 0 0 3px rgba(255, 122, 60, 0.08);
+}
+
+.repo-search-input::placeholder {
+ color: #676883;
+}
+
+.repo-search-input:disabled {
+ opacity: 0.5;
+ cursor: not-allowed;
+}
+
+/* Search button */
+.repo-search-btn {
+ border-radius: 7px;
+ border: none;
+ outline: none;
+ padding: 8px 14px;
+ background: #1a1b26;
+ color: #f5f5f7;
+ cursor: pointer;
+ font-size: 13px;
+ font-weight: 500;
+ transition: all 0.2s ease;
+ white-space: nowrap;
+}
+
+.repo-search-btn:hover:not(:disabled) {
+ background: #222335;
+ transform: translateY(-1px);
+}
+
+.repo-search-btn:active:not(:disabled) {
+ transform: translateY(0);
+}
+
+.repo-search-btn:disabled {
+ opacity: 0.5;
+ cursor: not-allowed;
+}
+
+/* Info bar (shows count and clear button) */
+.repo-info-bar {
+ display: flex;
+ justify-content: space-between;
+ align-items: center;
+ padding: 6px 10px;
+ background: #0a0b0f;
+ border: 1px solid #272832;
+ border-radius: 7px;
+ font-size: 11px;
+}
+
+.repo-count {
+ color: #9a9bb0;
+ font-weight: 500;
+}
+
+.repo-clear-btn {
+ padding: 3px 10px;
+ background: transparent;
+ border: 1px solid #272832;
+ border-radius: 5px;
+ color: #9a9bb0;
+ font-size: 11px;
+ font-weight: 500;
+ cursor: pointer;
+ transition: all 0.2s ease;
+}
+
+.repo-clear-btn:hover:not(:disabled) {
+ background: #1a1b26;
+ color: #c3c5dd;
+ border-color: #3a3b4d;
+}
+
+.repo-clear-btn:disabled {
+ opacity: 0.5;
+ cursor: not-allowed;
+}
+
+/* Status message */
+.repo-status {
+ padding: 8px 10px;
+ background: #1a1b26;
+ border: 1px solid #272832;
+ border-radius: 7px;
+ color: #9a9bb0;
+ font-size: 11px;
+ text-align: center;
+}
+
+/* Repository list */
+.repo-list {
+ max-height: 220px;
+ overflow-y: auto;
+ overflow-x: hidden;
+ padding-right: 2px;
+ display: flex;
+ flex-direction: column;
+ gap: 4px;
+}
+
+/* Custom scrollbar for repo list */
+.repo-list::-webkit-scrollbar {
+ width: 6px;
+}
+
+.repo-list::-webkit-scrollbar-track {
+ background: transparent;
+}
+
+.repo-list::-webkit-scrollbar-thumb {
+ background: #272832;
+ border-radius: 3px;
+}
+
+.repo-list::-webkit-scrollbar-thumb:hover {
+ background: #3a3b4d;
+}
+
+/* Repository item */
+.repo-item {
+ width: 100%;
+ text-align: left;
+ /* `border` was declared twice (none, then transparent); keep the winning declaration */
+ border: 1px solid transparent;
+ outline: none;
+ background: transparent;
+ color: #f5f5f7;
+ padding: 8px 8px;
+ border-radius: 7px;
+ cursor: pointer;
+ display: flex;
+ align-items: center;
+ justify-content: space-between;
+ gap: 8px;
+ transition: all 0.15s ease;
+}
+
+.repo-item:hover {
+ background: #1a1b26;
+ border-color: #272832;
+ transform: translateX(2px);
+}
+
+.repo-item-content {
+ display: flex;
+ flex-direction: column;
+ gap: 2px;
+ flex: 1;
+ min-width: 0;
+}
+
+.repo-name {
+ font-size: 13px;
+ font-weight: 500;
+ overflow: hidden;
+ text-overflow: ellipsis;
+ white-space: nowrap;
+}
+
+.repo-owner {
+ font-size: 11px;
+ color: #8e8fac;
+ overflow: hidden;
+ text-overflow: ellipsis;
+ white-space: nowrap;
+}
+
+/* Private badge */
+.repo-badge-private {
+ padding: 2px 6px;
+ background: #1a1b26;
+ border: 1px solid #3a3b4d;
+ border-radius: 4px;
+ color: #9a9bb0;
+ font-size: 9px;
+ font-weight: 600;
+ text-transform: uppercase;
+ letter-spacing: 0.3px;
+ white-space: nowrap;
+ flex-shrink: 0;
+}
+
+/* Loading states */
+.repo-loading {
+ display: flex;
+ flex-direction: column;
+ align-items: center;
+ gap: 10px;
+ padding: 30px 20px;
+ color: #9a9bb0;
+ font-size: 12px;
+}
+
+.repo-loading-spinner {
+ width: 24px;
+ height: 24px;
+ border: 2px solid #272832;
+ border-top-color: #ff7a3c;
+ border-radius: 50%;
+ animation: repo-spin 0.8s linear infinite;
+}
+
+.repo-loading-spinner-small {
+ width: 14px;
+ height: 14px;
+ border: 2px solid rgba(255, 122, 60, 0.3);
+ border-top-color: #ff7a3c;
+ border-radius: 50%;
+ animation: repo-spin 0.8s linear infinite;
+}
+
+@keyframes repo-spin {
+ to {
+ transform: rotate(360deg);
+ }
+}
+
+/* Load more button */
+.repo-load-more {
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ gap: 8px;
+ width: 100%;
+ padding: 10px 12px;
+ margin: 4px 0;
+ background: #0a0b0f;
+ border: 1px solid #272832;
+ border-radius: 7px;
+ color: #c3c5dd;
+ font-size: 12px;
+ font-weight: 500;
+ cursor: pointer;
+ transition: all 0.2s ease;
+}
+
+.repo-load-more:hover:not(:disabled) {
+ background: #1a1b26;
+ border-color: #3a3b4d;
+ transform: translateY(-1px);
+}
+
+.repo-load-more:active:not(:disabled) {
+ transform: translateY(0);
+}
+
+.repo-load-more:disabled {
+ opacity: 0.6;
+ cursor: not-allowed;
+}
+
+.repo-load-more-count {
+ color: #7779a0;
+ font-weight: 400;
+}
+
+/* All loaded message */
+.repo-all-loaded {
+ padding: 10px 12px;
+ margin: 4px 0;
+ background: rgba(124, 255, 179, 0.08);
+ border: 1px solid rgba(124, 255, 179, 0.2);
+ border-radius: 7px;
+ color: #7cffb3;
+ font-size: 11px;
+ text-align: center;
+ font-weight: 500;
+}
+
+/* GitHub App installation notice */
+.repo-github-notice {
+ display: flex;
+ align-items: flex-start;
+ gap: 10px;
+ padding: 10px 12px;
+ background: #0a0b0f;
+ border: 1px solid #272832;
+ border-radius: 7px;
+ font-size: 11px;
+ line-height: 1.5;
+ margin-top: 4px;
+}
+
+.repo-github-icon {
+ flex-shrink: 0;
+ margin-top: 1px;
+ opacity: 0.6;
+ color: #9a9bb0;
+ width: 16px;
+ height: 16px;
+}
+
+.repo-github-notice-content {
+ flex: 1;
+ display: flex;
+ flex-direction: column;
+ gap: 3px;
+}
+
+.repo-github-notice-title {
+ color: #c3c5dd;
+ font-weight: 600;
+ font-size: 11px;
+}
+
+.repo-github-notice-text {
+ color: #9a9bb0;
+}
+
+.repo-github-link {
+ color: #ff7a3c;
+ text-decoration: none;
+ font-weight: 500;
+ transition: color 0.2s ease;
+}
+
+.repo-github-link:hover {
+ color: #ff8b52;
+ text-decoration: underline;
+}
+
+/* Focus visible for accessibility */
+.repo-item:focus-visible,
+.repo-search-btn:focus-visible,
+.repo-load-more:focus-visible,
+.repo-clear-btn:focus-visible {
+ outline: 2px solid #ff7a3c;
+ outline-offset: 2px;
+}
+
+/* Reduced motion support */
+@media (prefers-reduced-motion: reduce) {
+ .repo-item,
+ .repo-search-btn,
+ .repo-load-more,
+ .repo-clear-btn {
+ transition: none;
+ }
+
+ .repo-loading-spinner,
+ .repo-loading-spinner-small {
+ animation: none;
+ }
+}
+
+/* Mobile responsive adjustments */
+@media (max-width: 768px) {
+ .repo-search-input {
+ font-size: 16px; /* Prevents zoom on iOS */
+ }
+
+ .repo-item {
+ padding: 7px 7px;
+ }
+
+ .repo-name {
+ font-size: 12px;
+ }
+
+ .repo-owner {
+ font-size: 10px;
+ }
+}
+
+/* Workspace */
+.workspace {
+ flex: 1;
+ display: flex;
+ flex-direction: column;
+ position: relative;
+ overflow: hidden;
+ min-height: 0;
+}
+
+.empty-state {
+ margin: auto;
+ max-width: 420px;
+ text-align: center;
+ color: #c3c5dd;
+ animation: fadeIn 0.5s ease;
+}
+
+.empty-bot {
+ font-size: 36px;
+ margin-bottom: 12px;
+ animation: bounce 2s ease infinite;
+}
+
+@keyframes bounce {
+ 0%, 100% {
+ transform: translateY(0);
+ }
+ 50% {
+ transform: translateY(-10px);
+ }
+}
+
+.empty-state h1 {
+ font-size: 24px;
+ margin-bottom: 6px;
+}
+
+.empty-state p {
+ font-size: 14px;
+ color: #9a9bb0;
+}
+
+/* Workspace grid - Properly constrained */
+.workspace-grid {
+ display: grid;
+ grid-template-columns: 320px minmax(340px, 1fr);
+ height: 100%;
+ overflow: hidden;
+ flex: 1;
+ min-height: 0;
+}
+
+/* Panels */
+.panel-header {
+ height: 40px;
+ padding: 0 16px;
+ border-bottom: 1px solid #272832;
+ display: flex;
+ align-items: center;
+ justify-content: space-between;
+ font-size: 13px;
+ font-weight: 500;
+ color: #c3c5dd;
+ background: #0a0b0f;
+ flex-shrink: 0;
+}
+
+.badge {
+ padding: 2px 6px;
+ border-radius: 999px;
+ border: 1px solid #3a3b4d;
+ font-size: 10px;
+}
+
+/* Files */
+.files-panel {
+ border-right: 1px solid #272832;
+ background: #101117;
+ display: flex;
+ flex-direction: column;
+ overflow: hidden;
+}
+
+.files-list {
+ flex: 1;
+ overflow-y: auto;
+ overflow-x: hidden;
+ padding: 6px 4px;
+ min-height: 0;
+}
+
+.files-item {
+ border: none;
+ outline: none;
+ width: 100%;
+ background: transparent;
+ color: #f5f5f7;
+ display: flex;
+ align-items: center;
+ gap: 8px;
+ padding: 4px 8px;
+ border-radius: 6px;
+ cursor: pointer;
+ font-size: 12px;
+ transition: all 0.15s ease;
+}
+
+.files-item:hover {
+ background: #1a1b26;
+ transform: translateX(2px);
+}
+
+.files-item-active {
+ background: #2a2b3c;
+}
+
+.file-icon {
+ width: 16px;
+ flex-shrink: 0;
+}
+
+.file-path {
+ white-space: nowrap;
+ overflow: hidden;
+ text-overflow: ellipsis;
+}
+
+.files-empty {
+ padding: 10px 12px;
+ font-size: 12px;
+ color: #9a9bb0;
+}
+
+/* Chat panel */
+.editor-panel {
+ display: flex;
+ flex-direction: column;
+ background: #050608;
+}
+
+.chat-container {
+ display: flex;
+ flex-direction: column;
+ flex: 1;
+ min-height: 0;
+ overflow: hidden;
+}
+
+.chat-messages {
+ flex: 1;
+ padding: 12px 16px;
+ overflow-y: auto;
+ overflow-x: hidden;
+ font-size: 13px;
+ min-height: 0;
+ scroll-behavior: smooth;
+}
+
+.chat-message-user {
+ margin-bottom: 16px;
+ animation: slideInRight 0.3s ease;
+}
+
+@keyframes slideInRight {
+ from {
+ opacity: 0;
+ transform: translateX(20px);
+ }
+ to {
+ opacity: 1;
+ transform: translateX(0);
+ }
+}
+
+.chat-message-ai {
+ margin-bottom: 16px;
+ animation: slideInLeft 0.3s ease;
+}
+
+@keyframes slideInLeft {
+ from {
+ opacity: 0;
+ transform: translateX(-20px);
+ }
+ to {
+ opacity: 1;
+ transform: translateX(0);
+ }
+}
+
/* Assistant replies render as padded chat bubbles. This selector was
   previously declared twice (the second occurrence only added the background
   and border); the two declaration groups are merged into one rule so the
   bubble style lives in a single place. */
.chat-message-ai span {
  display: inline-block;
  padding: 10px 14px;
  border-radius: 12px;
  max-width: 80%;
  line-height: 1.5;
  background: #151622;
  border: 1px solid #272832;
}

/* User messages stay plain inline text — explicitly reset everything the
   bubble styling could apply, so a shared rule can never leak into them. */
.chat-message-user span {
  display: inline;
  padding: 0;
  border-radius: 0;
  background: transparent;
  border: none;
  max-width: none;
  line-height: inherit;
}
+
+/* Compact thinking bubble — defensive isolation so the global
+ .chat-message-ai span rule (which gives every span a chunky
+ 10×14 padded pill with a dark background) cannot leak into the
+ thinking indicator's tiny inline-styled dots and label. */
+.gitpilot-thinking-indicator,
+.gitpilot-thinking-indicator span {
+ background: transparent;
+ border: none;
+ padding: 0;
+ max-width: none;
+ line-height: 1.4;
+}
+
+.chat-empty-state {
+ display: flex;
+ flex-direction: column;
+ align-items: center;
+ justify-content: center;
+ min-height: 300px;
+ padding: 40px 20px;
+ text-align: center;
+}
+
+.chat-empty-icon {
+ font-size: 48px;
+ margin-bottom: 16px;
+ opacity: 0.6;
+ animation: pulse 2s ease infinite;
+}
+
+@keyframes pulse {
+ 0%, 100% {
+ opacity: 0.6;
+ }
+ 50% {
+ opacity: 0.8;
+ }
+}
+
+.chat-empty-state p {
+ margin: 0;
+ font-size: 13px;
+ color: #9a9bb0;
+ max-width: 400px;
+}
+
+.chat-input-box {
+ padding: 12px 16px;
+ border-top: 1px solid #272832;
+ display: flex;
+ flex-direction: column;
+ gap: 10px;
+ background: #050608;
+ flex-shrink: 0;
+ min-height: fit-content;
+ box-shadow: 0 -2px 10px rgba(0, 0, 0, 0.3);
+}
+
+.chat-input-row {
+ display: flex;
+ gap: 10px;
+ align-items: center;
+ flex-wrap: wrap;
+}
+
+.chat-input {
+ flex: 1;
+ min-width: 200px;
+ border-radius: 8px;
+ padding: 10px 12px;
+ border: 1px solid #272832;
+ background: #0a0b0f;
+ color: #f5f5f7;
+ font-size: 13px;
+ line-height: 1.5;
+ transition: all 0.2s ease;
+}
+
+.chat-input:focus {
+ outline: none;
+ border-color: #ff7a3c;
+ background: #101117;
+ box-shadow: 0 0 0 3px rgba(255, 122, 60, 0.1);
+}
+
+.chat-input::placeholder {
+ color: #676883;
+}
+
+.chat-btn {
+ border-radius: 8px;
+ border: none;
+ outline: none;
+ padding: 10px 16px;
+ background: #ff7a3c;
+ color: #050608;
+ cursor: pointer;
+ font-size: 13px;
+ font-weight: 600;
+ transition: all 0.2s ease;
+ white-space: nowrap;
+ min-height: 40px;
+}
+
+.chat-btn:hover:not(:disabled) {
+ background: #ff8c52;
+ transform: translateY(-1px);
+ box-shadow: 0 4px 12px rgba(255, 122, 60, 0.3);
+}
+
+.chat-btn:active:not(:disabled) {
+ transform: translateY(0);
+}
+
+.chat-btn.secondary {
+ background: #1a1b26;
+ color: #f5f5f7;
+ border: 1px solid #272832;
+}
+
+.chat-btn.secondary:hover:not(:disabled) {
+ background: #222335;
+ border-color: #3a3b4d;
+}
+
+.chat-btn:disabled {
+ opacity: 0.5;
+ cursor: not-allowed;
+}
+
+/* Plan rendering */
+.plan-card {
+ border-radius: 12px;
+ background: #101117;
+ border: 1px solid #272832;
+ padding: 10px 12px;
+ margin-top: 6px;
+ animation: fadeIn 0.3s ease;
+}
+
+.plan-steps {
+ margin: 6px 0 0;
+ padding-left: 18px;
+ font-size: 12px;
+}
+
+.plan-steps li {
+ margin-bottom: 4px;
+}
+
+/* Modal */
+.modal-backdrop {
+ position: fixed;
+ inset: 0;
+ background: rgba(0, 0, 0, 0.55);
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ z-index: 20;
+ animation: fadeIn 0.2s ease;
+}
+
+.modal {
+ background: #101117;
+ border-radius: 16px;
+ border: 1px solid #272832;
+ padding: 16px 18px;
+ width: 360px;
+ animation: scaleIn 0.3s ease;
+}
+
+@keyframes scaleIn {
+ from {
+ opacity: 0;
+ transform: scale(0.9);
+ }
+ to {
+ opacity: 1;
+ transform: scale(1);
+ }
+}
+
+.modal-header {
+ display: flex;
+ justify-content: space-between;
+ align-items: center;
+ margin-bottom: 10px;
+}
+
+.modal-title {
+ font-size: 15px;
+ font-weight: 600;
+}
+
+.modal-close {
+ border: none;
+ outline: none;
+ background: transparent;
+ color: #9a9bb0;
+ cursor: pointer;
+ transition: color 0.2s ease;
+}
+
+.modal-close:hover {
+ color: #ff7a3c;
+}
+
+.provider-list {
+ display: flex;
+ flex-direction: column;
+ gap: 6px;
+ margin-top: 8px;
+}
+
+.provider-item {
+ display: flex;
+ align-items: center;
+ justify-content: space-between;
+ padding: 6px 8px;
+ border-radius: 8px;
+ background: #151622;
+ border: 1px solid #272832;
+ font-size: 13px;
+ transition: all 0.2s ease;
+}
+
+.provider-item:hover {
+ border-color: #3a3b4d;
+}
+
+.provider-item.active {
+ border-color: #ff7a3c;
+ background: rgba(255, 122, 60, 0.1);
+}
+
+.provider-name {
+ font-weight: 500;
+}
+
+.provider-badge {
+ font-size: 11px;
+ color: #9a9bb0;
+}
+
+/* Navigation */
+.main-nav {
+ display: flex;
+ flex-direction: column;
+ gap: 2px;
+ margin-top: 10px;
+ margin-bottom: 10px;
+}
+
+.nav-btn {
+ border: none;
+ outline: none;
+ background: transparent;
+ color: #9a9bb0;
+ border-radius: 8px;
+ font-size: 13px;
+ font-weight: 500;
+ padding: 8px 12px;
+ text-align: left;
+ cursor: pointer;
+ transition: all 0.15s ease;
+}
+
+.nav-btn:hover {
+ background: #1a1b26;
+ color: #c3c5dd;
+}
+
+.nav-btn-active {
+ background: #1a1b26;
+ color: #f5f5f7;
+ font-weight: 600;
+ border-left: 2px solid #ff7a3c;
+ padding-left: 10px;
+}
+
+/* Settings page */
+.settings-root {
+ padding: 20px 24px;
+ overflow-y: auto;
+ max-width: 800px;
+}
+
+.settings-root h1 {
+ margin-top: 0;
+ font-size: 24px;
+ margin-bottom: 8px;
+}
+
+.settings-muted {
+ font-size: 13px;
+ color: #9a9bb0;
+ margin-bottom: 20px;
+ line-height: 1.5;
+}
+
+.settings-card {
+ background: #101117;
+ border-radius: 12px;
+ border: 1px solid #272832;
+ padding: 14px 16px;
+ margin-bottom: 14px;
+ display: flex;
+ flex-direction: column;
+ gap: 8px;
+ transition: all 0.2s ease;
+}
+
+.settings-card:hover {
+ border-color: #3a3b4d;
+}
+
+.settings-title {
+ font-size: 15px;
+ font-weight: 600;
+ margin-bottom: 4px;
+}
+
+.settings-label {
+ font-size: 12px;
+ color: #9a9bb0;
+ font-weight: 500;
+ margin-top: 4px;
+}
+
+.settings-input,
+.settings-select {
+ background: #050608;
+ border-radius: 8px;
+ border: 1px solid #272832;
+ padding: 8px 10px;
+ color: #f5f5f7;
+ font-size: 13px;
+ font-family: inherit;
+ transition: all 0.2s ease;
+}
+
+.settings-input:focus,
+.settings-select:focus {
+ outline: none;
+ border-color: #ff7a3c;
+ box-shadow: 0 0 0 3px rgba(255, 122, 60, 0.1);
+}
+
+.settings-input::placeholder {
+ color: #676883;
+}
+
+.settings-hint {
+ font-size: 11px;
+ color: #7a7b8e;
+ margin-top: -2px;
+}
+
+.settings-actions {
+ margin-top: 12px;
+ display: flex;
+ align-items: center;
+ gap: 12px;
+}
+
+.settings-save-btn {
+ background: #ff7a3c;
+ border-radius: 999px;
+ border: none;
+ outline: none;
+ padding: 9px 18px;
+ font-size: 13px;
+ cursor: pointer;
+ color: #050608;
+ font-weight: 600;
+ transition: all 0.2s ease;
+}
+
+.settings-save-btn:hover {
+ background: #ff8b52;
+ transform: translateY(-1px);
+ box-shadow: 0 4px 12px rgba(255, 122, 60, 0.3);
+}
+
+.settings-save-btn:disabled {
+ opacity: 0.5;
+ cursor: not-allowed;
+ transform: none;
+}
+
+.settings-success {
+ font-size: 12px;
+ color: #7cffb3;
+ font-weight: 500;
+}
+
+.settings-error {
+ font-size: 12px;
+ color: #ff8a8a;
+ font-weight: 500;
+}
+
+/* Flow viewer */
+.flow-root {
+ display: flex;
+ flex-direction: column;
+ height: 100%;
+ overflow: hidden;
+}
+
+.flow-header {
+ padding: 16px 20px;
+ border-bottom: 1px solid #272832;
+ display: flex;
+ align-items: center;
+ justify-content: space-between;
+}
+
+.flow-header h1 {
+ margin: 0;
+ font-size: 22px;
+ margin-bottom: 4px;
+}
+
+.flow-header p {
+ margin: 0;
+ font-size: 12px;
+ color: #9a9bb0;
+ max-width: 600px;
+ line-height: 1.5;
+}
+
+.flow-canvas {
+ flex: 1;
+ background: #050608;
+ position: relative;
+}
+
+.flow-error {
+ position: absolute;
+ inset: 0;
+ display: flex;
+ flex-direction: column;
+ align-items: center;
+ justify-content: center;
+ gap: 12px;
+}
+
+.error-icon {
+ font-size: 48px;
+}
+
+.error-text {
+ font-size: 14px;
+ color: #ff8a8a;
+}
+
+/* Assistant Message Sections */
+.gp-section {
+ margin-bottom: 16px;
+ border-radius: 12px;
+ background: #101117;
+ border: 1px solid #272832;
+ overflow: hidden;
+ animation: fadeIn 0.3s ease;
+}
+
+.gp-section-header {
+ padding: 8px 12px;
+ background: #151622;
+ border-bottom: 1px solid #272832;
+}
+
+.gp-section-header h3 {
+ margin: 0;
+ font-size: 13px;
+ font-weight: 600;
+ color: #c3c5dd;
+}
+
+.gp-section-content {
+ padding: 12px;
+}
+
+.gp-section-answer .gp-section-content p {
+ margin: 0;
+ font-size: 13px;
+ line-height: 1.6;
+ color: #f5f5f7;
+}
+
+.gp-section-plan {
+ background: #0a0b0f;
+}
+
+/* Plan View Enhanced */
+.plan-header {
+ margin-bottom: 12px;
+}
+
+.plan-goal {
+ font-size: 13px;
+ font-weight: 600;
+ margin-bottom: 4px;
+ color: #f5f5f7;
+}
+
+.plan-summary {
+ font-size: 12px;
+ color: #c3c5dd;
+ line-height: 1.5;
+}
+
+.plan-totals {
+ display: flex;
+ gap: 8px;
+ margin-bottom: 12px;
+ flex-wrap: wrap;
+}
+
+.plan-total {
+ padding: 4px 8px;
+ border-radius: 6px;
+ font-size: 11px;
+ font-weight: 500;
+ animation: fadeIn 0.3s ease;
+}
+
+.plan-total-create {
+ background: rgba(76, 175, 80, 0.15);
+ color: #81c784;
+ border: 1px solid rgba(76, 175, 80, 0.3);
+}
+
+.plan-total-modify {
+ background: rgba(33, 150, 243, 0.15);
+ color: #64b5f6;
+ border: 1px solid rgba(33, 150, 243, 0.3);
+}
+
+.plan-total-delete {
+ background: rgba(244, 67, 54, 0.15);
+ color: #e57373;
+ border: 1px solid rgba(244, 67, 54, 0.3);
+}
+
+.plan-step {
+ margin-bottom: 12px;
+ padding-bottom: 12px;
+ border-bottom: 1px solid #1a1b26;
+}
+
+.plan-step:last-child {
+ border-bottom: none;
+ padding-bottom: 0;
+ margin-bottom: 0;
+}
+
+.plan-step-header {
+ margin-bottom: 6px;
+}
+
+.plan-step-description {
+ font-size: 12px;
+ color: #9a9bb0;
+ margin-bottom: 8px;
+}
+
+.plan-files {
+ list-style: none;
+ padding: 0;
+ margin: 8px 0;
+}
+
+.plan-file {
+ display: flex;
+ align-items: center;
+ gap: 8px;
+ padding: 4px 0;
+}
+
+.gp-pill {
+ padding: 2px 6px;
+ border-radius: 4px;
+ font-size: 10px;
+ font-weight: 600;
+ text-transform: uppercase;
+ letter-spacing: 0.3px;
+}
+
+.gp-pill-create {
+ background: rgba(76, 175, 80, 0.2);
+ color: #81c784;
+ border: 1px solid rgba(76, 175, 80, 0.4);
+}
+
+.gp-pill-modify {
+ background: rgba(33, 150, 243, 0.2);
+ color: #64b5f6;
+ border: 1px solid rgba(33, 150, 243, 0.4);
+}
+
+.gp-pill-delete {
+ background: rgba(244, 67, 54, 0.2);
+ color: #e57373;
+ border: 1px solid rgba(244, 67, 54, 0.4);
+}
+
+.plan-file-path {
+ font-size: 11px;
+ color: #c3c5dd;
+ font-family: "SF Mono", Monaco, "Cascadia Code", monospace;
+ background: #0a0b0f;
+ padding: 2px 6px;
+ border-radius: 4px;
+}
+
+.plan-step-risks {
+ margin-top: 8px;
+ padding: 6px 8px;
+ background: rgba(255, 152, 0, 0.1);
+ border-left: 2px solid #ff9800;
+ border-radius: 4px;
+ font-size: 11px;
+ color: #ffb74d;
+}
+
+.plan-risk-label {
+ font-weight: 600;
+}
+
+/* Execution Log */
+.execution-steps {
+ list-style: none;
+ padding: 0;
+ margin: 0;
+}
+
+.execution-step {
+ padding: 8px;
+ margin-bottom: 6px;
+ background: #0a0b0f;
+ border-radius: 6px;
+ font-size: 11px;
+ font-family: "SF Mono", Monaco, "Cascadia Code", monospace;
+ white-space: pre-wrap;
+}
+
+.execution-step-number {
+ color: #ff7a3c;
+ font-weight: 600;
+ margin-right: 8px;
+}
+
+.execution-step-summary {
+ color: #c3c5dd;
+}
+
+/* Project Context Panel - Properly constrained */
+.gp-context {
+ padding: 12px;
+ height: 100%;
+ overflow: hidden;
+ display: flex;
+ flex-direction: column;
+}
+
+.gp-context-column {
+ background: #0a0b0f;
+ border-right: 1px solid #272832;
+ overflow: hidden;
+ display: flex;
+ flex-direction: column;
+}
+
+.gp-chat-column {
+ display: flex;
+ flex-direction: column;
+ background: #050608;
+ height: 100%;
+ min-width: 0;
+ overflow: hidden;
+}
+
+.gp-card {
+ background: #101117;
+ border-radius: 12px;
+ border: 1px solid #272832;
+ overflow: hidden;
+ display: flex;
+ flex-direction: column;
+ height: 100%;
+ min-height: 0;
+}
+
+.gp-card-header {
+ padding: 10px 12px;
+ background: #151622;
+ border-bottom: 1px solid #272832;
+ display: flex;
+ align-items: center;
+ justify-content: space-between;
+ flex-shrink: 0;
+}
+
+.gp-card-header h2 {
+ margin: 0;
+ font-size: 14px;
+ font-weight: 600;
+ color: #f5f5f7;
+}
+
+.gp-badge {
+ padding: 3px 8px;
+ border-radius: 999px;
+ background: #2a2b3c;
+ border: 1px solid #3a3b4d;
+ font-size: 11px;
+ color: #c3c5dd;
+ font-weight: 500;
+ transition: all 0.2s ease;
+}
+
+.gp-badge:hover {
+ border-color: #ff7a3c;
+}
+
+.gp-context-meta {
+ padding: 12px;
+ display: flex;
+ flex-direction: column;
+ gap: 6px;
+ border-bottom: 1px solid #272832;
+ flex-shrink: 0;
+ background: #0a0b0f;
+}
+
+.gp-context-meta-item {
+ display: flex;
+ align-items: center;
+ gap: 6px;
+ font-size: 12px;
+}
+
+.gp-context-meta-label {
+ color: #9a9bb0;
+ min-width: 60px;
+}
+
+.gp-context-meta-item strong {
+ color: #f5f5f7;
+ font-weight: 500;
+}
+
+/* File tree - Properly scrollable */
+.gp-context-tree {
+ flex: 1;
+ overflow-y: auto;
+ overflow-x: hidden;
+ min-height: 0;
+ padding: 4px;
+}
+
+.gp-context-empty {
+ padding: 20px 12px;
+ text-align: center;
+ color: #9a9bb0;
+ font-size: 12px;
+}
+
+/* Footer - Fixed at bottom */
+.gp-footer {
+ position: fixed;
+ bottom: 0;
+ left: 0;
+ right: 0;
+ border-top: 1px solid #272832;
+ padding: 8px 20px;
+ display: flex;
+ justify-content: space-between;
+ align-items: center;
+ font-size: 11px;
+ color: #9a9bb0;
+ background: #0a0b0f;
+ backdrop-filter: blur(10px);
+ z-index: 10;
+ box-shadow: 0 -2px 10px rgba(0, 0, 0, 0.2);
+}
+
+.gp-footer-left {
+ display: flex;
+ align-items: center;
+ gap: 6px;
+ font-weight: 500;
+ color: #c3c5dd;
+}
+
+.gp-footer-right {
+ display: flex;
+ align-items: center;
+ gap: 12px;
+}
+
+.gp-footer-right a {
+ color: #9a9bb0;
+ text-decoration: none;
+ transition: all 0.2s ease;
+}
+
+.gp-footer-right a:hover {
+ color: #ff7a3c;
+ transform: translateY(-1px);
+}
+
+/* Adjust app-root to account for fixed footer */
+.app-root > .main-wrapper {
+ padding-bottom: 32px; /* Space for fixed footer */
+}
+
+/* ============================================================================
+ LOGIN PAGE - Enterprise GitHub Authentication
+ ============================================================================ */
+
+.login-page {
+ min-height: 100vh;
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ background: radial-gradient(circle at center, #171823 0%, #050608 70%);
+ padding: 20px;
+ animation: fadeIn 0.4s ease;
+}
+
+.login-container {
+ width: 100%;
+ max-width: 480px;
+ background: #101117;
+ border: 1px solid #272832;
+ border-radius: 24px;
+ padding: 40px 36px;
+ box-shadow: 0 20px 60px rgba(0, 0, 0, 0.4);
+ animation: slideUp 0.5s ease;
+}
+
+@keyframes slideUp {
+ from {
+ opacity: 0;
+ transform: translateY(20px);
+ }
+ to {
+ opacity: 1;
+ transform: translateY(0);
+ }
+}
+
+/* Header */
+.login-header {
+ text-align: center;
+ margin-bottom: 32px;
+}
+
+.login-logo {
+ display: flex;
+ justify-content: center;
+ margin-bottom: 16px;
+}
+
+.logo-icon {
+ width: 64px;
+ height: 64px;
+ border-radius: 16px;
+ background: linear-gradient(135deg, #ff7a3c 0%, #ff6b2b 100%);
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ font-weight: 700;
+ font-size: 28px;
+ color: #050608;
+ box-shadow: 0 8px 24px rgba(255, 122, 60, 0.3);
+ transition: transform 0.3s ease;
+}
+
+.logo-icon:hover {
+ transform: scale(1.05) rotate(3deg);
+}
+
+.login-title {
+ margin: 0;
+ font-size: 28px;
+ font-weight: 700;
+ color: #f5f5f7;
+ margin-bottom: 8px;
+ letter-spacing: -0.5px;
+}
+
+.login-subtitle {
+ margin: 0;
+ font-size: 14px;
+ color: #9a9bb0;
+ font-weight: 500;
+}
+
+/* Welcome Section */
+.login-welcome {
+ margin-bottom: 28px;
+ padding-bottom: 28px;
+ border-bottom: 1px solid #272832;
+}
+
+.login-welcome h2 {
+ margin: 0 0 12px 0;
+ font-size: 20px;
+ font-weight: 600;
+ color: #f5f5f7;
+}
+
+.login-welcome p {
+ margin: 0;
+ font-size: 14px;
+ line-height: 1.6;
+ color: #c3c5dd;
+}
+
+/* Error Message */
+.login-error {
+ display: flex;
+ align-items: center;
+ gap: 10px;
+ padding: 12px 14px;
+ background: rgba(255, 82, 82, 0.1);
+ border: 1px solid rgba(255, 82, 82, 0.3);
+ border-radius: 10px;
+ color: #ff8a8a;
+ font-size: 13px;
+ margin-bottom: 20px;
+ animation: shake 0.4s ease;
+}
+
+@keyframes shake {
+ 0%, 100% { transform: translateX(0); }
+ 25% { transform: translateX(-5px); }
+ 75% { transform: translateX(5px); }
+}
+
+.login-error svg {
+ flex-shrink: 0;
+}
+
+/* Login Actions */
+.login-actions {
+ display: flex;
+ flex-direction: column;
+ gap: 14px;
+ margin-bottom: 28px;
+}
+
+/* Buttons */
+.btn-primary,
+.btn-secondary,
+.btn-text {
+ border: none;
+ outline: none;
+ cursor: pointer;
+ font-family: inherit;
+ font-weight: 600;
+ transition: all 0.2s ease;
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ gap: 10px;
+}
+
+.btn-large {
+ padding: 14px 24px;
+ font-size: 15px;
+ border-radius: 12px;
+}
+
+.btn-primary {
+ background: linear-gradient(135deg, #ff7a3c 0%, #ff6b2b 100%);
+ color: #fff;
+ box-shadow: 0 4px 12px rgba(255, 122, 60, 0.25);
+}
+
+.btn-primary:hover:not(:disabled) {
+ transform: translateY(-2px);
+ box-shadow: 0 8px 20px rgba(255, 122, 60, 0.35);
+}
+
+.btn-primary:active:not(:disabled) {
+ transform: translateY(0);
+}
+
+.btn-primary:disabled {
+ opacity: 0.6;
+ cursor: not-allowed;
+}
+
+.btn-secondary {
+ background: #1a1b26;
+ color: #f5f5f7;
+ border: 1px solid #3a3b4d;
+}
+
+.btn-secondary:hover {
+ background: #2a2b3c;
+ border-color: #4a4b5d;
+ transform: translateY(-1px);
+}
+
+.btn-text {
+ background: transparent;
+ color: #9a9bb0;
+ padding: 10px;
+ font-size: 14px;
+ font-weight: 500;
+}
+
+.btn-text:hover {
+ color: #ff7a3c;
+}
+
+/* Button Spinner */
+.btn-spinner {
+ width: 16px;
+ height: 16px;
+ border: 2px solid rgba(255, 255, 255, 0.3);
+ border-top-color: #fff;
+ border-radius: 50%;
+ animation: spin 0.6s linear infinite;
+}
+
+@keyframes spin {
+ to { transform: rotate(360deg); }
+}
+
+/* Loading Spinner (Page) */
+.loading-spinner {
+ width: 48px;
+ height: 48px;
+ border: 4px solid #272832;
+ border-top-color: #ff7a3c;
+ border-radius: 50%;
+ animation: spin 0.8s linear infinite;
+ margin: 0 auto;
+}
+
+/* Divider */
+.login-divider {
+ position: relative;
+ text-align: center;
+ margin: 8px 0;
+}
+
+.login-divider::before {
+ content: '';
+ position: absolute;
+ top: 50%;
+ left: 0;
+ right: 0;
+ height: 1px;
+ background: #272832;
+}
+
+.login-divider span {
+ position: relative;
+ display: inline-block;
+ padding: 0 16px;
+ background: #101117;
+ color: #9a9bb0;
+ font-size: 12px;
+ font-weight: 500;
+}
+
+/* Form */
+.login-form {
+ display: flex;
+ flex-direction: column;
+ gap: 18px;
+ margin-bottom: 28px;
+}
+
+.form-group {
+ display: flex;
+ flex-direction: column;
+ gap: 8px;
+}
+
+.form-group label {
+ font-size: 13px;
+ font-weight: 600;
+ color: #f5f5f7;
+}
+
+.form-input {
+ background: #0a0b0f;
+ border: 1px solid #272832;
+ border-radius: 10px;
+ padding: 12px 14px;
+ color: #f5f5f7;
+ font-size: 14px;
+ font-family: "SF Mono", Monaco, monospace;
+ transition: all 0.2s ease;
+}
+
+.form-input:focus {
+ outline: none;
+ border-color: #ff7a3c;
+ box-shadow: 0 0 0 4px rgba(255, 122, 60, 0.1);
+}
+
+.form-input:disabled {
+ opacity: 0.5;
+ cursor: not-allowed;
+}
+
+.form-input::placeholder {
+ color: #676883;
+}
+
+.form-hint {
+ font-size: 12px;
+ color: #9a9bb0;
+ line-height: 1.5;
+ margin: 0;
+}
+
+.form-link {
+ color: #ff7a3c;
+ text-decoration: none;
+ font-weight: 500;
+ transition: color 0.2s ease;
+}
+
+.form-link:hover {
+ color: #ff8b52;
+ text-decoration: underline;
+}
+
+.form-hint code {
+ background: #1a1b26;
+ padding: 2px 6px;
+ border-radius: 4px;
+ font-family: "SF Mono", Monaco, monospace;
+ font-size: 11px;
+ color: #ff7a3c;
+}
+
+/* Notice (for no auth configured) */
+.login-notice {
+ padding: 20px;
+ background: rgba(255, 152, 0, 0.1);
+ border: 1px solid rgba(255, 152, 0, 0.3);
+ border-radius: 12px;
+ margin-bottom: 28px;
+}
+
+.login-notice h3 {
+ margin: 0 0 12px 0;
+ font-size: 16px;
+ color: #ffb74d;
+}
+
+.login-notice p {
+ margin: 0 0 12px 0;
+ font-size: 13px;
+ color: #c3c5dd;
+ line-height: 1.6;
+}
+
+.login-notice ul {
+ margin: 0;
+ padding-left: 20px;
+ font-size: 13px;
+ color: #c3c5dd;
+ line-height: 1.8;
+}
+
+.login-notice code {
+ background: #1a1b26;
+ padding: 2px 6px;
+ border-radius: 4px;
+ font-family: "SF Mono", Monaco, monospace;
+ font-size: 12px;
+ color: #ff7a3c;
+}
+
+/* Features List */
+.login-features {
+ display: flex;
+ flex-direction: column;
+ gap: 12px;
+ padding: 20px 0;
+ border-top: 1px solid #272832;
+ border-bottom: 1px solid #272832;
+ margin-bottom: 20px;
+}
+
+.feature-item {
+ display: flex;
+ align-items: flex-start;
+ gap: 12px;
+}
+
+.feature-icon {
+ flex-shrink: 0;
+ width: 20px;
+ height: 20px;
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ color: #7cffb3;
+}
+
+.feature-text {
+ display: flex;
+ flex-direction: column;
+ gap: 2px;
+}
+
+.feature-text strong {
+ font-size: 13px;
+ font-weight: 600;
+ color: #f5f5f7;
+}
+
+.feature-text span {
+ font-size: 12px;
+ color: #9a9bb0;
+}
+
+/* Footer */
+.login-footer {
+ text-align: center;
+}
+
.login-footer p {
  margin: 0;
  font-size: 11px;
  color: #7a7b8e;
  line-height: 1.6;
}

/* ============================================================================
   INSTALLATION MODAL - Claude Code Style
   ============================================================================ */
+
+.install-modal-backdrop {
+ position: fixed;
+ inset: 0;
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ background: rgba(0, 0, 0, 0.7);
+ backdrop-filter: blur(8px);
+ z-index: 9999;
+ animation: fadeIn 0.2s ease;
+}
+
+.install-modal {
+ width: 480px;
+ max-width: 90vw;
+ background: #101117;
+ border: 1px solid #272832;
+ border-radius: 16px;
+ box-shadow: 0 20px 60px rgba(0, 0, 0, 0.5);
+ animation: modalSlideIn 0.3s ease;
+ overflow: hidden;
+}
+
+@keyframes modalSlideIn {
+ from {
+ opacity: 0;
+ transform: translateY(-20px) scale(0.95);
+ }
+ to {
+ opacity: 1;
+ transform: translateY(0) scale(1);
+ }
+}
+
+/* Modal Header */
+.install-modal-header {
+ padding: 32px 32px 24px;
+ text-align: center;
+ border-bottom: 1px solid #272832;
+}
+
+.install-modal-logo {
+ display: flex;
+ justify-content: center;
+ margin-bottom: 16px;
+}
+
+.logo-icon-large {
+ width: 56px;
+ height: 56px;
+ border-radius: 12px;
+ background: linear-gradient(135deg, #ff7a3c 0%, #ff6b2b 100%);
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ font-weight: 700;
+ font-size: 24px;
+ color: #050608;
+ box-shadow: 0 4px 16px rgba(255, 122, 60, 0.3);
+}
+
+.install-modal-title {
+ margin: 0 0 8px 0;
+ font-size: 20px;
+ font-weight: 600;
+ color: #f5f5f7;
+}
+
+.install-modal-subtitle {
+ margin: 0;
+ font-size: 13px;
+ color: #9a9bb0;
+ line-height: 1.5;
+}
+
+/* Status Indicator */
+.install-status {
+ display: flex;
+ align-items: center;
+ gap: 10px;
+ padding: 12px 16px;
+ margin: 16px 24px;
+ border-radius: 8px;
+ font-size: 13px;
+ transition: all 0.2s ease;
+}
+
+.install-status-error {
+ background: rgba(255, 82, 82, 0.1);
+ border: 1px solid rgba(255, 82, 82, 0.3);
+ color: #ff8a8a;
+}
+
+.install-status-pending {
+ background: rgba(255, 152, 0, 0.1);
+ border: 1px solid rgba(255, 152, 0, 0.3);
+ color: #ffb74d;
+}
+
+.status-icon {
+ flex-shrink: 0;
+}
+
+.status-spinner {
+ width: 16px;
+ height: 16px;
+ border: 2px solid rgba(255, 180, 77, 0.3);
+ border-top-color: #ffb74d;
+ border-radius: 50%;
+ animation: spin 0.6s linear infinite;
+}
+
+/* Installation Steps */
+.install-steps {
+ padding: 24px 32px;
+ display: flex;
+ flex-direction: column;
+ gap: 16px;
+}
+
+.install-step {
+ display: flex;
+ align-items: flex-start;
+ gap: 12px;
+}
+
+.step-number {
+ flex-shrink: 0;
+ width: 28px;
+ height: 28px;
+ border-radius: 8px;
+ background: #1a1b26;
+ border: 1px solid #3a3b4d;
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ font-size: 13px;
+ font-weight: 600;
+ color: #ff7a3c;
+}
+
+.step-content h3 {
+ margin: 0 0 4px 0;
+ font-size: 14px;
+ font-weight: 600;
+ color: #f5f5f7;
+}
+
+.step-content p {
+ margin: 0;
+ font-size: 12px;
+ color: #9a9bb0;
+ line-height: 1.5;
+}
+
+/* Action Buttons */
+.install-modal-actions {
+ display: flex;
+ align-items: center;
+ justify-content: flex-end;
+ gap: 10px;
+ padding: 16px 24px;
+ border-top: 1px solid #272832;
+ background: #0a0b0f;
+}
+
+.btn-install-primary {
+ border: none;
+ outline: none;
+ background: #000;
+ color: #fff;
+ padding: 10px 18px;
+ border-radius: 8px;
+ font-size: 13px;
+ font-weight: 600;
+ cursor: pointer;
+ display: flex;
+ align-items: center;
+ gap: 8px;
+ transition: all 0.2s ease;
+}
+
+.btn-install-primary:hover:not(:disabled) {
+ background: #1a1a1a;
+ transform: translateY(-1px);
+}
+
+.btn-install-primary:active:not(:disabled) {
+ transform: translateY(0);
+}
+
+.btn-install-primary:disabled {
+ opacity: 0.5;
+ cursor: not-allowed;
+}
+
+.btn-check-status {
+ border: 1px solid #3a3b4d;
+ outline: none;
+ background: #1a1b26;
+ color: #f5f5f7;
+ padding: 10px 18px;
+ border-radius: 8px;
+ font-size: 13px;
+ font-weight: 500;
+ cursor: pointer;
+ display: flex;
+ align-items: center;
+ gap: 8px;
+ transition: all 0.2s ease;
+}
+
+.btn-check-status:hover:not(:disabled) {
+ background: #2a2b3c;
+ border-color: #4a4b5d;
+}
+
+.btn-check-status:disabled {
+ opacity: 0.5;
+ cursor: not-allowed;
+}
+
+.btn-install-secondary {
+ border: 1px solid #3a3b4d;
+ outline: none;
+ background: transparent;
+ color: #c3c5dd;
+ padding: 10px 18px;
+ border-radius: 8px;
+ font-size: 13px;
+ font-weight: 500;
+ cursor: pointer;
+ display: flex;
+ align-items: center;
+ gap: 8px;
+ transition: all 0.2s ease;
+}
+
+.btn-install-secondary:hover:not(:disabled) {
+ background: #1a1b26;
+ border-color: #4a4b5d;
+}
+
+.btn-install-secondary:disabled {
+ opacity: 0.5;
+ cursor: not-allowed;
+}
+
+/* Footer */
+.install-modal-footer {
+ padding: 16px 32px 24px;
+ text-align: center;
+}
+
+.install-modal-footer p {
+ margin: 0;
+ font-size: 12px;
+ color: #7a7b8e;
+ line-height: 1.6;
+}
+
+.install-modal-footer strong {
+ color: #c3c5dd;
+ font-weight: 600;
+}
+
/* Button spinner.
   NOTE(review): `.btn-spinner` is already declared in the login-page section
   above with a 16px box; this later declaration wins in the cascade, so every
   .btn-spinner (including the login button's) renders at 14px. Confirm the
   14px size is intended globally, or scope one of the two declarations.
   The duplicate `@keyframes spin` that followed this rule was byte-identical
   to the one defined earlier in this file and has been removed. */
.btn-spinner {
  width: 14px;
  height: 14px;
  border: 2px solid rgba(255, 255, 255, 0.3);
  border-top-color: #fff;
  border-radius: 50%;
  animation: spin 0.6s linear infinite;
}
+
+/* Secondary primary-style button for "Load available models" */
+.settings-load-btn {
+ margin-top: 8px;
+
+ /* Make it hug the text, not full width */
+ display: inline-flex;
+ align-items: center;
+ justify-content: center;
+ width: auto !important;
+ min-width: 0;
+ align-self: flex-start;
+
+ /* Size: slightly smaller than Save but same family */
+ padding: 7px 14px;
+ border-radius: 999px;
+
+ font-size: 12px;
+ font-weight: 600;
+ letter-spacing: 0.01em;
+
+ border: none;
+ outline: none;
+ cursor: pointer;
+
+ /* Match Save button color palette */
+ background: #ff7a3c;
+ color: #050608;
+
+ transition:
+ background 0.2s ease,
+ box-shadow 0.2s ease,
+ transform 0.15s ease,
+ opacity 0.2s ease;
+}
+
+.settings-load-btn:hover {
+ background: #ff8b52;
+ transform: translateY(-1px);
+ box-shadow: 0 3px 10px rgba(255, 122, 60, 0.28);
+}
+
+.settings-load-btn:active {
+ transform: translateY(0);
+ box-shadow: 0 1px 4px rgba(255, 122, 60, 0.25);
+}
+
+.settings-load-btn:disabled {
+ opacity: 0.55;
+ cursor: not-allowed;
+ transform: none;
+ box-shadow: none;
+}
+
+/* ------------------------------
+ LLM Settings Loading Experience
+ ------------------------------ */
+
+.settings-loading-shell {
+ min-height: calc(100vh - 32px);
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ padding: 32px 24px;
+}
+
+.settings-loading-card {
+ width: 100%;
+ max-width: 520px;
+ background: #101117;
+ border: 1px solid #272832;
+ border-radius: 20px;
+ padding: 40px 28px;
+ text-align: center;
+ box-shadow: 0 24px 80px rgba(0, 0, 0, 0.28);
+}
+
+.settings-loading-card h1 {
+ margin: 0 0 6px 0;
+ font-size: 28px;
+ line-height: 1.1;
+}
+
+.settings-loading-subtitle {
+ font-size: 13px;
+ color: #9a9bb0;
+ margin-bottom: 18px;
+}
+
+.settings-loading-text {
+ font-size: 14px;
+ color: #d4d7e1;
+ line-height: 1.6;
+ margin: 0;
+}
+
+.settings-loading-spinner {
+ width: 56px;
+ height: 56px;
+ border: 4px solid #272832;
+ border-top-color: #ff7a3c;
+ border-right-color: rgba(255, 122, 60, 0.7);
+ border-radius: 50%;
+ animation: spin 0.8s linear infinite;
+ margin: 0 auto 18px;
+}
+
+.settings-loading-slow {
+ margin-top: 18px;
+ padding: 14px 16px;
+ background: #0b0c11;
+ border: 1px solid #272832;
+ border-radius: 12px;
+}
+
+.settings-loading-slow p {
+ margin: 0 0 12px 0;
+ color: #9a9bb0;
+ font-size: 13px;
+ line-height: 1.5;
+}
+
+.settings-inline-error-card {
+ width: 100%;
+ max-width: 620px;
+ margin: 60px auto 0;
+ background: #101117;
+ border: 1px solid #272832;
+ border-radius: 16px;
+ padding: 28px 24px;
+}
+
+.settings-error-banner,
+.settings-success-banner {
+ border-radius: 12px;
+ padding: 12px 14px;
+ margin-bottom: 14px;
+ font-size: 13px;
+ line-height: 1.5;
+}
+
+.settings-error-banner {
+ background: rgba(255, 87, 87, 0.08);
+ border: 1px solid rgba(255, 87, 87, 0.24);
+ color: #ffb0b0;
+}
+
+.settings-success-banner {
+ background: rgba(67, 181, 129, 0.08);
+ border: 1px solid rgba(67, 181, 129, 0.24);
+ color: #9ce7c2;
+}
+
+.settings-error-text {
+ color: #ffb0b0;
+ font-size: 14px;
+ line-height: 1.6;
+ margin: 12px 0 18px;
+}
+
+.settings-secondary-btn {
+ background: transparent;
+ border: 1px solid #313244;
+ color: #f5f5f7;
+ border-radius: 999px;
+ padding: 9px 16px;
+ font-size: 13px;
+ cursor: pointer;
+ transition: all 0.2s ease;
+}
+
+.settings-secondary-btn:hover {
+ border-color: #ff7a3c;
+ color: #fff;
+ background: rgba(255, 122, 60, 0.08);
+}
+
+.settings-inline-row {
+ display: flex;
+ gap: 10px;
+ align-items: center;
+}
+
+.settings-inline-row .settings-input {
+ flex: 1;
+}
+
+.settings-model-list {
+ display: flex;
+ flex-wrap: wrap;
+ gap: 8px;
+}
+
+.settings-model-chip {
+ background: #090a0e;
+ border: 1px solid #2b2c36;
+ border-radius: 999px;
+ color: #f5f5f7;
+ padding: 8px 12px;
+ font-size: 12px;
+ cursor: pointer;
+ transition: all 0.18s ease;
+}
+
+.settings-model-chip:hover {
+ border-color: #ff7a3c;
+ background: rgba(255, 122, 60, 0.08);
+}
+
+/* =========================================================
+ Startup Screen — Enterprise Loader
+ ========================================================= */
+
+.startup-screen {
+ min-height: 100vh;
+ width: 100%;
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ padding: 32px;
+ box-sizing: border-box;
+ background:
+ radial-gradient(circle at top center, rgba(255, 122, 60, 0.10), transparent 18%),
+ linear-gradient(180deg, #050814 0%, #03060f 100%);
+}
+
+.startup-card {
+ width: min(100%, 520px);
+ display: flex;
+ flex-direction: column;
+ gap: 20px;
+ padding: 28px 28px 24px;
+ border-radius: 20px;
+ border: 1px solid rgba(255, 255, 255, 0.08);
+ background:
+ linear-gradient(180deg, rgba(18, 24, 42, 0.94) 0%, rgba(10, 15, 28, 0.96) 100%);
+ box-shadow:
+ 0 10px 40px rgba(0, 0, 0, 0.45),
+ 0 0 0 1px rgba(255, 255, 255, 0.02) inset;
+ backdrop-filter: blur(12px);
+}
+
+.startup-brand-row {
+ display: flex;
+ align-items: center;
+ gap: 16px;
+}
+
+.startup-brand-mark {
+ position: relative;
+ width: 52px;
+ height: 52px;
+ flex: 0 0 52px;
+}
+
+.startup-brand-ring {
+ position: absolute;
+ inset: 0;
+ border-radius: 50%;
+ border: 3px solid rgba(255, 122, 60, 0.22);
+ border-top-color: #ff7a3c;
+ animation: startup-spin 1.1s linear infinite;
+}
+
+.startup-brand-core {
+ position: absolute;
+ inset: 11px;
+ border-radius: 50%;
+ background: radial-gradient(circle, rgba(255, 122, 60, 0.95) 0%, rgba(255, 122, 60, 0.25) 72%, transparent 100%);
+ box-shadow: 0 0 24px rgba(255, 122, 60, 0.28);
+}
+
+.startup-brand-copy {
+ min-width: 0;
+}
+
+.startup-title {
+ font-size: 26px;
+ line-height: 1.1;
+ font-weight: 700;
+ color: #f8fafc;
+ letter-spacing: 0.01em;
+}
+
+.startup-subtitle {
+ margin-top: 4px;
+ font-size: 13px;
+ line-height: 1.5;
+ color: #94a3b8;
+}
+
+.startup-loader-wrap {
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ padding-top: 4px;
+}
+
+.startup-loader {
+ position: relative;
+ width: 72px;
+ height: 72px;
+}
+
+.startup-loader-ring {
+ position: absolute;
+ inset: 0;
+ border-radius: 50%;
+}
+
+.startup-loader-ring-outer {
+ border: 4px solid rgba(255, 255, 255, 0.08);
+ border-top-color: #ff7a3c;
+ animation: startup-spin 1s linear infinite;
+}
+
+.startup-loader-ring-inner {
+ inset: 10px;
+ border: 3px solid rgba(255, 122, 60, 0.14);
+ border-bottom-color: rgba(255, 122, 60, 0.9);
+ animation: startup-spin-reverse 1.4s linear infinite;
+}
+
+.startup-status-block {
+ text-align: center;
+}
+
+.startup-status {
+ font-size: 18px;
+ font-weight: 600;
+ color: #f8fafc;
+ letter-spacing: 0.01em;
+}
+
+.startup-detail {
+ margin-top: 8px;
+ font-size: 13px;
+ line-height: 1.6;
+ color: #94a3b8;
+}
+
+.startup-phase-row {
+ display: flex;
+ justify-content: center;
+}
+
+.startup-phase-badge {
+ display: inline-flex;
+ align-items: center;
+ justify-content: center;
+ min-height: 28px;
+ padding: 0 12px;
+ border-radius: 999px;
+ background: rgba(255, 122, 60, 0.12);
+ border: 1px solid rgba(255, 122, 60, 0.24);
+ color: #ffb089;
+ font-size: 12px;
+ font-weight: 600;
+ letter-spacing: 0.04em;
+ text-transform: uppercase;
+}
+
+.startup-meta-grid {
+ display: grid;
+ grid-template-columns: repeat(2, minmax(0, 1fr));
+ gap: 12px;
+}
+
+.startup-meta-item {
+ padding: 12px 14px;
+ border-radius: 14px;
+ background: rgba(255, 255, 255, 0.035);
+ border: 1px solid rgba(255, 255, 255, 0.05);
+}
+
+.startup-meta-item-wide {
+ grid-column: 1 / -1;
+}
+
+.startup-meta-label {
+ font-size: 11px;
+ font-weight: 600;
+ letter-spacing: 0.05em;
+ text-transform: uppercase;
+ color: #64748b;
+}
+
+.startup-meta-value {
+ margin-top: 6px;
+ font-size: 14px;
+ font-weight: 600;
+ color: #e2e8f0;
+ word-break: break-word;
+}
+
+.startup-footer {
+ font-size: 12px;
+ line-height: 1.6;
+ color: #64748b;
+ text-align: center;
+}
+
+/* Clockwise spin — used by the brand ring and the outer loader ring. */
+@keyframes startup-spin {
+ from {
+ transform: rotate(0deg);
+ }
+ to {
+ transform: rotate(360deg);
+ }
+}
+
+/* Counter-clockwise spin — inner loader ring turns against the outer one. */
+@keyframes startup-spin-reverse {
+ from {
+ transform: rotate(360deg);
+ }
+ to {
+ transform: rotate(0deg);
+ }
+}
+
+@media (max-width: 640px) {
+ .startup-screen {
+ padding: 20px;
+ }
+
+ .startup-card {
+ padding: 22px 20px 20px;
+ border-radius: 18px;
+ }
+
+ .startup-title {
+ font-size: 22px;
+ }
+
+ .startup-meta-grid {
+ grid-template-columns: 1fr;
+ }
+
+ .startup-meta-item-wide {
+ grid-column: auto;
+ }
+}
\ No newline at end of file
diff --git a/frontend/utils/api.js b/frontend/utils/api.js
new file mode 100644
index 0000000000000000000000000000000000000000..305c26364b52912f76e34fcff6610ec021444b1f
--- /dev/null
+++ b/frontend/utils/api.js
@@ -0,0 +1,251 @@
+/**
+ * API utilities for authenticated requests
+ */
+
+/**
+ * Get backend URL from environment or use relative path (for local dev)
+ * - Production (Vercel): Uses VITE_BACKEND_URL env var (e.g., https://gitpilot-backend.onrender.com)
+ * - Development (local): Uses relative paths (proxied by Vite to localhost:8000)
+ */
+const BACKEND_URL = import.meta.env.VITE_BACKEND_URL || '';
+
+/**
+ * Check if backend URL is configured
+ * @returns {boolean} True if backend URL is set
+ */
+export function isBackendConfigured() {
+ return BACKEND_URL !== '' && BACKEND_URL !== undefined;
+}
+
+/**
+ * Get the configured backend URL
+ * @returns {string} Backend URL or empty string
+ */
+export function getBackendUrl() {
+ // Accessor for the module-level constant so callers don't read it directly.
+ return BACKEND_URL;
+}
+
+/**
+ * Construct full API URL
+ * @param {string} path - API endpoint path (e.g., '/api/chat/plan')
+ * @returns {string} Full URL to API endpoint
+ */
+export function apiUrl(path) {
+ // Ensure path starts with /
+ const cleanPath = path.startsWith('/') ? path : `/${path}`;
+ return `${BACKEND_URL}${cleanPath}`;
+}
+
+/**
+ * Enhanced fetch with better error handling for JSON parsing
+ * @param {string} url - URL to fetch
+ * @param {Object} options - Fetch options
+ * @returns {Promise} Parsed JSON response
+ */
+export async function safeFetchJSON(url, options = {}) {
+ try {
+ // Add timeout to prevent hanging when backend is starting up.
+ // Default raised to 15s to tolerate first-load GitHub API checks.
+ const timeout = options.timeout || 15000;
+ const controller = new AbortController();
+ const timer = setTimeout(() => controller.abort(), timeout);
+ const fetchOptions = { ...options, signal: options.signal || controller.signal };
+ delete fetchOptions.timeout;
+
+ let response;
+ try {
+ response = await fetch(url, fetchOptions);
+ } finally {
+ clearTimeout(timer);
+ }
+ const contentType = response.headers.get('content-type');
+
+ // Check if response is actually JSON
+ if (!contentType || !contentType.includes('application/json')) {
+ // If not JSON, it might be an HTML error page
+ const text = await response.text();
+
+ // Check if it looks like HTML (starts with } Fetch response
+ */
+export async function authFetch(url, options = {}) {
+ const headers = {
+ ...getAuthHeaders(),
+ ...options.headers,
+ };
+
+ return fetch(url, {
+ ...options,
+ headers,
+ });
+}
+
+/**
+ * Make an authenticated JSON request
+ * @param {string} url - API endpoint URL
+ * @param {Object} options - Fetch options
+ * @returns {Promise} Parsed JSON response
+ */
+export async function authFetchJSON(url, options = {}) {
+ const headers = {
+ 'Content-Type': 'application/json',
+ ...getAuthHeaders(),
+ ...options.headers,
+ };
+
+ const response = await fetch(url, {
+ ...options,
+ headers,
+ });
+
+ if (!response.ok) {
+ const error = await response.json().catch(() => ({ detail: 'Request failed' }));
+ throw new Error(error.detail || error.message || 'Request failed');
+ }
+
+ return response.json();
+}
+
+// ─── Redesigned API Endpoints ────────────────────────────
+
+/**
+ * Get normalized server status
+ */
+export async function fetchStatus() {
+ // Single-shot status fetch; relies on safeFetchJSON's default timeout.
+ return safeFetchJSON(apiUrl("/api/status"));
+}
+
+/**
+ * Get server status with retry (for startup when backend may still be booting).
+ * Retries up to `maxRetries` times with `delayMs` between attempts.
+ * @param {number} maxRetries - Maximum retry attempts (default: 8)
+ * @param {number} delayMs - Delay between retries in ms (default: 2000)
+ * @returns {Promise} Parsed status response or null
+ */
+export async function fetchStatusWithRetry(maxRetries = 8, delayMs = 2000) {
+ for (let i = 0; i < maxRetries; i++) {
+ try {
+ return await safeFetchJSON(apiUrl("/api/status"), { timeout: 5000 });
+ } catch {
+ if (i < maxRetries - 1) {
+ await new Promise((r) => setTimeout(r, delayMs));
+ }
+ }
+ }
+ return null;
+}
+
+/**
+ * Get detailed provider status
+ */
+export async function fetchProviderStatus() {
+ // Fetches the detailed provider-status payload from the backend.
+ return safeFetchJSON(apiUrl("/api/providers/status"));
+}
+
+/**
+ * Test a provider configuration
+ */
+export async function testProvider(providerConfig) {
+ return safeFetchJSON(apiUrl("/api/providers/test"), {
+ method: "POST",
+ headers: { "Content-Type": "application/json" },
+ body: JSON.stringify(providerConfig),
+ });
+}
+
+/**
+ * Start a session by mode
+ */
+export async function startSession(sessionConfig) {
+ return safeFetchJSON(apiUrl("/api/session/start"), {
+ method: "POST",
+ headers: { "Content-Type": "application/json" },
+ body: JSON.stringify(sessionConfig),
+ });
+}
+
+/**
+ * Send a chat message (redesigned endpoint)
+ */
+export async function sendChatMessage(messageConfig) {
+ return safeFetchJSON(apiUrl("/api/chat/send"), {
+ method: "POST",
+ headers: { "Content-Type": "application/json" },
+ body: JSON.stringify(messageConfig),
+ });
+}
+
+/**
+ * Get workspace summary
+ */
+export async function fetchWorkspaceSummary(folderPath) {
+ const query = folderPath ? `?folder_path=${encodeURIComponent(folderPath)}` : "";
+ return safeFetchJSON(apiUrl(`/api/workspace/summary${query}`));
+}
+
+/**
+ * Run security scan on workspace
+ */
+export async function scanWorkspace(path) {
+ const query = path ? `?path=${encodeURIComponent(path)}` : "";
+ return safeFetchJSON(apiUrl(`/api/security/scan-workspace${query}`));
+}
\ No newline at end of file
diff --git a/frontend/utils/appInit.js b/frontend/utils/appInit.js
new file mode 100644
index 0000000000000000000000000000000000000000..72abf41f7916c1bcc9e572c2b721bf43e23fa822
--- /dev/null
+++ b/frontend/utils/appInit.js
@@ -0,0 +1,157 @@
+/**
+ * GitPilot App Initialization — Single Source of Truth.
+ *
+ * Best-practice bootstrap pattern:
+ * - Runs EXACTLY ONCE per page load (even with React StrictMode)
+ * - Two-phase strategy: fast ping → then parallel status fetch
+ * - Long retry budget for slow backends (WSL, HF Spaces cold start)
+ * - Shared result between App.jsx and LoginPage.jsx
+ * - No duplicate polling, no race conditions
+ *
+ * Phase 1 — Readiness probe (/api/ping):
+ * /api/ping is a zero-dependency endpoint that responds instantly
+ * once uvicorn is listening. We poll it with short timeouts and many
+ * retries to detect backend readiness WITHOUT wasting time on the
+ * heavy /api/status endpoint (which does GitHub API checks).
+ *
+ * Phase 2 — Data fetch (/api/status + /api/auth/status):
+ * Only after ping succeeds, we fetch the real status data in parallel.
+ * These can still be slow (GitHub API, LLM provider probes) but the
+ * user already sees the login page.
+ *
+ * API call budget per page load:
+ * - Best case: 2-3 × /api/ping + 1 × /api/status + 1 × /api/auth/status
+ * - Worst case: up to 30 × /api/ping + 1 + 1 (60s timeout budget)
+ */
+import { safeFetchJSON, apiUrl } from './api.js';
+
+// Module-level singleton — survives React StrictMode double-mount
+let _initPromise = null;
+let _initResult = null;
+
+const PING_MAX_ATTEMPTS = 30; // up to ~60s of readiness polling
+const PING_INTERVAL_MS = 2000; // 2s between pings
+const PING_TIMEOUT_MS = 4000; // each ping gives up after 4s
+const STATUS_TIMEOUT_MS = 15000; // once ready, status fetch has 15s
+
+/**
+ * Wait for the backend to become reachable by polling /api/ping.
+ * This is a zero-dependency endpoint that responds instantly once
+ * uvicorn is listening — much faster than /api/status which does
+ * GitHub API checks.
+ *
+ * @returns {Promise} true if backend became reachable, false otherwise
+ */
+async function waitForBackend() {
+ for (let i = 0; i < PING_MAX_ATTEMPTS; i++) {
+ try {
+ const result = await safeFetchJSON(
+ apiUrl('/api/ping'),
+ { timeout: PING_TIMEOUT_MS }
+ );
+ if (result && (result.ok === true || result.service)) {
+ console.log(
+ `[initApp] ✅ Backend reachable after ${i + 1} ping attempt(s) ` +
+ `(${(i * PING_INTERVAL_MS) / 1000}s elapsed)`
+ );
+ return true;
+ }
+ } catch (err) {
+ // Silent — we expect failures during cold start
+ if (i === 0 || i % 5 === 0) {
+ console.log(
+ `[initApp] Waiting for backend... ` +
+ `attempt ${i + 1}/${PING_MAX_ATTEMPTS}`
+ );
+ }
+ }
+ // Wait before next ping (except after last attempt)
+ if (i < PING_MAX_ATTEMPTS - 1) {
+ await new Promise((r) => setTimeout(r, PING_INTERVAL_MS));
+ }
+ }
+ return false;
+}
+
+/**
+ * Initialize the app.
+ * Phase 1: poll /api/ping until backend is reachable
+ * Phase 2: fetch /api/status and /api/auth/status in parallel
+ *
+ * @returns {Promise<{status: object|null, authMode: string, ready: boolean, error: string|null}>}
+ */
+export function initApp() {
+ // Singleton guard: the promise is created once per page load, so React
+ // StrictMode's double-mount (and any later caller) reuses the same run.
+ if (_initPromise) {
+ return _initPromise;
+ }
+
+ _initPromise = (async () => {
+ // ── Phase 1: wait for backend to be reachable ──
+ const reachable = await waitForBackend();
+
+ if (!reachable) {
+ console.error(
+ `[initApp] ❌ Backend did not respond after ${PING_MAX_ATTEMPTS} ping attempts ` +
+ `(${(PING_MAX_ATTEMPTS * PING_INTERVAL_MS) / 1000}s). Giving up.`
+ );
+ // ready:false is the only unrecoverable outcome; callers must use
+ // resetInit() before trying again.
+ _initResult = {
+ status: null,
+ authMode: 'device',
+ ready: false,
+ error: 'Backend did not become reachable. Please check that the server is running.',
+ };
+ return _initResult;
+ }
+
+ // ── Phase 2: fetch real data in parallel ──
+ try {
+ console.log('[initApp] Fetching /api/status + /api/auth/status in parallel...');
+ // auth/status failure is tolerated (caught to null); status failure
+ // falls through to the catch below.
+ const [status, authStatus] = await Promise.all([
+ safeFetchJSON(apiUrl('/api/status'), { timeout: STATUS_TIMEOUT_MS }),
+ safeFetchJSON(apiUrl('/api/auth/status'), { timeout: STATUS_TIMEOUT_MS })
+ .catch(() => null),
+ ]);
+
+ console.log('[initApp] ✅ Init complete');
+ _initResult = {
+ status,
+ authMode: (authStatus && authStatus.mode) || 'device',
+ ready: true,
+ error: null,
+ };
+ return _initResult;
+ } catch (err) {
+ // Backend was reachable via ping but status fetch failed
+ // Still return ready:true so UI can proceed with limited state
+ console.warn(
+ `[initApp] Status fetch failed after ping succeeded: ${err.message || err}. ` +
+ `Proceeding with limited state.`
+ );
+ _initResult = {
+ status: null,
+ authMode: 'device',
+ ready: true, // backend is up, just slow
+ error: null,
+ };
+ return _initResult;
+ }
+ })();
+
+ return _initPromise;
+}
+
+/**
+ * Get the cached init result (null if init hasn't completed yet).
+ */
+export function getInitResult() {
+ // Returns null until initApp()'s promise has resolved at least once.
+ return _initResult;
+}
+
+/**
+ * Reset the init singleton. Call this only when you need to force
+ * a re-initialization (e.g., after the user manually clicks "Retry").
+ */
+export function resetInit() {
+ // Clearing both the promise and the cached result lets the next
+ // initApp() call run a fresh two-phase bootstrap.
+ _initPromise = null;
+ _initResult = null;
+}
diff --git a/frontend/utils/sse.js b/frontend/utils/sse.js
new file mode 100644
index 0000000000000000000000000000000000000000..99a99f138e12ec1b045598d3140bad4035985714
--- /dev/null
+++ b/frontend/utils/sse.js
@@ -0,0 +1,183 @@
+/**
+ * SSE (Server-Sent Events) client for GitPilot V2 streaming API.
+ *
+ * Usage:
+ * import { streamChat, cancelStream } from '../utils/sse';
+ *
+ * const unsubscribe = streamChat(sessionId, message, {
+ * onTextDelta: (text) => appendToChat(text),
+ * onToolStart: (data) => showToolActivity(data),
+ * onToolResult: (data) => updateToolActivity(data),
+ * onApprovalNeeded: (data) => showApprovalModal(data),
+ * onTerminalOutput: (data) => appendTerminal(data),
+ * onTestResult: (data) => showTestBadges(data),
+ * onDiagnostics: (data) => showDiagnostics(data),
+ * onDone: (data) => finalize(data),
+ * onError: (error) => showError(error),
+ * });
+ *
+ * // To cancel:
+ * cancelStream(sessionId);
+ */
+
+const BACKEND_URL = import.meta.env.VITE_BACKEND_URL || '';
+
+function apiUrl(path) {
+ return BACKEND_URL ? `${BACKEND_URL}${path}` : path;
+}
+
+// Active abort controllers keyed by sessionId
+const activeControllers = new Map();
+
+/**
+ * Stream a chat message via the V2 SSE endpoint.
+ * Returns a cleanup function to abort the stream.
+ */
+export function streamChat(sessionId, message, handlers = {}) {
+ const controller = new AbortController();
+ activeControllers.set(sessionId, controller);
+
+ const run = async () => {
+ try {
+ const res = await fetch(apiUrl('/api/v2/chat/stream'), {
+ method: 'POST',
+ headers: { 'Content-Type': 'application/json' },
+ body: JSON.stringify({
+ session_id: sessionId,
+ message,
+ permission_mode: 'normal',
+ }),
+ signal: controller.signal,
+ });
+
+ if (!res.ok || !res.body) {
+ handlers.onError?.({ error: `Server returned ${res.status}` });
+ return;
+ }
+
+ const reader = res.body.getReader();
+ const decoder = new TextDecoder();
+ let buffer = '';
+
+ while (true) {
+ const { done, value } = await reader.read();
+ if (done) break;
+
+ buffer += decoder.decode(value, { stream: true });
+ const parts = buffer.split('\n\n');
+ buffer = parts.pop() || '';
+
+ for (const part of parts) {
+ if (!part.startsWith('data: ')) continue;
+ let event;
+ try {
+ event = JSON.parse(part.slice(6));
+ } catch {
+ continue;
+ }
+
+ switch (event.type) {
+ case 'text_delta':
+ handlers.onTextDelta?.(event.text);
+ break;
+ case 'tool_start':
+ handlers.onToolStart?.(event);
+ break;
+ case 'tool_result':
+ handlers.onToolResult?.(event);
+ break;
+ case 'approval_needed':
+ handlers.onApprovalNeeded?.(event);
+ break;
+ case 'plan_step':
+ handlers.onPlanStep?.(event);
+ break;
+ case 'terminal_output':
+ handlers.onTerminalOutput?.(event);
+ break;
+ case 'terminal_exit':
+ handlers.onTerminalExit?.(event);
+ break;
+ case 'test_result':
+ handlers.onTestResult?.(event);
+ break;
+ case 'diagnostics':
+ handlers.onDiagnostics?.(event);
+ break;
+ case 'status_change':
+ handlers.onStatusChange?.(event.status, event.message);
+ break;
+ case 'done':
+ handlers.onDone?.(event);
+ break;
+ case 'error':
+ handlers.onError?.(event);
+ break;
+ }
+ }
+ }
+ } catch (err) {
+ if (controller.signal.aborted) {
+ // User cancelled — not an error
+ return;
+ }
+ handlers.onError?.({ error: String(err) });
+ } finally {
+ activeControllers.delete(sessionId);
+ }
+ };
+
+ run();
+
+ return () => {
+ controller.abort();
+ activeControllers.delete(sessionId);
+ };
+}
+
+/**
+ * Cancel the active SSE stream for a session.
+ */
+export function cancelStream(sessionId) {
+ const controller = activeControllers.get(sessionId);
+ if (controller) {
+ controller.abort();
+ activeControllers.delete(sessionId);
+ }
+}
+
+/**
+ * Send an approval response to the backend.
+ */
+export async function respondToApproval(sessionId, requestId, approved, scope = 'once') {
+ try {
+ await fetch(apiUrl('/api/v2/approval/respond'), {
+ method: 'POST',
+ headers: { 'Content-Type': 'application/json' },
+ body: JSON.stringify({
+ session_id: sessionId,
+ request_id: requestId,
+ approved,
+ scope,
+ }),
+ });
+ } catch (err) {
+ console.error('[GitPilot] Approval response failed:', err);
+ }
+}
+
+/**
+ * Check if the backend supports the V2 streaming API.
+ * Call this once on startup to decide whether to use SSE or batch.
+ */
+export async function checkV2Support() {
+ try {
+ const res = await fetch(apiUrl('/api/status'));
+ if (!res.ok) return false;
+ // If the server is running, v2 endpoints are available
+ // (they're part of the same api.py)
+ return true;
+ } catch {
+ return false;
+ }
+}
diff --git a/frontend/utils/ws.js b/frontend/utils/ws.js
new file mode 100644
index 0000000000000000000000000000000000000000..18396cfb03f9c315ca0cbc66979ee0164f6d762a
--- /dev/null
+++ b/frontend/utils/ws.js
@@ -0,0 +1,168 @@
+/**
+ * WebSocket client for real-time session streaming.
+ *
+ * Provides auto-reconnection, heartbeat, and event dispatching.
+ * Falls back gracefully — callers should always have an HTTP fallback.
+ */
+
+const WS_RECONNECT_DELAYS = [1000, 2000, 4000, 8000, 16000];
+const HEARTBEAT_INTERVAL = 30000;
+const MAX_RECONNECT_ATTEMPTS = 5;
+// If a connection dies within this window it counts as unstable
+const MIN_STABLE_DURATION_MS = 3000;
+
+export class SessionWebSocket {
+ // Per-session WebSocket wrapper: connects, reconnects with backoff,
+ // heartbeats, and fans incoming JSON messages out to caller handlers.
+ constructor(sessionId, { onMessage, onStatusChange, onError, onConnect, onDisconnect } = {}) {
+ this._sessionId = sessionId;
+ this._handlers = { onMessage, onStatusChange, onError, onConnect, onDisconnect };
+ this._ws = null;
+ this._reconnectAttempt = 0;
+ this._heartbeatTimer = null;
+ this._closed = false;
+ this._connectTime = 0;
+ }
+
+ // Open (or re-open) the socket. Safe to call repeatedly; does nothing
+ // once close() has been called.
+ connect() {
+ if (this._closed) return;
+
+ const protocol = window.location.protocol === 'https:' ? 'wss:' : 'ws:';
+ const backendUrl = import.meta.env.VITE_BACKEND_URL || '';
+ let wsUrl;
+
+ if (backendUrl) {
+ // Production: replace http(s) with ws(s)
+ wsUrl = backendUrl.replace(/^http/, 'ws') + `/ws/sessions/${this._sessionId}`;
+ } else {
+ // Dev: same host (Vite proxy forwards /ws to backend)
+ wsUrl = `${protocol}//${window.location.host}/ws/sessions/${this._sessionId}`;
+ }
+
+ try {
+ this._ws = new WebSocket(wsUrl);
+ } catch {
+ // WebSocket constructor can throw if URL is invalid
+ this._scheduleReconnect();
+ return;
+ }
+
+ this._ws.onopen = () => {
+ // A successful open resets the backoff counter.
+ this._connectTime = Date.now();
+ this._reconnectAttempt = 0;
+ this._startHeartbeat();
+ this._handlers.onConnect?.();
+ };
+
+ this._ws.onmessage = (event) => {
+ try {
+ const data = JSON.parse(event.data);
+ this._dispatch(data);
+ } catch (e) {
+ console.warn('[ws] Failed to parse message:', e);
+ }
+ };
+
+ this._ws.onclose = (event) => {
+ this._stopHeartbeat();
+ this._handlers.onDisconnect?.(event);
+
+ if (!this._closed) {
+ // If connection died very quickly, count it as unstable
+ const lived = Date.now() - (this._connectTime || 0);
+ if (lived < MIN_STABLE_DURATION_MS) {
+ this._reconnectAttempt++;
+ }
+
+ // NOTE(review): _scheduleReconnect() increments _reconnectAttempt
+ // again, so an unstable close consumes two attempts — confirm this
+ // double-penalty is intentional.
+ if (this._reconnectAttempt < MAX_RECONNECT_ATTEMPTS) {
+ this._scheduleReconnect();
+ } else {
+ console.warn('[ws] Max reconnect attempts reached, giving up.');
+ }
+ }
+ };
+
+ this._ws.onerror = () => {
+ // Suppress noisy console errors during reconnection attempts.
+ // The onclose handler already manages reconnection logic.
+ // Only notify the caller if we had a stable connection that broke.
+ if (this._connectTime && Date.now() - this._connectTime > MIN_STABLE_DURATION_MS) {
+ this._handlers.onError?.(new Error('WebSocket connection lost'));
+ }
+ };
+ }
+
+ // Serialize and send `data`; returns false (dropping the message) when
+ // the socket is not open.
+ send(data) {
+ if (this._ws?.readyState === WebSocket.OPEN) {
+ this._ws.send(JSON.stringify(data));
+ return true;
+ }
+ return false;
+ }
+
+ // Convenience: send a user chat message frame.
+ sendMessage(content) {
+ return this.send({ type: 'user_message', content });
+ }
+
+ // Convenience: ask the backend to cancel the in-flight operation.
+ cancel() {
+ return this.send({ type: 'cancel' });
+ }
+
+ // Permanently shut down: stops the heartbeat and disables reconnection.
+ close() {
+ this._closed = true;
+ this._stopHeartbeat();
+ if (this._ws) {
+ this._ws.close();
+ this._ws = null;
+ }
+ }
+
+ get connected() {
+ return this._ws?.readyState === WebSocket.OPEN;
+ }
+
+ // Fan an incoming frame out to the appropriate handler. Unknown types
+ // fall through to onMessage so new server frames aren't silently lost.
+ _dispatch(data) {
+ const { type } = data;
+
+ switch (type) {
+ case 'agent_message':
+ case 'tool_use':
+ case 'tool_result':
+ case 'diff_update':
+ case 'session_restored':
+ case 'message_received':
+ this._handlers.onMessage?.(data);
+ break;
+ case 'status_change':
+ this._handlers.onStatusChange?.(data.status);
+ break;
+ case 'error':
+ this._handlers.onError?.(new Error(data.message));
+ break;
+ case 'pong':
+ // Heartbeat reply — nothing to do.
+ break;
+ default:
+ this._handlers.onMessage?.(data);
+ }
+ }
+
+ // (Re)start the periodic ping that keeps intermediaries from idling out
+ // the connection.
+ _startHeartbeat() {
+ this._stopHeartbeat();
+ this._heartbeatTimer = setInterval(() => {
+ this.send({ type: 'ping' });
+ }, HEARTBEAT_INTERVAL);
+ }
+
+ _stopHeartbeat() {
+ if (this._heartbeatTimer) {
+ clearInterval(this._heartbeatTimer);
+ this._heartbeatTimer = null;
+ }
+ }
+
+ // Retry connect() after an exponential-backoff delay capped at the last
+ // entry of WS_RECONNECT_DELAYS.
+ _scheduleReconnect() {
+ const delay = WS_RECONNECT_DELAYS[
+ Math.min(this._reconnectAttempt, WS_RECONNECT_DELAYS.length - 1)
+ ];
+ this._reconnectAttempt++;
+ setTimeout(() => this.connect(), delay);
+ }
+}
diff --git a/frontend/vite.config.js b/frontend/vite.config.js
new file mode 100644
index 0000000000000000000000000000000000000000..e6af0e66dfa29001f58cfe462f36500f56982d2c
--- /dev/null
+++ b/frontend/vite.config.js
@@ -0,0 +1,25 @@
+// frontend/vite.config.js
+import { defineConfig } from "vite";
+import react from "@vitejs/plugin-react";
+
+export default defineConfig({
+ plugins: [react()],
+ define: {
+ // Compile-time constant baked into the bundle from package.json.
+ __APP_VERSION__: JSON.stringify(process.env.npm_package_version || "unknown"),
+ },
+ // Dev-server settings only; production builds are served statically.
+ server: {
+ port: 5173,
+ host: true,
+ // Only proxy API requests when NOT running in Vercel dev
+ // (Vercel dev handles API routing to serverless functions)
+ proxy: process.env.VERCEL
+ ? undefined
+ : {
+ "/api": "http://localhost:8000",
+ "/ws": {
+ target: "ws://localhost:8000",
+ ws: true,
+ },
+ },
+ },
+});
\ No newline at end of file
diff --git a/gitpilot/__init__.py b/gitpilot/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..febd3bec09bbdd4c708d9dde1a6513130281818e
--- /dev/null
+++ b/gitpilot/__init__.py
@@ -0,0 +1,5 @@
+"""GitPilot package."""
+
+from .version import __version__
+
+__all__ = ["__version__"]
diff --git a/gitpilot/__main__.py b/gitpilot/__main__.py
new file mode 100644
index 0000000000000000000000000000000000000000..46c73041d1fd9224b478b6fe032b704a206b565b
--- /dev/null
+++ b/gitpilot/__main__.py
@@ -0,0 +1,5 @@
+"""Allow running gitpilot as a module: python -m gitpilot"""
+from .cli import main
+
+if __name__ == "__main__":
+ main()
diff --git a/gitpilot/_api_core.py b/gitpilot/_api_core.py
new file mode 100644
index 0000000000000000000000000000000000000000..bea4091952425af8fb916c8fcb6faf4495787bff
--- /dev/null
+++ b/gitpilot/_api_core.py
@@ -0,0 +1,2417 @@
+# gitpilot/_api_core.py -- Original API module (re-exported by api.py)
+from __future__ import annotations
+
+from pathlib import Path
+from typing import List, Optional
+
+from fastapi import FastAPI, Query, Path as FPath, Header, HTTPException, UploadFile, File
+from fastapi.responses import FileResponse, JSONResponse
+from fastapi.staticfiles import StaticFiles
+from fastapi.middleware.cors import CORSMiddleware
+from pydantic import BaseModel, Field
+
+from .version import __version__
+from .github_api import (
+ list_user_repos,
+ list_user_repos_paginated, # Pagination support
+ search_user_repos, # Search across all repos
+ get_repo_tree,
+ get_file,
+ put_file,
+ execution_context,
+ github_request,
+)
+from .github_app import check_repo_write_access
+from .settings import AppSettings, get_settings, set_provider, update_settings, LLMProvider
+from .agentic import (
+ generate_plan,
+ execute_plan,
+ generate_plan_lite,
+ execute_plan_lite,
+ PlanResult,
+ get_flow_definition,
+ dispatch_request,
+ create_pr_after_execution,
+)
+from .agent_router import route as route_request
+from . import github_issues
+from . import github_pulls
+from . import github_search
+from .session import SessionManager, Session
+from .hooks import HookManager, HookEvent
+from .permissions import PermissionManager, PermissionMode
+from .memory import MemoryManager
+from .context_vault import ContextVault
+from .use_case import UseCaseManager
+from .mcp_client import MCPClient
+from .plugins import PluginManager
+from .skills import SkillManager
+from .smart_model_router import ModelRouter, ModelRouterConfig
+from .topology_registry import (
+ list_topologies as _list_topologies,
+ get_topology_graph as _get_topology_graph,
+ classify_message as _classify_message,
+ get_saved_topology_preference,
+ save_topology_preference,
+)
+from .agent_teams import AgentTeam
+
+
def _is_lite_mode_active() -> bool:
    """Return True when Lite Mode should be used.

    Lite Mode is active when either the explicit ``lite_mode`` setting is on
    or the user's saved topology preference is ``"lite_mode"``.
    """
    settings = get_settings()
    return bool(settings.lite_mode) or get_saved_topology_preference() == "lite_mode"
+from .learning import LearningEngine
+from .cross_repo import CrossRepoAnalyzer
+from .predictions import PredictiveEngine
+from .security import SecurityScanner
+from .nl_database import NLQueryEngine, QueryDialect, SafetyLevel, TableSchema
+from .github_oauth import (
+ generate_authorization_url,
+ exchange_code_for_token,
+ validate_token,
+ initiate_device_flow,
+ poll_device_token,
+ AuthSession,
+ GitHubUser,
+)
+import os
+import logging
+from .model_catalog import list_models_for_provider
+
+# Optional A2A adapter (MCP ContextForge)
+from .a2a_adapter import router as a2a_router
+
logger = logging.getLogger(__name__)

# Module-level singletons shared across all requests; instantiated once at
# import time, before the FastAPI app is created.

# --- Phase 1 singletons ---
_session_mgr = SessionManager()
_hook_mgr = HookManager()
_perm_mgr = PermissionManager()

# --- Phase 2 singletons ---
_mcp_client = MCPClient()
_plugin_mgr = PluginManager()
_skill_mgr = SkillManager()
_model_router = ModelRouter()

# --- Phase 3 singletons ---
_agent_team = AgentTeam()
_learning_engine = LearningEngine()
_cross_repo = CrossRepoAnalyzer()
_predictive_engine = PredictiveEngine()
_security_scanner = SecurityScanner()
_nl_engine = NLQueryEngine()
+
# Single FastAPI application instance; routers and middleware attach below.
app = FastAPI(
    title="GitPilot API",
    version=__version__,
    description="Agentic AI assistant for GitHub repositories.",
)
+
+# ==========================================================================
+# Optional A2A Adapter (MCP ContextForge)
+# ==========================================================================
+# This is feature-flagged and does not affect the existing UI/REST API unless
+# explicitly enabled.
+def _env_bool(name: str, default: bool) -> bool:
+ raw = os.getenv(name)
+ if raw is None:
+ return default
+ return raw.strip().lower() in {"1", "true", "yes", "y", "on"}
+
+
# A2A adapter is strictly opt-in: mounting happens only when the env flag is
# set, so the existing UI/REST API is unaffected by default.
if _env_bool("GITPILOT_ENABLE_A2A", False):
    logger.info("A2A adapter enabled (mounting /a2a/* endpoints)")
    app.include_router(a2a_router)
else:
    logger.info("A2A adapter disabled (set GITPILOT_ENABLE_A2A=true to enable)")

# ============================================================================
# CORS Configuration
# ============================================================================
# Enable CORS to allow frontend (local dev or Vercel) to connect to backend.
# CORS_ORIGINS is a comma-separated list; defaults to the local Vite dev port.
allowed_origins_str = os.getenv("CORS_ORIGINS", "http://localhost:5173")
allowed_origins = [origin.strip() for origin in allowed_origins_str.split(",")]

logger.info(f"CORS enabled for origins: {allowed_origins}")

app.add_middleware(
    CORSMiddleware,
    allow_origins=allowed_origins,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
+
+
def get_github_token(authorization: Optional[str] = Header(None)) -> Optional[str]:
    """
    Extract a GitHub access token from the ``Authorization`` header.

    Supported header formats:
      - ``Bearer <token>``
      - ``token <token>``
      - ``<token>`` (raw token with no scheme prefix)

    Returns ``None`` when no Authorization header was sent.
    """
    if not authorization:
        return None

    if authorization.startswith("Bearer "):
        return authorization[7:]
    elif authorization.startswith("token "):
        return authorization[6:]
    else:
        # No recognised scheme: treat the whole header value as the token.
        return authorization
+
+
# RepoSummary carries default_branch so the frontend can target the repo's
# real default branch instead of assuming "main".
class RepoSummary(BaseModel):
    id: int
    name: str
    full_name: str
    private: bool
    owner: str
    # Defaults to "main", but upstream data may say "master", "dev", etc.
    default_branch: str = "main"


class PaginatedReposResponse(BaseModel):
    """Response model for paginated repository listing."""
    repositories: List[RepoSummary]
    page: int
    per_page: int
    total_count: Optional[int] = None  # may be unknown in plain listing mode
    has_more: bool
    query: Optional[str] = None  # echoed back when search mode was used
+
+
class FileEntry(BaseModel):
    """One entry of a repository file tree."""
    path: str
    type: str  # entry type as reported by get_repo_tree — presumably blob/tree; confirm upstream


class FileTreeResponse(BaseModel):
    """Flat list of all entries in a repository tree."""
    files: List[FileEntry] = Field(default_factory=list)


class FileContent(BaseModel):
    """Contents of a single file fetched from a repository."""
    path: str
    encoding: str = "utf-8"
    content: str


class CommitRequest(BaseModel):
    """Payload for writing a file: target path, new content, commit message."""
    path: str
    content: str
    message: str


class CommitResponse(BaseModel):
    """Result of committing a file."""
    path: str
    commit_sha: str
    commit_url: Optional[str] = None


class SettingsResponse(BaseModel):
    """Application settings snapshot returned by the settings endpoints."""
    provider: LLMProvider
    providers: List[LLMProvider]
    openai: dict
    claude: dict
    watsonx: dict
    ollama: dict
    langflow_url: str
    has_langflow_plan_flow: bool
    lite_mode: bool = False


class ProviderModelsResponse(BaseModel):
    """Models available for one LLM provider; error is set when lookup failed."""
    provider: LLMProvider
    models: List[str] = Field(default_factory=list)
    error: Optional[str] = None
+
+
class ProviderUpdate(BaseModel):
    """Request to switch the active LLM provider."""
    provider: LLMProvider


class ChatPlanRequest(BaseModel):
    """Ask the agent to generate a plan for a goal in a repository."""
    repo_owner: str
    repo_name: str
    goal: str
    branch_name: Optional[str] = None  # None -> repository default/HEAD


class ExecutePlanRequest(BaseModel):
    """Execute a previously generated plan against a repository."""
    repo_owner: str
    repo_name: str
    plan: PlanResult
    branch_name: Optional[str] = None


class AuthUrlResponse(BaseModel):
    """GitHub OAuth web-flow authorization URL plus CSRF state value."""
    authorization_url: str
    state: str


class AuthCallbackRequest(BaseModel):
    """OAuth callback parameters: authorization code plus state to verify."""
    code: str
    state: str


class TokenValidationRequest(BaseModel):
    """Token to validate against GitHub."""
    access_token: str


class UserInfoResponse(BaseModel):
    """Result of token validation."""
    user: GitHubUser
    authenticated: bool


class RepoAccessResponse(BaseModel):
    """Write-access probe result for a repository."""
    can_write: bool
    app_installed: bool
    auth_type: str
+
# --- v2 Request/Response models ---

class ChatRequest(BaseModel):
    """Unified chat request for the conversational dispatcher."""
    repo_owner: str
    repo_name: str
    message: str
    branch_name: Optional[str] = None
    auto_pr: bool = False  # when True, a PR is expected to follow execution
    topology_id: Optional[str] = None  # Override topology for this request


class IssueCreateRequest(BaseModel):
    """Fields for creating a GitHub issue."""
    title: str
    body: Optional[str] = None
    labels: Optional[List[str]] = None
    assignees: Optional[List[str]] = None
    milestone: Optional[int] = None


class IssueUpdateRequest(BaseModel):
    """Partial update for an issue; all fields optional."""
    title: Optional[str] = None
    body: Optional[str] = None
    state: Optional[str] = None
    labels: Optional[List[str]] = None
    assignees: Optional[List[str]] = None
    milestone: Optional[int] = None


class IssueCommentRequest(BaseModel):
    """Body text for a new issue comment."""
    body: str


class PRCreateRequest(BaseModel):
    """Fields for opening a pull request (head branch into base branch)."""
    title: str
    head: str
    base: str
    body: Optional[str] = None
    draft: bool = False


class PRMergeRequest(BaseModel):
    """Options for merging a pull request."""
    merge_method: str = "merge"  # forwarded to GitHub (merge/squash/rebase)
    commit_title: Optional[str] = None
    commit_message: Optional[str] = None


class SearchRequest(BaseModel):
    """Generic search parameters."""
    query: str
    per_page: int = 30
    page: int = 1
+
+
+# ============================================================================
+# Repository Endpoints - Enterprise Grade with Pagination & Search
+# ============================================================================
+
@app.get("/api/repos", response_model=PaginatedReposResponse)
async def api_list_repos(
    query: Optional[str] = Query(None, description="Search query (searches across ALL repositories)"),
    page: int = Query(1, ge=1, description="Page number (starts at 1)"),
    per_page: int = Query(100, ge=1, le=100, description="Results per page (max 100)"),
    authorization: Optional[str] = Header(None),
):
    """
    List user repositories with enterprise-grade pagination and search.

    Two modes:
      - ``query`` given  -> search across ALL of the user's repositories
      - no ``query``     -> plain page-by-page listing

    Each summary includes ``default_branch`` so the frontend can route to the
    repository's real default branch ('main' vs 'master' vs anything else).
    """
    token = get_github_token(authorization)

    try:
        if query:
            # SEARCH MODE: Search across ALL repositories
            result = await search_user_repos(
                query=query,
                page=page,
                per_page=per_page,
                token=token
            )
        else:
            # PAGINATION MODE: Return repos page by page
            result = await list_user_repos_paginated(
                page=page,
                per_page=per_page,
                token=token
            )

        # Map raw dicts to RepoSummary; default_branch falls back to "main"
        # when the upstream payload omits it.
        repos = [
            RepoSummary(
                id=r["id"],
                name=r["name"],
                full_name=r["full_name"],
                private=r["private"],
                owner=r["owner"],
                default_branch=r.get("default_branch", "main"),
            )
            for r in result["repositories"]
        ]

        return PaginatedReposResponse(
            repositories=repos,
            page=result["page"],
            per_page=result["per_page"],
            total_count=result.get("total_count"),
            has_more=result["has_more"],
            query=query,
        )

    except Exception as e:
        # Use the module logger (not the root logger via logging.exception) so
        # messages honour this module's logging configuration.
        logger.exception("Error fetching repositories")
        return JSONResponse(
            content={
                "error": f"Failed to fetch repositories: {str(e)}",
                "repositories": [],
                "page": page,
                "per_page": per_page,
                "has_more": False,
            },
            status_code=500
        )
+
+
@app.get("/api/repos/all")
async def api_list_all_repos(
    query: Optional[str] = Query(None, description="Search query"),
    authorization: Optional[str] = Header(None),
):
    """
    Fetch ALL user repositories at once (no pagination).

    Pages through the paginated listing (100 repos per call) up to a safety
    cap, then optionally filters by ``query`` (case-insensitive substring
    match on name or full_name). The paginated endpoint is preferred for
    large accounts.
    """
    token = get_github_token(authorization)

    try:
        all_repos = []
        page = 1
        max_pages = 15  # Safety limit: 1500 repos max (15 * 100)

        while page <= max_pages:
            result = await list_user_repos_paginated(
                page=page,
                per_page=100,
                token=token
            )

            all_repos.extend(result["repositories"])

            if not result["has_more"]:
                break

            page += 1

        # Case-insensitive substring filter over name / full_name.
        if query:
            query_lower = query.lower()
            all_repos = [
                r for r in all_repos
                if query_lower in r["name"].lower() or query_lower in r["full_name"].lower()
            ]

        repos = [
            RepoSummary(
                id=r["id"],
                name=r["name"],
                full_name=r["full_name"],
                private=r["private"],
                owner=r["owner"],
                default_branch=r.get("default_branch", "main"),
            )
            for r in all_repos
        ]

        return {
            "repositories": repos,
            "total_count": len(repos),
            "query": query,
        }

    except Exception as e:
        # Module logger (was the root logger), and an error payload shaped
        # like the success payload so clients can parse both uniformly.
        logger.exception("Error fetching all repositories")
        return JSONResponse(
            content={
                "error": f"Failed to fetch repositories: {str(e)}",
                "repositories": [],
                "total_count": 0,
                "query": query,
            },
            status_code=500
        )
+
+
@app.get("/api/repos/{owner}/{repo}/tree", response_model=FileTreeResponse)
async def api_repo_tree(
    owner: str = FPath(...),
    repo: str = FPath(...),
    ref: Optional[str] = Query(
        None,
        description="Git reference (branch, tag, or commit SHA). If omitted, defaults to HEAD.",
    ),
    authorization: Optional[str] = Header(None),
):
    """
    Get the file tree for a repository.

    Gracefully handles:
      - empty repositories (upstream 409) -> empty file list
      - unknown refs (404) -> explicit 404 with a 'main' vs 'master' hint
    """
    token = get_github_token(authorization)

    # Keep legacy behavior: missing/empty ref behaves like HEAD.
    ref_value = (ref or "").strip() or "HEAD"

    try:
        tree = await get_repo_tree(owner, repo, token=token, ref=ref_value)
        return FileTreeResponse(files=[FileEntry(**f) for f in tree])

    except HTTPException as e:
        # 409 is returned for an empty repository — not an error for us.
        if e.status_code == 409:
            return FileTreeResponse(files=[])

        if e.status_code == 404:
            return JSONResponse(
                status_code=404,
                content={
                    "detail": f"Ref '{ref_value}' not found. The repository might be using a different default branch (e.g., 'master')."
                }
            )

        # Bare raise (not `raise e`) preserves the original traceback.
        raise
+
+
@app.get("/api/repos/{owner}/{repo}/file", response_model=FileContent)
async def api_get_file(
    owner: str = FPath(...),
    repo: str = FPath(...),
    path: str = Query(...),
    authorization: Optional[str] = Header(None),
):
    """Fetch a single file's content from a repository."""
    token = get_github_token(authorization)
    content = await get_file(owner, repo, path, token=token)
    return FileContent(path=path, content=content)


@app.post("/api/repos/{owner}/{repo}/file", response_model=CommitResponse)
async def api_put_file(
    owner: str = FPath(...),
    repo: str = FPath(...),
    payload: CommitRequest = ...,
    authorization: Optional[str] = Header(None),
):
    """Write a file's content and commit it with the given message."""
    token = get_github_token(authorization)
    result = await put_file(
        owner, repo, payload.path, payload.content, payload.message, token=token
    )
    # put_file returns a dict matching CommitResponse's fields.
    return CommitResponse(**result)
+
+
+# ============================================================================
+# Settings Endpoints
+# ============================================================================
+
def _settings_response(s: AppSettings) -> SettingsResponse:
    """Serialize AppSettings into the public SettingsResponse payload.

    Shared by every settings endpoint so the response shape cannot drift
    between them (was previously duplicated three times).
    """
    return SettingsResponse(
        provider=s.provider,
        providers=[LLMProvider.openai, LLMProvider.claude, LLMProvider.watsonx, LLMProvider.ollama],
        openai=s.openai.model_dump(),
        claude=s.claude.model_dump(),
        watsonx=s.watsonx.model_dump(),
        ollama=s.ollama.model_dump(),
        langflow_url=s.langflow_url,
        has_langflow_plan_flow=bool(s.langflow_plan_flow_id),
        lite_mode=s.lite_mode,
    )


@app.get("/api/settings", response_model=SettingsResponse)
async def api_get_settings():
    """Return the current application settings."""
    return _settings_response(get_settings())


@app.get("/api/settings/models", response_model=ProviderModelsResponse)
async def api_list_models(provider: Optional[LLMProvider] = Query(None)):
    """
    Return the list of LLM models available for a provider.

    If 'provider' is not given, use the currently active provider from settings.
    """
    s: AppSettings = get_settings()
    effective_provider = provider or s.provider

    models, error = list_models_for_provider(effective_provider, s)

    return ProviderModelsResponse(
        provider=effective_provider,
        models=models,
        error=error,
    )


@app.post("/api/settings/provider", response_model=SettingsResponse)
async def api_set_provider(update: ProviderUpdate):
    """Switch the active LLM provider and return the updated settings."""
    return _settings_response(set_provider(update.provider))


@app.put("/api/settings/llm", response_model=SettingsResponse)
async def api_update_llm_settings(updates: dict):
    """Update full LLM settings including provider-specific configs."""
    return _settings_response(update_settings(updates))
+
+
+# ============================================================================
+# Chat Endpoints
+# ============================================================================
+
@app.post("/api/chat/plan", response_model=PlanResult)
async def api_chat_plan(req: ChatPlanRequest, authorization: Optional[str] = Header(None)):
    """Generate an execution plan for the requested goal."""
    token = get_github_token(authorization)

    # Log the branch explicitly — branch-routing bugs are easiest to spot here.
    logger.info(
        "PLAN REQUEST: %s/%s | branch_name=%r",
        req.repo_owner,
        req.repo_name,
        req.branch_name,
    )

    # Pin the execution context to the requested ref so tool calls made during
    # planning resolve against that branch rather than HEAD.
    with execution_context(token, ref=req.branch_name):
        full_name = f"{req.repo_owner}/{req.repo_name}"
        plan = await generate_plan(req.goal, full_name, token=token, branch_name=req.branch_name)
        return plan


@app.post("/api/chat/execute")
async def api_chat_execute(
    req: ExecutePlanRequest,
    authorization: Optional[str] = Header(None)
):
    """Execute a previously generated plan."""
    token = get_github_token(authorization)

    # execution_context(token, ref=...) ensures tool calls relying on ambient
    # context never accidentally run on HEAD/default when a branch_name is
    # provided.
    with execution_context(token, ref=req.branch_name):
        full_name = f"{req.repo_owner}/{req.repo_name}"
        # Use lite executor when Lite Mode is active
        _executor = execute_plan_lite if _is_lite_mode_active() else execute_plan
        result = await _executor(
            req.plan, full_name, token=token, branch_name=req.branch_name
        )
        if isinstance(result, dict):
            # "sticky" = caller pinned a branch; "hard-switch" = default ref.
            result.setdefault(
                "mode",
                "sticky" if req.branch_name else "hard-switch",
            )
        return result
+
+
@app.get("/api/flow/current")
async def api_get_flow(topology: Optional[str] = Query(None)):
    """Return the agent flow definition as a graph.

    Resolution order: the explicit ``topology`` query param, then the user's
    saved topology preference, and finally the legacy hardcoded graph from
    ``get_flow_definition()`` for full backward compatibility.
    """
    selected = topology or get_saved_topology_preference()
    if selected:
        return _get_topology_graph(selected)
    # Legacy path — original hardcoded graph.
    return await get_flow_definition()
+
+
+# ============================================================================
+# Topology Registry Endpoints (additive — no existing behaviour changed)
+# ============================================================================
+
@app.get("/api/flow/topologies")
async def api_list_topologies():
    """Return lightweight summaries of all available topology presets."""
    return _list_topologies()


@app.get("/api/flow/topology/{topology_id}")
async def api_get_topology(topology_id: str):
    """Return the full flow graph for a specific topology."""
    return _get_topology_graph(topology_id)


class ClassifyRequest(BaseModel):
    """Message to classify into a topology."""
    message: str


@app.post("/api/flow/classify")
async def api_classify_message(req: ClassifyRequest):
    """Auto-detect the best topology for a given user message.

    Returns the recommended topology, confidence score, and up to 4
    alternatives ranked by relevance.
    """
    result = _classify_message(req.message)
    return result.to_dict()


class TopologyPrefRequest(BaseModel):
    """Topology id to persist as the user's preference."""
    topology: str


@app.get("/api/settings/topology")
async def api_get_topology_pref():
    """Return the user's saved topology preference (or null)."""
    pref = get_saved_topology_preference()
    return {"topology": pref}


@app.post("/api/settings/topology")
async def api_set_topology_pref(req: TopologyPrefRequest):
    """Save the user's preferred topology."""
    save_topology_preference(req.topology)
    return {"status": "ok", "topology": req.topology}


class LiteModeRequest(BaseModel):
    """Desired Lite Mode state."""
    lite_mode: bool


@app.get("/api/settings/lite-mode")
async def api_get_lite_mode():
    """Return current Lite Mode status."""
    s = get_settings()
    return {"lite_mode": s.lite_mode}


@app.post("/api/settings/lite-mode")
async def api_set_lite_mode(req: LiteModeRequest):
    """Toggle Lite Mode on or off."""
    s = update_settings({"lite_mode": req.lite_mode})
    return {"status": "ok", "lite_mode": s.lite_mode}
+
+
+# ============================================================================
+# Conversational Chat Endpoint (v2 upgrade)
+# ============================================================================
+
@app.post("/api/chat/message")
async def api_chat_message(req: ChatRequest, authorization: Optional[str] = Header(None)):
    """
    Unified conversational endpoint. The router analyses the message and
    dispatches to the appropriate agent (issue, PR, search, review, learning,
    or the existing plan+execute pipeline).
    """
    token = get_github_token(authorization)

    logger.info(
        "CHAT MESSAGE: %s/%s | message=%r | branch=%r",
        req.repo_owner,
        req.repo_name,
        req.message[:80],  # truncated to keep log lines bounded
        req.branch_name,
    )

    with execution_context(token, ref=req.branch_name):
        full_name = f"{req.repo_owner}/{req.repo_name}"
        result = await dispatch_request(
            req.message, full_name, token=token, branch_name=req.branch_name,
            topology_id=req.topology_id,
        )

        # auto_pr only adds a hint at this stage: the plan must be executed
        # first; the PR itself is created by the execute-with-pr endpoint.
        if (
            req.auto_pr
            and isinstance(result, dict)
            and result.get("category") == "plan_execute"
            and result.get("plan")
        ):
            result["auto_pr_hint"] = (
                "Plan generated. Execute it first, then auto-PR will be created."
            )

        return result


@app.post("/api/chat/execute-with-pr")
async def api_chat_execute_with_pr(
    req: ExecutePlanRequest,
    authorization: Optional[str] = Header(None),
):
    """Execute a plan AND automatically create a pull request afterwards."""
    token = get_github_token(authorization)

    with execution_context(token, ref=req.branch_name):
        full_name = f"{req.repo_owner}/{req.repo_name}"
        # Lite Mode swaps in the lightweight executor.
        _executor = execute_plan_lite if _is_lite_mode_active() else execute_plan
        result = await _executor(
            req.plan, full_name, token=token, branch_name=req.branch_name,
        )

        # Open a PR only when execution finished and we know which branch
        # the changes landed on.
        if isinstance(result, dict) and result.get("status") == "completed":
            branch = result.get("branch", req.branch_name)
            if branch:
                pr = await create_pr_after_execution(
                    full_name,
                    branch,
                    req.plan.goal,
                    result.get("executionLog", {}),
                    token=token,
                )
                if pr:
                    result["pull_request"] = {
                        "number": pr.get("number"),
                        "url": pr.get("html_url"),
                        "title": pr.get("title"),
                    }

            result.setdefault(
                "mode",
                "sticky" if req.branch_name else "hard-switch",
            )

        return result
+
+
+# ============================================================================
+# Issue Endpoints (v2 upgrade)
+# ============================================================================
+
# Thin REST proxies over the github_issues module; the GitHub token is
# re-extracted from the Authorization header on every request.

@app.get("/api/repos/{owner}/{repo}/issues")
async def api_list_issues(
    owner: str = FPath(...),
    repo: str = FPath(...),
    state: str = Query("open"),
    labels: Optional[str] = Query(None),
    per_page: int = Query(30, ge=1, le=100),
    page: int = Query(1, ge=1),
    authorization: Optional[str] = Header(None),
):
    """List issues for a repository."""
    token = get_github_token(authorization)
    issues = await github_issues.list_issues(
        owner, repo, state=state, labels=labels,
        per_page=per_page, page=page, token=token,
    )
    return {"issues": issues, "page": page, "per_page": per_page}


@app.get("/api/repos/{owner}/{repo}/issues/{issue_number}")
async def api_get_issue(
    owner: str = FPath(...),
    repo: str = FPath(...),
    issue_number: int = FPath(...),
    authorization: Optional[str] = Header(None),
):
    """Get a single issue."""
    token = get_github_token(authorization)
    return await github_issues.get_issue(owner, repo, issue_number, token=token)


@app.post("/api/repos/{owner}/{repo}/issues")
async def api_create_issue(
    owner: str = FPath(...),
    repo: str = FPath(...),
    payload: IssueCreateRequest = ...,
    authorization: Optional[str] = Header(None),
):
    """Create a new issue."""
    token = get_github_token(authorization)
    return await github_issues.create_issue(
        owner, repo, payload.title,
        body=payload.body, labels=payload.labels,
        assignees=payload.assignees, milestone=payload.milestone,
        token=token,
    )


@app.patch("/api/repos/{owner}/{repo}/issues/{issue_number}")
async def api_update_issue(
    owner: str = FPath(...),
    repo: str = FPath(...),
    issue_number: int = FPath(...),
    payload: IssueUpdateRequest = ...,
    authorization: Optional[str] = Header(None),
):
    """Update an existing issue."""
    token = get_github_token(authorization)
    return await github_issues.update_issue(
        owner, repo, issue_number,
        title=payload.title, body=payload.body, state=payload.state,
        labels=payload.labels, assignees=payload.assignees,
        milestone=payload.milestone, token=token,
    )


@app.get("/api/repos/{owner}/{repo}/issues/{issue_number}/comments")
async def api_list_issue_comments(
    owner: str = FPath(...),
    repo: str = FPath(...),
    issue_number: int = FPath(...),
    authorization: Optional[str] = Header(None),
):
    """List comments on an issue."""
    token = get_github_token(authorization)
    return await github_issues.list_issue_comments(owner, repo, issue_number, token=token)


@app.post("/api/repos/{owner}/{repo}/issues/{issue_number}/comments")
async def api_add_issue_comment(
    owner: str = FPath(...),
    repo: str = FPath(...),
    issue_number: int = FPath(...),
    payload: IssueCommentRequest = ...,
    authorization: Optional[str] = Header(None),
):
    """Add a comment to an issue."""
    token = get_github_token(authorization)
    return await github_issues.add_issue_comment(
        owner, repo, issue_number, payload.body, token=token,
    )
+
+
+# ============================================================================
+# Pull Request Endpoints (v2 upgrade)
+# ============================================================================
+
# Thin REST proxies over the github_pulls module.

@app.get("/api/repos/{owner}/{repo}/pulls")
async def api_list_pulls(
    owner: str = FPath(...),
    repo: str = FPath(...),
    state: str = Query("open"),
    per_page: int = Query(30, ge=1, le=100),
    page: int = Query(1, ge=1),
    authorization: Optional[str] = Header(None),
):
    """List pull requests."""
    token = get_github_token(authorization)
    prs = await github_pulls.list_pull_requests(
        owner, repo, state=state, per_page=per_page, page=page, token=token,
    )
    return {"pull_requests": prs, "page": page, "per_page": per_page}


@app.get("/api/repos/{owner}/{repo}/pulls/{pull_number}")
async def api_get_pull(
    owner: str = FPath(...),
    repo: str = FPath(...),
    pull_number: int = FPath(...),
    authorization: Optional[str] = Header(None),
):
    """Get a single pull request."""
    token = get_github_token(authorization)
    return await github_pulls.get_pull_request(owner, repo, pull_number, token=token)


@app.post("/api/repos/{owner}/{repo}/pulls")
async def api_create_pull(
    owner: str = FPath(...),
    repo: str = FPath(...),
    payload: PRCreateRequest = ...,
    authorization: Optional[str] = Header(None),
):
    """Create a new pull request."""
    token = get_github_token(authorization)
    return await github_pulls.create_pull_request(
        owner, repo, title=payload.title, head=payload.head,
        base=payload.base, body=payload.body, draft=payload.draft,
        token=token,
    )


@app.put("/api/repos/{owner}/{repo}/pulls/{pull_number}/merge")
async def api_merge_pull(
    owner: str = FPath(...),
    repo: str = FPath(...),
    pull_number: int = FPath(...),
    payload: PRMergeRequest = ...,
    authorization: Optional[str] = Header(None),
):
    """Merge a pull request."""
    token = get_github_token(authorization)
    return await github_pulls.merge_pull_request(
        owner, repo, pull_number,
        merge_method=payload.merge_method,
        commit_title=payload.commit_title,
        commit_message=payload.commit_message,
        token=token,
    )


@app.get("/api/repos/{owner}/{repo}/pulls/{pull_number}/files")
async def api_list_pr_files(
    owner: str = FPath(...),
    repo: str = FPath(...),
    pull_number: int = FPath(...),
    authorization: Optional[str] = Header(None),
):
    """List files changed in a pull request."""
    token = get_github_token(authorization)
    return await github_pulls.list_pr_files(owner, repo, pull_number, token=token)
+
+
+# ============================================================================
+# Search Endpoints (v2 upgrade)
+# ============================================================================
+
# Thin REST proxies over the github_search module.

@app.get("/api/search/code")
async def api_search_code(
    q: str = Query(..., description="Search query"),
    owner: Optional[str] = Query(None),
    repo: Optional[str] = Query(None),
    language: Optional[str] = Query(None),
    per_page: int = Query(30, ge=1, le=100),
    page: int = Query(1, ge=1),
    authorization: Optional[str] = Header(None),
):
    """Search for code across GitHub."""
    token = get_github_token(authorization)
    return await github_search.search_code(
        q, owner=owner, repo=repo, language=language,
        per_page=per_page, page=page, token=token,
    )


@app.get("/api/search/issues")
async def api_search_issues(
    q: str = Query(..., description="Search query"),
    owner: Optional[str] = Query(None),
    repo: Optional[str] = Query(None),
    state: Optional[str] = Query(None),
    label: Optional[str] = Query(None),
    per_page: int = Query(30, ge=1, le=100),
    page: int = Query(1, ge=1),
    authorization: Optional[str] = Header(None),
):
    """Search issues and pull requests."""
    token = get_github_token(authorization)
    return await github_search.search_issues(
        q, owner=owner, repo=repo, state=state, label=label,
        per_page=per_page, page=page, token=token,
    )


@app.get("/api/search/repositories")
async def api_search_repositories(
    q: str = Query(..., description="Search query"),
    language: Optional[str] = Query(None),
    sort: Optional[str] = Query(None),
    per_page: int = Query(30, ge=1, le=100),
    page: int = Query(1, ge=1),
    authorization: Optional[str] = Header(None),
):
    """Search for repositories."""
    token = get_github_token(authorization)
    return await github_search.search_repositories(
        q, language=language, sort=sort,
        per_page=per_page, page=page, token=token,
    )


@app.get("/api/search/users")
async def api_search_users(
    q: str = Query(..., description="Search query"),
    # Exposed as ?type= in the URL; 'type' shadows a builtin, hence the alias.
    type_filter: Optional[str] = Query(None, alias="type"),
    location: Optional[str] = Query(None),
    language: Optional[str] = Query(None),
    per_page: int = Query(30, ge=1, le=100),
    page: int = Query(1, ge=1),
    authorization: Optional[str] = Header(None),
):
    """Search for GitHub users and organizations."""
    token = get_github_token(authorization)
    return await github_search.search_users(
        q, type_filter=type_filter, location=location, language=language,
        per_page=per_page, page=page, token=token,
    )
+
+
+# ============================================================================
+# Route Analysis Endpoint (v2 upgrade)
+# ============================================================================
+
@app.post("/api/chat/route")
async def api_chat_route(payload: dict):
    """Preview how a message would be routed without executing it.

    Useful for the frontend to display which agent(s) will handle the request.
    """
    message = payload.get("message", "")
    if not message:
        return JSONResponse({"error": "message is required"}, status_code=400)

    wf = route_request(message)
    return {
        "category": wf.category.value,
        "agents": [agent.value for agent in wf.agents],
        "description": wf.description,
        "requires_repo_context": wf.requires_repo_context,
        "entity_number": wf.entity_number,
        "metadata": wf.metadata,
    }
+
+
+# ============================================================================
+# Authentication Endpoints (Web Flow + Device Flow)
+# ============================================================================
+
@app.get("/api/auth/url", response_model=AuthUrlResponse)
async def api_get_auth_url():
    """Build a GitHub OAuth authorization URL (Web Flow).

    Requires the OAuth client secret to be configured server-side.
    """
    url, state = generate_authorization_url()
    return AuthUrlResponse(authorization_url=url, state=state)
+
+
@app.post("/api/auth/callback", response_model=AuthSession)
async def api_auth_callback(request: AuthCallbackRequest):
    """Handle the GitHub OAuth callback (Web Flow).

    Exchanges the authorization code for an access token; a rejected
    exchange is reported as a 400 JSON error body.
    """
    try:
        return await exchange_code_for_token(request.code, request.state)
    except ValueError as e:
        return JSONResponse({"error": str(e)}, status_code=400)
+
+
@app.post("/api/auth/validate", response_model=UserInfoResponse)
async def api_validate_token(request: TokenValidationRequest):
    """Validate a GitHub access token and return the associated user.

    An invalid token yields ``authenticated=False`` with a placeholder
    user instead of an error status.
    """
    user = await validate_token(request.access_token)
    if not user:
        return UserInfoResponse(
            user=GitHubUser(login="", id=0, avatar_url=""),
            authenticated=False,
        )
    return UserInfoResponse(user=user, authenticated=True)
+
+
@app.post("/api/auth/device/code")
async def api_device_code():
    """Start the GitHub device-login flow (step 1).

    Does NOT require a client secret; upstream failures become a 500
    with the error text in the body.
    """
    try:
        return await initiate_device_flow()
    except Exception as e:
        return JSONResponse({"error": str(e)}, status_code=500)
+
+
@app.post("/api/auth/device/poll")
async def api_device_poll(payload: dict):
    """Poll GitHub to check if the user authorized the device (step 2).

    Responds 202 with {"status": "pending"} while authorization is
    still outstanding; 400 on a missing device_code or a GitHub error.
    """
    device_code = payload.get("device_code")
    if not device_code:
        return JSONResponse({"error": "Missing device_code"}, status_code=400)

    try:
        session = await poll_device_token(device_code)
    except ValueError as e:
        return JSONResponse({"error": str(e)}, status_code=400)

    if session:
        return session
    return JSONResponse({"status": "pending"}, status_code=202)
+
+
@app.get("/api/auth/status")
async def api_auth_status():
    """
    Smart check: Do we have a secret (Web Flow) or just ID (Device Flow)?
    This tells the frontend which UI to render.
    """
    has_secret = bool(os.getenv("GITHUB_CLIENT_SECRET"))
    # NOTE(review): the hard-coded fallback client ID means has_id is always
    # True, so "configured" below can never be False — confirm this baked-in
    # default is intentional for the hosted deployment.
    has_id = bool(os.getenv("GITHUB_CLIENT_ID", "Iv23litmRp80Z6wmlyRn"))

    return {
        # "web" only when a client secret is available for the OAuth code exchange.
        "mode": "web" if has_secret else "device",
        "configured": has_id,
        "oauth_configured": has_secret,
        "pat_configured": bool(os.getenv("GITPILOT_GITHUB_TOKEN") or os.getenv("GITHUB_TOKEN")),
    }
+
+
@app.get("/api/auth/app-url")
async def api_get_app_url():
    """Return the public installation URL for the GitHub App."""
    slug = os.getenv("GITHUB_APP_SLUG", "gitpilota")
    return {
        "app_url": f"https://github.com/apps/{slug}",
        "app_slug": slug,
    }
+
+
@app.get("/api/auth/installation-status")
async def api_check_installation_status():
    """Check if GitHub App is installed for the current user.

    Resolution order:
      1. A configured PAT that validates against GitHub ⇒ installed (auth_type "pat").
      2. Otherwise, fall through to the GitHub App status.
    """
    pat_token = os.getenv("GITPILOT_GITHUB_TOKEN") or os.getenv("GITHUB_TOKEN")

    if pat_token:
        user = await validate_token(pat_token)
        if user:
            return {
                "installed": True,
                "access_token": pat_token,
                "user": user,
                "auth_type": "pat",
            }

    # NOTE(review): because of the hard-coded default app ID, github_app_id is
    # always truthy and the "not configured" branch below is unreachable —
    # confirm whether the fallback should be removed or the branch deleted.
    github_app_id = os.getenv("GITHUB_APP_ID", "2313985")
    if not github_app_id:
        return {
            "installed": False,
            "message": "GitHub authentication not configured.",
            "auth_type": "none",
        }

    return {
        "installed": False,
        "message": "GitHub App not installed.",
        "auth_type": "github_app",
    }
+
+
@app.get("/api/auth/repo-access", response_model=RepoAccessResponse)
async def api_check_repo_access(
    owner: str = Query(...),
    repo: str = Query(...),
    authorization: Optional[str] = Header(None),
):
    """Report whether we can write to a repository.

    Checks the user token (from the Authorization header) and the GitHub
    App installation, so the frontend knows whether to prompt for
    installation or proceed directly.
    """
    user_token = get_github_token(authorization)
    info = await check_repo_write_access(owner, repo, user_token=user_token)

    return RepoAccessResponse(
        can_write=info["can_write"],
        app_installed=info["app_installed"],
        auth_type=info["auth_type"],
    )
+
+
+# ============================================================================
+# Session Endpoints (Phase 1)
+# ============================================================================
+
@app.get("/api/sessions")
async def api_list_sessions():
    """Return every saved session known to the session manager."""
    return {"sessions": _session_mgr.list_sessions()}
+
+
@app.post("/api/sessions")
async def api_create_session(payload: dict):
    """Create a new session.

    Accepts either legacy single-repo or multi-repo format:
        Legacy: {"repo_full_name": "owner/repo", "branch": "main"}
        Multi:  {"repos": [{full_name, branch, mode}], "active_repo": "owner/repo"}
    """
    repo_name = payload.get("repo_full_name", "")
    repo_branch = payload.get("branch")
    session_name = payload.get("name")  # optional — derived from first user prompt
    session = _session_mgr.create(
        repo_full_name=repo_name, branch=repo_branch, name=session_name
    )

    multi_repos = payload.get("repos")
    if multi_repos:
        # Multi-repo context: take the caller's repo list verbatim.
        session.repos = multi_repos
        session.active_repo = payload.get("active_repo", repo_name)
    elif repo_name:
        # Legacy single-repo: synthesize a one-entry context in write mode.
        session.repos = [{"full_name": repo_name, "branch": repo_branch or "main", "mode": "write"}]
        session.active_repo = repo_name

    _session_mgr.save(session)
    return {"session_id": session.id, "status": session.status}
+
+
@app.get("/api/sessions/{session_id}")
async def api_get_session(session_id: str):
    """Return summary details for one session; 404 when unknown."""
    session = _session_mgr.load(session_id)
    if not session:
        raise HTTPException(status_code=404, detail="Session not found")

    return {
        "id": session.id,
        "status": session.status,
        "repo_full_name": session.repo_full_name,
        "branch": session.branch,
        "created_at": session.created_at,
        "message_count": len(session.messages),
        "checkpoint_count": len(session.checkpoints),
        "repos": session.repos,
        "active_repo": session.active_repo,
    }
+
+
@app.delete("/api/sessions/{session_id}")
async def api_delete_session(session_id: str):
    """Delete a session; 404 when no such session exists."""
    if not _session_mgr.delete(session_id):
        raise HTTPException(status_code=404, detail="Session not found")
    return {"deleted": True}
+
+
@app.patch("/api/sessions/{session_id}/context")
async def api_update_session_context(session_id: str, payload: dict):
    """Add, remove, or activate repos in a session's multi-repo context.

    Actions:
        {"action": "add", "repo_full_name": "owner/repo", "branch": "main"}
        {"action": "remove", "repo_full_name": "owner/repo"}
        {"action": "set_active", "repo_full_name": "owner/repo"}
    """
    session = _session_mgr.load(session_id)
    if not session:
        raise HTTPException(status_code=404, detail="Session not found")

    action = payload.get("action")
    repo_name = payload.get("repo_full_name")
    if not action or not repo_name:
        raise HTTPException(status_code=400, detail="action and repo_full_name required")

    already_present = any(
        entry.get("full_name") == repo_name for entry in session.repos
    )

    if action == "add":
        if not already_present:
            session.repos.append({
                "full_name": repo_name,
                "branch": payload.get("branch", "main"),
                "mode": "read",  # newly added repos join read-only
            })
        if not session.active_repo:
            session.active_repo = repo_name
    elif action == "remove":
        session.repos = [
            entry for entry in session.repos if entry.get("full_name") != repo_name
        ]
        if session.active_repo == repo_name:
            # Fall back to the first remaining repo (or no active repo at all).
            session.active_repo = session.repos[0]["full_name"] if session.repos else None
    elif action == "set_active":
        if not already_present:
            raise HTTPException(status_code=400, detail="Repo not in session context")
        # Exactly one repo is writable at a time: the newly active one.
        for entry in session.repos:
            entry["mode"] = "write" if entry.get("full_name") == repo_name else "read"
        session.active_repo = repo_name
    else:
        raise HTTPException(status_code=400, detail=f"Unknown action: {action}")

    _session_mgr.save(session)
    return {
        "repos": session.repos,
        "active_repo": session.active_repo,
    }
+
+
@app.post("/api/sessions/{session_id}/checkpoint")
async def api_create_checkpoint(session_id: str, payload: dict):
    """Create a labelled checkpoint for a session; 404 when unknown."""
    session = _session_mgr.load(session_id)
    if not session:
        raise HTTPException(status_code=404, detail="Session not found")
    cp = _session_mgr.create_checkpoint(session, label=payload.get("label", "checkpoint"))
    return {"checkpoint_id": cp.id, "label": cp.label, "created_at": cp.created_at}
+
+
+# ============================================================================
+# Hooks Endpoints (Phase 1)
+# ============================================================================
+
@app.get("/api/hooks")
async def api_list_hooks():
    """Return all hooks currently registered with the hook manager."""
    return {"hooks": _hook_mgr.list_hooks()}
+
+
@app.post("/api/hooks")
async def api_register_hook(payload: dict):
    """Register a new hook from a JSON payload.

    Required keys: "event", "name". Optional: "command", "blocking",
    "timeout". Missing keys or an unknown event name yield a 400.
    """
    from .hooks import HookDefinition
    try:
        definition = HookDefinition(
            event=HookEvent(payload["event"]),
            name=payload["name"],
            command=payload.get("command"),
            blocking=payload.get("blocking", False),
            timeout=payload.get("timeout", 30),
        )
        _hook_mgr.register(definition)
        return {"registered": True, "name": definition.name, "event": definition.event.value}
    except (KeyError, ValueError) as e:
        raise HTTPException(status_code=400, detail=str(e))
+
+
@app.delete("/api/hooks/{event}/{name}")
async def api_unregister_hook(event: str, name: str):
    """Unregister a hook by event and name; invalid values yield a 400."""
    try:
        _hook_mgr.unregister(HookEvent(event), name)
    except ValueError as e:
        raise HTTPException(status_code=400, detail=str(e))
    return {"unregistered": True}
+
+
+# ============================================================================
+# Permissions Endpoints (Phase 1)
+# ============================================================================
+
@app.get("/api/permissions")
async def api_get_permissions():
    """Return the current permission policy as a plain dict."""
    return _perm_mgr.to_dict()
+
+
@app.put("/api/permissions/mode")
async def api_set_permission_mode(payload: dict):
    """Switch the permission mode (normal, plan, auto); unknown modes → 400."""
    mode_str = payload.get("mode", "normal")
    try:
        new_mode = PermissionMode(mode_str)
    except ValueError:
        raise HTTPException(status_code=400, detail=f"Invalid mode: {mode_str}")
    _perm_mgr.policy.mode = new_mode
    return {"mode": _perm_mgr.policy.mode.value}
+
+
+# ============================================================================
+# Project Context / Memory Endpoints (Phase 1)
+# ============================================================================
+
@app.get("/api/repos/{owner}/{repo}/context")
async def api_get_project_context(
    owner: str = FPath(...),
    repo: str = FPath(...),
):
    """Get project conventions and memory for a repository workspace.

    Returns empty context fields (not an error) when the workspace
    directory has never been created.
    """
    # Consistency fix: use the shared _workspace_path helper instead of
    # duplicating the ~/.gitpilot/workspaces path construction here.
    workspace_path = _workspace_path(owner, repo)
    if not workspace_path.exists():
        return {"conventions": "", "rules": [], "auto_memory": {}, "system_prompt": ""}
    mgr = MemoryManager(workspace_path)
    ctx = mgr.load_context()
    return {
        "conventions": ctx.conventions,
        "rules": ctx.rules,
        "auto_memory": ctx.auto_memory,
        "system_prompt": ctx.to_system_prompt(),
    }
+
+
@app.post("/api/repos/{owner}/{repo}/context/init")
async def api_init_project_context(
    owner: str = FPath(...),
    repo: str = FPath(...),
):
    """Initialize .gitpilot/ directory with template GITPILOT.md.

    Creates the workspace directory on disk if it does not yet exist.
    """
    # Consistency fix: use the shared _workspace_path helper instead of
    # duplicating the ~/.gitpilot/workspaces path construction here.
    workspace_path = _workspace_path(owner, repo)
    workspace_path.mkdir(parents=True, exist_ok=True)
    mgr = MemoryManager(workspace_path)
    md_path = mgr.init_project()
    return {"initialized": True, "path": str(md_path)}
+
+
@app.post("/api/repos/{owner}/{repo}/context/pattern")
async def api_add_learned_pattern(
    owner: str = FPath(...),
    repo: str = FPath(...),
    payload: dict = ...,
):
    """Add a learned pattern to the repository's auto-memory.

    Responds 400 when the payload carries no non-empty "pattern".
    """
    pattern = payload.get("pattern", "")
    if not pattern:
        raise HTTPException(status_code=400, detail="pattern is required")
    # Consistency fix: use the shared _workspace_path helper instead of
    # duplicating the ~/.gitpilot/workspaces path construction here.
    workspace_path = _workspace_path(owner, repo)
    workspace_path.mkdir(parents=True, exist_ok=True)
    mgr = MemoryManager(workspace_path)
    mgr.add_learned_pattern(pattern)
    return {"added": True, "pattern": pattern}
+
+
+# ============================================================================
+# Context Vault Endpoints (additive — Context + Use Case system)
+# ============================================================================
+
+def _workspace_path(owner: str, repo: str) -> Path:
+ """Resolve the local workspace path for a repo."""
+ return Path.home() / ".gitpilot" / "workspaces" / owner / repo
+
+
@app.get("/api/repos/{owner}/{repo}/context/assets")
async def api_list_context_assets(
    owner: str = FPath(...),
    repo: str = FPath(...),
):
    """List metadata for every uploaded context asset of a repository."""
    vault = ContextVault(_workspace_path(owner, repo))
    return {"assets": [asset.to_dict() for asset in vault.list_assets()]}
+
+
@app.post("/api/repos/{owner}/{repo}/context/assets/upload")
async def api_upload_context_asset(
    owner: str = FPath(...),
    repo: str = FPath(...),
    file: UploadFile = File(...),
):
    """Store an uploaded file in the project context vault.

    Uploads rejected by the vault come back as a 400 with the reason.
    """
    vault = ContextVault(_workspace_path(owner, repo))
    data = await file.read()

    try:
        meta = vault.upload_asset(
            file.filename or "upload",
            data,
            mime=file.content_type or "",
        )
        return {"asset": meta.to_dict()}
    except ValueError as e:
        raise HTTPException(status_code=400, detail=str(e))
+
+
@app.delete("/api/repos/{owner}/{repo}/context/assets/{asset_id}")
async def api_delete_context_asset(
    owner: str = FPath(...),
    repo: str = FPath(...),
    asset_id: str = FPath(...),
):
    """Remove an asset from the project context vault."""
    ContextVault(_workspace_path(owner, repo)).delete_asset(asset_id)
    return {"deleted": True, "asset_id": asset_id}
+
+
@app.get("/api/repos/{owner}/{repo}/context/assets/{asset_id}/download")
async def api_download_context_asset(
    owner: str = FPath(...),
    repo: str = FPath(...),
    asset_id: str = FPath(...),
):
    """Stream a raw context-asset file back to the caller; 404 if unknown."""
    vault = ContextVault(_workspace_path(owner, repo))
    asset_path = vault.get_asset_path(asset_id)
    if not asset_path:
        raise HTTPException(status_code=404, detail="Asset not found")
    return FileResponse(asset_path, filename=vault.get_asset_filename(asset_id))
+
+
+# ============================================================================
+# Use Case Endpoints (additive — guided requirement clarification)
+# ============================================================================
+
@app.get("/api/repos/{owner}/{repo}/use-cases")
async def api_list_use_cases(
    owner: str = FPath(...),
    repo: str = FPath(...),
):
    """Return every use case recorded for this repository."""
    manager = UseCaseManager(_workspace_path(owner, repo))
    return {"use_cases": manager.list_use_cases()}
+
+
@app.post("/api/repos/{owner}/{repo}/use-cases")
async def api_create_use_case(
    owner: str = FPath(...),
    repo: str = FPath(...),
    payload: dict = ...,
):
    """Create a new use case from an optional title and initial notes."""
    manager = UseCaseManager(_workspace_path(owner, repo))
    uc = manager.create_use_case(
        title=payload.get("title", "New Use Case"),
        initial_notes=payload.get("initial_notes", ""),
    )
    return {"use_case": uc.to_dict()}
+
+
@app.get("/api/repos/{owner}/{repo}/use-cases/{use_case_id}")
async def api_get_use_case(
    owner: str = FPath(...),
    repo: str = FPath(...),
    use_case_id: str = FPath(...),
):
    """Fetch one use case (messages + spec); 404 when the id is unknown."""
    uc = UseCaseManager(_workspace_path(owner, repo)).get_use_case(use_case_id)
    if not uc:
        raise HTTPException(status_code=404, detail="Use case not found")
    return {"use_case": uc.to_dict()}
+
+
@app.post("/api/repos/{owner}/{repo}/use-cases/{use_case_id}/chat")
async def api_use_case_chat(
    owner: str = FPath(...),
    repo: str = FPath(...),
    use_case_id: str = FPath(...),
    payload: dict = ...,
):
    """Send a guided chat message; returns the reply and the updated spec.

    400 on an empty message, 404 when the use case does not exist.
    """
    text = payload.get("message", "")
    if not text:
        raise HTTPException(status_code=400, detail="message is required")
    uc = UseCaseManager(_workspace_path(owner, repo)).chat(use_case_id, text)
    if not uc:
        raise HTTPException(status_code=404, detail="Use case not found")
    return {"use_case": uc.to_dict()}
+
+
@app.post("/api/repos/{owner}/{repo}/use-cases/{use_case_id}/finalize")
async def api_finalize_use_case(
    owner: str = FPath(...),
    repo: str = FPath(...),
    use_case_id: str = FPath(...),
):
    """Mark a use case active and export its markdown spec; 404 if unknown."""
    uc = UseCaseManager(_workspace_path(owner, repo)).finalize(use_case_id)
    if not uc:
        raise HTTPException(status_code=404, detail="Use case not found")
    return {"use_case": uc.to_dict()}
+
+
+# ============================================================================
+# MCP Endpoints (Phase 2)
+# ============================================================================
+
@app.get("/api/mcp/servers")
async def api_mcp_list_servers():
    """Report configured MCP servers and their connection status."""
    return _mcp_client.to_dict()
+
+
@app.post("/api/mcp/connect/{server_name}")
async def api_mcp_connect(server_name: str):
    """Connect to a named MCP server and list the tools it exposes.

    Connection or configuration problems surface as a 400.
    """
    try:
        conn = await _mcp_client.connect(server_name)
        tools = [
            {"name": t.name, "description": t.description} for t in conn.tools
        ]
        return {"connected": True, "server": server_name, "tools": tools}
    except Exception as e:
        raise HTTPException(status_code=400, detail=str(e))
+
+
@app.post("/api/mcp/disconnect/{server_name}")
async def api_mcp_disconnect(server_name: str):
    """Disconnect from a named MCP server."""
    await _mcp_client.disconnect(server_name)
    return {"disconnected": True, "server": server_name}
+
+
@app.post("/api/mcp/call")
async def api_mcp_call_tool(payload: dict):
    """Invoke a tool on a connected MCP server.

    Payload: {"server": str, "tool": str, "params": dict}.
    400 on missing fields, 404 when not connected, 500 on tool failure.
    """
    server = payload.get("server", "")
    tool_name = payload.get("tool", "")
    params = payload.get("params", {})
    if not server or not tool_name:
        raise HTTPException(status_code=400, detail="server and tool are required")
    # NOTE(review): reaches into _mcp_client's private _connections map —
    # consider adding a public lookup method to the client.
    conn = _mcp_client._connections.get(server)
    if not conn:
        raise HTTPException(status_code=404, detail=f"Not connected to server: {server}")
    try:
        result = await _mcp_client.call_tool(conn, tool_name, params)
        return {"result": result}
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
+
+
+# ============================================================================
+# Plugin Endpoints (Phase 2)
+# ============================================================================
+
@app.get("/api/plugins")
async def api_list_plugins():
    """List installed plugins as serialized dicts."""
    return {"plugins": [p.to_dict() for p in _plugin_mgr.list_installed()]}
+
+
@app.post("/api/plugins/install")
async def api_install_plugin(payload: dict):
    """Install a plugin from a git URL or local path; failures map to 400."""
    source = payload.get("source", "")
    if not source:
        raise HTTPException(status_code=400, detail="source is required")
    try:
        info = _plugin_mgr.install(source)
        return {"installed": True, "plugin": info.to_dict()}
    except Exception as e:
        raise HTTPException(status_code=400, detail=str(e))
+
+
@app.delete("/api/plugins/{name}")
async def api_uninstall_plugin(name: str):
    """Uninstall a plugin by name; 404 when it is not installed."""
    if not _plugin_mgr.uninstall(name):
        raise HTTPException(status_code=404, detail=f"Plugin not found: {name}")
    return {"uninstalled": True, "name": name}
+
+
+# ============================================================================
+# Skills Endpoints (Phase 2)
+# ============================================================================
+
@app.get("/api/skills")
async def api_list_skills():
    """Return every skill the skill manager currently knows about."""
    return {"skills": _skill_mgr.list_skills()}
+
+
@app.post("/api/skills/invoke")
async def api_invoke_skill(payload: dict):
    """Render a skill's prompt by name with the given context dict.

    400 on a missing name, 404 when the skill does not exist.
    """
    name = payload.get("name", "")
    if not name:
        raise HTTPException(status_code=400, detail="name is required")
    prompt = _skill_mgr.invoke(name, payload.get("context", {}))
    if prompt is None:
        raise HTTPException(status_code=404, detail=f"Skill not found: {name}")
    return {"skill": name, "rendered_prompt": prompt}
+
+
@app.post("/api/skills/reload")
async def api_reload_skills():
    """Re-scan all skill sources and report how many were loaded."""
    return {"reloaded": True, "count": _skill_mgr.load_all()}
+
+
+# ============================================================================
+# Vision Endpoints (Phase 2)
+# ============================================================================
+
@app.post("/api/vision/analyze")
async def api_vision_analyze(payload: dict):
    """Analyze an image file with a text prompt via the chosen provider.

    400 on a missing image_path or any analyzer failure.
    """
    from .vision import VisionAnalyzer
    image_path = payload.get("image_path", "")
    if not image_path:
        raise HTTPException(status_code=400, detail="image_path is required")
    prompt = payload.get("prompt", "Describe this image.")
    provider = payload.get("provider", "openai")
    try:
        analyzer = VisionAnalyzer(provider=provider)
        result = await analyzer.analyze_image(Path(image_path), prompt)
        return result.to_dict()
    except Exception as e:
        raise HTTPException(status_code=400, detail=str(e))
+
+
+# ============================================================================
+# Model Router Endpoints (Phase 2)
+# ============================================================================
+
@app.post("/api/model-router/select")
async def api_model_select(payload: dict):
    """Preview which model the router would pick for a request."""
    request = payload.get("request", "")
    if not request:
        raise HTTPException(status_code=400, detail="request is required")
    sel = _model_router.select(request, payload.get("category"))
    return {
        "model": sel.model,
        "tier": sel.tier.value,
        "complexity": sel.complexity.value,
        "provider": sel.provider,
        "reason": sel.reason,
    }
+
+
@app.get("/api/model-router/usage")
async def api_model_usage():
    """Return the model-router usage summary and budget status."""
    return _model_router.get_usage_summary()
+
+
+# ============================================================================
+# Agent Teams Endpoints (Phase 3)
+# ============================================================================
+
@app.post("/api/agent-teams/plan")
async def api_team_plan(payload: dict):
    """Split a complex task into parallel subtasks (planning only)."""
    task = payload.get("task", "")
    if not task:
        raise HTTPException(status_code=400, detail="task is required")
    subtasks = _agent_team.plan_and_split(task)
    return {
        "subtasks": [
            {"id": st.id, "title": st.title, "description": st.description}
            for st in subtasks
        ]
    }
+
+
@app.post("/api/agent-teams/execute")
async def api_team_execute(payload: dict):
    """Plan a task, run its subtasks in parallel, and return the merged result."""
    task = payload.get("task", "")
    if not task:
        raise HTTPException(status_code=400, detail="task is required")
    subtasks = _agent_team.plan_and_split(task)
    merged = await _agent_team.execute_parallel(subtasks)
    return merged.to_dict()
+
+
+# ============================================================================
+# Learning Engine Endpoints (Phase 3)
+# ============================================================================
+
@app.post("/api/learning/evaluate")
async def api_learning_evaluate(payload: dict):
    """Score an action's outcome so the learning engine can improve."""
    action = payload.get("action", "")
    if not action:
        raise HTTPException(status_code=400, detail="action is required")
    ev = _learning_engine.evaluate_outcome(
        action, payload.get("outcome", {}), repo=payload.get("repo", "")
    )
    return {
        "action": ev.action,
        "success": ev.success,
        "score": ev.score,
        "feedback": ev.feedback,
    }
+
+
@app.get("/api/learning/insights/{owner}/{repo}")
async def api_learning_insights(owner: str = FPath(...), repo: str = FPath(...)):
    """Return learned insights for ``owner/repo``."""
    full_name = f"{owner}/{repo}"
    insights = _learning_engine.get_repo_insights(full_name)
    return {
        "repo": full_name,
        "patterns": insights.patterns,
        "preferred_style": insights.preferred_style,
        "success_rate": insights.success_rate,
        "total_evaluations": insights.total_evaluations,
    }
+
+
@app.post("/api/learning/style")
async def api_learning_set_style(payload: dict):
    """Persist a preferred coding style for a repository."""
    repo = payload.get("repo", "")
    if not repo:
        raise HTTPException(status_code=400, detail="repo is required")
    style = payload.get("style", {})
    _learning_engine.set_preferred_style(repo, style)
    return {"repo": repo, "style": style}
+
+
+# ============================================================================
+# Cross-Repo Intelligence Endpoints (Phase 3)
+# ============================================================================
+
@app.post("/api/cross-repo/dependencies")
async def api_cross_repo_dependencies(payload: dict):
    """Build a dependency graph from a filename -> content mapping."""
    files = payload.get("files", {})
    if not files:
        raise HTTPException(status_code=400, detail="files dict is required (filename -> content)")
    return _cross_repo.analyze_dependencies_from_files(files).to_dict()
+
+
@app.post("/api/cross-repo/impact")
async def api_cross_repo_impact(payload: dict):
    """Estimate the impact of updating a package across the given files."""
    package_name = payload.get("package", "")
    if not package_name:
        raise HTTPException(status_code=400, detail="package is required")
    files = payload.get("files", {})
    graph = _cross_repo.analyze_dependencies_from_files(files)
    report = _cross_repo.impact_analysis(graph, package_name, payload.get("new_version"))
    return report.to_dict()
+
+
+# ============================================================================
+# Predictions Endpoints (Phase 3)
+# ============================================================================
+
@app.post("/api/predictions/suggest")
async def api_predictions_suggest(payload: dict):
    """Return proactive suggestions for the supplied context string."""
    context = payload.get("context", "")
    if not context:
        raise HTTPException(status_code=400, detail="context is required")
    return {"suggestions": [s.to_dict() for s in _predictive_engine.predict(context)]}
+
+
@app.get("/api/predictions/rules")
async def api_predictions_rules():
    """List every rule the predictive engine evaluates."""
    return {"rules": _predictive_engine.list_rules()}
+
+
+# ============================================================================
+# Security Scanner Endpoints (Phase 3)
+# ============================================================================
+
@app.post("/api/security/scan-file")
async def api_security_scan_file(payload: dict):
    """Scan one file for security findings."""
    file_path = payload.get("file_path", "")
    if not file_path:
        raise HTTPException(status_code=400, detail="file_path is required")
    findings = _security_scanner.scan_file(file_path)
    return {"findings": [f.to_dict() for f in findings], "count": len(findings)}
+
+
@app.post("/api/security/scan-directory")
async def api_security_scan_directory(payload: dict):
    """Recursively scan a directory tree for security findings."""
    directory = payload.get("directory", "")
    if not directory:
        raise HTTPException(status_code=400, detail="directory is required")
    return _security_scanner.scan_directory(directory).to_dict()
+
+
@app.post("/api/security/scan-diff")
async def api_security_scan_diff(payload: dict):
    """Scan the added lines of a git diff for security findings."""
    diff_text = payload.get("diff", "")
    if not diff_text:
        raise HTTPException(status_code=400, detail="diff is required")
    findings = _security_scanner.scan_diff(diff_text)
    return {"findings": [f.to_dict() for f in findings], "count": len(findings)}
+
+
+# ============================================================================
+# Natural Language Database Endpoints (Phase 3)
+# ============================================================================
+
@app.post("/api/nl-database/translate")
async def api_nl_translate(payload: dict):
    """Translate a natural-language question into SQL.

    Optional "tables" entries ({name, columns, primary_key}) seed the
    engine's schema; "dialect" defaults to postgresql. Also validates
    the generated SQL and reports any validation error.
    """
    question = payload.get("question", "")
    if not question:
        raise HTTPException(status_code=400, detail="question is required")
    engine = NLQueryEngine(dialect=QueryDialect(payload.get("dialect", "postgresql")))
    for table in payload.get("tables", []):
        engine.add_table(TableSchema(
            name=table["name"],
            columns=table.get("columns", []),
            primary_key=table.get("primary_key"),
        ))
    sql = engine.translate(question)
    error = engine.validate_query(sql)
    return {"question": question, "sql": sql, "valid": error is None, "error": error}
+
+
@app.post("/api/nl-database/explain")
async def api_nl_explain(payload: dict):
    """Describe in plain English what a SQL query does."""
    sql = payload.get("sql", "")
    if not sql:
        raise HTTPException(status_code=400, detail="sql is required")
    return {"sql": sql, "explanation": _nl_engine.explain(sql)}
+
+
+# ============================================================================
+# Branch Listing Endpoint (Claude-Code-on-Web Parity)
+# ============================================================================
+
class BranchInfo(BaseModel):
    """A single branch entry in the branch-listing response."""
    name: str
    # True when this is the repository's default branch.
    is_default: bool = False
    # GitHub's branch-protection flag.
    protected: bool = False
    # SHA of the branch head commit, when GitHub supplies one.
    commit_sha: Optional[str] = None
+
+
class BranchListResponse(BaseModel):
    """Response envelope for GET /api/repos/{owner}/{repo}/branches."""
    repository: str  # "owner/repo"
    default_branch: str
    page: int  # page number the listing started from
    per_page: int
    has_more: bool  # whether further branches may exist beyond this response
    branches: List[BranchInfo]
+
+
@app.get("/api/repos/{owner}/{repo}/branches", response_model=BranchListResponse)
async def api_list_branches(
    owner: str = FPath(...),
    repo: str = FPath(...),
    page: int = Query(1, ge=1),
    per_page: int = Query(100, ge=1, le=100),
    query: Optional[str] = Query(None, description="Substring filter"),
    authorization: Optional[str] = Header(None),
):
    """List branches for a repository with optional search filtering.

    Fetches up to 10 GitHub API pages starting at ``page``, filters by
    ``query`` (case-insensitive substring), and sorts the default branch
    first, then alphabetically.

    Raises:
        HTTPException(401): no GitHub token available.
        HTTPException(4xx/5xx): GitHub rejected the repo lookup or listing.
    """
    import httpx as _httpx

    token = get_github_token(authorization)
    if not token:
        raise HTTPException(status_code=401, detail="GitHub token required")

    headers = {
        "Authorization": f"Bearer {token}",
        "Accept": "application/vnd.github+json",
        "User-Agent": "gitpilot",
    }
    timeout = _httpx.Timeout(connect=10.0, read=30.0, write=30.0, pool=10.0)

    # Safety cap: fetch at most 10 pages (1000 branches) per request.
    max_pages = 10

    async with _httpx.AsyncClient(
        base_url="https://api.github.com", headers=headers, timeout=timeout
    ) as client:
        # Repo lookup gives us the default branch name.
        repo_resp = await client.get(f"/repos/{owner}/{repo}")
        if repo_resp.status_code >= 400:
            logging.warning(
                "branches: repo lookup failed %s/%s → %s %s",
                owner, repo, repo_resp.status_code, repo_resp.text[:200],
            )
            raise HTTPException(
                status_code=repo_resp.status_code,
                detail=f"Cannot access repository: {repo_resp.status_code}",
            )

        repo_data = repo_resp.json()
        default_branch_name = repo_data.get("default_branch", "main")

        # Fetch branch pages until GitHub reports no next page or the cap hits.
        all_raw = []
        current_page = page
        truncated = False  # True when we stop because of the page cap
        while True:
            branch_resp = await client.get(
                f"/repos/{owner}/{repo}/branches",
                params={"page": current_page, "per_page": per_page},
            )
            if branch_resp.status_code >= 400:
                logging.warning(
                    "branches: list failed %s/%s page=%s → %s %s",
                    owner, repo, current_page, branch_resp.status_code, branch_resp.text[:200],
                )
                raise HTTPException(
                    status_code=branch_resp.status_code,
                    detail=f"Failed to list branches: {branch_resp.status_code}",
                )

            # BUGFIX: parse the JSON body once instead of calling .json() twice.
            body = branch_resp.json()
            page_data = body if isinstance(body, list) else []
            all_raw.extend(page_data)

            # Continue only while GitHub advertises a next page and the page was full.
            link_header = branch_resp.headers.get("Link", "") or ""
            if 'rel="next"' not in link_header or len(page_data) < per_page:
                break
            current_page += 1
            if current_page - page >= max_pages:
                # More pages exist but we stop here; reflected in has_more below.
                truncated = True
                break

    q = (query or "").strip().lower()

    branches = []
    for b in all_raw:
        name = (b.get("name") or "").strip()
        if not name:
            continue
        if q and q not in name.lower():
            continue
        branches.append(BranchInfo(
            name=name,
            is_default=(name == default_branch_name),
            protected=bool(b.get("protected", False)),
            commit_sha=(b.get("commit") or {}).get("sha"),
        ))

    # Sort: default branch first, then alphabetical.
    branches.sort(key=lambda x: (0 if x.is_default else 1, x.name.lower()))

    return BranchListResponse(
        repository=f"{owner}/{repo}",
        default_branch=default_branch_name,
        page=page,
        per_page=per_page,
        # BUGFIX: was hard-coded False even when the 10-page cap truncated results.
        has_more=truncated,
        branches=branches,
    )
+
+
+# ============================================================================
+# Environment Configuration Endpoints (Claude-Code-on-Web Parity)
+# ============================================================================
+
import json as _json

# On-disk store for environment configs: one JSON file per environment.
# NOTE(review): this uses Path.home()/.gitpilot while the container sets
# GITPILOT_CONFIG_DIR — confirm whether the two should share a root.
_ENV_ROOT = Path.home() / ".gitpilot" / "environments"
+
+
class EnvironmentConfig(BaseModel):
    """A named execution-environment profile: network policy plus env vars."""

    # Stable identifier; filled in by the create endpoint when omitted.
    id: Optional[str] = None
    name: str = "Default"
    # Intended values: "limited" | "full" | "none" — free-form string,
    # not validated here.
    network_access: str = Field("limited", description="limited | full | none")
    # Extra environment variables to inject, name -> value.
    env_vars: dict = Field(default_factory=dict)
+
+
class EnvironmentListResponse(BaseModel):
    """Response envelope for GET /api/environments."""

    environments: List[EnvironmentConfig]
+
+
@app.get("/api/environments", response_model=EnvironmentListResponse)
async def api_list_environments():
    """Return every stored environment config, synthesizing a default
    entry when the store is empty so clients always get at least one."""
    _ENV_ROOT.mkdir(parents=True, exist_ok=True)
    found = []
    for cfg_path in sorted(_ENV_ROOT.glob("*.json")):
        try:
            found.append(EnvironmentConfig(**_json.loads(cfg_path.read_text())))
        except Exception:
            # Best-effort listing: skip unreadable/invalid files.
            continue
    if not found:
        found.append(EnvironmentConfig(id="default", name="Default", network_access="limited"))
    return EnvironmentListResponse(environments=found)
+
+
@app.post("/api/environments")
async def api_create_environment(config: EnvironmentConfig):
    """Persist a new environment config, minting a short hex id when the
    client did not supply one, and echo the stored document back."""
    import uuid
    _ENV_ROOT.mkdir(parents=True, exist_ok=True)
    if not config.id:
        config.id = uuid.uuid4().hex[:12]
    target = _ENV_ROOT / f"{config.id}.json"
    target.write_text(_json.dumps(config.model_dump(), indent=2))
    return config.model_dump()
+
+
@app.put("/api/environments/{env_id}")
async def api_update_environment(env_id: str, config: EnvironmentConfig):
    """Write (or overwrite) the config stored under *env_id*; the path
    parameter always wins over any id carried in the request body."""
    _ENV_ROOT.mkdir(parents=True, exist_ok=True)
    config.id = env_id
    serialized = config.model_dump()
    (_ENV_ROOT / f"{env_id}.json").write_text(_json.dumps(serialized, indent=2))
    return serialized
+
+
@app.delete("/api/environments/{env_id}")
async def api_delete_environment(env_id: str):
    """Remove a stored environment config; 404 when it does not exist."""
    target = _ENV_ROOT / f"{env_id}.json"
    if not target.exists():
        raise HTTPException(status_code=404, detail="Environment not found")
    target.unlink()
    return {"deleted": True}
+
+
+# ============================================================================
+# Session Messages + Diff Endpoints (Claude-Code-on-Web Parity)
+# ============================================================================
+
@app.post("/api/sessions/{session_id}/message")
async def api_add_session_message(session_id: str, payload: dict):
    """Append one message to a session's conversation history.

    Body keys: "role" (default "user"), "content" (default ""), and an
    optional "metadata" dict forwarded as keyword args.
    """
    try:
        session = _session_mgr.load(session_id)
    except FileNotFoundError:
        raise HTTPException(status_code=404, detail="Session not found")
    session.add_message(
        payload.get("role", "user"),
        payload.get("content", ""),
        **payload.get("metadata", {}),
    )
    _session_mgr.save(session)
    return {"message_count": len(session.messages)}
+
+
@app.get("/api/sessions/{session_id}/messages")
async def api_get_session_messages(session_id: str):
    """Return the full ordered message history for a session."""
    try:
        session = _session_mgr.load(session_id)
    except FileNotFoundError:
        raise HTTPException(status_code=404, detail="Session not found")

    def _as_dict(msg):
        # Flatten a stored message into a plain JSON-friendly dict.
        return {
            "role": msg.role,
            "content": msg.content,
            "timestamp": msg.timestamp,
            "metadata": msg.metadata,
        }

    return {
        "session_id": session.id,
        "messages": [_as_dict(m) for m in session.messages],
    }
+
+
@app.get("/api/sessions/{session_id}/diff")
async def api_get_session_diff(session_id: str):
    """Get diff stats for a session (placeholder for sandbox integration)."""
    try:
        session = _session_mgr.load(session_id)
    except FileNotFoundError:
        raise HTTPException(status_code=404, detail="Session not found")
    # Zeroed stats stand in until a real diff has been recorded.
    empty_diff = {
        "files_changed": 0,
        "additions": 0,
        "deletions": 0,
        "files": [],
    }
    return {"session_id": session.id, "diff": session.metadata.get("diff", empty_diff)}
+
+
@app.post("/api/sessions/{session_id}/status")
async def api_update_session_status(session_id: str, payload: dict):
    """Update session status.

    Accepted values (matching the validation below): "active", "paused",
    "completed", "failed", "waiting". Defaults to "active" when the body
    omits "status".
    """
    try:
        session = _session_mgr.load(session_id)
    except FileNotFoundError:
        raise HTTPException(status_code=404, detail="Session not found")
    new_status = payload.get("status", "active")
    if new_status not in ("active", "paused", "completed", "failed", "waiting"):
        raise HTTPException(status_code=400, detail="Invalid status")
    session.status = new_status
    _session_mgr.save(session)
    return {"session_id": session.id, "status": session.status}
+
+
+# ============================================================================
+# WebSocket Streaming Endpoint (Claude-Code-on-Web Parity)
+# ============================================================================
+
+from fastapi import WebSocket, WebSocketDisconnect
+
+
@app.websocket("/ws/sessions/{session_id}")
async def session_websocket(websocket: WebSocket, session_id: str):
    """
    Real-time bidirectional communication for a coding session.

    Server events:
        { type: "agent_message", content: "..." }
        { type: "tool_use", tool: "bash", input: "npm test" }
        { type: "tool_result", tool: "bash", output: "All tests passed" }
        { type: "diff_update", stats: { additions: N, deletions: N, files: N } }
        { type: "status_change", status: "completed" }
        { type: "error", message: "..." }

    Client events:
        { type: "user_message", content: "..." }
        { type: "cancel" }
    """
    await websocket.accept()

    # Verify session exists — unknown ids get an error event, then close.
    try:
        session = _session_mgr.load(session_id)
    except FileNotFoundError:
        await websocket.send_json({"type": "error", "message": "Session not found"})
        await websocket.close()
        return

    # Send session history on connect
    await websocket.send_json({
        "type": "session_restored",
        "session_id": session.id,
        "status": session.status,
        "message_count": len(session.messages),
    })

    try:
        # Event loop: block on the next client JSON frame until disconnect.
        while True:
            data = await websocket.receive_json()
            event_type = data.get("type", "")

            if event_type == "user_message":
                # Persist the user's message before dispatching to the agent
                # so history survives a crash mid-turn.
                content = data.get("content", "")
                session.add_message("user", content)
                _session_mgr.save(session)

                # Acknowledge receipt
                await websocket.send_json({
                    "type": "message_received",
                    "message_index": len(session.messages) - 1,
                })

                # Stream agent response (integration point for agentic.py)
                await websocket.send_json({
                    "type": "status_change",
                    "status": "active",
                })

                # Agent processing hook — when the agent orchestrator is wired,
                # replace this with actual streaming from agentic.py
                try:
                    repo_full = session.repo_full_name or ""
                    parts = repo_full.split("/", 1)
                    # Only dispatch when the session is bound to owner/repo
                    # and the message is non-empty.
                    if len(parts) == 2 and content.strip():
                        # Use canonical dispatcher signature
                        result = await dispatch_request(
                            user_request=content,
                            repo_full_name=f"{parts[0]}/{parts[1]}",
                            branch_name=session.branch,
                        )
                        # Best-effort extraction of a displayable answer from
                        # whichever shape the dispatcher returned.
                        answer = ""
                        if isinstance(result, dict):
                            answer = (
                                result.get("result")
                                or result.get("answer")
                                or result.get("message")
                                or result.get("summary")
                                or (result.get("plan", {}) or {}).get("summary")
                                or str(result)
                            )
                        else:
                            answer = str(result)

                        # Stream the response
                        await websocket.send_json({
                            "type": "agent_message",
                            "content": answer,
                        })

                        session.add_message("assistant", answer)
                        _session_mgr.save(session)
                    else:
                        await websocket.send_json({
                            "type": "agent_message",
                            "content": "Session is not connected to a repository.",
                        })
                except Exception as agent_err:
                    # Agent failures are reported to the client but do not
                    # tear down the socket.
                    logger.error(f"Agent error in WS session {session_id}: {agent_err}")
                    await websocket.send_json({
                        "type": "error",
                        "message": str(agent_err),
                    })

                # Turn finished (success or error): back to waiting.
                await websocket.send_json({
                    "type": "status_change",
                    "status": "waiting",
                })

            elif event_type == "cancel":
                await websocket.send_json({
                    "type": "status_change",
                    "status": "waiting",
                })

            elif event_type == "ping":
                await websocket.send_json({"type": "pong"})

    except WebSocketDisconnect:
        logger.info(f"WebSocket disconnected for session {session_id}")
    except Exception as e:
        logger.error(f"WebSocket error for session {session_id}: {e}")
        # Best-effort error report; the socket may already be gone.
        try:
            await websocket.send_json({"type": "error", "message": str(e)})
        except Exception:
            pass
+
+
+# ============================================================================
+# Static Files & Frontend Serving (SPA Support)
+# ============================================================================
+
# Built frontend lives next to this module under ./web — presumably copied
# there from the Vite build output (TODO confirm against the Dockerfile).
STATIC_DIR = Path(__file__).resolve().parent / "web"
ASSETS_DIR = STATIC_DIR / "assets"

# Mount hashed bundle assets at /assets so the SPA's absolute asset URLs resolve.
if ASSETS_DIR.exists():
    app.mount("/assets", StaticFiles(directory=ASSETS_DIR), name="assets")

# Expose the whole build output at /static as well (favicons, manifest, ...).
if STATIC_DIR.exists():
    app.mount("/static", StaticFiles(directory=STATIC_DIR), name="static")
+
+
@app.get("/api/health")
async def health_check():
    """Liveness probe: reports healthy whenever the process is reachable."""
    payload = {"status": "healthy", "service": "gitpilot-backend"}
    return payload
+
+
@app.get("/healthz")
async def healthz():
    """Same liveness probe under the Render/Kubernetes-conventional path."""
    payload = {"status": "healthy", "service": "gitpilot-backend"}
    return payload
+
+
@app.get("/", include_in_schema=False)
async def index():
    """Serve the built React index.html, or a JSON 500 when the build is missing."""
    index_file = STATIC_DIR / "index.html"
    if not index_file.exists():
        return JSONResponse(
            {"message": "GitPilot UI not built. The static files directory is missing."},
            status_code=500,
        )
    return FileResponse(index_file)
+
+
@app.get("/{full_path:path}", include_in_schema=False)
async def catch_all_spa_routes(full_path: str):
    """
    Catch-all route to serve index.html for frontend routing.

    API paths are excluded so genuine API 404s are returned as JSON rather
    than the SPA shell. Both the bare "api" path and "api/..." are treated
    as API (previously "api" with no trailing slash fell through to the SPA).
    """
    if full_path == "api" or full_path.startswith("api/"):
        return JSONResponse({"detail": "Not Found"}, status_code=404)

    index_file = STATIC_DIR / "index.html"
    if index_file.exists():
        return FileResponse(index_file)

    return JSONResponse(
        {"message": "GitPilot UI not built. The static files directory is missing."},
        status_code=500,
    )
diff --git a/gitpilot/_deprecation.py b/gitpilot/_deprecation.py
new file mode 100644
index 0000000000000000000000000000000000000000..c21b0d3ffbfcb728fae3ccd75ca44fd3c38d1fb9
--- /dev/null
+++ b/gitpilot/_deprecation.py
@@ -0,0 +1,119 @@
+# gitpilot/_deprecation.py
+"""Deprecation helpers used by :mod:`gitpilot.public_api` — Batch P4-C.
+
+This module is intentionally internal (leading underscore) and tiny.
+It provides one decorator and one alias factory so that every
+deprecated symbol on the stable surface behaves the same way:
+
+* a single :class:`DeprecationWarning` is emitted at the first call
+ through that symbol (per process, to avoid log spam)
+* the warning text follows a fixed template:
  ``"<name> is deprecated; use <replacement> instead (will be removed in vX.Y)"``
+* original behaviour is preserved — no breaking change to callers
+
+Use it from the public-API package like this::
+
+ from gitpilot._deprecation import deprecated_alias
+
+ parse_mentions = deprecated_alias(
+ "parse_mentions", expand_mentions,
+ replacement="gitpilot.public_api.expand_mentions",
+ removed_in="2.0",
+ )
+
+The corresponding entry in :doc:`API_STABILITY.md` documents the
+removal milestone.
+"""
+from __future__ import annotations
+
+import functools
+import threading
+import warnings
+from typing import Any, Callable, TypeVar
+
+F = TypeVar("F", bound=Callable[..., Any])
+
+
+_WARNED: set[str] = set()
+_LOCK = threading.RLock()
+
+
+def _emit_once(key: str, message: str, stacklevel: int = 3) -> None:
+ """Emit ``DeprecationWarning(message)`` at most once per key."""
+ with _LOCK:
+ if key in _WARNED:
+ return
+ _WARNED.add(key)
+ warnings.warn(message, DeprecationWarning, stacklevel=stacklevel)
+
+
def deprecated(
    *,
    replacement: str,
    removed_in: str,
    legacy_name: str | None = None,
) -> Callable[[F], F]:
    """Decorator factory: warn with :class:`DeprecationWarning` on first call.

    Parameters
    ----------
    replacement
        Dotted path the caller should use instead, e.g.
        ``"gitpilot.public_api.run_wizard"``.
    removed_in
        Version that will drop the symbol, e.g. ``"2.0"``; included in
        the warning text so users can plan the migration.
    legacy_name
        Display name for the deprecated symbol; defaults to the wrapped
        function's ``__qualname__``.
    """

    def _decorate(func: F) -> F:
        shown_name = legacy_name if legacy_name is not None else func.__qualname__
        # Message is fixed per symbol, so build it once up front.
        warning_text = (
            f"{shown_name} is deprecated; use {replacement} instead "
            f"(will be removed in v{removed_in})"
        )

        @functools.wraps(func)
        def _shim(*args: Any, **kwargs: Any) -> Any:
            _emit_once(key=f"call:{shown_name}", message=warning_text)
            return func(*args, **kwargs)

        # Surface the deprecation metadata for tooling / docs generation.
        _shim.__gitpilot_deprecated__ = {  # type: ignore[attr-defined]
            "legacy_name": shown_name,
            "replacement": replacement,
            "removed_in": removed_in,
        }
        return _shim  # type: ignore[return-value]

    return _decorate
+
+
def deprecated_alias(
    legacy_name: str,
    target: F,
    *,
    replacement: str,
    removed_in: str,
) -> F:
    """Build a deprecated alias that delegates to ``target``.

    Use this when two names exist for the same callable for backwards
    compatibility: the alias warns on use, while the canonical name
    stays silent.
    """
    decorate = deprecated(
        replacement=replacement,
        removed_in=removed_in,
        legacy_name=legacy_name,
    )
    return decorate(target)
+
+
def reset_deprecation_log_for_tests() -> None:
    """Drop every recorded emit-once key so warnings fire again.

    Intended for test suites only; takes the module lock so it is safe
    to call while other threads are emitting.
    """
    with _LOCK:
        _WARNED.clear()
diff --git a/gitpilot/a2a_adapter.py b/gitpilot/a2a_adapter.py
new file mode 100644
index 0000000000000000000000000000000000000000..46ca4659db601f09477b71755cca8309c4f678b0
--- /dev/null
+++ b/gitpilot/a2a_adapter.py
@@ -0,0 +1,560 @@
+"""Optional A2A adapter for GitPilot (MCP ContextForge compatible).
+
+This module is feature-flagged. Nothing changes in GitPilot unless the main app
+mounts this router when GITPILOT_ENABLE_A2A=true.
+
+Supported protocols
+- JSON-RPC 2.0 (preferred)
+- ContextForge custom A2A envelope (fallback)
+
+Security model (recommended)
+- Gateway injects a shared secret:
    X-A2A-Secret: <secret>
  or
    Authorization: Bearer <secret>
+
+- GitHub token (if needed) should be provided via:
    X-Github-Token: <github-token>
+ (avoid passing tokens in JSON bodies to reduce leak risk in logs)
+
+Environment
+- GITPILOT_A2A_REQUIRE_AUTH=true
- GITPILOT_A2A_SHARED_SECRET=<secret>
+- GITPILOT_A2A_MAX_BODY_MB=2
+- GITPILOT_A2A_ALLOW_GITHUB_TOKEN_IN_PARAMS=false
+"""
+
+from __future__ import annotations
+
+import os
+import time
+import uuid
+from typing import Any, Dict, Optional, Tuple
+
+from fastapi import APIRouter, Header, HTTPException, Request
+from fastapi.responses import JSONResponse
+
+from .agentic import PlanResult, execute_plan, generate_plan, dispatch_request
+from .github_api import get_file, get_repo_tree, github_request, put_file
+from . import github_issues
+from . import github_pulls
+from . import github_search
+
+router = APIRouter(tags=["a2a"])
+
+
+def _env_bool(name: str, default: bool) -> bool:
+ raw = os.getenv(name)
+ if raw is None:
+ return default
+ return raw.strip().lower() in {"1", "true", "yes", "y", "on"}
+
+
+def _env_int(name: str, default: int) -> int:
+ raw = os.getenv(name)
+ if raw is None:
+ return default
+ try:
+ return int(raw.strip())
+ except Exception:
+ return default
+
+
+def _extract_bearer(value: Optional[str]) -> Optional[str]:
+ if not value:
+ return None
+ if value.startswith("Bearer "):
+ return value[7:]
+ if value.startswith("token "):
+ return value[6:]
+ return value
+
+
+def _get_trace_id(x_request_id: Optional[str]) -> str:
+ return (x_request_id or "").strip() or str(uuid.uuid4())
+
+
def _require_gateway_secret(authorization: Optional[str], x_a2a_secret: Optional[str]) -> None:
    """Enforce the gateway shared-secret gate for A2A endpoints.

    The credential may arrive as ``Authorization: Bearer <secret>`` or as
    the ``X-A2A-Secret`` header. No-op when GITPILOT_A2A_REQUIRE_AUTH is
    disabled.

    Raises
    ------
    HTTPException
        500 when auth is required but GITPILOT_A2A_SHARED_SECRET is not
        configured; 401 when the caller's credential is missing or wrong.
    """
    import hmac  # local import: only needed on this auth path

    require_auth = _env_bool("GITPILOT_A2A_REQUIRE_AUTH", True)
    if not require_auth:
        return

    expected = os.getenv("GITPILOT_A2A_SHARED_SECRET", "").strip()
    if not expected:
        raise HTTPException(
            status_code=500,
            detail="A2A is enabled but GITPILOT_A2A_SHARED_SECRET is not set",
        )

    candidate = _extract_bearer(authorization) or (x_a2a_secret or "").strip()
    # compare_digest is constant-time: a plain != comparison can leak how
    # much of the secret matched via response timing.
    if not candidate or not hmac.compare_digest(candidate, expected):
        raise HTTPException(status_code=401, detail="Unauthorized")
+
+
def _split_full_name(repo_full_name: str) -> Tuple[str, str]:
    """Split "owner/repo" into its two parts, requiring both to be non-empty."""
    def _reject() -> None:
        raise HTTPException(status_code=400, detail="repo_full_name must be 'owner/repo'")

    if not repo_full_name or "/" not in repo_full_name:
        _reject()
    owner, _, repo = repo_full_name.partition("/")
    owner = owner.strip()
    repo = repo.strip()
    if not (owner and repo):
        _reject()
    return owner, repo
+
+
+def _jsonrpc_error(id_value: Any, code: int, message: str, data: Any = None) -> Dict[str, Any]:
+ err: Dict[str, Any] = {"code": code, "message": message}
+ if data is not None:
+ err["data"] = data
+ return {"jsonrpc": "2.0", "error": err, "id": id_value}
+
+
+def _jsonrpc_result(id_value: Any, result: Any) -> Dict[str, Any]:
+ return {"jsonrpc": "2.0", "result": result, "id": id_value}
+
+
async def _dispatch(method: str, params: Dict[str, Any], github_token: Optional[str]) -> Any:
    """Route a JSON-RPC method name to the matching GitPilot operation.

    Parameters
    ----------
    method:
        Dotted method name, e.g. ``"repo.read"`` or ``"issue.create"``.
    params:
        JSON-RPC params object; required keys vary per method and missing
        ones raise ``HTTPException(400)`` here.
    github_token:
        Optional GitHub token forwarded to every GitHub-backed call.

    Raises
    ------
    HTTPException
        400 for missing/invalid params, 404 for an unknown method; GitHub
        errors propagate from the underlying helpers.
    """
    # --- v1 methods: repo access + planning -------------------------------
    if method == "repo.connect":
        repo_full_name = params.get("repo_full_name")
        owner, repo = _split_full_name(str(repo_full_name))
        info = await github_request(f"/repos/{owner}/{repo}", token=github_token)
        return {
            "repo": {
                "id": info.get("id"),
                "full_name": info.get("full_name"),
                "private": info.get("private"),
                "html_url": info.get("html_url"),
            },
            "default_branch": info.get("default_branch"),
            "permissions": info.get("permissions"),
        }

    if method == "repo.tree":
        repo_full_name = params.get("repo_full_name")
        ref = (params.get("ref") or "").strip() or "HEAD"
        owner, repo = _split_full_name(str(repo_full_name))
        tree = await get_repo_tree(owner, repo, token=github_token, ref=ref)
        return {"entries": tree, "ref": ref}

    if method == "repo.read":
        repo_full_name = params.get("repo_full_name")
        path = params.get("path")
        if not path:
            raise HTTPException(status_code=400, detail="Missing required param: path")
        owner, repo = _split_full_name(str(repo_full_name))
        # NOTE: current get_file() reads from default branch/ref in this repo.
        # You can extend github_api.get_file to accept ref and pass it here later.
        content = await get_file(owner, repo, str(path), token=github_token)
        return {"path": str(path), "content": content, "encoding": "utf-8"}

    if method == "repo.write":
        repo_full_name = params.get("repo_full_name")
        path = params.get("path")
        content = params.get("content")
        message = params.get("message") or "Update via GitPilot A2A"
        # Accept both "branch" and "branch_name" spellings from clients.
        branch = params.get("branch") or params.get("branch_name")
        if not path:
            raise HTTPException(status_code=400, detail="Missing required param: path")
        if content is None:
            raise HTTPException(status_code=400, detail="Missing required param: content")
        owner, repo = _split_full_name(str(repo_full_name))
        result = await put_file(
            owner,
            repo,
            str(path),
            str(content),
            str(message),
            token=github_token,
            branch=branch,
        )
        return result

    if method == "plan.generate":
        repo_full_name = params.get("repo_full_name")
        goal = params.get("goal")
        branch_name = params.get("branch") or params.get("branch_name")
        if not goal:
            raise HTTPException(status_code=400, detail="Missing required param: goal")
        if not repo_full_name:
            raise HTTPException(status_code=400, detail="Missing required param: repo_full_name")
        plan = await generate_plan(str(goal), str(repo_full_name), token=github_token, branch_name=branch_name)
        # Serialize pydantic models; pass plain dicts through unchanged.
        return plan.model_dump() if hasattr(plan, "model_dump") else plan

    if method == "plan.execute":
        repo_full_name = params.get("repo_full_name")
        branch_name = params.get("branch") or params.get("branch_name")
        plan_raw = params.get("plan")
        if not repo_full_name:
            raise HTTPException(status_code=400, detail="Missing required param: repo_full_name")
        if plan_raw is None:
            raise HTTPException(status_code=400, detail="Missing required param: plan")
        if isinstance(plan_raw, PlanResult):
            plan_obj = plan_raw
        else:
            try:
                plan_obj = PlanResult.model_validate(plan_raw)  # pydantic v2
            except Exception:
                plan_obj = PlanResult.parse_obj(plan_raw)  # pydantic v1
        result = await execute_plan(plan_obj, str(repo_full_name), token=github_token, branch_name=branch_name)
        return result

    if method == "repo.search":
        query = params.get("query")
        if not query:
            raise HTTPException(status_code=400, detail="Missing required param: query")
        result = await github_request(
            "/search/repositories",
            params={"q": str(query), "per_page": 20},
            token=github_token,
        )
        items = (result or {}).get("items", []) if isinstance(result, dict) else []
        return {
            "repos": [
                {
                    "full_name": i.get("full_name"),
                    "private": i.get("private"),
                    "html_url": i.get("html_url"),
                    "description": i.get("description"),
                    "default_branch": i.get("default_branch"),
                }
                for i in items
            ]
        }

    # --- v2 methods: issues, pulls, search, chat --------------------------

    if method == "issue.list":
        repo_full_name = params.get("repo_full_name")
        owner, repo = _split_full_name(str(repo_full_name))
        issues = await github_issues.list_issues(
            owner, repo, state=params.get("state", "open"),
            labels=params.get("labels"), per_page=params.get("per_page", 30),
            token=github_token,
        )
        return {"issues": issues}

    if method == "issue.get":
        repo_full_name = params.get("repo_full_name")
        issue_number = params.get("issue_number")
        if not issue_number:
            raise HTTPException(status_code=400, detail="Missing required param: issue_number")
        owner, repo = _split_full_name(str(repo_full_name))
        return await github_issues.get_issue(owner, repo, int(issue_number), token=github_token)

    if method == "issue.create":
        repo_full_name = params.get("repo_full_name")
        title = params.get("title")
        if not title:
            raise HTTPException(status_code=400, detail="Missing required param: title")
        owner, repo = _split_full_name(str(repo_full_name))
        return await github_issues.create_issue(
            owner, repo, str(title),
            body=params.get("body"), labels=params.get("labels"),
            assignees=params.get("assignees"), token=github_token,
        )

    if method == "issue.update":
        repo_full_name = params.get("repo_full_name")
        issue_number = params.get("issue_number")
        if not issue_number:
            raise HTTPException(status_code=400, detail="Missing required param: issue_number")
        owner, repo = _split_full_name(str(repo_full_name))
        return await github_issues.update_issue(
            owner, repo, int(issue_number),
            title=params.get("title"), body=params.get("body"),
            state=params.get("state"), labels=params.get("labels"),
            assignees=params.get("assignees"), token=github_token,
        )

    if method == "issue.comment":
        repo_full_name = params.get("repo_full_name")
        issue_number = params.get("issue_number")
        body = params.get("body")
        if not issue_number:
            raise HTTPException(status_code=400, detail="Missing required param: issue_number")
        if not body:
            raise HTTPException(status_code=400, detail="Missing required param: body")
        owner, repo = _split_full_name(str(repo_full_name))
        return await github_issues.add_issue_comment(
            owner, repo, int(issue_number), str(body), token=github_token,
        )

    if method == "pr.list":
        repo_full_name = params.get("repo_full_name")
        owner, repo = _split_full_name(str(repo_full_name))
        return await github_pulls.list_pull_requests(
            owner, repo, state=params.get("state", "open"),
            per_page=params.get("per_page", 30), token=github_token,
        )

    if method == "pr.create":
        repo_full_name = params.get("repo_full_name")
        title = params.get("title")
        head = params.get("head")
        base = params.get("base")
        if not title or not head or not base:
            raise HTTPException(status_code=400, detail="Missing required params: title, head, base")
        owner, repo = _split_full_name(str(repo_full_name))
        return await github_pulls.create_pull_request(
            owner, repo, title=str(title), head=str(head), base=str(base),
            body=params.get("body"), token=github_token,
        )

    if method == "pr.merge":
        repo_full_name = params.get("repo_full_name")
        pull_number = params.get("pull_number")
        if not pull_number:
            raise HTTPException(status_code=400, detail="Missing required param: pull_number")
        owner, repo = _split_full_name(str(repo_full_name))
        return await github_pulls.merge_pull_request(
            owner, repo, int(pull_number),
            merge_method=params.get("merge_method", "merge"),
            token=github_token,
        )

    if method == "search.code":
        query = params.get("query")
        if not query:
            raise HTTPException(status_code=400, detail="Missing required param: query")
        return await github_search.search_code(
            str(query), owner=params.get("owner"), repo=params.get("repo"),
            language=params.get("language"), per_page=params.get("per_page", 20),
            token=github_token,
        )

    if method == "search.issues":
        query = params.get("query")
        if not query:
            raise HTTPException(status_code=400, detail="Missing required param: query")
        return await github_search.search_issues(
            str(query), owner=params.get("owner"), repo=params.get("repo"),
            state=params.get("state"), label=params.get("label"),
            per_page=params.get("per_page", 20), token=github_token,
        )

    if method == "search.users":
        query = params.get("query")
        if not query:
            raise HTTPException(status_code=400, detail="Missing required param: query")
        return await github_search.search_users(
            str(query), type_filter=params.get("type"),
            location=params.get("location"), language=params.get("language"),
            per_page=params.get("per_page", 20), token=github_token,
        )

    if method == "chat.message":
        repo_full_name = params.get("repo_full_name")
        message = params.get("message")
        if not message:
            raise HTTPException(status_code=400, detail="Missing required param: message")
        if not repo_full_name:
            raise HTTPException(status_code=400, detail="Missing required param: repo_full_name")
        return await dispatch_request(
            str(message), str(repo_full_name),
            token=github_token,
            branch_name=params.get("branch") or params.get("branch_name"),
        )

    raise HTTPException(status_code=404, detail=f"Unknown method: {method}")
+
+
@router.get("/a2a/health")
async def a2a_health() -> Dict[str, Any]:
    """Lightweight liveness probe for the A2A adapter."""
    payload: Dict[str, Any] = {"status": "ok", "ts": int(time.time())}
    return payload
+
+
@router.get("/a2a/manifest")
async def a2a_manifest() -> Dict[str, Any]:
    """Describe the adapter for gateways: protocols, auth, and method schemas.

    The schema notation is shorthand: a trailing "?" marks an optional
    param/field (e.g. "string?"); values name JSON types or model names.
    """
    # Best-effort schemas (kept intentionally simple and stable)
    return {
        "name": "gitpilot",
        "a2a_version": "1.0",
        "protocols": ["jsonrpc-2.0", "a2a-envelope-1.0"],
        "auth": {"type": "shared_secret", "header": "X-A2A-Secret"},
        "rate_limits": {"hint": "apply gateway rate limiting; server enforces body size"},
        "methods": {
            "repo.connect": {
                "params": {"repo_full_name": "string"},
                "result": {"repo": "object", "default_branch": "string", "permissions": "object?"},
            },
            "repo.tree": {
                "params": {"repo_full_name": "string", "ref": "string?"},
                "result": {"entries": "array", "ref": "string"},
            },
            "repo.read": {
                "params": {"repo_full_name": "string", "path": "string"},
                "result": {"path": "string", "content": "string"},
            },
            "repo.write": {
                "params": {
                    "repo_full_name": "string",
                    "path": "string",
                    "content": "string",
                    "message": "string?",
                    "branch": "string?",
                },
                "result": "object",
            },
            "plan.generate": {
                "params": {"repo_full_name": "string", "goal": "string", "branch": "string?"},
                "result": "PlanResult",
            },
            "plan.execute": {
                "params": {"repo_full_name": "string", "plan": "PlanResult", "branch": "string?"},
                "result": "object",
            },
            "repo.search": {
                "params": {"query": "string"},
                "result": {"repos": "array"},
            },
            # v2 methods
            "issue.list": {
                "params": {"repo_full_name": "string", "state": "string?", "labels": "string?"},
                "result": {"issues": "array"},
            },
            "issue.get": {
                "params": {"repo_full_name": "string", "issue_number": "integer"},
                "result": "object",
            },
            "issue.create": {
                "params": {"repo_full_name": "string", "title": "string", "body": "string?", "labels": "array?", "assignees": "array?"},
                "result": "object",
            },
            "issue.update": {
                "params": {"repo_full_name": "string", "issue_number": "integer", "title": "string?", "body": "string?", "state": "string?"},
                "result": "object",
            },
            "issue.comment": {
                "params": {"repo_full_name": "string", "issue_number": "integer", "body": "string"},
                "result": "object",
            },
            "pr.list": {
                "params": {"repo_full_name": "string", "state": "string?"},
                "result": "array",
            },
            "pr.create": {
                "params": {"repo_full_name": "string", "title": "string", "head": "string", "base": "string", "body": "string?"},
                "result": "object",
            },
            "pr.merge": {
                "params": {"repo_full_name": "string", "pull_number": "integer", "merge_method": "string?"},
                "result": "object",
            },
            "search.code": {
                "params": {"query": "string", "owner": "string?", "repo": "string?", "language": "string?"},
                "result": {"total_count": "integer", "items": "array"},
            },
            "search.issues": {
                "params": {"query": "string", "owner": "string?", "repo": "string?", "state": "string?"},
                "result": {"total_count": "integer", "items": "array"},
            },
            "search.users": {
                "params": {"query": "string", "type": "string?", "location": "string?"},
                "result": {"total_count": "integer", "items": "array"},
            },
            "chat.message": {
                "params": {"repo_full_name": "string", "message": "string", "branch": "string?"},
                "result": "object",
            },
        },
    }
+
+
async def _handle_invoke(
    request: Request,
    authorization: Optional[str],
    x_a2a_secret: Optional[str],
    x_github_token: Optional[str],
    x_request_id: Optional[str],
) -> JSONResponse:
    """Shared implementation behind /a2a/invoke and /a2a/v1/invoke.

    Validates the gateway shared secret, guards the declared body size,
    then handles either a JSON-RPC 2.0 request (preferred) or the
    ContextForge envelope fallback. JSON-RPC errors are returned with
    HTTP 200 in a JSON-RPC error envelope; envelope-mode and malformed
    payloads surface as HTTP errors. Every response carries X-Trace-Id.
    """
    trace_id = _get_trace_id(x_request_id)
    _require_gateway_secret(authorization=authorization, x_a2a_secret=x_a2a_secret)

    # Body size guard (helps protect from abuse). This checks only the
    # declared Content-Length; an unparsable header falls through.
    max_mb = _env_int("GITPILOT_A2A_MAX_BODY_MB", 2)
    declared_length = request.headers.get("content-length")
    if declared_length:
        try:
            if int(declared_length) > max_mb * 1024 * 1024:
                raise HTTPException(status_code=413, detail="Request entity too large")
        except ValueError:
            pass

    # Fix: a malformed JSON body previously escaped request.json() as an
    # unhandled 500; reject it explicitly as a client error.
    try:
        payload = await request.json()
    except Exception:
        raise HTTPException(status_code=400, detail=f"Invalid JSON body (trace_id={trace_id})")

    # Prefer the dedicated GitHub token header; fall back to Authorization
    # (covers gateways that forward the user's bearer token directly).
    github_token = _extract_bearer(x_github_token) or None
    if not github_token:
        github_token = _extract_bearer(authorization)

    # JSON-RPC mode
    if isinstance(payload, dict) and payload.get("jsonrpc") == "2.0" and "method" in payload:
        rpc_id = payload.get("id")
        method = payload.get("method")
        params = payload.get("params") or {}
        if not isinstance(params, dict):
            return JSONResponse(_jsonrpc_error(rpc_id, -32602, "Invalid params"), status_code=400)

        # Opt-in escape hatch: tokens in params are a leak risk in logs.
        allow_in_params = _env_bool("GITPILOT_A2A_ALLOW_GITHUB_TOKEN_IN_PARAMS", False)
        if allow_in_params and not github_token:
            github_token = _extract_bearer(params.get("github_token"))

        try:
            result = await _dispatch(str(method), params, github_token)
            resp = _jsonrpc_result(rpc_id, result)
            return JSONResponse(resp, headers={"X-Trace-Id": trace_id})
        except HTTPException as e:
            # Per JSON-RPC convention, transport status stays 200; the
            # error detail rides inside the envelope.
            resp = _jsonrpc_error(rpc_id, e.status_code, str(e.detail), {"trace_id": trace_id})
            return JSONResponse(resp, status_code=200, headers={"X-Trace-Id": trace_id})
        except Exception as e:
            resp = _jsonrpc_error(rpc_id, -32000, "Server error", {"trace_id": trace_id, "error": str(e)})
            return JSONResponse(resp, status_code=200, headers={"X-Trace-Id": trace_id})

    # Custom envelope fallback
    if isinstance(payload, dict) and payload.get("interaction_type"):
        interaction_type = str(payload.get("interaction_type"))
        parameters = payload.get("parameters") or {}
        if not isinstance(parameters, dict):
            raise HTTPException(status_code=400, detail="Invalid parameters")

        if interaction_type == "query":
            repo_full_name = parameters.get("repo_full_name")
            goal = parameters.get("query") or parameters.get("goal")
            params = {
                "repo_full_name": repo_full_name,
                "goal": goal,
                "branch": parameters.get("branch") or parameters.get("branch_name"),
            }
            result = await _dispatch("plan.generate", params, github_token)
            return JSONResponse(
                {"response": result, "protocol_version": payload.get("protocol_version", "1.0")},
                headers={"X-Trace-Id": trace_id},
            )

        raise HTTPException(status_code=404, detail=f"Unsupported interaction_type: {interaction_type}")

    raise HTTPException(status_code=400, detail=f"Invalid A2A payload (trace_id={trace_id})")
+
+
@router.post("/a2a/invoke")
async def a2a_invoke(
    request: Request,
    authorization: Optional[str] = Header(None),
    x_a2a_secret: Optional[str] = Header(None, alias="X-A2A-Secret"),
    x_github_token: Optional[str] = Header(None, alias="X-Github-Token"),
    x_request_id: Optional[str] = Header(None, alias="X-Request-Id"),
) -> JSONResponse:
    """Unversioned A2A entry point; delegates to :func:`_handle_invoke`."""
    return await _handle_invoke(request, authorization, x_a2a_secret, x_github_token, x_request_id)
+
+
@router.post("/a2a/v1/invoke")
async def a2a_v1_invoke(
    request: Request,
    authorization: Optional[str] = Header(None),
    x_a2a_secret: Optional[str] = Header(None, alias="X-A2A-Secret"),
    x_github_token: Optional[str] = Header(None, alias="X-Github-Token"),
    x_request_id: Optional[str] = Header(None, alias="X-Request-Id"),
) -> JSONResponse:
    """Versioned A2A entry point; same behavior as /a2a/invoke."""
    # Alias for versioned clients. Keep behavior identical to /a2a/invoke.
    return await _handle_invoke(request, authorization, x_a2a_secret, x_github_token, x_request_id)
diff --git a/gitpilot/agent_events.py b/gitpilot/agent_events.py
new file mode 100644
index 0000000000000000000000000000000000000000..65faa7652f10322747e7577eafc6a1730868ea8c
--- /dev/null
+++ b/gitpilot/agent_events.py
@@ -0,0 +1,297 @@
+# gitpilot/agent_events.py
+"""
+Unified agent event protocol.
+
+Every agent action emits events through an AgentEventBus. Consumers
+(WebSocket v2, SSE, VS Code postMessage) subscribe and forward to clients.
+
+Event types mirror Claude Code's streaming model:
+ - text_delta incremental response text
+ - tool_start agent is calling a tool
+ - tool_result tool returned a result
+ - file_write agent wrote/edited a file
+ - approval_needed agent needs user permission
+ - approval_resolved user responded to approval
+ - plan_step plan step status change
+ - terminal_output shell stdout/stderr line
+ - terminal_exit shell process exited
+ - test_result structured test pass/fail
+ - diagnostics lint/type errors
+ - status_change idle/planning/generating/etc.
+ - done agent finished
+ - error agent failed
+
+All platforms consume the same JSON event shape. The only difference
+is the transport: WebSocket, SSE, or VS Code postMessage.
+"""
+from __future__ import annotations
+
+import asyncio
+import enum
+import logging
+import time
+import uuid
+from dataclasses import dataclass, field
+from typing import Any, AsyncIterator, Dict
+
+logger = logging.getLogger(__name__)
+
+
+class EventType(str, enum.Enum):
+    """Wire-level event tags; each value becomes the JSON "type" field
+    produced by AgentEvent.to_dict(), so values are part of the protocol."""
+
+    TEXT_DELTA = "text_delta"
+    TOOL_START = "tool_start"
+    TOOL_RESULT = "tool_result"
+    FILE_WRITE = "file_write"
+    APPROVAL_NEEDED = "approval_needed"
+    APPROVAL_RESOLVED = "approval_resolved"
+    PLAN_STEP = "plan_step"
+    TERMINAL_OUTPUT = "terminal_output"
+    TERMINAL_EXIT = "terminal_exit"
+    TEST_RESULT = "test_result"
+    DIAGNOSTICS = "diagnostics"
+    STATUS_CHANGE = "status_change"
+    DONE = "done"
+    ERROR = "error"
+
+
+@dataclass
+class AgentEvent:
+    """Single event emitted by the agent during execution."""
+
+    type: EventType
+    # Event-specific payload; keys are merged into the serialized dict.
+    data: Dict[str, Any] = field(default_factory=dict)
+    # Unix timestamp (seconds) captured at construction time.
+    timestamp: float = field(default_factory=time.time)
+    # Short random id (12 hex chars) for client-side tracing/dedup.
+    id: str = field(default_factory=lambda: uuid.uuid4().hex[:12])
+
+    def to_dict(self) -> dict:
+        """Serialize the event for transport.
+
+        NOTE(review): ``**self.data`` is spread last, so a payload key
+        named "type", "id", or "ts" would overwrite the envelope fields;
+        producers should avoid those keys.
+        """
+        return {
+            "type": self.type.value,
+            "id": self.id,
+            "ts": self.timestamp,
+            **self.data,
+        }
+
+    def to_sse(self) -> str:
+        """Format as one Server-Sent Event frame (a ``data:`` line followed
+        by the required blank line)."""
+        import json
+
+        return f"data: {json.dumps(self.to_dict())}\n\n"
+
+
+# ---------------------------------------------------------------------------
+# Factory functions for clean event creation
+# ---------------------------------------------------------------------------
+
+
+def text_delta(text: str) -> AgentEvent:
+ return AgentEvent(type=EventType.TEXT_DELTA, data={"text": text})
+
+
+def tool_start(tool_id: str, name: str, args: dict) -> AgentEvent:
+ return AgentEvent(
+ type=EventType.TOOL_START,
+ data={"tool_id": tool_id, "name": name, "arguments": args},
+ )
+
+
+def tool_result(
+ tool_id: str, name: str, result: str, is_error: bool = False
+) -> AgentEvent:
+ return AgentEvent(
+ type=EventType.TOOL_RESULT,
+ data={
+ "tool_id": tool_id,
+ "name": name,
+ "result": result[:2000],
+ "is_error": is_error,
+ },
+ )
+
+
+def file_write(path: str, action: str = "modify") -> AgentEvent:
+ return AgentEvent(
+ type=EventType.FILE_WRITE, data={"path": path, "action": action}
+ )
+
+
+def approval_needed(
+ request_id: str,
+ tool: str,
+ args: dict,
+ summary: str,
+ diff_preview: str | None = None,
+ risk: str = "medium",
+) -> AgentEvent:
+ return AgentEvent(
+ type=EventType.APPROVAL_NEEDED,
+ data={
+ "request_id": request_id,
+ "tool": tool,
+ "arguments": args,
+ "summary": summary,
+ "diff_preview": diff_preview,
+ "risk_level": risk,
+ },
+ )
+
+
+def approval_resolved(request_id: str, approved: bool) -> AgentEvent:
+ return AgentEvent(
+ type=EventType.APPROVAL_RESOLVED,
+ data={"request_id": request_id, "approved": approved},
+ )
+
+
+def plan_step(index: int, title: str, status: str, action: str = "") -> AgentEvent:
+ return AgentEvent(
+ type=EventType.PLAN_STEP,
+ data={
+ "step_index": index,
+ "title": title,
+ "status": status,
+ "action": action,
+ },
+ )
+
+
+def terminal_output(stream: str, text: str) -> AgentEvent:
+ return AgentEvent(
+ type=EventType.TERMINAL_OUTPUT, data={"stream": stream, "text": text}
+ )
+
+
+def terminal_exit(command: str, exit_code: int, duration_ms: int = 0) -> AgentEvent:
+ return AgentEvent(
+ type=EventType.TERMINAL_EXIT,
+ data={
+ "command": command,
+ "exit_code": exit_code,
+ "duration_ms": duration_ms,
+ },
+ )
+
+
+def test_result(
+ framework: str,
+ passed: int,
+ failed: int,
+ skipped: int = 0,
+ output: str = "",
+ exit_code: int = 0,
+) -> AgentEvent:
+ return AgentEvent(
+ type=EventType.TEST_RESULT,
+ data={
+ "framework": framework,
+ "passed": passed,
+ "failed": failed,
+ "skipped": skipped,
+ "output": output[:5000],
+ "exit_code": exit_code,
+ },
+ )
+
+
+def diagnostics(
+ errors: int, warnings: int, entries: list
+) -> AgentEvent:
+ return AgentEvent(
+ type=EventType.DIAGNOSTICS,
+ data={
+ "errors": errors,
+ "warnings": warnings,
+ "entries": entries[:30],
+ },
+ )
+
+
+def status_change(status: str, message: str = "") -> AgentEvent:
+ return AgentEvent(
+ type=EventType.STATUS_CHANGE, data={"status": status, "message": message}
+ )
+
+
+def agent_done(usage: dict | None = None, summary: str = "") -> AgentEvent:
+ return AgentEvent(
+ type=EventType.DONE,
+ data={"usage": usage or {}, "summary": summary},
+ )
+
+
+def agent_error(error: str, recoverable: bool = True) -> AgentEvent:
+    """Terminal event: the agent failed (recoverable by default)."""
+    return AgentEvent(EventType.ERROR, {"error": error, "recoverable": recoverable})
+
+
+# ---------------------------------------------------------------------------
+# AgentEventBus — fan-out event bus per session
+# ---------------------------------------------------------------------------
+
+
+class AgentEventBus:
+ """
+ Fan-out event bus. The executor emits events; multiple consumers
+ (WebSocket, SSE, polling) subscribe via async queues.
+
+ Thread-safe: events are pushed through asyncio.Queue per subscriber.
+ """
+
+ def __init__(self) -> None:
+ self._subscribers: Dict[str, asyncio.Queue[AgentEvent]] = {}
+
+ def subscribe(self) -> tuple[str, asyncio.Queue[AgentEvent]]:
+ """Create a new subscription. Returns (sub_id, queue)."""
+ sub_id = uuid.uuid4().hex[:8]
+ queue: asyncio.Queue[AgentEvent] = asyncio.Queue(maxsize=2000)
+ self._subscribers[sub_id] = queue
+ return sub_id, queue
+
+ def unsubscribe(self, sub_id: str) -> None:
+ self._subscribers.pop(sub_id, None)
+
+ async def emit(self, event: AgentEvent) -> None:
+ """Push event to all subscribers (non-blocking drop on full)."""
+ for queue in self._subscribers.values():
+ try:
+ queue.put_nowait(event)
+ except asyncio.QueueFull:
+ logger.warning("AgentEventBus: subscriber queue full, dropping event")
+
+ async def stream(self, sub_id: str) -> AsyncIterator[AgentEvent]:
+ """Yield events for a subscriber. Sends keepalive every 25s."""
+ queue = self._subscribers.get(sub_id)
+ if not queue:
+ return
+ try:
+ while sub_id in self._subscribers:
+ try:
+ event = await asyncio.wait_for(queue.get(), timeout=25.0)
+ yield event
+ except asyncio.TimeoutError:
+ yield AgentEvent(
+ type=EventType.STATUS_CHANGE,
+ data={"status": "keepalive"},
+ )
+ finally:
+ self.unsubscribe(sub_id)
+
+
+# ---------------------------------------------------------------------------
+# Global bus registry (one bus per session)
+# ---------------------------------------------------------------------------
+
+# One bus per active session. Entries persist until remove_bus() is called,
+# so long-lived processes must clean up finished sessions explicitly.
+_session_buses: Dict[str, AgentEventBus] = {}
+
+
+def get_bus(session_id: str) -> AgentEventBus:
+ """Get or create the event bus for a session."""
+ if session_id not in _session_buses:
+ _session_buses[session_id] = AgentEventBus()
+ return _session_buses[session_id]
+
+
+def remove_bus(session_id: str) -> None:
+ """Clean up the event bus for a session."""
+ _session_buses.pop(session_id, None)
diff --git a/gitpilot/agent_executor.py b/gitpilot/agent_executor.py
new file mode 100644
index 0000000000000000000000000000000000000000..3998968ea274cd0e84e3cfff7b4604dabc3e9d6d
--- /dev/null
+++ b/gitpilot/agent_executor.py
@@ -0,0 +1,416 @@
+# gitpilot/agent_executor.py
+"""
+Streaming multi-step agent executor.
+
+Wraps existing CrewAI plan+execute functions with granular event streaming
+via the AgentEventBus. Does NOT modify agentic.py or any existing module.
+
+Execution pipeline:
+ 1. Plan (reuses generate_plan_lite / generate_plan)
+ 2. Execute (reuses execute_plan_lite / execute_plan, with streaming shim)
+ 3. Validate (run detected linter/tests, feed errors back)
+
+All events flow through AgentEventBus -> consumers (WS v2, SSE, VS Code).
+"""
+from __future__ import annotations
+
+import asyncio
+import json
+import logging
+import re
+import uuid
+from pathlib import Path
+from typing import Optional
+
+from . import agent_events as evt
+from .agent_events import AgentEventBus
+from .approval_protocol import ApprovalGate
+from .terminal import TerminalExecutor, TerminalSession
+from .workspace import WorkspaceManager, WorkspaceInfo
+from .test_detection import detect_test_command, detect_framework_name
+from .diagnostics_runner import run_linter, parse_diagnostics
+
+logger = logging.getLogger(__name__)
+
+# Maximum retries for self-correction after validation errors
+MAX_VALIDATION_RETRIES = 2
+
+
+class StreamingAgentExecutor:
+ """
+ Autonomous multi-step agent executor with real-time streaming.
+
+ Orchestrates the full pipeline:
+ plan -> per-step execution -> validation -> self-correction -> done
+
+ Emits all events through the AgentEventBus so any transport
+ (WebSocket v2, SSE, VS Code postMessage) can forward them.
+ """
+
+    def __init__(
+        self,
+        bus: AgentEventBus,
+        gate: ApprovalGate,
+        workspace: Optional[WorkspaceInfo] = None,
+        ws_manager: Optional[WorkspaceManager] = None,
+        terminal: Optional[TerminalExecutor] = None,
+    ) -> None:
+        """Bind the executor to one event bus and approval gate.
+
+        Args:
+            bus: Event bus all progress events are emitted on.
+            gate: Approval gate held for privileged actions.
+            workspace: Local workspace validated after execution (optional).
+            ws_manager: Workspace manager; a fresh one is created if omitted.
+            terminal: Shell executor for validation; a fresh one if omitted.
+        """
+        self._bus = bus
+        self._gate = gate
+        self._workspace = workspace
+        self._ws_manager = ws_manager or WorkspaceManager()
+        self._terminal = terminal or TerminalExecutor()
+        # Cooperative cancellation flag, checked between pipeline phases.
+        self._cancelled = False
+
+    def cancel(self) -> None:
+        """Signal the executor to stop after the current step (cooperative:
+        the flag is only checked between phases, never mid-call)."""
+        self._cancelled = True
+
+    async def execute(
+        self,
+        user_message: str,
+        repo_full_name: str,
+        branch: Optional[str] = None,
+        token: Optional[str] = None,
+        mode: str = "auto",
+    ) -> Optional[dict]:
+        """
+        Main entry point. Streams events throughout.
+
+        Args:
+            user_message: The user's request
+            repo_full_name: "owner/repo" (required for multi-agent path)
+            branch: Git branch (default HEAD)
+            token: GitHub token (for remote repos)
+            mode: "auto" | "plan_only"
+
+        Returns:
+            The plan dict on success, None on failure.
+        """
+        # Validate repo_full_name BEFORE hitting CrewAI/generate_plan_lite.
+        # The legacy planners do `owner, repo = repo_full_name.split("/")`
+        # which raises ValueError("not enough values to unpack") on empty
+        # or slashless strings. Catch this at the boundary.
+        #
+        # For folder-only / local-git sessions (no GitHub remote) the
+        # multi-agent CrewAI planner is simply not applicable. Instead of
+        # emitting a scary `agent_error` (which every SSE consumer renders
+        # as a user-visible notice), close the stream cleanly with an
+        # empty `done` event. Callers that support a batch fallback — e.g.
+        # the VS Code extension's sendChatToBackend() flow — will treat
+        # the empty stream as "streaming unavailable" and transparently
+        # fall through to /api/chat/send, which is the correct path for
+        # folder-only sessions. No regression for strict clients: they
+        # still observe a terminal event and the method returns None.
+        if not self._is_valid_repo_full_name(repo_full_name):
+            logger.debug(
+                "StreamingAgentExecutor: skipping CrewAI planner for "
+                "non-GitHub session (repo_full_name=%r); clients should "
+                "fall back to batch chat.",
+                repo_full_name,
+            )
+            await self._bus.emit(evt.status_change("done"))
+            await self._bus.emit(evt.agent_done(summary=""))
+            return None
+
+        try:
+            # ── Phase 1: Plan ──
+            await self._bus.emit(evt.status_change("planning", "Analyzing your request..."))
+
+            plan = await self._generate_plan(user_message, repo_full_name, token, branch)
+            if not plan:
+                await self._bus.emit(evt.agent_error("Failed to generate plan"))
+                return None
+
+            # Emit plan structure. The planner may return a plain dict or a
+            # Plan-like object with .steps; both shapes are normalized here.
+            steps = plan.get("steps", []) if isinstance(plan, dict) else []
+            if hasattr(plan, "steps"):
+                steps = [
+                    {"title": s.title, "description": s.description}
+                    for s in plan.steps
+                ]
+
+            goal = plan.get("goal", user_message) if isinstance(plan, dict) else getattr(plan, "goal", user_message)
+            summary = plan.get("summary", "") if isinstance(plan, dict) else getattr(plan, "summary", "")
+
+            for i, step in enumerate(steps):
+                title = step.get("title", f"Step {i + 1}") if isinstance(step, dict) else getattr(step, "title", f"Step {i + 1}")
+                await self._bus.emit(evt.plan_step(i, title, "pending"))
+
+            if self._cancelled:
+                await self._bus.emit(evt.agent_error("Cancelled by user", recoverable=True))
+                return None
+
+            if mode == "plan_only":
+                await self._bus.emit(evt.agent_done(summary="Plan generated."))
+                return plan if isinstance(plan, dict) else {"goal": goal, "summary": summary, "steps": steps}
+
+            # ── Phase 2: Execute ──
+            await self._bus.emit(evt.status_change("generating", "Executing plan..."))
+
+            result = await self._execute_plan(plan, repo_full_name, token, branch)
+
+            # Stream result text in chunks (simulate token streaming from batch)
+            answer = self._extract_answer(result)
+            chunk_size = 80
+            for i in range(0, len(answer), chunk_size):
+                if self._cancelled:
+                    break
+                await self._bus.emit(evt.text_delta(answer[i : i + chunk_size]))
+                # Small pacing delay so consumers render progressively.
+                await asyncio.sleep(0.015)
+
+            if self._cancelled:
+                await self._bus.emit(evt.agent_error("Cancelled by user", recoverable=True))
+                return None
+
+            # ── Phase 3: Validate ──
+            await self._bus.emit(evt.status_change("reviewing", "Validating changes..."))
+            await self._run_validation()
+
+            # ── Done ──
+            await self._bus.emit(evt.status_change("done"))
+            await self._bus.emit(evt.agent_done(summary=answer[:500]))
+
+            return plan if isinstance(plan, dict) else {"goal": goal, "summary": summary}
+
+        except Exception as e:
+            logger.error("StreamingAgentExecutor error: %s", e, exc_info=True)
+            await self._bus.emit(evt.agent_error(str(e)))
+            return None
+
+ # ── Internal helpers ──
+
+ @staticmethod
+ def _is_valid_repo_full_name(name: object) -> bool:
+ """True iff `name` is a non-empty 'owner/repo' string with both parts.
+
+ Rejects: None, empty strings, strings without '/', strings with
+ empty owner or repo, and strings containing more than one '/'.
+ """
+ if not isinstance(name, str) or not name.strip():
+ return False
+ parts = name.strip().split("/")
+ return len(parts) == 2 and all(p.strip() for p in parts)
+
+    async def _generate_plan(self, goal, repo_full_name, token, branch):
+        """Wrap existing plan generators with event streaming.
+
+        Returns the planner's result (dict or plan object) or None when the
+        planner is unavailable or raised. Note: the ImportError branch only
+        logs a warning and falls through to ``return None`` without emitting
+        a bus event — the caller then surfaces a generic failure.
+        """
+        try:
+            from .agentic import generate_plan_lite
+            return await generate_plan_lite(
+                goal=goal,
+                repo_full_name=repo_full_name,
+                token=token,
+                branch_name=branch,
+            )
+        except ImportError:
+            logger.warning("generate_plan_lite not available, using fallback")
+        except Exception as e:
+            logger.error("Plan generation error: %s", e)
+            await self._bus.emit(evt.agent_error(f"Planning failed: {e}"))
+        return None
+
+    async def _execute_plan(self, plan, repo_full_name, token, branch):
+        """Wrap existing plan executors with event streaming.
+
+        Mirrors ``_generate_plan``: returns the executor's result, or None
+        when the executor is unavailable (ImportError — warning only, no bus
+        event) or raised (error event emitted).
+        """
+        try:
+            from .agentic import execute_plan_lite
+            return await execute_plan_lite(
+                plan=plan,
+                repo_full_name=repo_full_name,
+                token=token,
+                branch_name=branch,
+            )
+        except ImportError:
+            logger.warning("execute_plan_lite not available, using fallback")
+        except Exception as e:
+            logger.error("Plan execution error: %s", e)
+            await self._bus.emit(evt.agent_error(f"Execution failed: {e}"))
+        return None
+
+    async def _run_validation(self) -> None:
+        """Run linter and/or tests if the workspace supports them.
+
+        Best-effort: each check is wrapped in its own try/except so a missing
+        linter or test runner never fails the pipeline; findings surface only
+        as diagnostics / test_result events on the bus.
+        """
+        if not self._workspace:
+            return
+
+        # WorkspaceInfo normally exposes .path; tolerate plain path-likes.
+        ws_path = self._workspace.path if hasattr(self._workspace, "path") else Path(str(self._workspace))
+
+        # ── Lint check ──
+        try:
+            lint_result = await run_linter(ws_path, self._terminal, timeout=60)
+            if lint_result:
+                entries = parse_diagnostics(lint_result.stdout + lint_result.stderr)
+                errors = [e for e in entries if e.get("severity") == "error"]
+                warnings = [e for e in entries if e.get("severity") == "warning"]
+                await self._bus.emit(evt.diagnostics(
+                    errors=len(errors),
+                    warnings=len(warnings),
+                    entries=entries,
+                ))
+        except Exception as e:
+            logger.debug("Linter not available: %s", e)
+
+        # ── Test check ──
+        try:
+            test_cmd = await detect_test_command(ws_path)
+            if test_cmd:
+                framework = await detect_framework_name(ws_path) or "unknown"
+                session = TerminalSession(workspace_path=ws_path)
+
+                await self._bus.emit(evt.tool_start(
+                    uuid.uuid4().hex[:8], "run_tests", {"command": test_cmd}
+                ))
+
+                # Stream test output
+                output_chunks = []
+                exit_code = -1
+                duration_ms = 0
+
+                # NOTE(review): assumes execute_streaming yields dicts with
+                # "type"/"data"/"exit_code"/"duration_ms" keys — confirm
+                # against TerminalExecutor's contract.
+                async for chunk in self._terminal.execute_streaming(session, test_cmd, timeout=120):
+                    if chunk.get("type") == "stdout":
+                        output_chunks.append(chunk["data"])
+                        await self._bus.emit(evt.terminal_output("stdout", chunk["data"]))
+                    elif chunk.get("type") == "error":
+                        output_chunks.append(chunk.get("data", ""))
+                        await self._bus.emit(evt.terminal_output("stderr", chunk.get("data", "")))
+                    elif chunk.get("type") == "exit":
+                        exit_code = chunk.get("exit_code", -1)
+                        duration_ms = chunk.get("duration_ms", 0)
+
+                await self._bus.emit(evt.terminal_exit(test_cmd, exit_code, duration_ms))
+
+                # Parse test results
+                full_output = "".join(output_chunks)
+                passed, failed, skipped = self._parse_test_counts(full_output)
+
+                await self._bus.emit(evt.test_result(
+                    framework=framework,
+                    passed=passed,
+                    failed=failed,
+                    skipped=skipped,
+                    output=full_output,
+                    exit_code=exit_code,
+                ))
+        except Exception as e:
+            logger.debug("Test runner not available: %s", e)
+
+ @staticmethod
+ def _extract_answer(result) -> str:
+ """Extract the response text from various result formats."""
+ if result is None:
+ return "Task completed."
+ if isinstance(result, str):
+ return result
+ if isinstance(result, dict):
+ return (
+ result.get("result")
+ or result.get("answer")
+ or result.get("summary")
+ or result.get("message")
+ or str(result)
+ )
+ if hasattr(result, "raw"):
+ return str(result.raw)
+ return str(result)
+
+ @staticmethod
+ def _parse_test_counts(output: str) -> tuple[int, int, int]:
+ """Best-effort extraction of pass/fail/skip counts from test output."""
+ passed = failed = skipped = 0
+
+ # Jest / Vitest: "Tests: 3 passed, 1 failed, 4 total"
+ m = re.search(r"(\d+)\s+passed", output)
+ if m:
+ passed = int(m.group(1))
+ m = re.search(r"(\d+)\s+failed", output)
+ if m:
+ failed = int(m.group(1))
+ m = re.search(r"(\d+)\s+skipped", output)
+ if m:
+ skipped = int(m.group(1))
+
+ # pytest: "3 passed, 1 failed, 2 skipped"
+ m = re.search(r"(\d+)\s+passed", output)
+ if m:
+ passed = int(m.group(1))
+ m = re.search(r"(\d+)\s+failed", output)
+ if m:
+ failed = int(m.group(1))
+
+ # go test: "ok" or "FAIL"
+ if "FAIL" in output and failed == 0:
+ failed = output.count("FAIL")
+ if "ok" in output and passed == 0:
+ passed = output.count("\nok")
+
+ return passed, failed, skipped
+
+
+    # ---------------------------------------------------------------------
+    # Batch P2-D — additive streaming co-method.
+    #
+    # Adapts the legacy ``execute(...)`` to the :mod:`gitpilot.streaming`
+    # adapter contract. Yields :class:`StreamEvent` objects so the SSE
+    # route can flush each as it arrives. Behaviour falls back to a
+    # single ``assistant_chunk`` when the underlying executor has nothing
+    # to stream (e.g. folder-only sessions). No legacy method is
+    # modified.
+    # ---------------------------------------------------------------------
+    async def run_streaming(self, payload):
+        """Yield ``StreamEvent`` instances for the request *payload*.
+
+        Recognised keys (every key is optional; sensible defaults apply):
+
+        * ``user_message`` (str) — the user's request
+        * ``repo_full_name`` (str) — ``owner/repo`` for GitHub sessions
+        * ``branch`` (str), ``token`` (str), ``mode`` (str)
+
+        The method itself does not import ``gitpilot.streaming`` at
+        module top-level so the agent executor stays usable in
+        contexts where the streaming layer isn't wired (CLI, tests).
+        """
+        from .streaming import StreamEvent  # local import — keep agent_executor lean
+
+        user_message = str(payload.get("user_message", ""))
+        repo_full_name = str(payload.get("repo_full_name", ""))
+        branch = payload.get("branch")
+        token = payload.get("token")
+        mode = payload.get("mode", "auto")
+
+        yield StreamEvent(
+            event="agent_event",
+            data={"type": "executor_started", "mode": mode},
+        )
+
+        try:
+            result = await self.execute(
+                user_message=user_message,
+                repo_full_name=repo_full_name,
+                branch=branch,
+                token=token,
+                mode=mode,
+            )
+        except Exception as exc:  # noqa: BLE001 — boundary adapter
+            yield StreamEvent(
+                event="error",
+                data={"code": "executor.failed", "message": str(exc)[:240]},
+            )
+            return
+
+        if result is None:
+            # execute() has already emitted its terminal bus events; one
+            # chunk here lets SSE clients detect the batch-fallback case.
+            yield StreamEvent(
+                event="assistant_chunk",
+                data={"text": "(no plan produced — streaming fallback)"},
+            )
+            return
+
+        plan_text = result.get("summary") if isinstance(result, dict) else None
+        if not plan_text and isinstance(result, dict):
+            # No summary: fall back to the first five step titles.
+            # NOTE(review): assumes each step is a dict; a non-dict step
+            # would raise AttributeError on .get — confirm planner output.
+            plan_text = "\n".join(
+                str(step.get("title") or step) for step in (result.get("steps") or [])[:5]
+            )
+        if plan_text:
+            yield StreamEvent(
+                event="assistant_chunk",
+                data={"text": plan_text},
+            )
+
+        yield StreamEvent(event="agent_event", data={"type": "executor_finished"})
diff --git a/gitpilot/agent_router.py b/gitpilot/agent_router.py
new file mode 100644
index 0000000000000000000000000000000000000000..4639bf2c47a9fb100c9b296e093e0fbcf45daade
--- /dev/null
+++ b/gitpilot/agent_router.py
@@ -0,0 +1,284 @@
+# gitpilot/agent_router.py
+"""Intelligent Agent Router for GitPilot.
+
+Classifies user requests and delegates them to the appropriate specialised
+agent (or a pipeline of agents). The router itself does **not** use an LLM;
+it relies on lightweight keyword / pattern matching so that routing is
+instantaneous and deterministic.
+
+The router returns a *WorkflowPlan* describing which agents should run and
+in what order. The actual agent execution is handled by the orchestrator
+in ``agentic.py``.
+"""
+from __future__ import annotations
+
+import re
+from dataclasses import dataclass, field
+from enum import Enum
+from typing import List, Optional
+
+
+class AgentType(str, Enum):
+    """Available specialised agents. Values are stable identifiers used in
+    WorkflowPlan.agents; the orchestrator in agentic.py maps them to
+    concrete agent implementations."""
+
+    EXPLORER = "explorer"
+    PLANNER = "planner"
+    CODE_WRITER = "code_writer"
+    CODE_REVIEWER = "code_reviewer"
+    ISSUE_MANAGER = "issue_manager"
+    PR_MANAGER = "pr_manager"
+    SEARCH = "search"
+    LEARNING = "learning"
+    LOCAL_EDITOR = "local_editor"  # Phase 1: local file editing + shell
+    TERMINAL = "terminal"          # Phase 1: dedicated terminal agent
+
+
+class RequestCategory(str, Enum):
+    """High-level intent category inferred from the user request.
+
+    Precedence between categories is defined by the match order in
+    ``route()``; this enum only names the outcomes."""
+
+    PLAN_EXECUTE = "plan_execute"  # Existing explore -> plan -> execute workflow
+    ISSUE_MANAGEMENT = "issue_management"
+    PR_MANAGEMENT = "pr_management"
+    CODE_SEARCH = "code_search"
+    CODE_REVIEW = "code_review"
+    LEARNING = "learning"
+    CONVERSATIONAL = "conversational"  # Free-form chat / Q&A about the repo
+    LOCAL_EDIT = "local_edit"  # Phase 1: direct file editing with verification
+    TERMINAL = "terminal"  # Phase 1: shell command execution
+
+
+@dataclass
+class WorkflowPlan:
+    """Describes which agents to invoke and in what order."""
+
+    # Intent category this plan was routed to.
+    category: RequestCategory
+    # Agents to run, in execution order.
+    agents: List[AgentType]
+    # Human-readable summary of what the pipeline will do.
+    description: str
+    # False for requests that make sense without a checked-out repo
+    # (user/repo search, learning questions).
+    requires_repo_context: bool = True
+    # If the request mentions a specific issue/PR number, capture it.
+    entity_number: Optional[int] = None
+    # Additional metadata extracted from the request (e.g. {"action": "create"}
+    # or {"search_type": "code"}), consumed by the orchestrator.
+    metadata: dict = field(default_factory=dict)
+
+
+# ---------------------------------------------------------------------------
+# Pattern definitions (order matters -- first match wins)
+# ---------------------------------------------------------------------------
+
+# Issue-management intents. Each pattern requires an action verb followed
+# (anywhere later in the text) by the word "issue(s)".
+_ISSUE_CREATE_RE = re.compile(
+    r"\b(create|open|new|file|add)\b.*\bissue\b", re.IGNORECASE
+)
+_ISSUE_UPDATE_RE = re.compile(
+    r"\b(update|modify|edit|change|close|reopen|label|assign|milestone)\b.*\bissue\b",
+    re.IGNORECASE,
+)
+_ISSUE_LIST_RE = re.compile(
+    r"\b(list|show|get|find|search)\b.*\bissues?\b", re.IGNORECASE
+)
+_ISSUE_COMMENT_RE = re.compile(
+    r"\b(comment|reply|respond)\b.*\bissue\b", re.IGNORECASE
+)
+# Bare "#42" style references, matched anywhere in the request.
+_ISSUE_NUMBER_RE = re.compile(r"#(\d+)")
+
+# Pull-request intents: verb + "pull request"/"pr"/"pull".
+_PR_CREATE_RE = re.compile(
+    r"\b(create|open|new|make)\b.*\b(pull request|pr|pull)\b", re.IGNORECASE
+)
+_PR_MERGE_RE = re.compile(
+    r"\b(merge|squash|rebase)\b.*\b(pull request|pr|pull)\b", re.IGNORECASE
+)
+_PR_REVIEW_RE = re.compile(
+    r"\b(review|approve|request changes)\b.*\b(pull request|pr|pull)\b",
+    re.IGNORECASE,
+)
+_PR_LIST_RE = re.compile(
+    r"\b(list|show|get|find)\b.*\b(pull requests?|prs?|pulls?)\b", re.IGNORECASE
+)
+
+# Search intents, split by target: code symbols, users/orgs, repositories.
+_SEARCH_CODE_RE = re.compile(
+    r"\b(search|find|locate|grep|look for)\b.*\b(code|function|class|symbol|pattern|file)\b",
+    re.IGNORECASE,
+)
+_SEARCH_USER_RE = re.compile(
+    r"\b(search|find|who)\b.*\b(user|developer|org|organization|contributor)\b",
+    re.IGNORECASE,
+)
+_SEARCH_REPO_RE = re.compile(
+    r"\b(search|find|discover)\b.*\b(repo|repository|project)\b", re.IGNORECASE
+)
+
+# Phase-1 intents: shell execution and direct local file edits.
+_TERMINAL_RE = re.compile(
+    r"\b(run|execute|launch)\b.*\b(command|test|tests|script|build|lint|npm|pip|make|docker|pytest|cargo|go)\b",
+    re.IGNORECASE,
+)
+_LOCAL_EDIT_RE = re.compile(
+    r"\b(edit|modify|change|update|fix|write|rewrite|patch)\b.*\b(file|code|function|class|method|module|line|lines)\b",
+    re.IGNORECASE,
+)
+
+# Code-review intent: analysis verb + quality-related noun.
+_REVIEW_RE = re.compile(
+    r"\b(review|analyze|audit|check|inspect)\b.*\b(code|quality|security|performance)\b",
+    re.IGNORECASE,
+)
+
+# Learning/guidance intents: question phrasing, or GitHub feature keywords.
+_LEARNING_RE = re.compile(
+    r"\b(how (do|can|to)|explain|what is|guide|tutorial|best practice|help with)\b",
+    re.IGNORECASE,
+)
+_GITHUB_TOPICS_RE = re.compile(
+    r"\b(actions?|workflow|ci/?cd|pages?|packages?|discussions?|authentication|deploy|release)\b",
+    re.IGNORECASE,
+)
+
+
+def _extract_issue_number(text: str) -> Optional[int]:
+ m = _ISSUE_NUMBER_RE.search(text)
+ if m:
+ return int(m.group(1))
+ # Also try "issue 42" / "issue number 42"
+ m2 = re.search(r"\bissue\s*(?:number\s*)?(\d+)\b", text, re.IGNORECASE)
+ return int(m2.group(1)) if m2 else None
+
+
+def _extract_pr_number(text: str) -> Optional[int]:
+ m = re.search(r"\b(?:pr|pull request|pull)\s*#?(\d+)\b", text, re.IGNORECASE)
+ return int(m.group(1)) if m else None
+
+
+# ---------------------------------------------------------------------------
+# Public API
+# ---------------------------------------------------------------------------
+
+def route(user_request: str) -> WorkflowPlan:
+    """Classify *user_request* and return a ``WorkflowPlan``.
+
+    Matching is purely lexical (the module-level regexes) and the first
+    matching rule wins, so block order defines precedence: issues -> PRs
+    -> search -> terminal -> local edit -> review -> learning -> the
+    default explore/plan/execute workflow.
+    """
+    text = user_request.strip()
+
+    # --- Issue management ------------------------------------------------
+    if _ISSUE_CREATE_RE.search(text):
+        return WorkflowPlan(
+            category=RequestCategory.ISSUE_MANAGEMENT,
+            agents=[AgentType.ISSUE_MANAGER],
+            description="Create a new GitHub issue",
+            entity_number=_extract_issue_number(text),
+            metadata={"action": "create"},
+        )
+    # Comment is checked before update: "respond to issue" should not be
+    # treated as a generic update.
+    if _ISSUE_COMMENT_RE.search(text):
+        return WorkflowPlan(
+            category=RequestCategory.ISSUE_MANAGEMENT,
+            agents=[AgentType.ISSUE_MANAGER],
+            description="Comment on an issue",
+            entity_number=_extract_issue_number(text),
+            metadata={"action": "comment"},
+        )
+    if _ISSUE_UPDATE_RE.search(text):
+        return WorkflowPlan(
+            category=RequestCategory.ISSUE_MANAGEMENT,
+            agents=[AgentType.ISSUE_MANAGER],
+            description="Update an existing issue",
+            entity_number=_extract_issue_number(text),
+            metadata={"action": "update"},
+        )
+    if _ISSUE_LIST_RE.search(text):
+        return WorkflowPlan(
+            category=RequestCategory.ISSUE_MANAGEMENT,
+            agents=[AgentType.ISSUE_MANAGER],
+            description="List or search issues",
+            metadata={"action": "list"},
+        )
+
+    # --- PR management ---------------------------------------------------
+    if _PR_CREATE_RE.search(text):
+        return WorkflowPlan(
+            category=RequestCategory.PR_MANAGEMENT,
+            agents=[AgentType.PR_MANAGER],
+            description="Create a pull request",
+            metadata={"action": "create"},
+        )
+    if _PR_MERGE_RE.search(text):
+        return WorkflowPlan(
+            category=RequestCategory.PR_MANAGEMENT,
+            agents=[AgentType.PR_MANAGER],
+            description="Merge a pull request",
+            entity_number=_extract_pr_number(text),
+            metadata={"action": "merge"},
+        )
+    if _PR_REVIEW_RE.search(text):
+        return WorkflowPlan(
+            category=RequestCategory.PR_MANAGEMENT,
+            agents=[AgentType.CODE_REVIEWER, AgentType.PR_MANAGER],
+            description="Review a pull request",
+            entity_number=_extract_pr_number(text),
+            metadata={"action": "review"},
+        )
+    if _PR_LIST_RE.search(text):
+        return WorkflowPlan(
+            category=RequestCategory.PR_MANAGEMENT,
+            agents=[AgentType.PR_MANAGER],
+            description="List pull requests",
+            metadata={"action": "list"},
+        )
+
+    # --- Code search -----------------------------------------------------
+    if _SEARCH_USER_RE.search(text):
+        return WorkflowPlan(
+            category=RequestCategory.CODE_SEARCH,
+            agents=[AgentType.SEARCH],
+            description="Search for GitHub users or organisations",
+            requires_repo_context=False,
+            metadata={"search_type": "users"},
+        )
+    if _SEARCH_REPO_RE.search(text):
+        return WorkflowPlan(
+            category=RequestCategory.CODE_SEARCH,
+            agents=[AgentType.SEARCH],
+            description="Search for repositories",
+            requires_repo_context=False,
+            metadata={"search_type": "repositories"},
+        )
+    if _SEARCH_CODE_RE.search(text):
+        return WorkflowPlan(
+            category=RequestCategory.CODE_SEARCH,
+            agents=[AgentType.SEARCH],
+            description="Search for code in the repository",
+            metadata={"search_type": "code"},
+        )
+
+    # --- Terminal / shell commands ----------------------------------------
+    if _TERMINAL_RE.search(text):
+        return WorkflowPlan(
+            category=RequestCategory.TERMINAL,
+            agents=[AgentType.TERMINAL],
+            description="Run shell commands in the workspace",
+            metadata={"action": "execute"},
+        )
+
+    # --- Local file editing -----------------------------------------------
+    if _LOCAL_EDIT_RE.search(text):
+        return WorkflowPlan(
+            category=RequestCategory.LOCAL_EDIT,
+            agents=[AgentType.LOCAL_EDITOR],
+            description="Edit files directly in the local workspace",
+        )
+
+    # --- Code review -----------------------------------------------------
+    if _REVIEW_RE.search(text):
+        return WorkflowPlan(
+            category=RequestCategory.CODE_REVIEW,
+            agents=[AgentType.EXPLORER, AgentType.CODE_REVIEWER],
+            description="Analyse code quality and suggest improvements",
+        )
+
+    # --- Learning & guidance ---------------------------------------------
+    if _LEARNING_RE.search(text) or _GITHUB_TOPICS_RE.search(text):
+        return WorkflowPlan(
+            category=RequestCategory.LEARNING,
+            agents=[AgentType.LEARNING],
+            description="Provide guidance on GitHub features or best practices",
+            requires_repo_context=False,
+        )
+
+    # --- Default: existing plan+execute workflow -------------------------
+    return WorkflowPlan(
+        category=RequestCategory.PLAN_EXECUTE,
+        agents=[AgentType.EXPLORER, AgentType.PLANNER, AgentType.CODE_WRITER],
+        description="Explore repository, create plan, and execute changes",
+    )
diff --git a/gitpilot/agent_teams.py b/gitpilot/agent_teams.py
new file mode 100644
index 0000000000000000000000000000000000000000..354e3a0578e8d398299d716efe8442db1dc593e6
--- /dev/null
+++ b/gitpilot/agent_teams.py
@@ -0,0 +1,263 @@
+# gitpilot/agent_teams.py
+"""Parallel multi-agent execution on git worktrees.
+
+Coordinates multiple agents working on independent subtasks simultaneously.
+Each agent operates on its own git worktree to avoid conflicts, and a lead
+agent reviews and merges the results.
+
+Architecture inspired by the MapReduce pattern and the *divide-and-conquer*
+approach from distributed systems research (Dean & Ghemawat, 2004).
+
+Workflow::
+
+ User: "Add authentication to the API"
+ Lead agent splits → 4 subtasks
+ ┌────────────┐ ┌────────────┐ ┌────────────┐ ┌────────────┐
+ │ Agent A: │ │ Agent B: │ │ Agent C: │ │ Agent D: │
+ │ User model │ │ Middleware │ │ Endpoints │ │ Tests │
+ │ worktree/a │ │ worktree/b │ │ worktree/c │ │ worktree/d │
+ └─────┬──────┘ └─────┬──────┘ └─────┬──────┘ └─────┬──────┘
+ └───────────┴───────────┴───────────┘
+ │
+ Lead reviews & merges
+"""
+from __future__ import annotations
+
+import asyncio
+import logging
+import uuid
+from dataclasses import dataclass, field
+from datetime import datetime, timezone
+from enum import Enum
+from pathlib import Path
+from typing import Any, Dict, List, Optional
+
+logger = logging.getLogger(__name__)
+
+
class SubTaskStatus(str, Enum):
    """Lifecycle state of a :class:`SubTask` (str-valued so it JSON-serializes cleanly)."""

    PENDING = "pending"      # created, not yet picked up by an executor
    RUNNING = "running"      # executor is currently working on it
    COMPLETED = "completed"  # executor finished without raising
    FAILED = "failed"        # executor raised; see SubTask.error
+
+
@dataclass
class SubTask:
    """A single subtask to be executed by one agent."""

    # Short random identifier (8 hex chars); also used for worktree/branch names.
    id: str = field(default_factory=lambda: uuid.uuid4().hex[:8])
    title: str = ""
    description: str = ""
    # Name of the agent responsible for this subtask (e.g. "agent_0").
    assigned_agent: str = ""
    # Files this subtask intends to touch; used later for conflict detection.
    files: List[str] = field(default_factory=list)
    status: SubTaskStatus = SubTaskStatus.PENDING
    # Free-form textual result produced by the executor.
    result: str = ""
    # Error message when status is FAILED, else None.
    error: Optional[str] = None
    # Git worktree assigned to this subtask (None until worktrees are set up).
    worktree_path: Optional[Path] = None
    # UTC ISO-8601 timestamps, set around execution.
    started_at: Optional[str] = None
    completed_at: Optional[str] = None

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a JSON-safe dict (``worktree_path`` is intentionally omitted)."""
        return {
            "id": self.id,
            "title": self.title,
            "description": self.description,
            "assigned_agent": self.assigned_agent,
            "files": self.files,
            "status": self.status.value,
            "result": self.result,
            "error": self.error,
            "started_at": self.started_at,
            "completed_at": self.completed_at,
        }
+
+
@dataclass
class TeamResult:
    """Aggregated outcome of one parallel agent execution run."""

    task: str
    subtasks: List[SubTask] = field(default_factory=list)
    merge_status: str = "pending"  # pending | merged | conflict | failed
    conflicts: List[str] = field(default_factory=list)
    summary: str = ""

    @property
    def all_completed(self) -> bool:
        """True when every subtask finished with COMPLETED status (vacuously true if empty)."""
        for sub in self.subtasks:
            if sub.status != SubTaskStatus.COMPLETED:
                return False
        return True

    @property
    def any_failed(self) -> bool:
        """True when at least one subtask ended in FAILED status."""
        for sub in self.subtasks:
            if sub.status == SubTaskStatus.FAILED:
                return True
        return False

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a JSON-safe dict, including the two derived flags."""
        payload: Dict[str, Any] = {
            "task": self.task,
            "subtasks": [sub.to_dict() for sub in self.subtasks],
            "merge_status": self.merge_status,
            "conflicts": self.conflicts,
            "summary": self.summary,
        }
        payload["all_completed"] = self.all_completed
        payload["any_failed"] = self.any_failed
        return payload
+
+
class AgentTeam:
    """Coordinate multiple agents working in parallel.

    Usage::

        team = AgentTeam(workspace_path=Path("/repo"))
        subtasks = team.plan_and_split("Add auth system", num_agents=4)
        result = await team.execute_parallel(subtasks, executor_fn=my_agent_fn)
        merge = await team.merge_results(result)
    """

    def __init__(self, workspace_path: Optional[Path] = None) -> None:
        # Repository root; when None, worktree management is a no-op.
        self.workspace_path = workspace_path
        # Worktrees created by setup_worktrees, remembered for cleanup.
        self._worktrees: List[Path] = []

    def plan_and_split(
        self,
        task: str,
        num_agents: int = 4,
        subtask_descriptions: Optional[List[Dict[str, str]]] = None,
    ) -> List[SubTask]:
        """Split a task into independent subtasks.

        If ``subtask_descriptions`` is provided, use those directly.
        Otherwise, create generic subtasks from the task description
        (capped at 8 agents).
        """
        subtasks: List[SubTask] = []

        if subtask_descriptions:
            for i, desc in enumerate(subtask_descriptions):
                subtasks.append(SubTask(
                    title=desc.get("title", f"Subtask {i + 1}"),
                    description=desc.get("description", ""),
                    assigned_agent=desc.get("agent", f"agent_{i}"),
                    files=desc.get("files", []),
                ))
        else:
            # Generic split — the LLM would normally do this
            for i in range(min(num_agents, 8)):
                subtasks.append(SubTask(
                    title=f"Part {i + 1} of {task}",
                    description=f"Handle part {i + 1} of the task: {task}",
                    assigned_agent=f"agent_{i}",
                ))

        return subtasks

    async def execute_parallel(
        self,
        subtasks: List[SubTask],
        executor_fn: Optional[Any] = None,
    ) -> TeamResult:
        """Execute subtasks in parallel.

        ``executor_fn`` is an async callable(SubTask) -> str that runs the
        agent logic for each subtask. If not provided, subtasks are marked
        as completed with a placeholder result. Executor exceptions are
        captured on the subtask (status=FAILED, error set) — never raised.
        """
        result = TeamResult(task="parallel_execution", subtasks=subtasks)

        async def _run_subtask(subtask: SubTask) -> None:
            subtask.status = SubTaskStatus.RUNNING
            subtask.started_at = datetime.now(timezone.utc).isoformat()
            try:
                if executor_fn:
                    subtask.result = await executor_fn(subtask)
                else:
                    subtask.result = f"Completed: {subtask.title}"
                subtask.status = SubTaskStatus.COMPLETED
            except Exception as e:
                subtask.status = SubTaskStatus.FAILED
                subtask.error = str(e)
                logger.error("Subtask %s failed: %s", subtask.id, e)
            finally:
                # Always stamp completion time, even on failure.
                subtask.completed_at = datetime.now(timezone.utc).isoformat()

        # Run all subtasks concurrently
        await asyncio.gather(*[_run_subtask(st) for st in subtasks])

        return result

    async def merge_results(self, team_result: TeamResult) -> TeamResult:
        """Merge results from parallel execution.

        In a full implementation, this would:
        1. Check for file conflicts between subtask outputs
        2. Use git merge-tree for conflict detection
        3. Have a lead agent resolve conflicts

        For now, it aggregates results and detects file overlaps.
        """
        if team_result.any_failed:
            team_result.merge_status = "failed"
            failed = [s for s in team_result.subtasks if s.status == SubTaskStatus.FAILED]
            team_result.summary = (
                f"{len(failed)} subtask(s) failed: "
                + ", ".join(f"{s.title} ({s.error})" for s in failed)
            )
            return team_result

        # Detect file conflicts (same file modified by multiple agents)
        file_owners: Dict[str, List[str]] = {}
        for st in team_result.subtasks:
            for f in st.files:
                file_owners.setdefault(f, []).append(st.assigned_agent)

        conflicts = [f for f, owners in file_owners.items() if len(owners) > 1]
        team_result.conflicts = conflicts

        if conflicts:
            team_result.merge_status = "conflict"
            team_result.summary = (
                f"File conflicts detected in: {', '.join(conflicts)}. "
                "Manual review required."
            )
        else:
            team_result.merge_status = "merged"
            completed = [s for s in team_result.subtasks if s.status == SubTaskStatus.COMPLETED]
            team_result.summary = (
                f"All {len(completed)} subtasks completed successfully. "
                "No file conflicts detected."
            )

        return team_result

    async def setup_worktrees(self, subtasks: List[SubTask], base_branch: str = "main") -> None:
        """Create a git worktree (on a fresh ``team/<id>`` branch) per subtask.

        No-op when ``workspace_path`` is unset. Worktrees that git fails to
        create are logged and skipped — previously the failed path was still
        recorded, so an agent would try to work in a non-existent directory.
        """
        if not self.workspace_path:
            return
        for st in subtasks:
            worktree_name = f"worktree-{st.id}"
            worktree_path = self.workspace_path / ".worktrees" / worktree_name
            branch_name = f"team/{st.id}"

            proc = await asyncio.create_subprocess_exec(
                "git", "worktree", "add", "-b", branch_name,
                str(worktree_path), base_branch,
                cwd=str(self.workspace_path),
                stdout=asyncio.subprocess.PIPE,
                stderr=asyncio.subprocess.PIPE,
            )
            _, stderr = await proc.communicate()
            if proc.returncode != 0:
                # BUGFIX: do not record a worktree that git failed to create.
                logger.error(
                    "git worktree add failed for subtask %s: %s",
                    st.id,
                    stderr.decode(errors="replace").strip(),
                )
                continue
            st.worktree_path = worktree_path
            self._worktrees.append(worktree_path)

    async def cleanup_worktrees(self) -> None:
        """Remove all worktrees created by this team (best-effort)."""
        if not self.workspace_path:
            return
        for wt in self._worktrees:
            proc = await asyncio.create_subprocess_exec(
                "git", "worktree", "remove", "--force", str(wt),
                cwd=str(self.workspace_path),
                stdout=asyncio.subprocess.PIPE,
                stderr=asyncio.subprocess.PIPE,
            )
            _, stderr = await proc.communicate()
            if proc.returncode != 0:
                # Best-effort: log and continue so one stuck worktree does
                # not prevent removal of the others.
                logger.warning(
                    "git worktree remove failed for %s: %s",
                    wt,
                    stderr.decode(errors="replace").strip(),
                )
        self._worktrees.clear()
diff --git a/gitpilot/agent_tools.py b/gitpilot/agent_tools.py
new file mode 100644
index 0000000000000000000000000000000000000000..e0a34ea89e6a7f3fa2426fa9e7fbf80f282f08ef
--- /dev/null
+++ b/gitpilot/agent_tools.py
@@ -0,0 +1,336 @@
+"""
+Agent Tools for GitPilot Multi-Agent System
+Provides CrewAI-compatible tools for agents to explore and analyze repositories.
+"""
+import asyncio
+import threading
+from typing import Any, Dict, List, Optional, Tuple
+
+from crewai.tools import tool
+
+from .github_api import get_repo_tree, get_file
+
+
+def _sanitize_tool_arg(value: Any, fallback_key: str = "description") -> str:
+ """Fix CrewAI tool argument format bug.
+
+ Smaller LLMs (deepseek-r1, qwen, phi) sometimes send tool arguments
+ as a dict copying the schema definition instead of the actual value:
+ {"description": "README.md", "type": "str"}
+ instead of:
+ "README.md"
+
+ Worst case: the LLM copies the schema verbatim with a literal
+ ``"None"`` value (because the tool exposes ``description: None``):
+ {"description": "None", "type": "str"}
+
+ This helper unwraps every variant we have seen in production and
+ returns a plain string. Raises ``ValueError`` only when the value
+ cannot be recovered (e.g. the LLM passed a list or an empty dict)
+ so the caller can surface a clear error instead of querying
+ GitHub with a stringified Python dict.
+ """
+ if isinstance(value, str):
+ return value
+ if isinstance(value, dict):
+ # 1. Try the most likely human-supplied keys.
+ for key in (fallback_key, "description", "value", "default", "title", "path"):
+ v = value.get(key)
+ if isinstance(v, str) and v and v.lower() != "none":
+ return v
+ # 2. Any other string field on the dict that isn't the schema
+ # ``type`` marker.
+ for key, v in value.items():
+ if key in {"type", "anyOf", "format"}:
+ continue
+ if isinstance(v, str) and v and v.lower() != "none":
+ return v
+ raise ValueError(
+ f"tool argument arrived as a schema-shaped dict with no "
+ f"usable value (got keys: {sorted(value.keys())!r}). "
+ f"Pass the parameter as a plain string."
+ )
+ if value is None:
+ raise ValueError("tool argument is required but received None")
+ if isinstance(value, (list, tuple, set)):
+ raise ValueError(
+ f"tool argument expected a string, got a {type(value).__name__}; "
+ f"pass a single value, not a sequence."
+ )
+ return str(value)
+
# Module-level context shared by every tool in this process.
# Carries 'token' so tools can authenticate even when invoked from worker
# threads, and 'branch' so tools hit the correct ref (not default HEAD/main).
_current_repo_context: Dict[str, Any] = {}
_context_lock = threading.RLock()


def set_repo_context(
    owner: str,
    repo: str,
    token: Optional[str] = None,
    branch: Optional[str] = None,
):
    """Set the current repository context for tools."""
    global _current_repo_context
    fresh = {
        "owner": owner,
        "repo": repo,
        "token": token,
        "branch": branch if branch else "HEAD",
    }
    with _context_lock:
        _current_repo_context = fresh


def get_repo_context() -> Tuple[str, str, Optional[str], str]:
    """Get the current repository context including token and branch."""
    with _context_lock:
        snapshot = dict(_current_repo_context)

    owner = snapshot.get("owner", "")
    repo = snapshot.get("repo", "")
    if not owner or not repo:
        raise ValueError("Repository context not set. Call set_repo_context first.")
    return owner, repo, snapshot.get("token"), snapshot.get("branch", "HEAD")
+
+
async def get_repository_context_summary(
    owner: str,
    repo: str,
    token: Optional[str] = None,
    branch: str = "HEAD",
) -> Dict[str, Any]:
    """Programmatically gather repository context (no LLM involved).

    Returns a dict with:
      all_files   – every path in the repo tree
      total_files – len(all_files)
      extensions  – {".py": count, ...}
      directories – set of top-level directory names
      key_files   – paths containing well-known names (readme, dockerfile, ...)

    On failure returns ``{"error": ..., "total_files": 0}`` instead of
    raising, so callers can degrade gracefully.
    """
    try:
        # Pass token + ref explicitly so we never fall back to the default branch.
        tree = await get_repo_tree(owner, repo, token=token, ref=branch)

        if not tree:
            return {
                "all_files": [],
                "total_files": 0,
                "extensions": {},
                "directories": set(),
                "key_files": [],
            }

        all_files = [item["path"] for item in tree]
        extensions: Dict[str, int] = {}
        directories: set = set()
        key_files: List[str] = []

        for item in tree:
            path = item["path"]
            if "." in path:
                ext = "." + path.rsplit(".", 1)[1]
                extensions[ext] = extensions.get(ext, 0) + 1
            if "/" in path:
                directories.add(path.split("/")[0])

            path_lower = path.lower()
            if any(
                k in path_lower
                for k in ["readme", "package.json", "requirements.txt", "dockerfile", "makefile"]
            ):
                key_files.append(path)

        return {
            "all_files": all_files,
            "total_files": len(all_files),
            "extensions": extensions,
            "directories": directories,
            "key_files": key_files,
        }

    except Exception as e:
        # BUGFIX: report through logging instead of bare print() so the error
        # reaches the application's log handlers rather than only stdout.
        import logging
        logging.getLogger(__name__).warning("Failed to get repository context: %s", e)
        return {"error": str(e), "total_files": 0}
+
+
+@tool("List all files in repository")
+def list_repository_files() -> str:
+ """Lists all files in the current repository."""
+ try:
+ owner, repo, token, branch = get_repo_context()
+
+ loop = asyncio.new_event_loop()
+ asyncio.set_event_loop(loop)
+ try:
+ # Pass token + ref explicitly
+ tree = loop.run_until_complete(get_repo_tree(owner, repo, token=token, ref=branch))
+ finally:
+ loop.close()
+
+ if not tree:
+ return f"Repository is empty - no files found. (Branch: {branch})"
+
+ result = f"Repository: {owner}/{repo} (Branch: {branch})\nFiles:\n"
+ for item in sorted(tree, key=lambda x: x["path"]):
+ result += f" - {item['path']}\n"
+ return result
+ except Exception as e:
+ return f"Error listing files: {str(e)}"
+
+
+@tool("Get directory structure")
+def get_directory_structure() -> str:
+ """Gets the hierarchical directory structure."""
+ try:
+ owner, repo, token, branch = get_repo_context()
+
+ loop = asyncio.new_event_loop()
+ asyncio.set_event_loop(loop)
+ try:
+ # Pass token + ref explicitly
+ tree = loop.run_until_complete(get_repo_tree(owner, repo, token=token, ref=branch))
+ finally:
+ loop.close()
+
+ if not tree:
+ return f"No files. (Branch: {branch})"
+
+ # Simple structure generation
+ paths = [t["path"] for t in tree]
+ return f"Structure for {owner}/{repo} (Branch: {branch}):\n" + "\n".join(sorted(paths))
+ except Exception as e:
+ return f"Error: {str(e)}"
+
+
+@tool("Read file content")
+def read_file(file_path: Any) -> str:
+ """Read the content of a file from the active repository.
+
+ file_path: the file's path relative to the repository root, e.g.
+ "README.md" or "src/main.py". Pass a plain string — do **not** pass
+ a dict like ``{"description": "...", "type": "str"}`` (that is the
+ parameter's schema, not its value).
+ """
+ file_path = _sanitize_tool_arg(file_path)
+ try:
+ owner, repo, token, branch = get_repo_context()
+
+ loop = asyncio.new_event_loop()
+ asyncio.set_event_loop(loop)
+ try:
+ # Pass token + ref explicitly
+ content = loop.run_until_complete(get_file(owner, repo, file_path, token=token, ref=branch))
+ finally:
+ loop.close()
+
+ return f"Content of {file_path}:\n---\n{content}\n---"
+ except Exception as e:
+ return f"Error reading file {file_path}: {str(e)}"
+
+
+@tool("Get repository summary")
+def get_repository_summary() -> str:
+ """Provides a comprehensive summary of the repository."""
+ try:
+ owner, repo, token, branch = get_repo_context()
+
+ loop = asyncio.new_event_loop()
+ asyncio.set_event_loop(loop)
+ try:
+ # Pass token + ref explicitly
+ tree = loop.run_until_complete(get_repo_tree(owner, repo, token=token, ref=branch))
+ finally:
+ loop.close()
+
+ return f"Summary for {owner}/{repo} (Branch: {branch}): {len(tree)} files found."
+ except Exception as e:
+ return f"Error: {str(e)}"
+
+
+# ---------------------------------------------------------------------------
+# Write tools — allow agents to create, update, and delete files via GitHub API
+# ---------------------------------------------------------------------------
+
+@tool("Write or update a file in the repository")
+def write_file(file_path: Any, content: Any, commit_message: Any) -> str:
+ """Create or update a file in the repository.
+
+ file_path: path relative to the repo root (plain string, e.g.
+ ``"src/main.py"``). content: the full new file content (plain
+ string). commit_message: a short imperative commit summary. Do
+ **not** wrap any of these in a ``{description, type}`` schema dict.
+ """
+ file_path = _sanitize_tool_arg(file_path)
+ content = _sanitize_tool_arg(content, fallback_key="value")
+ commit_message = _sanitize_tool_arg(commit_message, fallback_key="value")
+ try:
+ owner, repo, token, branch = get_repo_context()
+ from .github_api import put_file
+
+ loop = asyncio.new_event_loop()
+ asyncio.set_event_loop(loop)
+ try:
+ result = loop.run_until_complete(
+ put_file(owner, repo, file_path, content, commit_message, token=token, branch=branch)
+ )
+ finally:
+ loop.close()
+
+ sha = result.get("commit_sha", "")
+ return f"File '{file_path}' written successfully. Commit: {sha[:8]}"
+ except Exception as e:
+ return f"Error writing file {file_path}: {str(e)}"
+
+
+@tool("Delete a file from the repository")
+def delete_repo_file(file_path: Any, commit_message: Any) -> str:
+ """Delete a file from the repository.
+
+ file_path: the path relative to the repo root (plain string, e.g.
+ ``"docs/old.md"``). commit_message: a short imperative commit
+ summary. Both are plain strings — never wrap them in a schema dict.
+ """
+ file_path = _sanitize_tool_arg(file_path)
+ commit_message = _sanitize_tool_arg(commit_message, fallback_key="value")
+ try:
+ owner, repo, token, branch = get_repo_context()
+ from .github_api import delete_file
+
+ loop = asyncio.new_event_loop()
+ asyncio.set_event_loop(loop)
+ try:
+ result = loop.run_until_complete(
+ delete_file(owner, repo, file_path, commit_message, token=token, branch=branch)
+ )
+ finally:
+ loop.close()
+
+ sha = result.get("commit_sha", "")
+ return f"File '{file_path}' deleted. Commit: {sha[:8]}"
+ except Exception as e:
+ return f"Error deleting file {file_path}: {str(e)}"
+
+
+@tool("Create a new branch in the repository")
+def create_repo_branch(branch_name: str) -> str:
+ """Creates a new branch from the current HEAD."""
+ branch_name = _sanitize_tool_arg(branch_name)
+ try:
+ owner, repo, token, _branch = get_repo_context()
+ from .github_api import create_branch
+
+ loop = asyncio.new_event_loop()
+ asyncio.set_event_loop(loop)
+ try:
+ loop.run_until_complete(
+ create_branch(owner, repo, branch_name, from_ref="HEAD", token=token)
+ )
+ finally:
+ loop.close()
+
+ return f"Branch '{branch_name}' created successfully."
+ except Exception as e:
+ if "already exists" in str(e).lower() or "422" in str(e):
+ return f"Branch '{branch_name}' already exists (OK to use)."
+ return f"Error creating branch: {str(e)}"
+
+
# Export tools
# Read-only exploration tools (list/read/summarize the repo tree).
REPOSITORY_TOOLS = [list_repository_files, get_directory_structure, read_file, get_repository_summary]
# Mutating tools: create/update/delete files and create branches via the GitHub API.
WRITE_TOOLS = [write_file, delete_repo_file, create_repo_branch]
diff --git a/gitpilot/agentic.py b/gitpilot/agentic.py
new file mode 100644
index 0000000000000000000000000000000000000000..5c30ff93f484c10764d8e52fee73a82f2ec350a8
--- /dev/null
+++ b/gitpilot/agentic.py
@@ -0,0 +1,2229 @@
from __future__ import annotations

import asyncio
import contextvars
import logging
import re
from textwrap import dedent
from typing import Any, Dict, List, Literal, Optional

from fastapi import HTTPException
from pydantic import BaseModel, Field, ValidationError as _PydanticValidationError

from .agent_router import AgentType, RequestCategory, WorkflowPlan, route as route_request
from .context_pack import build_context_pack
from .topology_registry import (
    get_topology,
    get_topology_graph,
    classify_message,
    get_saved_topology_preference,
    ExecutionStyle,
    RoutingStrategy,
)
+
+logger = logging.getLogger(__name__)
+
+
+# ---------------------------------------------------------------------------
+# Incompatible model detection
+# ---------------------------------------------------------------------------
+# Models that struggle with CrewAI's multi-agent ReAct format.
+# Two categories:
+# 1. REASONING models (deepseek-r1, qwq, marco-o1) — produce tokens
+# that break CrewAI's parser regardless of model size
+# 2. SMALL models (<7B params) — return empty responses when they can't
+# follow "Thought: Action: Action Input:" format
+#
+# All of these are auto-routed to Lite Mode for reliability.
+_INCOMPATIBLE_MODEL_PATTERNS = (
+ # Reasoning models (ALL sizes fail — the tag breaks ReAct parser)
+ "deepseek-r1",
+ "qwq",
+ "marco-o1",
+ "o1-",
+ # Small models (<7B)
+ "qwen2.5:0.5b", "qwen2.5:1.5b", "qwen2.5:3b",
+ "qwen2:0.5b", "qwen2:1.5b",
+ "llama3.2:1b", "llama3.2:3b",
+ "phi3:mini", "phi-3-mini", "phi3.5:mini", "phi3:3.8b",
+ "gemma:2b", "gemma2:2b",
+ "deepseek-coder:1.3b", "deepseek-coder:6.7b",
+ "tinyllama", "tinydolphin",
+ "stablelm2", "smollm", "granite3",
+)
+
+
+def _is_incompatible_model(settings) -> bool:
+ """Check if the active model is incompatible with multi-agent ReAct.
+
+ Uses substring matching so "deepseek-r1" catches all variants
+ (deepseek-r1:1.5b, deepseek-r1:7b, deepseek-r1:14b, deepseek-r1:latest).
+ """
+ try:
+ provider = str(getattr(settings, "provider", "")).lower()
+ # Only applies to local Ollama/OllaBridge providers — cloud APIs
+ # (OpenAI, Claude) have native tool-calling that handles this
+ if provider not in ("ollama", "ollabridge"):
+ return False
+
+ if provider == "ollama":
+ model = str(getattr(settings.ollama, "model", "")).lower()
+ else:
+ model = str(getattr(settings.ollabridge, "model", "")).lower()
+
+ for pattern in _INCOMPATIBLE_MODEL_PATTERNS:
+ if pattern in model:
+ return True
+ return False
+ except Exception:
+ return False
+
+
+def _split_repo_full_name(repo_full_name: str) -> tuple[str, str]:
+ """Safely split 'owner/repo' into (owner, repo).
+
+ Raises a clear ValueError if the input is missing, empty, or malformed.
+ This replaces `owner, repo = _split_repo_full_name(repo_full_name)` which produces
+ a cryptic "not enough values to unpack" error on folder/local-git
+ sessions that have no GitHub repository.
+ """
+ if not isinstance(repo_full_name, str) or not repo_full_name.strip():
+ raise ValueError(
+ "repo_full_name is required but was empty. "
+ "This session is not connected to a GitHub repository — "
+ "the multi-agent planner needs a repo in 'owner/repo' format. "
+ "Open the Workspace tab and add a repository before chatting."
+ )
+ parts = repo_full_name.strip().split("/")
+ if len(parts) != 2 or not all(p.strip() for p in parts):
+ raise ValueError(
+ f"repo_full_name must be in 'owner/repo' format, got: {repo_full_name!r}. "
+ "Example: 'octocat/hello-world'"
+ )
+ return parts[0].strip(), parts[1].strip()
+
+
+# ---------------------------------------------------------------------------
+# Resilient agent execution: timeout + circuit breaker
+# ---------------------------------------------------------------------------
async def _guarded_agent_call(ctx, func, *, label: str = "agent"):
    """Run a CrewAI kickoff in a thread with timeout and circuit breaker.

    - Checks circuit breaker before starting (raises RuntimeError when OPEN).
    - Applies a hard timeout (default 5 min, configurable via GITPILOT_AGENT_TIMEOUT).
    - Records success/failure in the circuit breaker.

    ``ctx`` is a ``contextvars.Context`` so the thread sees the caller's
    context; ``func`` is the synchronous kickoff callable.
    """
    from .resilience import llm_circuit, run_with_timeout

    if not llm_circuit.allow_request():
        raise RuntimeError(
            f"LLM provider circuit breaker is OPEN after repeated failures. "
            f"Requests are temporarily rejected. Try again in "
            f"{int(llm_circuit.recovery_timeout)}s."
        )

    try:
        result = await run_with_timeout(
            asyncio.to_thread(ctx.run, func),
            label=label,
        )
    except Exception:
        # BUGFIX: a single handler replaces the previous duplicated
        # except-(TimeoutError, RuntimeError) / except-Exception pair —
        # both did exactly record_failure() + raise.
        llm_circuit.record_failure()
        raise
    llm_circuit.record_success()
    return result
+
+
+# ---------------------------------------------------------------------------
+# Lazy-load heavy dependencies (CrewAI, tool modules, LLM provider)
+# so that importing this module does NOT block FastAPI startup on HF Spaces.
+# The actual import happens on first call to any agent function.
+# ---------------------------------------------------------------------------
# Cache for lazily imported CrewAI classes; populated on first use so that
# importing this module never pays the CrewAI import cost.
_crewai_cache: dict = {}


def _crewai():
    """Return cached CrewAI classes (Agent, Crew, Process, Task)."""
    if _crewai_cache:
        return _crewai_cache
    from crewai import Agent, Crew, Process, Task  # noqa: F811
    _crewai_cache.update(Agent=Agent, Crew=Crew, Process=Process, Task=Task)
    return _crewai_cache
+
+
# Cache for lazily imported tool collections; filled on first call.
_tools_cache: dict = {}


def _tools():
    """Return cached tool collections (lazy-loaded on first use)."""
    if _tools_cache:
        return _tools_cache

    from .agent_tools import (
        REPOSITORY_TOOLS,
        WRITE_TOOLS,
        get_repository_context_summary,
        set_repo_context,
    )
    from .issue_tools import ISSUE_TOOLS
    from .local_tools import LOCAL_FILE_TOOLS, LOCAL_GIT_TOOLS, LOCAL_SHELL_TOOLS, LOCAL_TOOLS
    from .pr_tools import PR_TOOLS
    from .search_tools import SEARCH_TOOLS

    _tools_cache.update(
        REPOSITORY_TOOLS=REPOSITORY_TOOLS,
        WRITE_TOOLS=WRITE_TOOLS,
        set_repo_context=set_repo_context,
        get_repository_context_summary=get_repository_context_summary,
        ISSUE_TOOLS=ISSUE_TOOLS,
        PR_TOOLS=PR_TOOLS,
        SEARCH_TOOLS=SEARCH_TOOLS,
        LOCAL_TOOLS=LOCAL_TOOLS,
        LOCAL_FILE_TOOLS=LOCAL_FILE_TOOLS,
        LOCAL_GIT_TOOLS=LOCAL_GIT_TOOLS,
        LOCAL_SHELL_TOOLS=LOCAL_SHELL_TOOLS,
    )
    return _tools_cache
+
+
def _build_llm():
    """Lazy-import and call build_llm.

    The import happens at call time (not module import) so FastAPI startup
    is not blocked by the LLM-provider dependency chain.
    """
    from .llm_provider import build_llm as _build
    return _build()
+
+
class PlanFile(BaseModel):
    """Represents a file operation in a plan step."""
    # Path relative to the repository root.
    path: str
    # Operation the step performs on the file; defaults to MODIFY.
    action: Literal["CREATE", "MODIFY", "DELETE", "READ"] = "MODIFY"
+
+
class PlanStep(BaseModel):
    """A single step in the execution plan."""
    # 1-based position of the step within the plan.
    step_number: int
    title: str
    description: str
    # Important: avoid mutable default list
    files: List[PlanFile] = Field(default_factory=list)
    # Optional free-text note about risks of performing this step.
    risks: str | None = None
+
+
class PlanResult(BaseModel):
    """The complete execution plan."""
    # The user's original goal the plan addresses.
    goal: str
    # Short human-readable overview of the whole plan.
    summary: str
    steps: List[PlanStep]
+
+
+# ---------------------------------------------------------------------------
+# Markdown-fence stripper for agent file-content output.
+#
+# The Code Writer agent's system prompt asks it to return ONLY the file
+# content, no markdown code blocks. In practice every small LLM and
+# even some large ones wrap the output in ``` ... ``` (and sometimes
+# ~~~ ... ~~~). This helper removes that wrapper before the content
+# is written to disk, including a few real-world variants the previous
+# inline logic missed:
+#
+# * tilde fences ``~~~python ... ~~~``
+# * fenced block with a leading language tag (``` ```python ... ``` ```)
+# * leading or trailing whitespace / blank lines outside the fence
+# * fenced block embedded in explanatory prose
+# ("Here is the file:\n```python\n...\n```\nLet me know if…")
+#
+# The fallback is the input unchanged — if no clear single fenced block
+# is found, we leave the content alone (better to commit slightly
+# wrapped content than to corrupt it by guessing).
+# ---------------------------------------------------------------------------
+
+_FENCE_BLOCK_RE = __import__("re").compile(
+ r"(?P```|~~~)[^\n]*\n(?P.*?)\n[ \t]*(?P=f)\s*$",
+ __import__("re").DOTALL | __import__("re").MULTILINE,
+)
+
+
+def _strip_markdown_fences(content: str) -> str:
+ """Strip a wrapping markdown code fence from agent-produced file
+ content. Returns the bare body when a clean fence pair is found;
+ returns the input unchanged otherwise."""
+ if not isinstance(content, str) or not content:
+ return content
+ stripped = content.strip()
+
+ # Fast path: the whole payload is one fenced block with nothing
+ # before it. Walk every fence occurrence and pick the largest body
+ # — this gives the right answer when the agent prepends a sentence
+ # like "Here is the file:".
+ best_body: str | None = None
+ for match in _FENCE_BLOCK_RE.finditer(stripped):
+ body = match.group("body")
+ if best_body is None or len(body) > len(best_body):
+ best_body = body
+ if best_body is not None:
+ return best_body
+
+ return stripped
+
+
+async def generate_plan(
+ goal: str,
+ repo_full_name: str,
+ token: str | None = None,
+ branch_name: str | None = None,
+) -> PlanResult:
+ """Agentic planning: create a structured plan but DO NOT modify the repo.
+
+ Two-phase approach:
+ 1) Explore and understand the repository (on the correct branch)
+ 2) Create a plan based on actual repository state
+ """
+ llm = _build_llm()
+
+ owner, repo = _split_repo_full_name(repo_full_name)
+
+ # CRITICAL: Set context INCLUDING branch so tools never fall back to HEAD/main
+ active_ref = branch_name or "HEAD"
+ _tools()["set_repo_context"](owner, repo, token=token, branch=active_ref)
+
+ # CONTEXT PACK: Load project context (conventions, active use case, asset chunks)
+ # This is additive — if nothing exists, context_pack is empty and agents behave as before.
+ from pathlib import Path as _P
+ workspace_path = _P.home() / ".gitpilot" / "workspaces" / owner / repo
+ context_pack = build_context_pack(workspace_path, query=goal)
+ if context_pack:
+ logger.info("[GitPilot] Context pack loaded (%d chars)", len(context_pack))
+
+ # PHASE 1: Explore repository (correct branch)
+ logger.info("[GitPilot] Phase 1: Exploring repository %s (ref=%s)...", repo_full_name, active_ref)
+
+ repo_context_data = await _tools()["get_repository_context_summary"](owner, repo, token=token, branch=active_ref)
+ logger.info(
+ "[GitPilot] Repository context gathered: %s files found (ref=%s)",
+ repo_context_data.get("total_files", 0),
+ active_ref,
+ )
+
+ explorer = _crewai()["Agent"](
+ role="Repository Explorer",
+ goal="Thoroughly explore and document the current state of the repository",
+ backstory=(
+ "You are a meticulous code archaeologist who explores repositories "
+ "to understand their complete structure before any changes are made. "
+ "You use all available tools to build a comprehensive picture."
+ ),
+ llm=llm,
+ tools=_tools()["REPOSITORY_TOOLS"],
+ verbose=True,
+ allow_delegation=False,
+ )
+
+ explore_task = _crewai()["Task"](
+ description=dedent(f"""
+ Repository: {repo_full_name}
+ Active Ref (branch/tag/SHA): {active_ref}
+
+ Your mission is to THOROUGHLY explore this repository and document its current state.
+ You MUST use your tools to gather the following information:
+
+ 1. Call "Get repository summary" - to get overall statistics
+ 2. Call "List all files in repository" - to see EVERY file that exists
+ 3. Call "Get directory structure" - to understand the organization
+ 4. If there are key files (README.md, package.json, etc.), read them
+
+ CRITICAL: You must ACTUALLY CALL these tools. Do not make assumptions.
+
+ After exploring, provide a detailed report in this EXACT format:
+
+ REPOSITORY EXPLORATION REPORT
+ =============================
+
+ Files Found: [list all file paths you discovered]
+
+ Key Files: [list important files like README.md, .gitignore, etc.]
+
+ Directory Structure: [describe the folder organization]
+
+ File Types: [count files by extension]
+
+ Your report MUST be based on ACTUAL tool calls, not assumptions.
+ """),
+ expected_output="A detailed exploration report listing ALL files found in the repository",
+ agent=explorer,
+ )
+
+ explore_crew = _crewai()["Crew"](
+ agents=[explorer],
+ tasks=[explore_task],
+ process=_crewai()["Process"].sequential,
+ verbose=True,
+ )
+
+ def _explore():
+ return explore_crew.kickoff()
+
+ # Propagate context to thread for CrewAI execution
+ ctx = contextvars.copy_context()
+ try:
+ exploration_result = await _guarded_agent_call(ctx, _explore, label="explore_repo")
+ except _PydanticValidationError as exc:
+ # Same failure mode as the planner-side validation error: the
+ # explorer's Final Answer didn't match the expected schema, so
+ # CrewAI's converter blew up before we could even ask the
+ # planner anything. Surface the same friendly message — the
+ # underlying agent-quality issue is identical.
+ logger.warning(
+ "[GitPilot] Explorer emitted output that failed schema "
+ "validation: %s",
+ (exc.errors()[0].get("msg") if exc.errors() else "(no detail)"),
+ )
+ raise RuntimeError(
+ "The repository explorer did not return a usable result. "
+ "This usually means the LLM lost its instruction format "
+ "(common with smaller / quantised models). Re-run the "
+ "request, or switch to a stronger LLM via Settings → Provider."
+ ) from exc
+
+ exploration_report = exploration_result.raw if hasattr(exploration_result, "raw") else str(exploration_result)
+ logger.info("[GitPilot] Exploration complete. Report length: %s chars", len(exploration_report))
+
+ # PHASE 2: Plan creation based on exploration
+ logger.info("[GitPilot] Phase 2: Creating plan based on repository exploration (ref=%s)...", active_ref)
+
+ # Build planner backstory with optional context pack injection
+ _planner_backstory = (
+ "You are an experienced staff engineer who creates plans based on FACTS, not assumptions. "
+ "You have received a complete exploration report of the repository. "
+ "You ONLY create plans for files that actually exist in the exploration report. "
+ "You are extremely careful with DELETE actions - you verify the file exists "
+ "and that it's not on the 'keep' list before marking it for deletion. "
+ "When users ask to delete files, you delete individual FILES, not directory names. "
+ "When users ask to ANALYZE files and GENERATE new content (code, docs, examples), "
+ "you create plans that READ existing files and CREATE new files with generated content. "
+ "You understand that 'analyze X and create Y' means: use tools to read X, then plan to CREATE Y. "
+ "You never make changes yourself, only create detailed plans."
+ )
+ if context_pack:
+ _planner_backstory += "\n\n" + context_pack
+
+ planner = _crewai()["Agent"](
+ role="Repository Refactor Planner",
+ goal=(
+ "Design safe, step-by-step refactor plans based on ACTUAL repository state "
+ "discovered during exploration"
+ ),
+ backstory=_planner_backstory,
+ llm=llm,
+ tools=_tools()["REPOSITORY_TOOLS"],
+ verbose=True,
+ allow_delegation=False,
+ )
+
+ plan_task = _crewai()["Task"](
+ description=dedent(f"""
+ User goal: {{goal}}
+ Repository: {repo_full_name}
+ Active Ref (branch/tag/SHA): {active_ref}
+
+ REPOSITORY EXPLORATION REPORT (CRITICAL CONTEXT):
+ ==================================================
+ {exploration_report}
+ ==================================================
+
+ Based on the ACTUAL files listed in the exploration report above, create a plan.
+
+ CRITICAL RULES FOR ANALYSIS AND GENERATION TASKS:
+ - If the goal mentions "analyze" or "generate" or "create examples/demos", you MUST create NEW files
+ - When the user asks to "analyze X and create Y":
+ * Step 1: Use "Read file content" tool to analyze existing files (if needed)
+ * Step 2: Plan CREATE actions for new files (e.g., demo.py, example.py, tutorial.md)
+ - NEW files can include: Python scripts, examples, demos, tutorials, documentation
+ - Examples of analysis tasks that should CREATE files:
+ * "analyze README and generate Python code" → CREATE: demo.py, example.py
+ * "create demo based on documentation" → CREATE: demo.py, test_example.py
+ * "generate tutorial from existing code" → CREATE: tutorial.md, examples/
+ - IMPORTANT: Empty plans (steps: []) are ONLY acceptable if the goal is purely informational
+ - If the user wants something generated/created, you MUST include CREATE actions
+
+ CRITICAL RULES FOR DELETION SCENARIOS:
+ - If the goal mentions "delete files" or "keep only", you MUST identify which files to DELETE
+ - For EACH file in the exploration report:
+ * If it should be KEPT (e.g., README.md if goal says "keep README.md"), do NOT include it in the plan
+ * If it should be DELETED (e.g., all other files), mark it with action "DELETE"
+ - ONLY delete files that actually exist (check the exploration report)
+ - NEVER delete files that the user wants to keep
+ - Be explicit: if the goal is "delete all files except README.md", then:
+ * README.md should NOT appear in your plan (it's being kept)
+ * ALL other files from the exploration report should have action "DELETE"
+
+ CRITICAL RULES FOR VERIFICATION:
+ - ONLY include files that appear in the exploration report
+ - For "CREATE" actions: file must NOT be in the exploration report
+ - For "MODIFY" or "DELETE" actions: file MUST be in the exploration report
+ - If you're unsure, you can still call your tools to double-check
+
+ Your FINAL ANSWER must be a single JSON object that matches exactly this schema:
+
+ {{
+ "goal": "string describing the goal",
+ "summary": "string with overall plan summary",
+ "steps": [
+ {{
+ "step_number": 1,
+ "title": "Step title",
+ "description": "What this step does",
+ "files": [
+ {{"path": "file/path.py", "action": "CREATE"}},
+ {{"path": "another/file.py", "action": "MODIFY"}},
+ {{"path": "old/file.py", "action": "DELETE"}},
+ {{"path": "README.md", "action": "READ"}}
+ ],
+ "risks": "Optional risk description or null"
+ }}
+ ]
+ }}
+
+ CRITICAL JSON RULES:
+ - Output MUST be valid JSON.
+ - STRICTLY NO COMMENTS allowed (no // or #).
+ - Double quotes around all keys and string values.
+ - No trailing commas.
+ - "action" MUST be exactly one of: "CREATE", "MODIFY", "DELETE", "READ"
+ - "step_number" MUST be an integer starting from 1
+ - "risks" can be either a string or null (the JSON null value, without quotes)
+ - Do NOT wrap the JSON in markdown code fences
+ - Do NOT add any explanation before or after the JSON
+ - The ENTIRE response MUST be ONLY the JSON object, starting with '{{' and ending with '}}'
+ """),
+ expected_output=dedent("""
+ A single valid JSON object matching the PlanResult schema:
+ - goal: string
+ - summary: string
+ - steps: array of objects, each with:
+ - step_number: integer
+ - title: string
+ - description: string
+ - files: array of { "path": string, "action": "CREATE" | "MODIFY" | "DELETE" | "READ" }
+ - risks: string or null
+ The response must contain ONLY pure JSON (no markdown, no prose, no code fences, NO COMMENTS).
+ """),
+ agent=planner,
+ output_pydantic=PlanResult,
+ )
+
+ plan_crew = _crewai()["Crew"](
+ agents=[planner],
+ tasks=[plan_task],
+ process=_crewai()["Process"].sequential,
+ verbose=True,
+ )
+
+ def _plan():
+ return plan_crew.kickoff(inputs={"goal": goal})
+
+ ctx = contextvars.copy_context()
+ try:
+ result = await _guarded_agent_call(ctx, _plan, label="generate_plan")
+ except _PydanticValidationError as exc:
+ # CrewAI tried to coerce the planner's Final Answer into the
+ # ``PlanResult`` schema and failed. We have seen two real
+ # production payloads cause this:
+ #
+ # 1. The agent emitted a ReAct-format "Thought / Action /
+ # Action Input" block instead of JSON (its instruction
+ # formatting collapsed). CrewAI's converter still tries
+ # to find a ``{...}`` substring, lands on ``Input: {}``,
+ # validates that, and Pydantic complains:
+ # "3 validation errors for PlanResult: goal / summary
+ # / steps - Field required"
+ #
+ # 2. The agent returned plain refusal prose with an empty
+ # ``{}`` somewhere in it.
+ #
+ # Both cases are agent-quality failures, not user errors.
+ # Translate to the same friendly RuntimeError surface the
+ # refusal path already uses so the UI shows "couldn't produce
+ # a plan" rather than a 500 with a Pydantic traceback.
+ logger.warning(
+ "[GitPilot] Planner emitted output that failed PlanResult "
+ "validation (%d error%s). First error: %s",
+ len(exc.errors()),
+ "" if len(exc.errors()) == 1 else "s",
+ (exc.errors()[0].get("msg") if exc.errors() else "(no detail)"),
+ )
+ raise RuntimeError(
+ "The planner did not return a valid plan structure. This "
+ "usually means the LLM lost its instruction format mid-task "
+ "(common with smaller / quantised models). Re-run the "
+ "request, or switch to a stronger LLM via Settings → Provider."
+ ) from exc
+
+ # ------------------------------------------------------------------
+ # Post-hoc guards — catch the failure mode where the planner LLM
+ # returns either a refusal or a hallucinated stock plan that has
+ # nothing to do with the user's repository.
+ # ------------------------------------------------------------------
+ from .plan_guards import (
+ PlanHallucinationError,
+ assess_plan,
+ detect_refusal,
+ enrich_plan_with_reads,
+ )
+
+ refusal = detect_refusal(result)
+ if refusal is not None:
+ logger.warning(
+ "[GitPilot] Planner returned a refusal-shaped response (%r); "
+ "treating as failure rather than rendering a hallucinated plan.",
+ refusal,
+ )
+ raise RuntimeError(
+ "The planner refused to produce a plan. This usually means "
+ "the explorer could not read repository content. Re-run the "
+ "request, or switch to a stronger LLM via Settings → Provider."
+ )
+
+ if hasattr(result, "pydantic") and result.pydantic:
+ plan = result.pydantic
+ logger.info("[GitPilot] Plan created with %s steps (ref=%s)", len(plan.steps), active_ref)
+
+ # Cross-check the plan against the real repo file list. Suspicious
+ # placeholder-shaped paths combined with a 0% hit-rate on
+ # MODIFY/DELETE actions strongly suggests the planner hallucinated
+ # a generic stock plan rather than working from the actual repo.
+ try:
+ repo_files: list[str] = []
+ tools_cache = _tools()
+ owner, repo, token, branch = await _resolve_repo_target(tools_cache)
+ if owner and repo:
+ ctx_summary = await tools_cache["get_repository_context_summary"](
+ owner, repo, token=token, branch=branch,
+ )
+ repo_files = list(ctx_summary.get("all_files", []) or [])
+ except Exception:
+ logger.debug("[GitPilot] could not fetch repo file list for plausibility check", exc_info=True)
+ repo_files = []
+
+ if repo_files:
+ # Small / quantised LLMs (llama3:8b is the canonical case)
+ # consistently drop READ entries from plan steps even when
+ # the step's description clearly says "Read the content of
+ # README.md". Enrich the plan before the plausibility
+ # check so the Action Plan card surfaces the complete set
+ # of files the agent will touch — both the READ inputs and
+ # the CREATE / MODIFY / DELETE outputs.
+ added_reads = enrich_plan_with_reads(plan, repo_files)
+ if added_reads:
+ logger.info(
+ "[GitPilot] Auto-injected %d READ entr%s based on plan "
+ "step descriptions (small-model READ-drop mitigation).",
+ added_reads, "y" if added_reads == 1 else "ies",
+ )
+
+ assessment = assess_plan(plan, repo_files)
+ if assessment.hallucinated:
+ logger.warning(
+ "[GitPilot] Plausibility check failed (suspicious=%s, hit_ratio=%.2f); "
+ "treating plan as hallucinated.",
+ len(assessment.suspicious_paths), assessment.hit_ratio,
+ )
+ raise PlanHallucinationError(
+ "The planner produced paths that do not match this "
+ "repository. Re-run the request, or switch to a "
+ "stronger LLM via Settings → Provider.",
+ assessment=assessment,
+ )
+
+ return plan
+
+ logger.warning("[GitPilot] Unexpected planning result type: %r", type(result))
+ return result
+
+
+async def _resolve_repo_target(tools_cache: dict) -> tuple[str, str, str | None, str | None]:
+ """Best-effort lookup of (owner, repo, token, branch) for the active
+ planning session. Returns empty strings when the context is not
+ available — callers must tolerate that and skip the plausibility
+ check rather than fail."""
+ try:
+ from .agent_tools import get_repo_context
+ owner, repo, token, branch = get_repo_context()
+ return owner, repo, token, branch
+ except Exception:
+ return "", "", None, None
+
+
+# ============================================================================
+# Lite Mode — Simplified single-agent for small LLMs (< 7B parameters)
+# ============================================================================
+
+# Regex-based intent classifier — no LLM needed, runs instantly.
+_QUESTION_PATTERNS = [
+ r"\b(what|which|where|how|why|who|when|does|is|are|can|could|tell|show|list|describe|explain|summarize|overview)\b",
+ r"\?$",
+]
+_ACTION_PATTERNS = [
+ r"\b(create|add|delete|remove|modify|change|update|rename|fix|write|implement|refactor|move|generate code)\b",
+]
+
+
+def _classify_lite_intent(goal: str) -> str:
+ """Classify user intent as 'question' or 'action' using regex only."""
+ import re as _re
+ goal_lower = goal.strip().lower()
+
+ action_score = sum(1 for p in _ACTION_PATTERNS if _re.search(p, goal_lower))
+ question_score = sum(1 for p in _QUESTION_PATTERNS if _re.search(p, goal_lower))
+
+ # Action words dominate — user wants to change something
+ if action_score > 0 and action_score >= question_score:
+ return "action"
+ return "question"
+
+
async def _lite_prefetch_context(
    owner: str,
    repo: str,
    token: str | None,
    branch: str,
    key_file_limit: int = 3,
) -> str:
    """Pre-fetch repo context programmatically and format as plain text.

    Returns a string ready to inject into the LLM prompt. No LLM
    tool-calling involved — everything comes from the GitHub API.

    Parameters:
        owner / repo: GitHub repository coordinates.
        token: optional auth token for private repositories.
        branch: git ref to read the repository state from.
        key_file_limit: maximum number of "key files" (README etc.)
            whose content is inlined into the returned text.
    """
    from .github_api import get_file as _get_file

    # NOTE(review): assumes the summary dict exposes "all_files" (list),
    # "extensions" (dict), "directories" (set), "key_files" (list) —
    # confirm against get_repository_context_summary's contract.
    ctx = await _tools()["get_repository_context_summary"](owner, repo, token=token, branch=branch)

    all_files = ctx.get("all_files", [])
    extensions = ctx.get("extensions", {})
    directories = ctx.get("directories", set())
    key_files = ctx.get("key_files", [])

    # Each entry in `parts` becomes a blank-line-separated prompt section.
    parts = []

    # File listing (cap at 80 to stay within small-model context)
    if all_files:
        shown = all_files[:80]
        file_lines = "\n".join(f"  {f}" for f in shown)
        parts.append(f"Files ({len(all_files)} total):\n{file_lines}")
        if len(all_files) > 80:
            parts.append(f"  ... and {len(all_files) - 80} more")
    else:
        parts.append("Files: (none found)")

    # Extensions summary — top 10 by descending count.
    if extensions:
        ext_str = ", ".join(f"{ext} ({n})" for ext, n in sorted(extensions.items(), key=lambda x: -x[1])[:10])
        parts.append(f"File types: {ext_str}")

    # Top-level directories — alphabetical, capped at 15.
    if directories:
        dir_list = sorted(directories)[:15]
        parts.append(f"Top directories: {', '.join(dir_list)}")

    # Read content of key files (README, etc.) — give LLM real context
    for kf in key_files[:key_file_limit]:
        try:
            content = await _get_file(owner, repo, kf, token=token, ref=branch)
            # Truncate to keep prompt small for 1.5B models
            snippet = content[:1500] if content else ""
            if snippet:
                parts.append(f"--- {kf} ---\n{snippet}")
                if len(content) > 1500:
                    parts.append(f"  [truncated, {len(content)} chars total]")
        except Exception:
            pass  # File unreadable — skip silently

    return "\n\n".join(parts)
+
+
async def generate_plan_lite(
    goal: str,
    repo_full_name: str,
    token: str | None = None,
    branch_name: str | None = None,
) -> PlanResult:
    """Lite Mode planning: smart intent detection + single agent + pre-fetched context.

    The topology is:
      1. Classify intent (regex — instant, no LLM)
      2. Pre-fetch repo context from GitHub API (no LLM tool-calling)
      3. Build a short, focused prompt based on intent type
      4. Single LLM call → parse response

    For QUESTION intents: LLM answers directly, plan has 0 file actions.
    For ACTION intents: LLM lists file changes, plan has file actions.

    Parameters:
        goal: the user's natural-language request.
        repo_full_name: "owner/repo" identifier of the target repository.
        token: optional GitHub token for private repositories.
        branch_name: git ref to plan against; defaults to "HEAD".

    Returns a ``PlanResult`` whose steps either answer the question or
    list validated CREATE/MODIFY/DELETE file actions.
    """
    llm = _build_llm()

    owner, repo = _split_repo_full_name(repo_full_name)
    active_ref = branch_name or "HEAD"
    _tools()["set_repo_context"](owner, repo, token=token, branch=active_ref)

    intent = _classify_lite_intent(goal)
    logger.info("[GitPilot Lite] Intent: %s | Goal: %s", intent, goal[:80])

    # PRE-FETCH: real data from GitHub API
    logger.info("[GitPilot Lite] Pre-fetching context for %s (ref=%s)...", repo_full_name, active_ref)
    context_text = await _lite_prefetch_context(owner, repo, token, active_ref)

    # BUILD PROMPT based on intent
    if intent == "question":
        lite_prompt = (
            f"Repository: {repo_full_name} (branch: {active_ref})\n\n"
            f"{context_text}\n\n"
            f"Question: {goal}\n\n"
            f"Answer the question based on the repository information above. "
            f"Be specific — mention actual file names and directories you can see."
        )
        expected = "A direct answer to the user's question about the repository"
    else:
        lite_prompt = (
            f"Repository: {repo_full_name} (branch: {active_ref})\n\n"
            f"{context_text}\n\n"
            f"Task: {goal}\n\n"
            f"You MUST respond with ONLY a list of file actions. One per line.\n"
            f"Format: ACTION filepath\n"
            f"ACTION is one of: CREATE, MODIFY, DELETE\n\n"
            f"Examples:\n"
            f"DELETE demo.py\n"
            f"DELETE example.py\n"
            f"CREATE src/main.py\n"
            f"MODIFY README.md\n\n"
            f"Rules:\n"
            f"- Only use MODIFY or DELETE for files that EXIST in the repository.\n"
            f"- Only use CREATE for NEW files that do not exist yet.\n"
            f"- Do NOT add explanations. ONLY output ACTION lines.\n"
            f"- Output NOTHING else — no comments, no code, no explanations."
        )
        expected = "ONLY action lines like: DELETE demo.py"

    # Single tool-less agent — small models are far more reliable when
    # they only have to emit text, not drive a ReAct tool loop.
    lite_agent = _crewai()["Agent"](
        role="GitPilot Lite",
        goal="Help the user with their repository",
        backstory="You are a helpful coding assistant. Be concise.",
        llm=llm,
        tools=[],
        verbose=True,
        allow_delegation=False,
    )

    lite_task = _crewai()["Task"](
        description=lite_prompt,
        expected_output=expected,
        agent=lite_agent,
    )

    lite_crew = _crewai()["Crew"](
        agents=[lite_agent],
        tasks=[lite_task],
        process=_crewai()["Process"].sequential,
        verbose=True,
    )

    def _run_lite():
        return lite_crew.kickoff()

    # Propagate contextvars into the worker thread used by the guard.
    ctx = contextvars.copy_context()
    result = await _guarded_agent_call(ctx, _run_lite, label="lite_mode")

    raw_text = result.raw if hasattr(result, "raw") else str(result)
    logger.info("[GitPilot Lite] Response (%d chars, intent=%s)", len(raw_text), intent)

    # PARSE RESPONSE based on intent
    if intent == "question":
        # Pure Q&A — no file actions, just wrap the answer.
        # summary = full answer text (shown in the "Answer" section of the chat)
        return PlanResult(
            goal=goal,
            summary=raw_text,
            steps=[PlanStep(
                step_number=1,
                title="Answer",
                description=raw_text,
                files=[],
                risks=None,
            )],
        )

    # Action intent — parse ACTION lines
    import re as _re
    action_pattern = _re.compile(r"^(CREATE|MODIFY|DELETE)\s+(\S+)", _re.MULTILINE)
    matches = action_pattern.findall(raw_text)

    # Strip raw ACTION lines from description to get the human-readable parts
    clean_description = _re.sub(
        r"^(CREATE|MODIFY|DELETE)\s+\S+.*$", "", raw_text, flags=_re.MULTILINE,
    ).strip()

    # Get actual repo files for validation
    repo_ctx = await _tools()["get_repository_context_summary"](owner, repo, token=token, branch=active_ref)
    real_files = set(repo_ctx.get("all_files", []))
    # Lowercased view used for all case-insensitive comparisons below —
    # the goal/response are lowercased, so comparing original-case repo
    # paths ("README.md") against them directly can never match.
    real_files_lower = {f.lower() for f in real_files}

    # ── Fuzzy fallback: if the LLM didn't use ACTION format, try to infer ──
    if not matches and real_files:
        logger.info("[GitPilot Lite] No ACTION lines found — trying fuzzy extraction")
        goal_lower = goal.strip().lower()
        response_lower = raw_text.lower()

        # Pattern: "delete all files except X"
        except_match = _re.search(
            r"(?:delete|remove)\s+(?:all\s+)?(?:files?\s+)?(?:except|but|besides|other\s+than)\s+(.+)",
            goal_lower,
        )
        if except_match:
            keep_raw = except_match.group(1).strip()
            keep_files = {f.strip().rstrip(",.") for f in _re.split(r"[,\s]+and\s+|,\s*|\s+", keep_raw) if f.strip()}
            for f in real_files:
                # keep_files came from the lowercased goal — compare
                # case-insensitively, otherwise "keep README.md" would
                # fail to protect README.md and mark it for deletion.
                f_lower = f.lower()
                fname = f_lower.rsplit("/", 1)[-1]
                if f_lower not in keep_files and fname not in keep_files:
                    matches.append(("DELETE", f))
            if matches:
                logger.info("[GitPilot Lite] Fuzzy: keep=%s, delete=%d files", keep_files, len(matches))

        # Pattern: LLM mentions specific filenames with delete/remove verbs.
        # One pass over the files (not verb × file) so each file is
        # appended at most once regardless of how many verbs appear.
        if not matches:
            deletion_verbs = ("delete", "remove", "rm", "git rm")
            if any(v in response_lower or v in goal_lower for v in deletion_verbs):
                for f in real_files:
                    f_lower = f.lower()
                    if f_lower in response_lower or f_lower in goal_lower:
                        matches.append(("DELETE", f))

        # Pattern: LLM mentions files with create/add verbs
        if not matches:
            create_match = _re.findall(r"(?:create|add|write|generate)\s+(\S+\.(?:py|js|ts|md|txt|yaml|json|sh))", goal_lower)
            for path in create_match:
                # `path` is already lowercase (extracted from goal_lower),
                # so check against the lowercased file set.
                if path not in real_files_lower:
                    matches.append(("CREATE", path))

    # Validate extracted actions against the real file list, dropping
    # duplicates (the regex/fuzzy paths can emit the same pair twice).
    valid_files = []
    seen: set[tuple[str, str]] = set()
    for action, path in matches:
        path = path.strip().rstrip(",-:")
        if (action, path) in seen:
            continue
        seen.add((action, path))
        if action in ("MODIFY", "DELETE"):
            if path in real_files:
                valid_files.append(PlanFile(path=path, action=action))
            else:
                logger.warning("[GitPilot Lite] Skipping %s %s — file not in repo", action, path)
        elif action == "CREATE":
            if path not in real_files:
                valid_files.append(PlanFile(path=path, action=action))

    steps = []
    if valid_files:
        # Build a clean summary: "Create 2 files, modify 1 file"
        counts = {}
        for f in valid_files:
            counts[f.action] = counts.get(f.action, 0) + 1
        action_labels = {"CREATE": "create", "MODIFY": "modify", "DELETE": "delete"}
        summary_parts = []
        for act in ("CREATE", "MODIFY", "DELETE"):
            n = counts.get(act, 0)
            if n > 0:
                label = action_labels[act]
                summary_parts.append(f"{label} {n} file{'s' if n > 1 else ''}")
        clean_summary = "Plan: " + ", ".join(summary_parts) + "."

        # Use the clean description if available, otherwise a generic one
        step_desc = clean_description if clean_description else f"Apply changes to {len(valid_files)} file(s) in {repo_full_name}."

        steps.append(PlanStep(
            step_number=1,
            title="Execute changes",
            description=step_desc,
            files=valid_files,
            risks=None,
        ))
        return PlanResult(goal=goal, summary=clean_summary, steps=steps)

    # No valid files after validation — the LLM hallucinated paths.
    # Return as a Q&A-style answer (no Action Plan section shown in UI).
    fallback_text = clean_description if clean_description else raw_text
    # Strip any remaining ACTION-like artifacts
    fallback_text = _re.sub(r"\bACTION\b", "", fallback_text).strip()
    if not fallback_text:
        fallback_text = (
            f"I analyzed {repo_full_name} but couldn't determine specific file "
            f"changes for your request. The repository has {len(real_files)} file(s). "
            f"Try being more specific about what you'd like to create or modify."
        )

    steps.append(PlanStep(
        step_number=1,
        title="Analysis",
        description=fallback_text,
        files=[],
        risks=None,
    ))
    return PlanResult(goal=goal, summary=fallback_text, steps=steps)
+
+
async def execute_plan_lite(
    plan: PlanResult,
    repo_full_name: str,
    token: str | None = None,
    branch_name: str | None = None,
) -> dict:
    """Lite Mode execution: single agent generates file content with simplified prompts.

    Unlike the standard execute_plan, the Lite version:
    - Uses a single short prompt per file (no CRITICAL INSTRUCTIONS blocks)
    - Does not require the LLM to call tools
    - Pre-reads existing file content and injects it into the prompt

    Parameters:
        plan: the approved ``PlanResult`` to apply.
        repo_full_name: "owner/repo" identifier of the target repository.
        token: optional GitHub token for write access.
        branch_name: branch to commit to; auto-generated from the goal
            when ``None``.

    Returns a dict with status, human-readable message, branch name/URL,
    a per-step execution log, and ``lite_mode: True``.
    """
    # NOTE(review): get_repo is imported but not used in this function.
    from .github_api import get_file, put_file, create_branch, get_repo
    import re
    import time

    owner, repo = _split_repo_full_name(repo_full_name)
    execution_steps: list[dict] = []
    llm = _build_llm()

    # Derive a branch name from the goal when the caller didn't supply
    # one: slugified goal (≤40 chars) + last 6 digits of the epoch for
    # uniqueness, e.g. "gitpilot-delete-old-demos-123456".
    if branch_name is None:
        sanitized = re.sub(r"[^a-z0-9-]+", "-", plan.goal.lower())
        sanitized = sanitized[:40].strip("-")
        timestamp = str(int(time.time()))[-6:]
        branch_name = f"gitpilot-{sanitized}-{timestamp}"

    try:
        await create_branch(owner, repo, branch_name, from_ref="HEAD", token=token)
    except HTTPException:
        pass  # Branch may already exist

    # Point the repository tools at the execution branch so any reads
    # during this run see the in-progress state.
    _tools()["set_repo_context"](owner, repo, token=token, branch=branch_name)

    for step in plan.steps:
        step_summary = f"Step {step.step_number}: {step.title}"

        for file in step.files:
            try:
                if file.action == "CREATE":
                    # SIMPLIFIED PROMPT for small LLMs — short, no tool use,
                    # step description truncated to keep the context tiny.
                    create_prompt = (
                        f"Write the content for a new file: {file.path}\n"
                        f"Goal: {plan.goal}\n"
                        f"Context: {step.description[:300]}\n\n"
                        f"Return ONLY the file content, nothing else."
                    )

                    # Fresh single-purpose, tool-less agent per file.
                    lite_agent = _crewai()["Agent"](
                        role="Code Writer",
                        goal="Write file content",
                        backstory="You write clean, working code.",
                        llm=llm, tools=[], verbose=False, allow_delegation=False,
                    )
                    task = _crewai()["Task"](
                        description=create_prompt,
                        expected_output=f"Content for {file.path}",
                        agent=lite_agent,
                    )
                    crew = _crewai()["Crew"](agents=[lite_agent], tasks=[task], process=_crewai()["Process"].sequential, verbose=False)

                    def _create():
                        # Runs in a worker thread via _guarded_agent_call.
                        r = crew.kickoff()
                        return r.raw if hasattr(r, "raw") else str(r)

                    ctx = contextvars.copy_context()
                    content = await _guarded_agent_call(ctx, _create, label="create_file")
                    # LLMs often wrap output in ``` fences despite instructions.
                    content = _strip_markdown_fences(content)

                    await put_file(owner, repo, file.path, content,
                                   f"GitPilot Lite: Create {file.path}", token=token, branch=branch_name)
                    step_summary += f"\n  + Created {file.path}"

                elif file.action == "MODIFY":
                    try:
                        # Pre-read current content so the LLM sees real state
                        # (truncated to 2000 chars for small-model context).
                        existing = await get_file(owner, repo, file.path, token=token, ref=branch_name)
                        modify_prompt = (
                            f"Modify this file: {file.path}\n"
                            f"Goal: {plan.goal}\n"
                            f"What to change: {step.description[:300]}\n\n"
                            f"Current content:\n{existing[:2000]}\n\n"
                            f"Return the complete modified file content, nothing else."
                        )

                        lite_agent = _crewai()["Agent"](
                            role="Code Writer",
                            goal="Modify file content",
                            backstory="You write clean, working code.",
                            llm=llm, tools=[], verbose=False, allow_delegation=False,
                        )
                        task = _crewai()["Task"](description=modify_prompt, expected_output=f"Modified {file.path}", agent=lite_agent)
                        crew = _crewai()["Crew"](agents=[lite_agent], tasks=[task], process=_crewai()["Process"].sequential, verbose=False)

                        def _modify():
                            r = crew.kickoff()
                            return r.raw if hasattr(r, "raw") else str(r)

                        ctx = contextvars.copy_context()
                        modified = await _guarded_agent_call(ctx, _modify, label="modify_file")
                        # Manual fence stripping: drop the opening ``` line
                        # and, when present, the closing ``` line.
                        modified = modified.strip()
                        if modified.startswith("```"):
                            lines = modified.split("\n")
                            if lines[-1].strip() == "```":
                                modified = "\n".join(lines[1:-1])
                            else:
                                modified = "\n".join(lines[1:])

                        await put_file(owner, repo, file.path, modified,
                                       f"GitPilot Lite: Modify {file.path}", token=token, branch=branch_name)
                        step_summary += f"\n  ~ Modified {file.path}"
                    except Exception as e:
                        # Per-file failure: log and record, keep executing.
                        logger.exception("Lite: Failed to modify %s: %s", file.path, e)
                        step_summary += f"\n  ! Failed to modify {file.path}: {e}"

                elif file.action == "DELETE":
                    from .github_api import delete_file
                    try:
                        await delete_file(owner, repo, file.path,
                                          f"GitPilot Lite: Delete {file.path}", token=token, branch=branch_name)
                        step_summary += f"\n  - Deleted {file.path}"
                    except Exception as e:
                        logger.exception("Lite: Failed to delete %s: %s", file.path, e)
                        step_summary += f"\n  ! Failed to delete {file.path}: {e}"

                elif file.action == "READ":
                    # READ entries are informational only — no write occurs.
                    step_summary += f"\n  i Inspected {file.path}"

            except Exception as e:
                # Catch-all so one bad file never aborts the whole plan.
                logger.exception("Lite: Error processing %s: %s", file.path, e)
                step_summary += f"\n  ! Error: {file.path}: {e}"

        execution_steps.append({"step_number": step.step_number, "summary": step_summary})

    return {
        "status": "completed",
        "message": f"Lite Mode: executed {len(plan.steps)} steps on {repo_full_name} (branch '{branch_name}')",
        "branch": branch_name,
        "branch_url": f"https://github.com/{repo_full_name}/tree/{branch_name}",
        "executionLog": {"steps": execution_steps},
        "lite_mode": True,
    }
+
+
+async def execute_plan(
+ plan: PlanResult,
+ repo_full_name: str,
+ token: str | None = None,
+ branch_name: str | None = None,
+) -> dict:
+ """Execute the approved plan by applying changes to the GitHub repository."""
+ from .github_api import get_file, put_file, create_branch, get_repo
+ import re
+ import time
+
+ owner, repo = _split_repo_full_name(repo_full_name)
+ execution_steps: list[dict] = []
+ llm = _build_llm()
+
+ if branch_name is None:
+ sanitized = re.sub(r"[^a-z0-9-]+", "-", plan.goal.lower())
+ sanitized = sanitized[:40].strip("-")
+ timestamp = str(int(time.time()))[-6:]
+ branch_name = f"gitpilot-{sanitized}-{timestamp}"
+
+ try:
+ logger.info("[GitPilot] Creating feature branch: %s", branch_name)
+ await create_branch(owner, repo, branch_name, from_ref="HEAD", token=token)
+ logger.info("[GitPilot] Branch created successfully: %s", branch_name)
+ except HTTPException as e:
+ logger.warning(
+ "[GitPilot] Branch %s already exists or creation failed: %s. Attempting to use existing branch.",
+ branch_name,
+ e.detail,
+ )
+
+ # CRITICAL: ensure tools read from the ACTIVE execution branch
+ _tools()["set_repo_context"](owner, repo, token=token, branch=branch_name)
+
+ code_writer = _crewai()["Agent"](
+ role="Expert Code Writer",
+ goal="Generate high-quality, production-ready code and documentation based on requirements.",
+ backstory=(
+ "You are a senior software engineer with expertise in multiple programming languages. "
+ "You write clean, well-documented, and functional code. "
+ "You understand context and generate appropriate content for each file type. "
+ "For documentation files (README.md, docs, etc.), you write clear, comprehensive content. "
+ "For code files, you follow best practices and include proper comments. "
+ "IMPORTANT: You ALWAYS use repository exploration tools before creating new content. "
+ "When asked to create demos/examples/tutorials, you first READ the existing files to understand "
+ "the project, then generate content that is relevant and accurate. "
+ "You never create generic examples - you create content specific to THIS repository."
+ ),
+ llm=llm,
+ tools=_tools()["REPOSITORY_TOOLS"],
+ verbose=True,
+ allow_delegation=False,
+ )
+
+ for step in plan.steps:
+ step_summary = f"Step {step.step_number}: {step.title}"
+
+ for file in step.files:
+ try:
+ if file.action == "CREATE":
+ create_task = _crewai()["Task"](
+ description=(
+ f"Generate complete content for a new file: {file.path}\n\n"
+ f"Overall Goal: {plan.goal}\n"
+ f"Step Context: {step.description}\n\n"
+ "CRITICAL INSTRUCTIONS:\n"
+ "- You have access to repository exploration tools - USE THEM!\n"
+ "- If the goal mentions 'analyze' or 'based on', first read the relevant files:\n"
+ " * Use 'Read file content' to read existing files (README.md, source code, etc.)\n"
+ " * Use 'List all files in repository' to see what files exist\n"
+ "- Generate content that is INFORMED by the actual repository content\n"
+ "- If creating a demo/example, make it relevant to the actual project\n"
+ "- If creating documentation, reference actual files and code in the repository\n\n"
+ "Requirements:\n"
+ f"- Create production-ready content appropriate for {file.path}\n"
+ "- If it's a documentation file (.md, .txt, .rst), write comprehensive, well-structured documentation\n"
+ "- If it's a code file, include proper imports, comments, and follow best practices\n"
+ "- If it's a configuration file, include sensible defaults and comments\n"
+ "- Make the content complete and ready to use\n"
+ "- Do NOT include placeholder comments like 'TODO' or 'IMPLEMENT THIS'\n"
+ "- The content should be fully functional and informative\n\n"
+ "Return ONLY the file content, no explanations or markdown code blocks."
+ ),
+ expected_output=f"Complete, production-ready content for {file.path}",
+ agent=code_writer,
+ )
+
+ def _create():
+ crew = _crewai()["Crew"](
+ agents=[code_writer],
+ tasks=[create_task],
+ process=_crewai()["Process"].sequential,
+ verbose=False,
+ )
+ result = crew.kickoff()
+ if hasattr(result, "raw"):
+ return result.raw
+ return str(result)
+
+ ctx = contextvars.copy_context()
+ content = await _guarded_agent_call(ctx, _create, label="exec_create_file")
+ content = _strip_markdown_fences(content)
+
+ await put_file(
+ owner,
+ repo,
+ file.path,
+ content,
+ f"GitPilot: Create {file.path} - {step.title}",
+ token=token,
+ branch=branch_name,
+ )
+ step_summary += f"\n ✓ Created {file.path}"
+
+ elif file.action == "MODIFY":
+ try:
+ existing_content = await get_file(
+ owner, repo, file.path, token=token, ref=branch_name
+ )
+
+ modify_task = _crewai()["Task"](
+ description=(
+ f"Modify the existing file: {file.path}\n\n"
+ f"Overall Goal: {plan.goal}\n"
+ f"Step Context: {step.description}\n\n"
+ f"Current File Content:\n"
+ f"---\n{existing_content}\n---\n\n"
+ "Requirements:\n"
+ "- Make the changes described in the step context\n"
+ "- Preserve the existing structure and format\n"
+ "- For documentation: update or add relevant sections\n"
+ "- For code: add/modify functions, imports, or logic as needed\n"
+ "- Ensure the result is complete and functional\n"
+ "- Do NOT just add comments - make real, substantive changes\n\n"
+ "Return ONLY the complete modified file content, no explanations."
+ ),
+ expected_output=f"Complete, modified content for {file.path}",
+ agent=code_writer,
+ )
+
+ def _modify():
+ crew = _crewai()["Crew"](
+ agents=[code_writer],
+ tasks=[modify_task],
+ process=_crewai()["Process"].sequential,
+ verbose=False,
+ )
+ result = crew.kickoff()
+ if hasattr(result, "raw"):
+ return result.raw
+ return str(result)
+
+ ctx = contextvars.copy_context()
+ modified_content = await _guarded_agent_call(ctx, _modify, label="exec_modify_file")
+
+ modified_content = modified_content.strip()
+ if modified_content.startswith("```"):
+ lines = modified_content.split("\n")
+ if lines[-1].strip() == "```":
+ modified_content = "\n".join(lines[1:-1])
+ else:
+ modified_content = "\n".join(lines[1:])
+
+ await put_file(
+ owner,
+ repo,
+ file.path,
+ modified_content,
+ f"GitPilot: Modify {file.path} - {step.title}",
+ token=token,
+ branch=branch_name,
+ )
+ step_summary += f"\n ✓ Modified {file.path}"
+ except Exception as e: # noqa: BLE001
+ logger.exception(
+ "Failed to modify file %s in step %s: %s",
+ file.path,
+ step.step_number,
+ e,
+ )
+ step_summary += f"\n ✗ Failed to modify {file.path}: {str(e)}"
+
+ elif file.action == "DELETE":
+ from .github_api import delete_file
+
+ try:
+ await delete_file(
+ owner,
+ repo,
+ file.path,
+ f"GitPilot: Delete {file.path} - {step.title}",
+ token=token,
+ branch=branch_name,
+ )
+ step_summary += f"\n ✓ Deleted {file.path}"
+ except Exception as e: # noqa: BLE001
+ logger.exception(
+ "Failed to delete file %s in step %s: %s",
+ file.path,
+ step.step_number,
+ e,
+ )
+ step_summary += f"\n ✗ Failed to delete {file.path}: {str(e)}"
+
+ elif file.action == "READ":
+ step_summary += f"\n ℹ️ READ-only: inspected {file.path}"
+
+ except Exception as e: # noqa: BLE001
+ logger.exception(
+ "Error processing file %s in step %s: %s",
+ file.path,
+ step.step_number,
+ e,
+ )
+ step_summary += f"\n ✗ Error processing {file.path}: {str(e)}"
+
+ execution_steps.append({"step_number": step.step_number, "summary": step_summary})
+
+ return {
+ "status": "completed",
+ "message": f"Successfully executed {len(plan.steps)} steps on {repo_full_name} in branch '{branch_name}'",
+ "branch": branch_name,
+ "branch_url": f"https://github.com/{repo_full_name}/tree/{branch_name}",
+ "executionLog": {"steps": execution_steps},
+ }
+
+
+# ============================================================================
+# New Agent Builders (v2 upgrade)
+# ============================================================================
+
def _build_issue_agent(llm) -> Agent:
    """Construct the CrewAI agent dedicated to GitHub issue operations.

    Uses the ISSUE_TOOLS bundle only; delegation is disabled so the issue
    agent always acts directly.
    """
    story = (
        "You are an expert in GitHub issue management. You can create new issues "
        "with detailed descriptions, modify existing issues and their metadata, "
        "manage labels, milestones, and assignees, and add comments. "
        "You ensure issues are well-organised and provide clear status updates. "
        "When creating issues you always include a concise title and a structured body."
    )
    agent_cls = _crewai()["Agent"]
    return agent_cls(
        role="GitHub Issue Management Specialist",
        goal="Create, modify, and manage GitHub issues with proper metadata and relationships",
        backstory=story,
        llm=llm,
        tools=_tools()["ISSUE_TOOLS"],
        verbose=True,
        allow_delegation=False,
    )
+
+
def _build_pr_agent(llm) -> Agent:
    """Construct the agent that handles branch, commit, and pull-request work.

    Combines the PR toolset with the write toolset so the agent can both
    manage PRs and push file changes.
    """
    story = (
        "You are skilled in pull request workflows. You can create branches, "
        "create PRs from feature branches, list open PRs, inspect changed files, "
        "add reviews, and merge PRs using the appropriate strategy. "
        "You always verify the source and target branches before acting."
    )
    toolset = _tools()["PR_TOOLS"] + _tools()["WRITE_TOOLS"]
    agent_cls = _crewai()["Agent"]
    return agent_cls(
        role="Pull Request Management Specialist",
        goal="Create branches, commit changes, and manage pull requests",
        backstory=story,
        llm=llm,
        tools=toolset,
        verbose=True,
        allow_delegation=False,
    )
+
+
def _build_search_agent(llm) -> Agent:
    """Construct the agent that searches GitHub for code, repos, issues, and users.

    Gets both search and repository toolsets so it can follow up hits with
    repository inspection.
    """
    story = (
        "You are an expert at finding resources on GitHub. You can search for "
        "code by keywords, symbols, or patterns within a repository or globally. "
        "You can find users and organisations, discover repositories by topic, "
        "and locate issues or PRs matching specific criteria. "
        "You present results in a clear, structured format."
    )
    toolset = _tools()["SEARCH_TOOLS"] + _tools()["REPOSITORY_TOOLS"]
    agent_cls = _crewai()["Agent"]
    return agent_cls(
        role="Search & Discovery Specialist",
        goal="Find code, repositories, issues, and users across GitHub",
        backstory=story,
        llm=llm,
        tools=toolset,
        verbose=True,
        allow_delegation=False,
    )
+
+
def _build_code_review_agent(llm) -> Agent:
    """Construct the code-review agent.

    It can browse the repository, inspect PR files, and search — hence the
    combined repository + PR + search toolset.
    """
    story = (
        "You are an experienced code reviewer who analyses code for quality, "
        "security issues, and performance problems. You inspect files in the "
        "repository, read their contents, and provide constructive feedback. "
        "For pull requests you examine the changed files and produce a detailed "
        "review with actionable suggestions."
    )
    toolset = _tools()["REPOSITORY_TOOLS"] + _tools()["PR_TOOLS"] + _tools()["SEARCH_TOOLS"]
    agent_cls = _crewai()["Agent"]
    return agent_cls(
        role="Code Review & Analysis Specialist",
        goal="Review code quality, identify patterns, and suggest improvements",
        backstory=story,
        llm=llm,
        tools=toolset,
        verbose=True,
        allow_delegation=False,
    )
+
+
def _build_learning_agent(llm) -> Agent:
    """Construct the agent that answers GitHub how-to / best-practice questions.

    Has read-only repository and search tools so advice can be contextualised
    against the active repository.
    """
    story = (
        "You are a GitHub expert who helps users understand GitHub Actions, "
        "CI/CD workflows, authentication, pull request best practices, "
        "repository maintenance, GitHub Pages, Packages, Discussions, "
        "and security best practices. You provide clear, actionable guidance "
        "with examples. You can also read the repository to give contextualised advice."
    )
    toolset = _tools()["REPOSITORY_TOOLS"] + _tools()["SEARCH_TOOLS"]
    agent_cls = _crewai()["Agent"]
    return agent_cls(
        role="GitHub Learning & Guidance Specialist",
        goal="Provide expert guidance on GitHub features, best practices, and workflows",
        backstory=story,
        llm=llm,
        tools=toolset,
        verbose=True,
        allow_delegation=False,
    )
+
+
def _build_local_editor_agent(llm) -> Agent:
    """Phase 1: agent that edits files directly in the local workspace.

    Pairs local file I/O tools with local git tools so every edit can be
    verified with a diff afterwards.
    """
    story = (
        "You are an expert code editor that operates directly on the local "
        "filesystem. You read files, make precise edits, write new files, "
        "and verify changes using git diff. You always check file contents "
        "before editing and confirm results after. You follow project "
        "conventions and never introduce breaking changes."
    )
    toolset = _tools()["LOCAL_FILE_TOOLS"] + _tools()["LOCAL_GIT_TOOLS"]
    agent_cls = _crewai()["Agent"]
    return agent_cls(
        role="Local File Editor",
        goal="Read, write, and modify files in the local workspace with verification",
        backstory=story,
        llm=llm,
        tools=toolset,
        verbose=True,
        allow_delegation=False,
    )
+
+
def _build_terminal_agent(llm) -> Agent:
    """Phase 1: agent that runs shell commands in a sandboxed workspace.

    Combines the sandboxed shell tools with local git tools; the backstory
    instructs it to refuse destructive commands.
    """
    story = (
        "You are a terminal expert that runs shell commands in a sandboxed "
        "environment. You can run tests, linters, build tools, and other "
        "development commands. You always report exit codes and output. "
        "You refuse to run destructive commands like rm -rf / or format disks. "
        "You explain command output clearly to the user."
    )
    toolset = _tools()["LOCAL_SHELL_TOOLS"] + _tools()["LOCAL_GIT_TOOLS"]
    agent_cls = _crewai()["Agent"]
    return agent_cls(
        role="Terminal & Shell Executor",
        goal="Execute shell commands safely in the workspace and report results",
        backstory=story,
        llm=llm,
        tools=toolset,
        verbose=True,
        allow_delegation=False,
    )
+
+
+# ============================================================================
+# Unified Dispatcher (v2 upgrade)
+# ============================================================================
+
async def dispatch_request(
    user_request: Optional[str] = None,
    repo_full_name: Optional[str] = None,
    token: Optional[str] = None,
    branch_name: Optional[str] = None,
    topology_id: Optional[str] = None,
    # -----------------------------------------------------------------
    # Backwards-compatible keyword arguments.
    # Older callers (notably early WebSocket and A2A adapters) used:
    #     dispatch_request(repo_owner=..., repo_name=..., message=...)
    # Keeping these kwargs prevents crashes when frontend/backend drift.
    # -----------------------------------------------------------------
    repo_owner: Optional[str] = None,
    repo_name: Optional[str] = None,
    message: Optional[str] = None,
    **_ignored_kwargs: Any,
) -> Dict[str, Any]:
    """Route a free-form user request to the appropriate agent(s) and return the result.

    This is the single entry-point for the new conversational mode. For backwards
    compatibility the original ``generate_plan`` / ``execute_plan`` pair is still
    available and untouched.

    If *topology_id* is supplied, topology-aware routing is used:
      - ``classify_and_dispatch`` -> falls through to the existing agent_router
      - ``always_main_agent``     -> all requests go to the primary agent (T2)
      - ``fixed_sequence``        -> a CrewAI sequential crew is built from the
        topology's agent sequence (T3-T7)

    When *topology_id* is ``None``, behaviour is identical to the original v2
    dispatcher.

    Returns a dict whose shape depends on the chosen path: a ``plan_execute``
    payload (plan pending approval), a ``topology_pipeline`` payload, or a
    generic ``{"category", "agents_used", "result", "entity_number"}`` payload.

    Raises:
        ValueError: if neither a request text nor a repository could be
            resolved from the new or legacy keyword arguments.
    """
    # ---- Input normalization / compat layer ----
    # Accept legacy 'message' and 'repo_owner'/'repo_name' spellings.
    if user_request is None and message is not None:
        user_request = message
    if repo_full_name is None and repo_owner and repo_name:
        repo_full_name = f"{repo_owner}/{repo_name}"

    if not user_request:
        raise ValueError("dispatch_request: missing user_request (or legacy 'message')")
    if not repo_full_name:
        raise ValueError("dispatch_request: missing repo_full_name (or legacy repo_owner/repo_name)")

    # ---------- Lite Mode check (additive, non-destructive) ----------
    # Lite mode activates if ANY of:
    #   - the explicit setting is on
    #   - the topology is "lite_mode"
    #   - the active model is incompatible with multi-agent ReAct prompts
    #     (deepseek-r1, qwq, small Ollama models)
    from .settings import get_settings as _get_settings

    _current_settings = _get_settings()

    # Explicit topology_id must always win over the saved preference.
    _resolved_tid = topology_id or get_saved_topology_preference()

    # Auto-detect models that can't handle multi-agent ReAct format
    # (deepseek-r1, qwq, small local models) — route them to Lite Mode
    # regardless of explicit settings.
    _auto_lite = _is_incompatible_model(_current_settings)

    # Lite mode only applies when explicitly selected or globally enabled,
    # and it must not override an explicit non-lite topology choice.
    _lite_active = (
        _current_settings.lite_mode
        or _resolved_tid == "lite_mode"
        or _auto_lite
    )

    # Do not force lite mode when the caller explicitly requested another topology.
    if topology_id and topology_id != "lite_mode":
        _lite_active = False

    if _auto_lite and _lite_active:
        logger.info(
            "[GitPilot] Auto-routed to Lite Mode: active model is incompatible "
            "with multi-agent ReAct (deepseek-r1, qwq, or small local model)"
        )

    if _lite_active:
        logger.info("[GitPilot Lite] Lite Mode active — using simplified single-agent path")
        plan = await generate_plan_lite(
            user_request,
            repo_full_name,
            token=token,
            branch_name=branch_name,
        )
        return {
            "category": "plan_execute",
            "workflow": "plan_execute",
            "plan": plan.model_dump() if hasattr(plan, "model_dump") else plan,
            "message": "Lite Mode: Plan generated. Review and approve to execute.",
            "lite_mode": True,
        }

    # ---------- Topology-aware routing (additive) ----------
    # Resolve the topology object exactly once. (A previous revision resolved
    # it twice in a row, discarding the first result.)
    _active_topology = get_topology(_resolved_tid) if _resolved_tid else None

    if _active_topology and _active_topology.routing_policy.strategy == RoutingStrategy.fixed_sequence:
        # Pipeline topologies (T3-T7): build a multi-task sequential crew
        return await _dispatch_pipeline(
            _active_topology, user_request, repo_full_name,
            token=token, branch_name=branch_name,
        )

    # For ``classify_and_dispatch`` (T1/default) and ``always_main_agent`` (T2)
    # we fall through to the existing routing. T2's react_loop execution will
    # be wired in a future phase; for now it uses the same single-task path
    # but the *visualization* already shows the correct graph.

    workflow = route_request(user_request)
    logger.info(
        "[GitPilot] Router: category=%s agents=%s desc=%s",
        workflow.category.value,
        [a.value for a in workflow.agents],
        workflow.description,
    )

    # Phase 2: Smart model routing (informational — the selection is logged
    # but the default LLM is still used below).
    try:
        from .smart_model_router import ModelRouter
        _router = ModelRouter()
        selection = _router.select(user_request, category=workflow.category.value)
        logger.info(
            "[GitPilot] ModelRouter: model=%s tier=%s complexity=%s reason=%s",
            selection.model, selection.tier.value, selection.complexity.value, selection.reason,
        )
    except Exception:
        # Model routing is optional; fall through to the default LLM, but
        # leave a trace so a broken router is diagnosable.
        logger.debug("[GitPilot] ModelRouter unavailable; using default LLM", exc_info=True)

    # Set repo context if needed
    if workflow.requires_repo_context and repo_full_name:
        owner, repo = _split_repo_full_name(repo_full_name)
        active_ref = branch_name or "HEAD"
        _tools()["set_repo_context"](owner, repo, token=token, branch=active_ref)

    llm = _build_llm()

    # If it's the existing plan+execute workflow, delegate there
    if workflow.category == RequestCategory.PLAN_EXECUTE:
        plan = await generate_plan(user_request, repo_full_name, token=token, branch_name=branch_name)
        return {
            "category": workflow.category.value,
            "workflow": "plan_execute",
            "plan": plan.model_dump() if hasattr(plan, "model_dump") else plan,
            "message": "Plan generated. Review and approve to execute.",
        }

    # CONTEXT PACK: Load project context for non-plan agents too (additive).
    # Best-effort: a missing local workspace simply yields no context.
    _dispatch_ctx_pack = ""
    if repo_full_name:
        try:
            _d_owner, _d_repo = repo_full_name.split("/")
            from pathlib import Path as _P
            _d_ws = _P.home() / ".gitpilot" / "workspaces" / _d_owner / _d_repo
            _dispatch_ctx_pack = build_context_pack(_d_ws, query=user_request)
        except Exception:
            logger.debug("[GitPilot] Context pack unavailable for %s", repo_full_name, exc_info=True)

    # Build the task description
    task_description = _build_task_description(workflow, user_request, repo_full_name, branch_name)
    if _dispatch_ctx_pack:
        task_description += "\n\n" + _dispatch_ctx_pack

    # Build agent(s) for this workflow
    agents = [_get_agent(agent_type, llm) for agent_type in workflow.agents]

    # Use the first agent as the primary executor
    primary_agent = agents[0]
    task = _crewai()["Task"](
        description=task_description,
        expected_output="A clear, structured response addressing the user request",
        agent=primary_agent,
    )

    crew = _crewai()["Crew"](
        agents=agents,
        tasks=[task],
        process=_crewai()["Process"].sequential,
        verbose=True,
    )

    def _run():
        # crew.kickoff() is blocking; it runs inside _guarded_agent_call.
        result = crew.kickoff()
        if hasattr(result, "raw"):
            return result.raw
        return str(result)

    ctx = contextvars.copy_context()
    result_text = await _guarded_agent_call(ctx, _run, label="dispatch")

    return {
        "category": workflow.category.value,
        "agents_used": [a.value for a in workflow.agents],
        "result": result_text,
        "entity_number": workflow.entity_number,
    }
+
+
+# ============================================================================
+# Topology Pipeline Dispatcher (additive — T3-T7)
+# ============================================================================
+
+# Maps topology agent IDs to AgentType enum + task descriptions.
+# This bridge lets the topology registry reference agents by string ID while
+# reusing the existing _get_agent() builders.
_TOPO_AGENT_MAP = {
    # Read-only exploration phase: maps the repo and reports findings.
    "explorer": (AgentType.EXPLORER, "Explore the codebase: map project structure, discover relevant files, "
                 "identify patterns, dependencies, and test conventions. "
                 "Return a structured analysis with file paths and key findings."),
    # Planning phase: consumes the explorer's output via task context chaining.
    "planner": (AgentType.PLANNER, "Based on the exploration results, create a detailed implementation plan. "
                "Include: files to modify, files to create, step-by-step order, "
                "and test strategy. Consider trade-offs and alternatives."),
    # Write-capable phase (see _write_agents in _dispatch_pipeline).
    "developer": (AgentType.CODE_WRITER, "Execute the implementation plan step by step. For each step: "
                  "make the code change, then run tests. If tests fail, fix the issue "
                  "before moving to the next step. Follow project coding standards."),
    # Review phase: read-only quality gate after the developer phase.
    "reviewer": (AgentType.CODE_REVIEWER, "Review all code changes. Check for: security vulnerabilities, "
                 "code quality, test coverage, performance issues. "
                 "Organise findings by severity: Critical, Warning, Suggestion."),
    # Write-capable phase: branch/commit/PR creation at the end of a pipeline.
    "git_agent": (AgentType.PR_MANAGER, "Create a branch, commit all changes with a descriptive message, "
                  "push the branch, and create a GitHub PR. PR should summarise "
                  "the changes clearly with a test plan."),
}
+
+
async def _dispatch_pipeline(
    topology,
    user_request: str,
    repo_full_name: str,
    token: Optional[str] = None,
    branch_name: Optional[str] = None,
) -> Dict[str, Any]:
    """Run a topology's fixed-sequence pipeline as a multi-task CrewAI crew.

    Each agent in the sequence gets its own Task. Tasks are linked via
    CrewAI's ``context`` parameter so the output of step N feeds step N+1.

    Args:
        topology: Topology object whose ``routing_policy.sequence`` lists
            agent IDs (keys of ``_TOPO_AGENT_MAP``).
        user_request: Free-form request text from the user.
        repo_full_name: ``owner/repo`` string.
        token: Optional GitHub token forwarded to branch/tool setup.
        branch_name: Optional pre-existing working branch; when absent and the
            pipeline contains write-capable agents, a fresh branch is created.

    Returns:
        A ``topology_pipeline`` result dict; includes ``branch``/``branch_url``
        when a working branch was used by write-capable agents, or an
        ``{"error": ...}`` dict when the topology is unusable.
    """
    sequence = topology.routing_policy.sequence or []
    if not sequence:
        return {"error": "Topology has no agent sequence defined"}

    # Determine if this pipeline has write-capable agents.
    _write_agents = {"developer", "git_agent"}
    _has_writers = bool(set(sequence) & _write_agents)

    # Create a working branch for pipelines that modify files.
    pipeline_branch = branch_name
    if repo_full_name and _has_writers and not branch_name:
        import re as _re
        import time as _time
        owner, repo = _split_repo_full_name(repo_full_name)
        # Branch name: topology id + sanitized request slug + time suffix.
        sanitized = _re.sub(r"[^a-z0-9-]+", "-", user_request.lower())[:35].strip("-")
        timestamp = str(int(_time.time()))[-6:]
        pipeline_branch = f"gitpilot-{topology.id}-{sanitized}-{timestamp}"
        try:
            from .github_api import create_branch
            await create_branch(owner, repo, pipeline_branch, from_ref="HEAD", token=token)
            logger.info("[Pipeline] Created branch: %s", pipeline_branch)
        except Exception as exc:
            # Best-effort: the branch may already exist. Log instead of
            # swallowing silently so real API failures remain diagnosable.
            logger.debug("[Pipeline] Branch creation skipped for %s: %s", pipeline_branch, exc)

    # Set repo context (on the working branch).
    if repo_full_name:
        owner, repo = _split_repo_full_name(repo_full_name)
        active_ref = pipeline_branch or "HEAD"
        _tools()["set_repo_context"](owner, repo, token=token, branch=active_ref)

    llm = _build_llm()

    # Build agents and tasks, one pair per sequence entry.
    agents = []
    tasks = []
    for agent_id in sequence:
        mapping = _TOPO_AGENT_MAP.get(agent_id)
        if not mapping:
            logger.warning("[GitPilot] Unknown topology agent ID: %s — skipping", agent_id)
            continue
        agent_type, base_description = mapping
        agent = _get_agent(agent_type, llm)
        agents.append(agent)

        # Build task description: combine base description with user request.
        task_desc = (
            f"User request: {user_request}\n"
            f"Repository: {repo_full_name}\n"
        )
        if pipeline_branch:
            task_desc += f"Branch: {pipeline_branch}\n"
        task_desc += f"\nYour role in this pipeline: {base_description}"

        # Tell write-capable agents to actually use their tools.
        if agent_id in _write_agents and pipeline_branch:
            task_desc += (
                f"\n\nIMPORTANT: You have tools to write and delete files. "
                f"USE THEM to make real changes on branch '{pipeline_branch}'. "
                f"Do NOT just describe changes — actually write/delete files using your tools."
            )

        # Context chaining: each task after the first receives all prior
        # tasks (copied so later appends don't mutate earlier contexts);
        # the first task gets None.
        task = _crewai()["Task"](
            description=task_desc,
            expected_output=f"Structured output from the {agent_id} phase",
            agent=agent,
            context=list(tasks) or None,
        )
        tasks.append(task)

    if not agents:
        return {"error": "No valid agents could be built for this topology"}

    # Load optional context pack (best-effort; a missing workspace is fine).
    _ctx_pack = ""
    if repo_full_name:
        try:
            from pathlib import Path as _P
            _owner, _repo = repo_full_name.split("/")
            _ws = _P.home() / ".gitpilot" / "workspaces" / _owner / _repo
            _ctx_pack = build_context_pack(_ws, query=user_request)
        except Exception:
            logger.debug("[Pipeline] Context pack unavailable for %s", repo_full_name, exc_info=True)
    if _ctx_pack:
        # Append context pack to the first task's description.
        tasks[0].description += "\n\n" + _ctx_pack

    crew = _crewai()["Crew"](
        agents=agents,
        tasks=tasks,
        process=_crewai()["Process"].sequential,
        verbose=True,
    )

    def _run():
        # Blocking kickoff; executed via _guarded_agent_call.
        result = crew.kickoff()
        if hasattr(result, "raw"):
            return result.raw
        return str(result)

    ctx = contextvars.copy_context()
    result_text = await _guarded_agent_call(ctx, _run, label="topology_pipeline")

    response = {
        "category": "topology_pipeline",
        "topology_id": topology.id,
        "topology_name": topology.name,
        "execution_style": topology.execution_style.value,
        "agents_used": sequence,
        "result": result_text,
    }

    # Add branch info for pipelines that created a working branch.
    if pipeline_branch and _has_writers:
        response["branch"] = pipeline_branch
        response["branch_url"] = f"https://github.com/{repo_full_name}/tree/{pipeline_branch}"

    return response
+
+
def _get_agent(agent_type: AgentType, llm) -> Agent:
    """Instantiate an agent by type.

    Raises:
        ValueError: for an ``agent_type`` with no registered builder.
    """

    def _explorer() -> Agent:
        # Read-only repository exploration.
        return _crewai()["Agent"](
            role="Repository Explorer",
            goal="Thoroughly explore and document the current state of the repository",
            backstory="You are a meticulous code archaeologist who explores repositories.",
            llm=llm,
            tools=_tools()["REPOSITORY_TOOLS"],
            verbose=True,
            allow_delegation=False,
        )

    def _planner() -> Agent:
        # Plans refactors from explorer findings; read-only tools.
        return _crewai()["Agent"](
            role="Repository Refactor Planner",
            goal="Design safe, step-by-step refactor plans",
            backstory="You are an experienced staff engineer who creates plans based on facts.",
            llm=llm,
            tools=_tools()["REPOSITORY_TOOLS"],
            verbose=True,
            allow_delegation=False,
        )

    def _code_writer() -> Agent:
        # Write-capable: repository read tools plus write tools.
        return _crewai()["Agent"](
            role="Expert Code Writer",
            goal="Generate high-quality, production-ready code and write it to the repository",
            backstory=(
                "You are a senior software engineer with multi-language expertise. "
                "You read existing files, write new code, and update files directly "
                "in the repository using your tools. Always read a file before modifying it."
            ),
            llm=llm,
            tools=_tools()["REPOSITORY_TOOLS"] + _tools()["WRITE_TOOLS"],
            verbose=True,
            allow_delegation=False,
        )

    registry = {
        AgentType.EXPLORER: _explorer,
        AgentType.PLANNER: _planner,
        AgentType.CODE_WRITER: _code_writer,
        AgentType.CODE_REVIEWER: lambda: _build_code_review_agent(llm),
        AgentType.ISSUE_MANAGER: lambda: _build_issue_agent(llm),
        AgentType.PR_MANAGER: lambda: _build_pr_agent(llm),
        AgentType.SEARCH: lambda: _build_search_agent(llm),
        AgentType.LEARNING: lambda: _build_learning_agent(llm),
        AgentType.LOCAL_EDITOR: lambda: _build_local_editor_agent(llm),
        AgentType.TERMINAL: lambda: _build_terminal_agent(llm),
    }
    factory = registry.get(agent_type)
    if not factory:
        raise ValueError(f"Unknown agent type: {agent_type}")
    return factory()
+
+
def _build_task_description(
    workflow: WorkflowPlan,
    user_request: str,
    repo_full_name: str,
    branch_name: Optional[str],
) -> str:
    """Build a detailed task description for the agent based on the workflow.

    Emits the common header lines (request, repo, optional branch and entity
    number) followed by one category-specific instruction block.
    """
    lines = [
        f"User request: {user_request}",
        f"Repository: {repo_full_name}",
    ]
    if branch_name:
        lines.append(f"Branch: {branch_name}")
    if workflow.entity_number:
        lines.append(f"Entity number: #{workflow.entity_number}")

    category = workflow.category
    extra = None

    # Category-specific instructions (exactly one block applies).
    if category == RequestCategory.ISSUE_MANAGEMENT:
        hint = workflow.metadata.get("action", "")
        extra = (
            "\nYou are handling an ISSUE MANAGEMENT request. "
            f"Action hint: {hint}. "
            "Use your issue tools to fulfill the request. "
            "If creating an issue, extract title and body from the user request. "
            "If listing issues, present results in a clear table. "
            "If updating, identify the issue number and fields to change. "
            "Always confirm what you did with the issue URL."
        )
    elif category == RequestCategory.PR_MANAGEMENT:
        hint = workflow.metadata.get("action", "")
        extra = (
            "\nYou are handling a PULL REQUEST request. "
            f"Action hint: {hint}. "
            "Use your PR tools to fulfill the request. "
            "If creating a PR, determine the head and base branches. "
            "If merging, confirm the PR number and merge method. "
            "Always confirm with the PR URL."
        )
    elif category == RequestCategory.CODE_SEARCH:
        kind = workflow.metadata.get("search_type", "code")
        extra = (
            f"\nYou are handling a SEARCH request (type: {kind}). "
            "Use your search tools to find what the user is looking for. "
            "Present results clearly with paths, URLs, and context snippets."
        )
    elif category == RequestCategory.CODE_REVIEW:
        extra = (
            "\nYou are handling a CODE REVIEW request. "
            "First explore the repository to understand the codebase, "
            "then analyse code quality, identify potential issues "
            "(security, performance, maintainability), and provide "
            "constructive suggestions with specific file references."
        )
    elif category == RequestCategory.LEARNING:
        extra = (
            "\nYou are handling a LEARNING / GUIDANCE request. "
            "Provide clear, actionable guidance about GitHub features. "
            "Include examples and best practices. "
            "If relevant, reference the current repository for context."
        )
    elif category == RequestCategory.LOCAL_EDIT:
        extra = (
            "\nYou are handling a LOCAL FILE EDITING request. "
            "Use your local file tools to read, write, and modify files. "
            "Always read the file before editing to understand current content. "
            "After editing, use git_diff or git_status to verify your changes. "
            "Report exactly what was changed."
        )
    elif category == RequestCategory.TERMINAL:
        extra = (
            "\nYou are handling a TERMINAL / SHELL COMMAND request. "
            "Use the run_command tool to execute the requested command. "
            "Report the exit code and output. If tests fail, summarise "
            "which tests failed and why. Never run destructive commands."
        )
    elif category == RequestCategory.CONVERSATIONAL:
        extra = (
            "\nYou are handling a general question about the repository. "
            "Use repository tools to explore and answer the question. "
            "Be concise and helpful."
        )

    if extra is not None:
        lines.append(extra)

    return "\n".join(lines)
+
+
+# ============================================================================
+# Auto PR Creation (v2 upgrade)
+# ============================================================================
+
async def create_pr_after_execution(
    repo_full_name: str,
    branch_name: str,
    goal: str,
    execution_log: Dict[str, Any],
    token: Optional[str] = None,
) -> Optional[Dict[str, Any]]:
    """Automatically create a PR after plan execution completes.

    Args:
        repo_full_name: ``owner/repo`` string.
        branch_name: Head branch containing the executed changes.
        goal: Plan goal; used for the PR title and body.
        execution_log: Dict with a ``"steps"`` list of ``{"summary": ...}``
            entries, rendered as the PR body.
        token: Optional GitHub token.

    Returns:
        The PR data dict from the GitHub API, or ``None`` if creation fails.
    """
    from .github_pulls import create_pull_request
    from .github_api import get_repo

    owner, repo = _split_repo_full_name(repo_full_name)

    # Resolve the base branch; fall back to "main" if the repo lookup fails.
    try:
        repo_info = await get_repo(owner, repo, token=token)
        default_branch = repo_info.get("default_branch", "main")
    except Exception:
        default_branch = "main"

    # Build PR body from execution log.
    steps = execution_log.get("steps", [])
    body_lines = [f"## GitPilot Auto-PR\n\n**Goal:** {goal}\n"]
    for step in steps:
        body_lines.append(f"- {step.get('summary', '')}")
    # Plain literal — the previous f-string prefix had no placeholders.
    body_lines.append("\n---\n*Created by GitPilot*")
    body = "\n".join(body_lines)

    # Truncate title to stay within GitHub's 256-character limit.
    title = f"GitPilot: {goal}"
    if len(title) > 256:
        title = title[:253] + "..."

    try:
        pr = await create_pull_request(
            owner,
            repo,
            title=title,
            head=branch_name,
            base=default_branch,
            body=body,
            token=token,
        )
        logger.info("[GitPilot] Auto-PR created: %s", pr.get("html_url", ""))
        return pr
    except Exception as e:
        # Auto-PR is best-effort; the executed branch still exists either way.
        logger.warning("[GitPilot] Failed to create auto-PR: %s", e)
        return None
+
+
+# ============================================================================
+# Flow Definition (v3 -- topology-aware with legacy fallback)
+# ============================================================================
+
async def get_flow_definition(topology_id: Optional[str] = None) -> dict:
    """Return the agent workflow as a visual graph.

    When *topology_id* is provided (or a saved preference exists), the graph
    is served from the topology registry. Otherwise the original hardcoded
    graph is returned for backward compatibility.

    Args:
        topology_id: Optional topology identifier; falls back to the saved
            preference when omitted.

    Returns:
        A ``{"nodes": [...], "edges": [...]}`` dict consumable by the
        frontend graph view.
    """
    tid = topology_id or get_saved_topology_preference()
    if tid:
        return get_topology_graph(tid)

    # Legacy hardcoded graph (unchanged from v2).
    # Nodes: the router, each specialist agent, and the two tool endpoints.
    return {
        "nodes": [
            {
                "id": "router",
                "label": "Request Router",
                "type": "router",
                "description": "Analyses user intent and delegates to the right agent(s)",
            },
            {
                "id": "repo_explorer",
                "label": "Repository Explorer",
                "type": "agent",
                "description": "Explores repository to gather current state",
            },
            {
                "id": "planner",
                "label": "Refactor Planner",
                "type": "agent",
                "description": "Creates safe, step-by-step refactor plans based on exploration",
            },
            {
                "id": "code_writer",
                "label": "Code Writer",
                "type": "agent",
                "description": "Implements approved changes to codebase",
            },
            {
                "id": "reviewer",
                "label": "Code Reviewer",
                "type": "agent",
                "description": "Reviews code quality, security, and performance",
            },
            {
                "id": "issue_manager",
                "label": "Issue Manager",
                "type": "agent",
                "description": "Creates, updates, and manages GitHub issues",
            },
            {
                "id": "pr_manager",
                "label": "PR Manager",
                "type": "agent",
                "description": "Creates, reviews, and merges pull requests",
            },
            {
                "id": "search_agent",
                "label": "Search & Discovery",
                "type": "agent",
                "description": "Searches code, repos, issues, and users",
            },
            {
                "id": "learning_agent",
                "label": "Learning & Guidance",
                "type": "agent",
                "description": "Provides GitHub feature guidance and best practices",
            },
            {
                "id": "local_editor",
                "label": "Local Editor",
                "type": "agent",
                "description": "Reads and writes files directly in the local workspace",
            },
            {
                "id": "terminal_agent",
                "label": "Terminal",
                "type": "agent",
                "description": "Executes shell commands in a sandboxed environment",
            },
            {
                "id": "github_tools",
                "label": "GitHub API",
                "type": "tool",
                "description": "Read/write/delete files, issues, PRs, search",
            },
            {
                "id": "local_tools",
                "label": "Local Tools",
                "type": "tool",
                "description": "File I/O, git operations, shell commands on local workspace",
            },
        ],
        # Edges: router fan-out to each agent (e0-e0f, e8, e9), the
        # explore -> plan -> write -> PR chain (e1-e3), and agent -> tool
        # connections (e5-e7, e10, e11).
        "edges": [
            {
                "id": "e0",
                "source": "router",
                "target": "repo_explorer",
                "label": "Plan & Execute workflow",
            },
            {
                "id": "e0b",
                "source": "router",
                "target": "issue_manager",
                "label": "Issue management requests",
            },
            {
                "id": "e0c",
                "source": "router",
                "target": "pr_manager",
                "label": "PR management requests",
            },
            {
                "id": "e0d",
                "source": "router",
                "target": "search_agent",
                "label": "Search requests",
            },
            {
                "id": "e0e",
                "source": "router",
                "target": "reviewer",
                "label": "Code review requests",
            },
            {
                "id": "e0f",
                "source": "router",
                "target": "learning_agent",
                "label": "Learning & guidance requests",
            },
            {
                "id": "e1",
                "source": "repo_explorer",
                "target": "planner",
                "label": "Complete repository state & file listing",
            },
            {
                "id": "e2",
                "source": "planner",
                "target": "code_writer",
                "label": "Approved plan with verified file actions",
            },
            {
                "id": "e3",
                "source": "code_writer",
                "target": "pr_manager",
                "label": "Auto-create PR after execution",
            },
            {
                "id": "e4",
                "source": "reviewer",
                "target": "pr_manager",
                "label": "Review results",
            },
            {
                "id": "e5",
                "source": "issue_manager",
                "target": "github_tools",
                "label": "Issue operations",
            },
            {
                "id": "e6",
                "source": "pr_manager",
                "target": "github_tools",
                "label": "PR operations",
            },
            {
                "id": "e7",
                "source": "search_agent",
                "target": "github_tools",
                "label": "Search queries",
            },
            {
                "id": "e8",
                "source": "router",
                "target": "local_editor",
                "label": "Local file editing requests",
            },
            {
                "id": "e9",
                "source": "router",
                "target": "terminal_agent",
                "label": "Shell command requests",
            },
            {
                "id": "e10",
                "source": "local_editor",
                "target": "local_tools",
                "label": "File and git operations",
            },
            {
                "id": "e11",
                "source": "terminal_agent",
                "target": "local_tools",
                "label": "Command execution",
            },
        ],
    }
diff --git a/gitpilot/agents_md.py b/gitpilot/agents_md.py
new file mode 100644
index 0000000000000000000000000000000000000000..a3e04c22dae70f5df12cca028f5ad4949de05766
--- /dev/null
+++ b/gitpilot/agents_md.py
@@ -0,0 +1,314 @@
+# gitpilot/agents_md.py
+"""Persistent project context file — ``AGENTS.md`` + ``/init``.
+
+Industry-convention `AGENTS.md` lives at the workspace root and is loaded
+into every session as a high-priority context block. This module is
+purely additive — when no ``AGENTS.md`` exists the rest of GitPilot
+behaves exactly as before.
+
+Three responsibilities:
+
+1. Render a starter ``AGENTS.md`` from a workspace scan (``/init``).
2. Load the active ``AGENTS.md`` and its mode-specific siblings under
   ``.gitpilot/AGENTS.<mode>.md`` for prompt injection.
+3. Expand inline ``@./other.md`` includes with circular-import detection.
+"""
+from __future__ import annotations
+
+import logging
+import os
+import re
+from dataclasses import dataclass, field
+from pathlib import Path
+from typing import Any, Dict, List, Optional, Set, Tuple, TypedDict
+
+
+class _IncludeInfo(TypedDict):
+ remaining_budget: int
+ include_count: int
+ truncated: bool
+
+logger = logging.getLogger(__name__)
+
+AGENTS_MD = "AGENTS.md"
+GITPILOT_DIR = ".gitpilot"
+
+MAX_AGENTS_MD_BYTES = 32_000
+MAX_INCLUDE_DEPTH = 5
+MAX_INCLUDES_TOTAL = 32
+
+_INCLUDE_RE = re.compile(r"^@(\./|\.\./|/)([^\s]+)\s*$", re.MULTILINE)
+
+
+@dataclass
+class AgentsDoc:
+ """Loaded AGENTS.md with includes resolved."""
+
+ path: Path
+ content: str
+ includes: List[Path] = field(default_factory=list)
+ truncated: bool = False
+ circular: List[str] = field(default_factory=list)
+
+ @property
+ def is_empty(self) -> bool:
+ return not self.content.strip()
+
+
+class AgentsLoader:
+ """Locate and load AGENTS.md (root + optional mode-specific)."""
+
+ def __init__(self, workspace_path: Path) -> None:
+ self.workspace_path = workspace_path.resolve()
+
+ # ------------------------------------------------------------------
+ # Discovery
+ # ------------------------------------------------------------------
+ def root_path(self) -> Path:
+ return self.workspace_path / AGENTS_MD
+
+ def mode_path(self, mode_slug: str) -> Path:
+ safe = re.sub(r"[^a-zA-Z0-9_.-]", "", mode_slug)
+ return self.workspace_path / GITPILOT_DIR / f"AGENTS.{safe}.md"
+
+ # ------------------------------------------------------------------
+ # Loading + include expansion
+ # ------------------------------------------------------------------
+ def load(self, mode_slug: Optional[str] = None) -> AgentsDoc:
+ candidates: List[Path] = []
+ if mode_slug:
+ mp = self.mode_path(mode_slug)
+ if mp.exists():
+ candidates.append(mp)
+ root = self.root_path()
+ if root.exists():
+ candidates.append(root)
+
+ if not candidates:
+ return AgentsDoc(path=root, content="")
+
+ # If both exist, mode-specific is appended after the root so the
+ # mode overrides apply last in the system prompt.
+ rendered_parts: List[str] = []
+ seen: Set[Path] = set()
+ circular: List[str] = []
+ includes: List[Path] = []
+ truncated = False
+ budget = MAX_AGENTS_MD_BYTES
+ include_count = 0
+
+ for cand in reversed(candidates): # root first, mode last
+ text, info = self._expand_includes(
+ cand, depth=0, seen=seen, circular=circular, includes=includes,
+ remaining_budget=budget, include_count=include_count,
+ )
+ rendered_parts.append(text)
+ budget = info["remaining_budget"]
+ include_count = info["include_count"]
+ truncated = truncated or info["truncated"]
+ if budget <= 0:
+ truncated = True
+ break
+
+ return AgentsDoc(
+ path=candidates[0],
+ content="\n\n".join(p for p in rendered_parts if p),
+ includes=includes,
+ truncated=truncated,
+ circular=circular,
+ )
+
+ def _expand_includes(
+ self,
+ path: Path,
+ *,
+ depth: int,
+ seen: Set[Path],
+ circular: List[str],
+ includes: List[Path],
+ remaining_budget: int,
+ include_count: int,
+ ) -> Tuple[str, _IncludeInfo]:
+ resolved = path.resolve()
+ if resolved in seen:
+ circular.append(str(resolved))
+ return "", {"remaining_budget": remaining_budget, "include_count": include_count, "truncated": False}
+ if depth > MAX_INCLUDE_DEPTH or include_count >= MAX_INCLUDES_TOTAL:
+ return "", {"remaining_budget": remaining_budget, "include_count": include_count, "truncated": True}
+
+ if not str(resolved).startswith(str(self.workspace_path)):
+ return "", {"remaining_budget": remaining_budget, "include_count": include_count, "truncated": False}
+
+ seen.add(resolved)
+ try:
+ raw = resolved.read_text(encoding="utf-8")
+ except Exception as e:
+ logger.debug("could not read %s: %s", resolved, e)
+ return "", {"remaining_budget": remaining_budget, "include_count": include_count, "truncated": False}
+
+ out_parts: List[str] = []
+ truncated = False
+ cursor = 0
+ for m in _INCLUDE_RE.finditer(raw):
+ out_parts.append(raw[cursor : m.start()])
+ cursor = m.end()
+ include_token = m.group(1) + m.group(2)
+ target = (resolved.parent / include_token).resolve() if not include_token.startswith("/") else Path(include_token).resolve()
+ includes.append(target)
+ include_count += 1
+ child_text, child_info = self._expand_includes(
+ target,
+ depth=depth + 1,
+ seen=seen,
+ circular=circular,
+ includes=includes,
+ remaining_budget=remaining_budget,
+ include_count=include_count,
+ )
+ out_parts.append(child_text)
+ remaining_budget = child_info["remaining_budget"]
+ include_count = child_info["include_count"]
+ truncated = truncated or child_info["truncated"]
+ out_parts.append(raw[cursor:])
+
+ body = "".join(out_parts)
+ if len(body) > remaining_budget:
+ body = body[:remaining_budget]
+ truncated = True
+ remaining_budget -= len(body)
+
+ return body, {"remaining_budget": remaining_budget, "include_count": include_count, "truncated": truncated}
+
+
+# ----------------------------------------------------------------------
+# /init implementation
+# ----------------------------------------------------------------------
+
+@dataclass
+class InitReport:
+ """Summary returned by ``/init``."""
+
+ created: bool
+ path: Path
+ sections: List[str] = field(default_factory=list)
+ skipped_reason: Optional[str] = None
+
+
+def _scan_workspace(workspace_path: Path) -> Dict[str, Any]:
+ """Extract a low-cost fingerprint of the project for the starter doc."""
+ info: Dict[str, Any] = {}
+ info["python"] = (workspace_path / "pyproject.toml").exists() or any(workspace_path.glob("*.py"))
+ info["node"] = (workspace_path / "package.json").exists()
+ info["docker"] = (workspace_path / "Dockerfile").exists() or any(workspace_path.glob("Dockerfile*"))
+ info["compose"] = any(workspace_path.glob("docker-compose*.y*ml"))
+ info["has_tests"] = (workspace_path / "tests").exists() or (workspace_path / "test").exists()
+ info["has_makefile"] = (workspace_path / "Makefile").exists()
+ info["readme"] = next((p.name for p in workspace_path.glob("README*")), None)
+ # Cheap top-level layout
+ top: List[str] = []
+ for child in sorted(workspace_path.iterdir()):
+ if child.name.startswith("."):
+ continue
+ top.append(child.name + ("/" if child.is_dir() else ""))
+ if len(top) >= 30:
+ break
+ info["top_level"] = top
+ return info
+
+
+_STARTER_TEMPLATE = """# AGENTS.md
+
+> Persistent project context loaded into every GitPilot session.
+> Edit freely — agents will follow these notes.
+
+## Project Overview
+{overview}
+
+## Directory Layout
+{layout}
+
+## Stack
+{stack}
+
+## Workflows
+{workflows}
+
+## Conventions
+- Keep changes small and reversible.
+- Run the test suite before committing.
+- Write docstrings for any new public function.
+
+## Mode-Specific Notes
+Place per-mode overrides in `.gitpilot/AGENTS..md` (for example
+`.gitpilot/AGENTS.coder.md`). Use `@./relative/path.md` on its own line to
+include another markdown file.
+"""
+
+
+def run_init(
+ workspace_path: Path,
+ *,
+ overwrite: bool = False,
+) -> InitReport:
+ """Generate a starter ``AGENTS.md`` for the workspace. Idempotent."""
+ workspace_path = workspace_path.resolve()
+ target = workspace_path / AGENTS_MD
+ if target.exists() and not overwrite:
+ return InitReport(created=False, path=target, skipped_reason="exists")
+
+ info = _scan_workspace(workspace_path)
+
+ stack_bits: List[str] = []
+ if info.get("python"):
+ stack_bits.append("Python")
+ if info.get("node"):
+ stack_bits.append("Node.js")
+ if info.get("docker"):
+ stack_bits.append("Docker")
+ if info.get("compose"):
+ stack_bits.append("docker-compose")
+ stack = ", ".join(stack_bits) or "_unknown — describe here_"
+
+ workflows: List[str] = []
+ if info.get("has_makefile"):
+ workflows.append("- `make install`, `make test`, `make run`")
+ if info.get("node"):
+ workflows.append("- `npm install`, `npm test`")
+ if info.get("python"):
+ workflows.append("- `pip install -e .` and `pytest`")
+ workflows_md = "\n".join(workflows) or "_describe build/test/run commands here_"
+
+ layout = "\n".join(f"- `{e}`" for e in info.get("top_level", [])) or "_workspace empty_"
+ overview = (
+ f"This project has a `{info.get('readme')}` at its root — refer to it for "
+ "purpose and high-level usage."
+ if info.get("readme") else "_describe the project here_"
+ )
+
+ doc = _STARTER_TEMPLATE.format(
+ overview=overview,
+ layout=layout,
+ stack=stack,
+ workflows=workflows_md,
+ )
+
+ target.write_text(doc, encoding="utf-8")
+ return InitReport(
+ created=True,
+ path=target,
+ sections=["Project Overview", "Directory Layout", "Stack", "Workflows", "Conventions"],
+ )
+
+
+def load_for_session(
+ workspace_path: Path,
+ mode_slug: Optional[str] = None,
+) -> str:
+ """Convenience: return the AGENTS.md content (with includes) or ''."""
+ doc = AgentsLoader(workspace_path).load(mode_slug=mode_slug)
+ if doc.is_empty:
+ return ""
+ suffix = ""
+ if doc.truncated:
+ suffix = "\n\n_…AGENTS.md truncated to fit context budget._"
+ return doc.content + suffix
diff --git a/gitpilot/api.py b/gitpilot/api.py
new file mode 100644
index 0000000000000000000000000000000000000000..0107bceadba4a8e11907c7e719066368f234039c
--- /dev/null
+++ b/gitpilot/api.py
@@ -0,0 +1,3906 @@
+# gitpilot/api.py
+
+from pathlib import Path
+from typing import List, Optional
+
+from fastapi import FastAPI, Query, Path as FPath, Header, HTTPException, UploadFile, File
+from fastapi.responses import FileResponse, JSONResponse
+from fastapi.staticfiles import StaticFiles
+from fastapi.middleware.cors import CORSMiddleware
+from pydantic import BaseModel, Field
+
+from .version import __version__
+# Batch P1-D — error-envelope decorator (opt-in via the `error_envelope` flag).
+# Re-exported here so endpoint authors can `@wrap_errors_envelope` without
+# reaching into the implementation module. Importing the symbol is a no-op
+# when the flag is off, so this is fully backwards compatible.
+from .errors import GitPilotError, wrap_errors_envelope # noqa: F401
+from .github_api import (
+ list_user_repos,
+ list_user_repos_paginated, # Pagination support
+ search_user_repos, # Search across all repos
+ get_repo_tree,
+ get_file,
+ put_file,
+ execution_context,
+ github_request,
+)
+from .github_app import check_repo_write_access
+from .settings import AppSettings, get_settings, set_provider, update_settings, autoconfigure_local_provider, LLMProvider
+from .agentic import (
+ generate_plan,
+ execute_plan,
+ generate_plan_lite,
+ execute_plan_lite,
+ PlanResult,
+ get_flow_definition,
+ dispatch_request,
+ create_pr_after_execution,
+)
+from .agent_router import route as route_request
+from . import github_issues
+from . import github_pulls
+from . import github_search
+from .session import SessionManager, Session
+from .hooks import HookManager, HookEvent
+from .permissions import PermissionManager, PermissionMode
+from .memory import MemoryManager
+from .context_vault import ContextVault
+from .use_case import UseCaseManager
+from .mcp_client import MCPClient
+from .plugins import PluginManager
+from .skills import SkillManager
+from .smart_model_router import ModelRouter, ModelRouterConfig
+from .topology_registry import (
+ list_topologies as _list_topologies,
+ get_topology_graph as _get_topology_graph,
+ classify_message as _classify_message,
+ get_saved_topology_preference,
+ save_topology_preference,
+)
+import httpx
+import logging
+from fastapi import HTTPException
+
+logger = logging.getLogger(__name__)
+
+def _is_small_local_model() -> bool:
+ """Detect if the active provider is Ollama/OllaBridge with a model
+ that can't handle multi-agent CrewAI prompts reliably.
+
+ Delegates to agentic._is_incompatible_model (single source of truth)
+ so that /api/chat/plan, /api/chat/execute, and /ws/sessions/ all
+ share the same detection logic.
+ """
+ try:
+ from .agentic import _is_incompatible_model
+ s = autoconfigure_local_provider()
+ return _is_incompatible_model(s)
+ except Exception as exc:
+ logger.debug("[GitPilot] _is_small_local_model check failed: %s", exc)
+ return False
+
+
+def _is_lite_mode_active() -> bool:
+ """Check if Lite Mode should be used.
+
+ Returns True if ANY of:
+ - settings.lite_mode is True (explicit toggle), OR
+ - the saved topology preference is "lite_mode" (selected in flow viewer), OR
+ - the active provider is a small local model that cannot handle
+ multi-agent CrewAI prompts (auto-detected for reliability)
+ """
+ s = autoconfigure_local_provider()
+ if s.lite_mode:
+ return True
+ pref = get_saved_topology_preference()
+ if pref == "lite_mode":
+ return True
+ # Auto-route small local models to lite mode for reliability
+ if _is_small_local_model():
+ logger.info("[GitPilot] Auto-enabling Lite Mode for small local model")
+ return True
+ return False
+# ═════════════════════════════════════════════════════════════════════
+# LAZY IMPORT STRATEGY — Phase 3 heavy modules
+# ═════════════════════════════════════════════════════════════════════
+# agent_teams, learning, cross_repo, predictions, security, nl_database
+# are deferred until first access via _LazyProxy. This saves 200-500ms
+# on WSL cold start (each import triggers disk I/O + pydantic compilation).
+# The proxy pattern means NO code changes are needed at call sites —
+# _agent_team.plan_and_split(...) works identically to the original.
+# NL database types are imported lazily at call site (see /api/nl-db endpoint)
+from .github_oauth import (
+ generate_authorization_url,
+ exchange_code_for_token,
+ validate_token,
+ initiate_device_flow,
+ poll_device_token,
+ AuthSession,
+ GitHubUser,
+)
+import os
+import logging
+from .model_catalog import list_models_for_provider
+
+# Optional A2A adapter (MCP ContextForge)
+from .a2a_adapter import router as a2a_router
+
+logger = logging.getLogger(__name__)
+
+
+class _LazyProxy:
+ """Lazy singleton proxy — instantiates the wrapped class on first attribute access.
+
+ Used to defer heavy imports (agent_teams, learning, cross_repo, etc.) until
+ they're actually needed, reducing backend startup time on slow filesystems
+ (WSL, HF Spaces cold start).
+
+ All attribute access is transparently forwarded to the underlying instance,
+ so existing code like `_agent_team.plan_and_split(...)` works unchanged.
+ """
+
+ def __init__(self, module_path: str, class_name: str) -> None:
+ object.__setattr__(self, "_module_path", module_path)
+ object.__setattr__(self, "_class_name", class_name)
+ object.__setattr__(self, "_instance", None)
+
+ def _get_instance(self) -> object:
+ inst = object.__getattribute__(self, "_instance")
+ if inst is None:
+ import importlib
+ module = importlib.import_module(self._module_path, package=__package__)
+ cls = getattr(module, self._class_name)
+ inst = cls()
+ object.__setattr__(self, "_instance", inst)
+ logger.debug("[LazyProxy] Instantiated %s.%s on first access",
+ self._module_path, self._class_name)
+ return inst
+
+ def __getattr__(self, name: str):
+ return getattr(self._get_instance(), name)
+
+ def __setattr__(self, name: str, value):
+ setattr(self._get_instance(), name, value)
+
+ def __call__(self, *args, **kwargs):
+ return self._get_instance()(*args, **kwargs)
+
+ def __repr__(self) -> str:
+ inst = object.__getattribute__(self, "_instance")
+ if inst is None:
+ return f"<_LazyProxy {self._module_path}.{self._class_name} (not yet loaded)>"
+ return repr(inst)
+
+
+# --- Phase 1 singletons (lightweight, instantiate eagerly) ---
+_session_mgr = SessionManager()
+_hook_mgr = HookManager()
+_perm_mgr = PermissionManager()
+
+# --- Phase 2 singletons (lightweight, instantiate eagerly) ---
+_mcp_client = MCPClient()
+_plugin_mgr = PluginManager()
+_skill_mgr = SkillManager()
+_model_router = ModelRouter()
+
+# --- Phase 3 singletons (HEAVY, lazy-loaded) ---
+# Each of these pulls in several MB of Python code and takes 50-200ms on WSL.
+# Deferred via _LazyProxy until first endpoint call that actually uses them.
+_agent_team = _LazyProxy(".agent_teams", "AgentTeam")
+_learning_engine = _LazyProxy(".learning", "LearningEngine")
+_cross_repo = _LazyProxy(".cross_repo", "CrossRepoAnalyzer")
+_predictive_engine = _LazyProxy(".predictions", "PredictiveEngine")
+_security_scanner = _LazyProxy(".security", "SecurityScanner")
+_nl_engine = _LazyProxy(".nl_database", "NLQueryEngine")
+
+import asyncio as _asyncio
+import signal
+from contextlib import asynccontextmanager
+
+_shutdown_event = _asyncio.Event()
+
+
+@asynccontextmanager
+async def _lifespan(application: FastAPI):
+ """Manage startup (pre-warm) and graceful shutdown."""
+ import time as _time
+
+ _startup_start = _time.monotonic()
+ logger.info("═══════════════════════════════════════════════════")
+ logger.info("🚀 [STARTUP] GitPilot backend initializing...")
+ logger.info("═══════════════════════════════════════════════════")
+
+ # -- Startup: pre-warm CrewAI in background ---
+ async def _warmup():
+ _t0 = _time.monotonic()
+ logger.info("[STARTUP] ⏳ Phase 1/3: Waiting 2s for health endpoint...")
+ await _asyncio.sleep(2)
+
+ _t1 = _time.monotonic()
+ logger.info("[STARTUP] ⏳ Phase 2/3: Importing CrewAI modules...")
+ try:
+ from .agentic import _crewai, _tools # noqa: F811
+ _crewai()
+ _t_crewai = _time.monotonic() - _t1
+ logger.info("[STARTUP] ✅ CrewAI imports complete in %.2fs", _t_crewai)
+
+ _t2 = _time.monotonic()
+ logger.info("[STARTUP] ⏳ Phase 3/3: Loading agent tools...")
+ _tools()
+ _t_tools = _time.monotonic() - _t2
+ logger.info("[STARTUP] ✅ Agent tools loaded in %.2fs", _t_tools)
+
+ _total = _time.monotonic() - _startup_start
+ logger.info("═══════════════════════════════════════════════════")
+ logger.info("[STARTUP] 🎉 Backend fully ready in %.2fs total", _total)
+ logger.info("═══════════════════════════════════════════════════")
+ except Exception as exc:
+ _t_fail = _time.monotonic() - _t1
+ logger.warning(
+ "[STARTUP] ⚠️ CrewAI pre-warm failed after %.2fs (will retry on first request): %s",
+ _t_fail, exc,
+ )
+
+ # Log memory usage after warmup
+ try:
+ import resource
+ rss_mb = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1024
+ logger.info("[STARTUP] 📊 Memory after warmup: %.1f MB RSS", rss_mb)
+ except Exception:
+ pass
+
+ _asyncio.create_task(_warmup())
+
+ # -- Graceful shutdown handler ---
+ def _handle_signal(sig, _frame):
+ logger.info("Received %s — initiating graceful shutdown", signal.Signals(sig).name)
+ _shutdown_event.set()
+
+ for sig in (signal.SIGTERM, signal.SIGINT):
+ try:
+ signal.signal(sig, _handle_signal)
+ except (OSError, ValueError):
+ pass # not main thread or unsupported
+
+ _ready_time = _time.monotonic() - _startup_start
+ logger.info(
+ "[STARTUP] ✅ FastAPI ready to accept requests after %.2fs "
+ "(CrewAI warmup continues in background)",
+ _ready_time,
+ )
+
+ yield
+
+ # Cleanup on shutdown
+ logger.info("[SHUTDOWN] GitPilot shutting down gracefully")
+ _shutdown_event.set()
+
+
+app = FastAPI(
+ title="GitPilot API",
+ version=__version__,
+ description="Agentic AI assistant for GitHub repositories.",
+ lifespan=_lifespan,
+)
+
+# ==========================================================================
+# Optional A2A Adapter (MCP ContextForge)
+# ==========================================================================
+# This is feature-flagged and does not affect the existing UI/REST API unless
+# explicitly enabled.
+def _env_bool(name: str, default: bool) -> bool:
+ raw = os.getenv(name)
+ if raw is None:
+ return default
+ return raw.strip().lower() in {"1", "true", "yes", "y", "on"}
+
+
+if _env_bool("GITPILOT_ENABLE_A2A", False):
+ logger.info("A2A adapter enabled (mounting /a2a/* endpoints)")
+ app.include_router(a2a_router)
+else:
+ logger.info("A2A adapter disabled (set GITPILOT_ENABLE_A2A=true to enable)")
+
+# MCP Context Forge admin API (Settings → MCP Servers tab).
+try:
+ from .mcp_admin_api import router as mcp_admin_router
+
+ app.include_router(mcp_admin_router)
+ logger.info("MCP admin API enabled (mounting /api/mcp/* endpoints)")
+except Exception: # noqa: BLE001
+ logger.exception("MCP admin API failed to mount; tab will show as unavailable")
+
+# GitPilot-as-MCP-server (turns GitPilot into an MCP server other agents
+# can drive). Off by default; mount only when GITPILOT_EXPOSE_MCP_SERVER=true.
+try:
+ from .mcp_server import MCPServerConfig as _GPMCPConfig
+
+ _gp_mcp_config = _GPMCPConfig.from_env()
+ if _gp_mcp_config.enabled:
+ from . import mcp_server_bridge as _mcp_server_bridge
+
+ _mcp_server_bridge.mount(app, _gp_mcp_config)
+ logger.info(
+ "GitPilot MCP server enabled (mounting %s)", _gp_mcp_config.mount_path
+ )
+ else:
+ logger.info(
+ "GitPilot MCP server disabled (set GITPILOT_EXPOSE_MCP_SERVER=true to enable)"
+ )
+except Exception: # noqa: BLE001
+ logger.exception("GitPilot MCP server failed to mount; check env config")
+
+# ============================================================================
+# CORS Configuration
+# ============================================================================
+# Enable CORS to allow frontend (local dev or Vercel) to connect to backend
+allowed_origins_str = os.getenv("CORS_ORIGINS", "http://localhost:5173")
+allowed_origins = [origin.strip() for origin in allowed_origins_str.split(",")]
+
+logger.info(f"CORS enabled for origins: {allowed_origins}")
+
+app.add_middleware(
+ CORSMiddleware,
+ allow_origins=allowed_origins,
+ allow_credentials=True,
+ allow_methods=["*"],
+ allow_headers=["*"],
+)
+
+
+# ──────────────────────────────────────────────────────────────────
+# Request timing middleware (logs slow startup requests for debugging)
+# ──────────────────────────────────────────────────────────────────
+@app.middleware("http")
+async def _log_slow_requests(request, call_next):
+ """Log any request that takes >1s to complete, with path and duration.
+
+ This helps diagnose first-load slowness: if /api/status takes 8s on the
+ first call but <100ms afterwards, we know the backend is doing lazy
+ initialization on first request.
+ """
+ import time as _t
+ _start = _t.monotonic()
+ try:
+ response = await call_next(request)
+ except Exception:
+ _elapsed = _t.monotonic() - _start
+ logger.error(
+ "[HTTP] ❌ %s %s failed after %.2fs",
+ request.method, request.url.path, _elapsed,
+ )
+ raise
+
+ _elapsed = _t.monotonic() - _start
+ # Only log slow requests to avoid spam (>1s is slow for health endpoints)
+ if _elapsed > 1.0:
+ logger.warning(
+ "[HTTP] 🐢 %s %s took %.2fs (status=%s)",
+ request.method, request.url.path, _elapsed, response.status_code,
+ )
+ elif _elapsed > 0.5:
+ logger.info(
+ "[HTTP] ⚠️ %s %s took %.2fs (status=%s)",
+ request.method, request.url.path, _elapsed, response.status_code,
+ )
+
+ return response
+
+
+def _project_context_to_text(project_context) -> str:
+ if not project_context:
+ return ""
+
+ parts = []
+ mode = getattr(project_context, "mode", None)
+ repo_name = getattr(project_context, "repoName", None)
+ branch = getattr(project_context, "branch", None)
+ languages = getattr(project_context, "languages", []) or []
+ manifests = getattr(project_context, "manifests", []) or []
+ key_files = getattr(project_context, "keyFiles", []) or []
+ readme_preview = getattr(project_context, "readmePreview", None)
+ tree_summary = getattr(project_context, "treeSummary", []) or []
+
+ if mode:
+ parts.append(f"Mode: {mode}")
+ if repo_name:
+ parts.append(f"Repo: {repo_name}")
+ if branch:
+ parts.append(f"Branch: {branch}")
+ if languages:
+ parts.append("Languages: " + ", ".join(languages[:20]))
+ if manifests:
+ parts.append("Manifests: " + ", ".join(manifests[:20]))
+ if key_files:
+ parts.append("Key files: " + ", ".join(key_files[:30]))
+
+ if tree_summary:
+ rendered = []
+ for entry in tree_summary[:200]:
+ if isinstance(entry, dict):
+ rendered.append(f"- {entry.get('type', 'file')}: {entry.get('path', '')}")
+ if rendered:
+ parts.append("Project tree:\n" + "\n".join(rendered))
+
+ if readme_preview:
+ parts.append("README preview:\n" + readme_preview)
+
+ return "\n".join(parts)
+
+
+def _working_set_to_text(working_set) -> str:
+ if not working_set:
+ return ""
+
+ parts = []
+ current_file = getattr(working_set, "currentFile", None)
+ language_id = getattr(working_set, "languageId", None)
+ current_selection = getattr(working_set, "currentSelection", None)
+ open_tabs = getattr(working_set, "openTabs", []) or []
+ related_files = getattr(working_set, "relatedFiles", []) or []
+
+ if current_file:
+ parts.append(f"Current file: {current_file}")
+ if language_id:
+ parts.append(f"Language: {language_id}")
+ if open_tabs:
+ parts.append("Open tabs: " + ", ".join(open_tabs[:12]))
+ if related_files:
+ parts.append("Related files: " + ", ".join(related_files[:12]))
+ if current_selection:
+ parts.append("Selected code:\n```\n" + current_selection + "\n```")
+
+ return "\n".join(parts)
+
+
+def _sanitize_relative_path(p: str) -> str | None:
+ """Reject absolute paths, .. traversal, drive letters, and empty strings.
+
+ Also strips LLM artifacts like "three_backticks_space" that some models
+ produce instead of actual backtick characters.
+ """
+ import os
+ import re as _re
+ p = p.strip().strip("`\"'").strip()
+ # Strip common LLM artifacts
+ # Strip literal descriptions LLMs produce instead of actual backtick chars
+ p = _re.sub(r"(?i)three[\s_+]*backtick[s]?[\s_+]*space[\s_+]*", "", p)
+ p = _re.sub(r"(?i)three[\s_+]*\+[\s_+]*markdown[\s_+]*\+[\s_+]*space[\s_+]*\+?\s*", "", p)
+ p = _re.sub(r"(?i)backtick[s]?[\s_+]*", "", p)
+ p = _re.sub(r"(?i)triple[\s_+]*backtick[s]?[\s_+]*", "", p)
+ p = _re.sub(r"(?i)fenced?[\s_+]*code[\s_+]*block[\s_+]*", "", p)
+ p = p.strip()
+ if not p:
+ return None
+ # Reject absolute / drive / UNC paths
+ if os.path.isabs(p) or p.startswith("\\\\") or (len(p) >= 2 and p[1] == ":"):
+ return None
+ # Reject parent traversal
+ parts = p.replace("\\", "/").split("/")
+ if ".." in parts:
+ return None
+ # Normalise to forward slashes
+ return "/".join(parts)
+
+
+def _extract_edits_from_answer(answer: str) -> list[dict]:
+ """Extract structured ProposedEdit objects from LLM markdown answers.
+
+ Parses fenced code blocks where the filename appears on the opening
+ fence line (e.g. ```python hello.py) — the format we instruct the
+ LLM to use in _build_local_repo_aware_prompt.
+
+ Falls back to matching "save as " / "create file "
+ patterns paired with the nearest code block.
+
+ Returns a list of dicts matching the ProposedEdit schema:
+ [{"file": "hello.py", "kind": "create", "content": "...", "summary": "..."}]
+ """
+ import re
+
+ edits: list[dict] = []
+ seen_paths: set[str] = set()
+ if not answer:
+ return edits
+
+ def _add(raw_path: str, content: str) -> None:
+ path = _sanitize_relative_path(raw_path)
+ if not path or path in seen_paths:
+ return
+ seen_paths.add(path)
+ edits.append({
+ "file": path,
+ "kind": "create",
+ "content": content.rstrip(),
+ "summary": f"Create {path}",
+ })
+
+ # Pattern 1 (preferred): ```lang filepath\n...code...\n```
+ blocks = re.findall(
+ r"```(?:\w+)?\s+([^\n`]+?\.\w+)\s*\n(.*?)```",
+ answer,
+ re.DOTALL,
+ )
+ for filepath, content in blocks:
+ _add(filepath, content)
+
+ if edits:
+ return edits
+
+ # Pattern 2: non-standard format some LLMs produce
+ # "```\npython filepath\n---\n...code...\n---\n```"
+ # or just "python filepath\n---\n...code...\n" outside fences
+ dash_blocks = re.findall(
+ r"(?:```\n?)?(\w+)\s+([^\n]+?\.\w+)\s*\n-{3,}\n(.*?)\n-{3,}",
+ answer,
+ re.DOTALL,
+ )
+ for _lang, filepath, content in dash_blocks:
+ _add(filepath, content)
+
+ if edits:
+ return edits
+
+ # Pattern 3: "save this as `filename`" / "create a file called `filename`"
+ # followed by a code block
+ file_mentions = re.findall(
+ r"(?:save\s+(?:this\s+)?(?:as|to|in)|create\s+(?:a\s+)?(?:file\s+)?(?:called|named)?)\s+[`\"']?([^\s`\"']+\.\w+)[`\"']?",
+ answer,
+ re.IGNORECASE,
+ )
+ code_blocks = re.findall(r"```\w*\n(.*?)```", answer, re.DOTALL)
+
+ if file_mentions and code_blocks:
+ for filename, content in zip(file_mentions, code_blocks):
+ _add(filename, content)
+
+ return edits
+
+
+def _build_local_repo_aware_prompt(req, session) -> str:
+ task_summary = getattr(getattr(req, "task_context", None), "summary", None)
+
+ # System instructions — the file-output format uses triple-backtick
+ # fences with the filepath on the opening line. We use a raw block
+ # to avoid confusion when the prompt is joined with --- separators.
+ system_block = (
+ "You are GitPilot, a multi-agent AI coding assistant running in VS Code.\n"
+ "Use the supplied repository metadata, working-set context, and user request to answer precisely.\n"
+ "\n"
+ "IMPORTANT FILE OUTPUT FORMAT:\n"
+ "When you create or edit files, you MUST use triple-backtick fenced code blocks\n"
+ "with the language AND the file path on the SAME opening line.\n"
+ "\n"
+ "Correct format (you MUST follow this exactly):\n"
+ "\n"
+ " ```python hello.py\n"
+ " print('Hello, World!')\n"
+ " ```\n"
+ "\n"
+ " ```typescript src/utils/validate.ts\n"
+ " export function validate(input: string): boolean {\n"
+ " return input.length > 0;\n"
+ " }\n"
+ " ```\n"
+ "\n"
+ "Rules:\n"
+ "- The opening fence MUST be triple backticks followed by the language then the filepath.\n"
+ "- The closing fence MUST be triple backticks on their own line.\n"
+ "- Do NOT use --- separators or any other format.\n"
+ "- Output the COMPLETE file content, not just a snippet.\n"
+ "- For edits to existing files, output the full updated file.\n"
+ "- Be explicit about which files to create or modify and why.\n"
+ "- Prefer incremental, production-safe changes over large rewrites."
+ )
+
+ sections = [system_block]
+
+ session_lines = [
+ f"Session mode: {getattr(session, 'mode', None)}",
+ f"Folder path: {getattr(session, 'folder_path', None)}",
+ f"Repo root: {getattr(session, 'repo_root', None)}",
+ f"Branch: {getattr(session, 'branch', None)}",
+ ]
+
+ valid_session_lines = [
+ line for line in session_lines if line and not line.endswith(": None")
+ ]
+ if valid_session_lines:
+ sections.append("Session context:\n" + "\n".join(valid_session_lines))
+
+ project_txt = _project_context_to_text(getattr(req, "project_context", None))
+ if project_txt:
+ sections.append("Project context:\n" + project_txt)
+
+ working_txt = _working_set_to_text(getattr(req, "working_set", None))
+ if working_txt:
+ sections.append("Working set:\n" + working_txt)
+
+ if task_summary:
+ sections.append("Task context:\n" + task_summary)
+
+ sections.append("User request:\n" + req.message)
+
+ return "\n\n---\n\n".join(sections)
+
+def get_github_token(authorization: Optional[str] = Header(None)) -> Optional[str]:
+ """
+ Extract GitHub token from Authorization header.
+
+ Supports formats:
+ - Bearer
+ - token
+ -
+ """
+ if not authorization:
+ return None
+
+ if authorization.startswith("Bearer "):
+ return authorization[7:]
+ elif authorization.startswith("token "):
+ return authorization[6:]
+ else:
+ return authorization
+
+
+# --- FIXED: Added default_branch to model ---
+class RepoSummary(BaseModel):
+ id: int
+ name: str
+ full_name: str
+ private: bool
+ owner: str
+ default_branch: str = "main" # <--- CRITICAL FIX: Defaults to main, but can be master/dev
+
+
class PaginatedReposResponse(BaseModel):
    """Response model for paginated repository listing.

    ``total_count`` is only populated by the search path (GitHub's
    list API does not report a total); ``has_more`` drives client-side
    infinite scrolling.
    """
    repositories: List[RepoSummary]
    page: int
    per_page: int
    total_count: Optional[int] = None
    has_more: bool
    # Echo of the search query, if any, so clients can correlate pages.
    query: Optional[str] = None
+
+
class FileEntry(BaseModel):
    """One entry of a repository tree listing."""
    path: str
    # Entry kind as reported by the tree API (e.g. file vs directory).
    type: str


class FileTreeResponse(BaseModel):
    """Flat file tree for a repository; empty for empty repositories."""
    files: List[FileEntry] = Field(default_factory=list)


class FileContent(BaseModel):
    """Content of a single repository file."""
    path: str
    encoding: str = "utf-8"
    content: str
+
+
class CommitRequest(BaseModel):
    """Payload for creating/updating a file via a single commit."""
    path: str
    content: str
    # Commit message used verbatim for the resulting commit.
    message: str


class CommitResponse(BaseModel):
    """Result of a file commit: the path, SHA and (optionally) HTML URL."""
    path: str
    commit_sha: str
    commit_url: Optional[str] = None
+
+
class SettingsResponse(BaseModel):
    """Public view of the persisted LLM settings.

    Per-provider configs are plain dicts (model_dump of the settings
    sub-models) so the schema stays decoupled from internal types.
    """
    provider: LLMProvider
    # All providers the UI may offer, not just the active one.
    providers: List[LLMProvider]
    openai: dict
    claude: dict
    watsonx: dict
    ollama: dict
    ollabridge: dict
    langflow_url: str
    has_langflow_plan_flow: bool
+
+
class ProviderModelsResponse(BaseModel):
    """Models available for a provider; ``error`` set when probing failed."""
    provider: LLMProvider
    models: List[str] = Field(default_factory=list)
    error: Optional[str] = None


class ProviderUpdate(BaseModel):
    """Request body for switching the active LLM provider."""
    provider: LLMProvider
+
+
class ChatPlanRequest(BaseModel):
    """Request to generate a plan for ``goal`` against a repository."""
    repo_owner: str
    repo_name: str
    goal: str
    # Optional working branch; when omitted the planner decides.
    branch_name: Optional[str] = None


class ExecutePlanRequest(BaseModel):
    """Request to execute a previously generated plan."""
    repo_owner: str
    repo_name: str
    plan: PlanResult
    branch_name: Optional[str] = None
+
+
class AuthUrlResponse(BaseModel):
    """OAuth authorization URL plus the CSRF ``state`` to echo back."""
    authorization_url: str
    state: str


class AuthCallbackRequest(BaseModel):
    """OAuth callback payload: authorization code + original state."""
    code: str
    state: str


class TokenValidationRequest(BaseModel):
    """Request to validate a user-supplied access token."""
    access_token: str


class UserInfoResponse(BaseModel):
    """Authenticated GitHub user info."""
    user: GitHubUser
    authenticated: bool


class RepoAccessResponse(BaseModel):
    """Write-access probe result for a repository."""
    can_write: bool
    app_installed: bool
    # How the caller authenticated (e.g. token vs app installation).
    auth_type: str
+
+
# --- v2 Request/Response models ---

class ChatRequest(BaseModel):
    """Unified chat request for the conversational dispatcher."""
    repo_owner: str
    repo_name: str
    message: str
    branch_name: Optional[str] = None
    # When True, a PR is created automatically after execution.
    auto_pr: bool = False
    topology_id: Optional[str] = None  # Override topology for this request
+
+
class IssueCreateRequest(BaseModel):
    """Payload for creating a GitHub issue."""
    title: str
    body: Optional[str] = None
    labels: Optional[List[str]] = None
    assignees: Optional[List[str]] = None
    milestone: Optional[int] = None


class IssueUpdateRequest(BaseModel):
    """Partial update for an issue; ``None`` fields are left unchanged."""
    title: Optional[str] = None
    body: Optional[str] = None
    # "open" or "closed"; None leaves the state as-is.
    state: Optional[str] = None
    labels: Optional[List[str]] = None
    assignees: Optional[List[str]] = None
    milestone: Optional[int] = None


class IssueCommentRequest(BaseModel):
    """Payload for adding a comment to an issue."""
    body: str
+
+
class PRCreateRequest(BaseModel):
    """Payload for opening a pull request (head → base)."""
    title: str
    head: str
    base: str
    body: Optional[str] = None
    draft: bool = False


class PRMergeRequest(BaseModel):
    """Merge options for a pull request."""
    # One of GitHub's merge methods: "merge", "squash" or "rebase".
    merge_method: str = "merge"
    commit_title: Optional[str] = None
    commit_message: Optional[str] = None


class SearchRequest(BaseModel):
    """Generic paged search request."""
    query: str
    per_page: int = 30
    page: int = 1
+
+
+# ============================================================================
+# Repository Endpoints - Enterprise Grade with Pagination & Search
+# ============================================================================
+
@app.get("/api/repos", response_model=PaginatedReposResponse)
async def api_list_repos(
    query: str | None = Query(None, description="Search query"),
    page: int = Query(1, ge=1),
    per_page: int = Query(100, ge=1, le=100),
    authorization: str | None = Header(None),
):
    """List the user's repositories, paginated, optionally filtered by search.

    When ``query`` is given the GitHub search API is used; otherwise the
    plain listing API. Failures map to: 504 for timeouts, 502 for other
    GitHub HTTP errors, 500 for anything else.
    """
    token = get_github_token(authorization)

    try:
        if query:
            result = await search_user_repos(
                query=query,
                page=page,
                per_page=per_page,
                token=token,
            )
        else:
            result = await list_user_repos_paginated(
                page=page,
                per_page=per_page,
                token=token,
            )

        repos = [
            RepoSummary(
                id=r["id"],
                name=r["name"],
                full_name=r["full_name"],
                private=r["private"],
                owner=r["owner"],
                # Propagate the repo's real default branch (master/dev/...).
                default_branch=r.get("default_branch", "main"),
            )
            for r in result["repositories"]
        ]

        return PaginatedReposResponse(
            repositories=repos,
            page=result["page"],
            per_page=result["per_page"],
            total_count=result.get("total_count"),
            has_more=result["has_more"],
            query=query,
        )

    # NOTE: ConnectTimeout is a subclass of TimeoutException, so this
    # more specific handler must stay first.
    except httpx.ConnectTimeout:
        logger.exception("GitHub connection timed out while fetching repositories")
        raise HTTPException(
            status_code=504,
            detail="Timed out while connecting to GitHub. Please try again."
        )

    except httpx.TimeoutException:
        logger.exception("GitHub request timed out while fetching repositories")
        raise HTTPException(
            status_code=504,
            detail="GitHub request timed out. Please try again."
        )

    except httpx.HTTPError as e:
        logger.exception("GitHub HTTP error while fetching repositories")
        raise HTTPException(
            status_code=502,
            detail=f"Failed to contact GitHub: {str(e)}"
        )

    except Exception as e:
        logger.exception("Error fetching repositories")
        raise HTTPException(
            status_code=500,
            detail=f"Unexpected error fetching repositories: {str(e)}"
        )
+
@app.get("/api/repos/all")
async def api_list_all_repos(
    query: Optional[str] = Query(None, description="Search query"),
    authorization: Optional[str] = Header(None),
):
    """
    Fetch ALL user repositories at once (no pagination).

    Pages through the GitHub API in 100-repo batches (capped by
    ``max_pages``), then optionally filters client-side with a
    case-insensitive substring match on name / full name.

    Useful for quick searches, but the paginated endpoint is preferred.
    """
    token = get_github_token(authorization)

    try:
        # Fetch all repositories (this will make multiple API calls).
        all_repos = []
        page = 1
        max_pages = 15  # Safety limit: 1500 repos max (15 * 100)

        while page <= max_pages:
            result = await list_user_repos_paginated(
                page=page,
                per_page=100,
                token=token
            )

            all_repos.extend(result["repositories"])

            if not result["has_more"]:
                break

            page += 1

        # Filter by query if provided.
        if query:
            query_lower = query.lower()
            all_repos = [
                r for r in all_repos
                if query_lower in r["name"].lower() or query_lower in r["full_name"].lower()
            ]

        repos = [
            RepoSummary(
                id=r["id"],
                name=r["name"],
                full_name=r["full_name"],
                private=r["private"],
                owner=r["owner"],
                # Propagate the repo's real default branch (master/dev/...).
                default_branch=r.get("default_branch", "main"),
            )
            for r in all_repos
        ]

        return {
            "repositories": repos,
            "total_count": len(repos),
            "query": query,
        }

    except Exception as e:
        # FIX: use the module-level `logger` (was the bare `logging`
        # module), matching every other handler in this file.
        logger.exception("Error fetching all repositories")
        return JSONResponse(
            content={"error": f"Failed to fetch repositories: {str(e)}"},
            status_code=500
        )
+
+
@app.get("/api/repos/{owner}/{repo}/tree", response_model=FileTreeResponse)
async def api_repo_tree(
    owner: str = FPath(...),
    repo: str = FPath(...),
    ref: Optional[str] = Query(
        None,
        description="Git reference (branch, tag, or commit SHA). If omitted, defaults to HEAD.",
    ),
    authorization: Optional[str] = Header(None),
):
    """
    Get the file tree for a repository.

    Handles 'main' vs 'master' discrepancies and empty repositories
    gracefully: a 409 from GitHub (empty repo) becomes an empty tree,
    and a 404 becomes a hint that the default branch may differ.
    """
    token = get_github_token(authorization)

    # Keep legacy behavior: missing/empty ref behaves like HEAD.
    ref_value = (ref or "").strip() or "HEAD"

    try:
        tree = await get_repo_tree(owner, repo, token=token, ref=ref_value)
        return FileTreeResponse(files=[FileEntry(**f) for f in tree])

    except HTTPException as e:
        # 409: GitHub's response for an empty repository — not an error
        # from the client's point of view.
        if e.status_code == 409:
            return FileTreeResponse(files=[])

        if e.status_code == 404:
            return JSONResponse(
                status_code=404,
                content={
                    "detail": f"Ref '{ref_value}' not found. The repository might be using a different default branch (e.g., 'master')."
                }
            )

        # FIX: bare `raise` re-raises with the original traceback intact
        # (was `raise e`, which resets the traceback origin).
        raise
+
+
@app.get("/api/repos/{owner}/{repo}/file", response_model=FileContent)
async def api_get_file(
    owner: str = FPath(...),
    repo: str = FPath(...),
    path: str = Query(...),
    authorization: Optional[str] = Header(None),
):
    """Return the content of a single repository file."""
    gh_token = get_github_token(authorization)
    file_text = await get_file(owner, repo, path, token=gh_token)
    return FileContent(path=path, content=file_text)
+
+
@app.post("/api/repos/{owner}/{repo}/file", response_model=CommitResponse)
async def api_put_file(
    owner: str = FPath(...),
    repo: str = FPath(...),
    payload: CommitRequest = ...,
    authorization: Optional[str] = Header(None),
):
    """Create or update a file, committing it with the given message."""
    gh_token = get_github_token(authorization)
    commit_info = await put_file(
        owner,
        repo,
        payload.path,
        payload.content,
        payload.message,
        token=gh_token,
    )
    return CommitResponse(**commit_info)
+
+
+# ============================================================================
+# Settings Endpoints
+# ============================================================================
+
def settings_response_from(s: AppSettings) -> SettingsResponse:
    """Map persisted ``AppSettings`` onto the public ``SettingsResponse``."""
    # Full provider menu offered to the UI, independent of the active one.
    available_providers = [
        LLMProvider.openai,
        LLMProvider.claude,
        LLMProvider.watsonx,
        LLMProvider.ollama,
        LLMProvider.ollabridge,
    ]
    return SettingsResponse(
        provider=s.provider,
        providers=available_providers,
        openai=s.openai.model_dump(),
        claude=s.claude.model_dump(),
        watsonx=s.watsonx.model_dump(),
        ollama=s.ollama.model_dump(),
        ollabridge=s.ollabridge.model_dump(),
        langflow_url=s.langflow_url,
        has_langflow_plan_flow=bool(s.langflow_plan_flow_id),
    )
+
+
@app.get("/api/settings", response_model=SettingsResponse)
async def api_get_settings():
    """
    Fast path: return persisted settings immediately, without probing
    providers or models, so the Admin / LLM Settings page renders
    quickly on first paint.
    """
    current: AppSettings = get_settings()
    return settings_response_from(current)
+
+
@app.post("/api/settings/bootstrap", response_model=SettingsResponse)
async def api_bootstrap_settings():
    """
    Slow path: explicitly run local provider/model auto-configuration.

    Intended to be called after the page renders (or on startup) so the
    first settings paint is never blocked by provider probing.
    """
    configured: AppSettings = autoconfigure_local_provider()
    return settings_response_from(configured)
+
+
@app.get("/api/settings/models", response_model=ProviderModelsResponse)
async def api_list_models(provider: Optional[LLMProvider] = Query(None)):
    """
    Return the LLM models available for a provider.

    When 'provider' is omitted, the currently active provider from the
    persisted settings is used.
    """
    settings: AppSettings = get_settings()
    target_provider = provider or settings.provider

    model_names, probe_error = list_models_for_provider(target_provider, settings)

    return ProviderModelsResponse(
        provider=target_provider,
        models=model_names,
        error=probe_error,
    )
+
+
@app.post("/api/settings/provider", response_model=SettingsResponse)
async def api_set_provider(update: ProviderUpdate):
    """
    Switch the active LLM provider.

    Switching to a *local* provider (ollama / ollabridge) legitimately
    triggers a forced local bootstrap; cloud providers never do.
    """
    settings = set_provider(update.provider)

    local_providers = (LLMProvider.ollama, LLMProvider.ollabridge)
    if settings.provider in local_providers:
        settings = autoconfigure_local_provider(force=True)

    return settings_response_from(settings)
+
+
@app.put("/api/settings/llm", response_model=SettingsResponse)
async def api_update_llm_settings(updates: dict):
    """
    Update full LLM settings including provider-specific configs.

    Important:
    - Do NOT auto-probe providers here on every save.
    - Saving should be fast and deterministic.
    """
    # FIX: removed an unreachable duplicate implementation that followed
    # this return (dead code left over from a refactor; it additionally
    # ran autoconfigure_local_provider() on every save, contradicting
    # the "no auto-probe on save" contract documented above).
    s = update_settings(updates)
    return settings_response_from(s)
+
+
+# ============================================================================
+# Context-window meter
+# ============================================================================
+
@app.get("/api/context/usage")
async def api_context_usage(session_id: Optional[str] = Query(None)):
    """Return a snapshot of the active model's context-window utilisation.

    When ``session_id`` is supplied, the ``messages`` row reflects the
    real token total of that session's persisted conversation. Without
    it the row is 0 and the popover shows the structure-only view (still
    useful: tool schemas + system prompt + reserved are all populated).

    Raises:
        HTTPException: 404 when the context-meter feature flag is off.
    """
    # Lazy imports: keep startup cheap and avoid import cycles — this
    # endpoint is the only consumer of the context-meter machinery.
    from . import flags
    from .context_meter import (
        FLAG_CONTEXT_METER,
        build_usage,
        count_messages_tokens,
        count_system_prompt_tokens,
        count_tool_schema_tokens,
    )

    if not flags.is_on(FLAG_CONTEXT_METER, default=True):
        raise HTTPException(status_code=404, detail="Context meter is disabled")

    s: AppSettings = get_settings()
    lite_mode = _is_lite_mode_active()

    # Tool count + tool-schema tokens — best-effort, lazy import so we
    # don't pay the agent-tools cost on a settings-only client. In lite
    # mode the planner doesn't see tools at all, so we report zero.
    tool_count = 0
    tool_lists: list[list[object]] = []
    if not lite_mode:
        try:
            from .agentic import _tools

            t = _tools()
            for key in (
                "REPOSITORY_TOOLS",
                "WRITE_TOOLS",
                "ISSUE_TOOLS",
                "PR_TOOLS",
                "SEARCH_TOOLS",
                "LOCAL_TOOLS",
            ):
                group = t.get(key) or []
                tool_lists.append(list(group))
                tool_count += len(group)
        except Exception as exc:  # pragma: no cover - defensive
            logger.debug("[context-meter] tool count unavailable: %s", exc)

    tool_schema_tokens = count_tool_schema_tokens(tool_lists) if tool_lists else 0
    system_prompt_tokens = count_system_prompt_tokens(lite_mode=lite_mode)

    # Conversation messages — only when the caller passes a session_id.
    # Failure to load is silent: the popover stays useful with messages=0
    # rather than erroring on a freshly-created session.
    messages_tokens = 0
    if session_id:
        try:
            session = _session_mgr.load(session_id)
            messages_tokens = count_messages_tokens(session.messages)
        except Exception as exc:
            logger.debug(
                "[context-meter] session %s not loadable: %s", session_id, exc
            )

    # Repo context summary is computed fresh per plan and not cached
    # per-session, so we leave the row at 0. When we add per-session
    # caching (planned), populate this from the cache.
    breakdown = {
        "messages": messages_tokens,
        "system_prompt": system_prompt_tokens,
        "repo_context": 0,
        "tool_schemas": tool_schema_tokens,
    }

    usage = build_usage(
        s,
        breakdown=breakdown,
        tool_count=tool_count,
        lite_mode=lite_mode,
    )
    return usage.to_dict()
+
+
+# ============================================================================
+# Chat Endpoints
+# ============================================================================
+
@app.post("/api/chat/plan")
async def api_chat_plan(req: ChatPlanRequest, authorization: Optional[str] = Header(None)):
    """Generate an execution plan for ``req.goal`` against a repository.

    Uses the lite single-agent planner when Lite Mode is active,
    otherwise the full multi-agent planner. Planner failures are
    classified by message content and mapped to actionable HTTP errors:
    429 (quota/rate limit), 502 (model too small / empty output),
    automatic Lite-planner retry on malformed structured output, and a
    clean 500 for everything else.
    """
    token = get_github_token(authorization)

    logger.info(
        "PLAN REQUEST: %s/%s | branch_name=%r",
        req.repo_owner,
        req.repo_name,
        req.branch_name,
    )

    with execution_context(token, ref=req.branch_name):
        full_name = f"{req.repo_owner}/{req.repo_name}"

        # Use lite planner when Lite Mode is active (setting OR topology)
        planner = generate_plan_lite if _is_lite_mode_active() else generate_plan

        try:
            plan = await planner(req.goal, full_name, token=token, branch_name=req.branch_name)
            return plan
        except Exception as exc:
            error_msg = str(exc)

            # ── Quota / rate-limit detection ────────────────
            # NOTE: matching on the error text is provider-agnostic but
            # inherently heuristic; keep keywords lowercase.
            _quota_keywords = [
                "insufficient_quota", "exceeded your current quota",
                "rate_limit_exceeded", "429",
                "billing", "plan and billing",
            ]
            _is_quota = any(kw in error_msg.lower() for kw in _quota_keywords)
            if _is_quota:
                logger.warning("[GitPilot] LLM quota/rate-limit error: %s", error_msg)
                raise HTTPException(
                    status_code=429,
                    detail=(
                        "Your LLM provider credits have been exhausted or you've hit "
                        "a rate limit. Please check your plan and billing details at "
                        "your provider's dashboard, or switch to a different provider "
                        "in Settings (e.g. Ollama or OllaBridge for free local models)."
                    ),
                ) from exc

            # ── Empty/invalid LLM response (small model can't follow ReAct) ─
            # These markers are case-sensitive by design (they come from
            # known library error strings).
            _empty_llm_errors = (
                "No valid task outputs",
                "Invalid response from LLM call",
                "None or empty",
            )
            if any(kw in error_msg for kw in _empty_llm_errors):
                logger.warning(
                    "[GitPilot] LLM returned empty/invalid response — "
                    "model may be too small for multi-agent CrewAI prompts: %s",
                    error_msg,
                )
                raise HTTPException(
                    status_code=502,
                    detail=(
                        "The LLM could not complete the multi-agent reasoning. "
                        "This usually happens with small local models "
                        "(qwen2.5:0.5b, tinyllama, phi3:mini, etc.) that struggle "
                        "with the ReAct format. Solutions:\n"
                        "• Switch to a larger model (llama3, qwen2.5:7b, mistral)\n"
                        "• Enable Lite Mode in Settings for simpler prompts\n"
                        "• Use a cloud provider (OpenAI, Claude) for complex tasks"
                    ),
                ) from exc

            # ── Structured-output parse failure (common with small models) ─
            # New markers match the friendly RuntimeError surfaces we
            # raise in gitpilot/agentic.py::generate_plan for refusal /
            # ValidationError / tool-loop hallucination paths. Catching
            # them here routes the user to the single-agent Lite planner
            # automatically — much better than the previous outcome where
            # those RuntimeErrors leaked through as raw HTTP 500.
            _plan_parse_markers = (
                "validation error for planresult",
                "json_invalid",
                "invalid json: key must be a string",
                "did not return a valid plan structure",
                "did not return a usable result",
                "the planner refused to produce a plan",
                "the planner produced paths that do not match",
            )
            if any(marker in error_msg.lower() for marker in _plan_parse_markers):
                logger.warning(
                    "[GitPilot] Planner returned malformed structured output. "
                    "Falling back to Lite planner. Error: %s",
                    error_msg,
                )
                try:
                    # Retry once with the simpler single-agent planner.
                    return await generate_plan_lite(
                        req.goal,
                        full_name,
                        token=token,
                        branch_name=req.branch_name,
                    )
                except Exception as lite_exc:
                    logger.exception(
                        "[GitPilot] Lite planner fallback also failed after parse error: %s",
                        lite_exc,
                    )
                    # Surface a clear 502 with actionable guidance rather
                    # than leaking the raw RuntimeError as a generic 500.
                    raise HTTPException(
                        status_code=502,
                        detail=(
                            "The planner couldn't produce a usable plan even "
                            "with the simplified Lite-mode fallback. This is "
                            "almost always a small-model issue — the LLM is "
                            "looping on tool calls or losing its instruction "
                            "format mid-task. Solutions:\n"
                            "• Switch to a larger Ollama model (llama3.1:8b → "
                            "llama3.1:70b, qwen2.5:14b+, mistral)\n"
                            "• Use a cloud provider (OpenAI, Claude) for "
                            "complex multi-step tasks\n"
                            "• Try simplifying the request (one file at a time)"
                        ),
                    ) from lite_exc

            # Anything else — surface a clean 500 with a clear message
            # so the UI's existing error handler renders something
            # actionable instead of a bare "Internal Server Error".
            logger.exception("[GitPilot] /api/chat/plan failed: %s", error_msg)
            raise HTTPException(
                status_code=500,
                detail=error_msg or "Plan generation failed.",
            ) from exc
+
+
@app.post("/api/chat/execute")
async def api_chat_execute(
    req: ExecutePlanRequest,
    authorization: Optional[str] = Header(None)
):
    """Execute a previously generated plan against a repository.

    Uses the lite executor when Lite Mode is active. LLM failures are
    classified by message content: 429 for quota/rate limits, 502 for
    empty/invalid model output, 504 for timeouts, 503 when the circuit
    breaker is open; anything else re-raises unchanged.
    """
    token = get_github_token(authorization)

    with execution_context(token, ref=req.branch_name):
        full_name = f"{req.repo_owner}/{req.repo_name}"
        executor = execute_plan_lite if _is_lite_mode_active() else execute_plan
        try:
            result = await executor(
                req.plan, full_name, token=token, branch_name=req.branch_name
            )
        except Exception as exc:
            error_msg = str(exc)
            # Heuristic, provider-agnostic matching on the error text.
            _quota_keywords = [
                "insufficient_quota", "exceeded your current quota",
                "rate_limit_exceeded", "429", "billing",
            ]
            if any(kw in error_msg.lower() for kw in _quota_keywords):
                raise HTTPException(
                    status_code=429,
                    detail=(
                        "Your LLM provider credits have been exhausted or you've hit "
                        "a rate limit. Please check your plan and billing details, "
                        "or switch to a free local provider in Settings."
                    ),
                ) from exc
            # Case-sensitive markers from known library error strings.
            _empty_llm_errors = (
                "No valid task outputs",
                "Invalid response from LLM call",
                "None or empty",
            )
            if any(kw in error_msg for kw in _empty_llm_errors):
                raise HTTPException(
                    status_code=502,
                    detail=(
                        "The LLM could not complete the task. This usually happens "
                        "with small local models (qwen2.5:0.5b, tinyllama, phi3:mini). "
                        "Try a larger model (llama3, qwen2.5:7b), enable Lite Mode "
                        "in Settings, or use a cloud provider."
                    ),
                ) from exc
            if isinstance(exc, TimeoutError) or "timed out" in error_msg.lower():
                raise HTTPException(
                    status_code=504,
                    detail=(
                        "The agent operation timed out. The LLM provider may be "
                        "overloaded. Try again or switch to a faster provider."
                    ),
                ) from exc
            if "circuit breaker" in error_msg.lower():
                raise HTTPException(
                    status_code=503,
                    detail=(
                        "The LLM provider is temporarily unavailable after repeated "
                        "failures. Please wait and try again shortly."
                    ),
                ) from exc
            raise
        # Annotate how the branch was chosen unless the executor already did.
        if isinstance(result, dict):
            result.setdefault(
                "mode",
                "sticky" if req.branch_name else "hard-switch",
            )
        return result
+
+
@app.get("/api/flow/current")
async def api_get_flow(topology: Optional[str] = Query(None)):
    """Return the agent flow definition as a graph.

    Resolution order: the explicit ``topology`` query param, then the
    user's saved preference, and finally the legacy
    ``get_flow_definition()`` output for full backward compatibility.
    """
    selected = topology or get_saved_topology_preference()
    if selected:
        return _get_topology_graph(selected)
    # Legacy path — returns the original hardcoded graph.
    return await get_flow_definition()
+
+
# ============================================================================
# Topology Registry Endpoints (additive — no existing behaviour changed)
# ============================================================================

@app.get("/api/flow/topologies")
async def api_list_topologies():
    """Return lightweight summaries of all available topology presets."""
    return _list_topologies()


@app.get("/api/flow/topology/{topology_id}")
async def api_get_topology(topology_id: str):
    """Return the full flow graph for a specific topology."""
    return _get_topology_graph(topology_id)
+
+
class ClassifyRequest(BaseModel):
    """Request body for topology auto-detection."""
    message: str


@app.post("/api/flow/classify")
async def api_classify_message(req: ClassifyRequest):
    """Auto-detect the best topology for a given user message.

    Returns the recommended topology, confidence score, and up to 4
    alternatives ranked by relevance.
    """
    result = _classify_message(req.message)
    return result.to_dict()


class TopologyPrefRequest(BaseModel):
    """Request body for saving a topology preference."""
    topology: str


@app.get("/api/settings/topology")
async def api_get_topology_pref():
    """Return the user's saved topology preference (or null)."""
    pref = get_saved_topology_preference()
    return {"topology": pref}


@app.post("/api/settings/topology")
async def api_set_topology_pref(req: TopologyPrefRequest):
    """Save the user's preferred topology."""
    save_topology_preference(req.topology)
    return {"status": "ok", "topology": req.topology}
+
+
+# ============================================================================
+# Conversational Chat Endpoint (v2 upgrade)
+# ============================================================================
+
@app.post("/api/chat/message")
async def api_chat_message(req: ChatRequest, authorization: Optional[str] = Header(None)):
    """
    Unified conversational endpoint. The router analyses the message and
    dispatches to the appropriate agent (issue, PR, search, review, learning,
    or the existing plan+execute pipeline).

    LLM failures are classified by message content and mapped to 429
    (quota), 502 (empty/invalid output), 504 (timeout) or 503 (circuit
    breaker open); anything else re-raises unchanged.
    """
    token = get_github_token(authorization)

    logger.info(
        "CHAT MESSAGE: %s/%s | message=%r | branch=%r",
        req.repo_owner,
        req.repo_name,
        req.message[:80],
        req.branch_name,
    )

    with execution_context(token, ref=req.branch_name):
        full_name = f"{req.repo_owner}/{req.repo_name}"
        try:
            result = await dispatch_request(
                req.message, full_name, token=token, branch_name=req.branch_name,
                topology_id=req.topology_id,
            )
        except Exception as exc:
            error_msg = str(exc)
            # Heuristic, provider-agnostic matching on the error text.
            _quota_keywords = [
                "insufficient_quota", "exceeded your current quota",
                "rate_limit_exceeded", "429", "billing",
            ]
            if any(kw in error_msg.lower() for kw in _quota_keywords):
                raise HTTPException(
                    status_code=429,
                    detail=(
                        "Your LLM provider credits have been exhausted or you've hit "
                        "a rate limit. Please check your plan and billing details, "
                        "or switch to a free local provider in Settings."
                    ),
                ) from exc
            # Case-sensitive markers from known library error strings.
            _empty_llm_errors = (
                "No valid task outputs",
                "Invalid response from LLM call",
                "None or empty",
            )
            if any(kw in error_msg for kw in _empty_llm_errors):
                raise HTTPException(
                    status_code=502,
                    detail=(
                        "The LLM could not complete the task. This usually happens "
                        "with small local models (qwen2.5:0.5b, tinyllama, phi3:mini). "
                        "Try a larger model (llama3, qwen2.5:7b), enable Lite Mode "
                        "in Settings, or use a cloud provider."
                    ),
                ) from exc
            if isinstance(exc, TimeoutError) or "timed out" in error_msg.lower():
                raise HTTPException(
                    status_code=504,
                    detail=(
                        "The agent operation timed out. The LLM provider may be "
                        "overloaded. Try again or switch to a faster provider."
                    ),
                ) from exc
            if "circuit breaker" in error_msg.lower():
                raise HTTPException(
                    status_code=503,
                    detail=(
                        "The LLM provider is temporarily unavailable after repeated "
                        "failures. Please wait and try again shortly."
                    ),
                ) from exc
            raise

        # If auto_pr is requested and execution completed, create PR
        if (
            req.auto_pr
            and isinstance(result, dict)
            and result.get("category") == "plan_execute"
            and result.get("plan")
        ):
            # NOTE: dispatch only produced a plan here; the PR itself is
            # created by the execute step, so we only hint at it.
            result["auto_pr_hint"] = (
                "Plan generated. Execute it first, then auto-PR will be created."
            )

        return result
+
+
@app.post("/api/chat/execute-with-pr")
async def api_chat_execute_with_pr(
    req: ExecutePlanRequest,
    authorization: Optional[str] = Header(None),
):
    """Execute a plan AND automatically create a pull request afterwards.

    The PR is only attempted when execution reports status "completed"
    and a branch is known; PR creation failures are tolerated (the
    execution result is still returned, just without a "pull_request"
    entry).
    """
    token = get_github_token(authorization)

    with execution_context(token, ref=req.branch_name):
        full_name = f"{req.repo_owner}/{req.repo_name}"
        executor = execute_plan_lite if _is_lite_mode_active() else execute_plan
        try:
            result = await executor(
                req.plan, full_name, token=token, branch_name=req.branch_name,
            )
        except Exception as exc:
            error_msg = str(exc)
            # Heuristic, provider-agnostic matching on the error text.
            _quota_keywords = [
                "insufficient_quota", "exceeded your current quota",
                "rate_limit_exceeded", "429", "billing",
            ]
            if any(kw in error_msg.lower() for kw in _quota_keywords):
                raise HTTPException(
                    status_code=429,
                    detail=(
                        "Your LLM provider credits have been exhausted. "
                        "Check billing or switch to a free local provider."
                    ),
                ) from exc
            if "No valid task outputs" in error_msg:
                raise HTTPException(
                    status_code=502,
                    detail=(
                        "The LLM returned an empty response. Try enabling "
                        "Lite Mode for better results with small models."
                    ),
                ) from exc
            raise

        if isinstance(result, dict) and result.get("status") == "completed":
            # Prefer the branch the executor actually used; fall back to
            # the one the caller requested.
            branch = result.get("branch", req.branch_name)
            if branch:
                pr = await create_pr_after_execution(
                    full_name,
                    branch,
                    req.plan.goal,
                    result.get("executionLog", {}),
                    token=token,
                )
                if pr:
                    result["pull_request"] = {
                        "number": pr.get("number"),
                        "url": pr.get("html_url"),
                        "title": pr.get("title"),
                    }

            result.setdefault(
                "mode",
                "sticky" if req.branch_name else "hard-switch",
            )

        return result
+
+
+# ============================================================================
+# Issue Endpoints (v2 upgrade)
+# ============================================================================
+
@app.get("/api/repos/{owner}/{repo}/issues")
async def api_list_issues(
    owner: str = FPath(...),
    repo: str = FPath(...),
    state: str = Query("open"),
    labels: Optional[str] = Query(None),
    per_page: int = Query(30, ge=1, le=100),
    page: int = Query(1, ge=1),
    authorization: Optional[str] = Header(None),
):
    """List issues for a repository, with state/label filters and paging."""
    gh_token = get_github_token(authorization)
    issue_list = await github_issues.list_issues(
        owner,
        repo,
        state=state,
        labels=labels,
        per_page=per_page,
        page=page,
        token=gh_token,
    )
    return {"issues": issue_list, "page": page, "per_page": per_page}
+
+
@app.get("/api/repos/{owner}/{repo}/issues/{issue_number}")
async def api_get_issue(
    owner: str = FPath(...),
    repo: str = FPath(...),
    issue_number: int = FPath(...),
    authorization: Optional[str] = Header(None),
):
    """Fetch a single issue by number."""
    gh_token = get_github_token(authorization)
    issue = await github_issues.get_issue(owner, repo, issue_number, token=gh_token)
    return issue
+
+
@app.post("/api/repos/{owner}/{repo}/issues")
async def api_create_issue(
    owner: str = FPath(...),
    repo: str = FPath(...),
    payload: IssueCreateRequest = ...,
    authorization: Optional[str] = Header(None),
):
    """Create a new issue from the request payload."""
    gh_token = get_github_token(authorization)
    created = await github_issues.create_issue(
        owner,
        repo,
        payload.title,
        body=payload.body,
        labels=payload.labels,
        assignees=payload.assignees,
        milestone=payload.milestone,
        token=gh_token,
    )
    return created
+
+
@app.patch("/api/repos/{owner}/{repo}/issues/{issue_number}")
async def api_update_issue(
    owner: str = FPath(...),
    repo: str = FPath(...),
    issue_number: int = FPath(...),
    payload: IssueUpdateRequest = ...,
    authorization: Optional[str] = Header(None),
):
    """Update an existing issue; None fields are passed through unchanged."""
    gh_token = get_github_token(authorization)
    updated = await github_issues.update_issue(
        owner,
        repo,
        issue_number,
        title=payload.title,
        body=payload.body,
        state=payload.state,
        labels=payload.labels,
        assignees=payload.assignees,
        milestone=payload.milestone,
        token=gh_token,
    )
    return updated
+
+
@app.get("/api/repos/{owner}/{repo}/issues/{issue_number}/comments")
async def api_list_issue_comments(
    owner: str = FPath(...),
    repo: str = FPath(...),
    issue_number: int = FPath(...),
    authorization: Optional[str] = Header(None),
):
    """List all comments on an issue."""
    gh_token = get_github_token(authorization)
    comments = await github_issues.list_issue_comments(owner, repo, issue_number, token=gh_token)
    return comments
+
+
@app.post("/api/repos/{owner}/{repo}/issues/{issue_number}/comments")
async def api_add_issue_comment(
    owner: str = FPath(...),
    repo: str = FPath(...),
    issue_number: int = FPath(...),
    payload: IssueCommentRequest = ...,
    authorization: Optional[str] = Header(None),
):
    """Post a new comment on an issue."""
    gh_token = get_github_token(authorization)
    comment = await github_issues.add_issue_comment(
        owner,
        repo,
        issue_number,
        payload.body,
        token=gh_token,
    )
    return comment
+
+
+# ============================================================================
+# Pull Request Endpoints (v2 upgrade)
+# ============================================================================
+
@app.get("/api/repos/{owner}/{repo}/pulls")
async def api_list_pulls(
    owner: str = FPath(...),
    repo: str = FPath(...),
    state: str = Query("open"),
    per_page: int = Query(30, ge=1, le=100),
    page: int = Query(1, ge=1),
    authorization: Optional[str] = Header(None),
):
    """List pull requests, filtered by state, with paging."""
    gh_token = get_github_token(authorization)
    pr_list = await github_pulls.list_pull_requests(
        owner,
        repo,
        state=state,
        per_page=per_page,
        page=page,
        token=gh_token,
    )
    return {"pull_requests": pr_list, "page": page, "per_page": per_page}
+
+
@app.get("/api/repos/{owner}/{repo}/pulls/{pull_number}")
async def api_get_pull(
    owner: str = FPath(...),
    repo: str = FPath(...),
    pull_number: int = FPath(...),
    authorization: Optional[str] = Header(None),
):
    """Fetch a single pull request by number."""
    gh_token = get_github_token(authorization)
    pr = await github_pulls.get_pull_request(owner, repo, pull_number, token=gh_token)
    return pr
+
+
@app.post("/api/repos/{owner}/{repo}/pulls")
async def api_create_pull(
    owner: str = FPath(...),
    repo: str = FPath(...),
    payload: PRCreateRequest = ...,
    authorization: Optional[str] = Header(None),
):
    """Open a new pull request (head → base)."""
    gh_token = get_github_token(authorization)
    created = await github_pulls.create_pull_request(
        owner,
        repo,
        title=payload.title,
        head=payload.head,
        base=payload.base,
        body=payload.body,
        draft=payload.draft,
        token=gh_token,
    )
    return created
+
+
+@app.put("/api/repos/{owner}/{repo}/pulls/{pull_number}/merge")
+async def api_merge_pull(
+ owner: str = FPath(...),
+ repo: str = FPath(...),
+ pull_number: int = FPath(...),
+ payload: PRMergeRequest = ...,
+ authorization: Optional[str] = Header(None),
+):
+ """Merge a pull request."""
+ token = get_github_token(authorization)
+ return await github_pulls.merge_pull_request(
+ owner, repo, pull_number,
+ merge_method=payload.merge_method,
+ commit_title=payload.commit_title,
+ commit_message=payload.commit_message,
+ token=token,
+ )
+
+
+@app.get("/api/repos/{owner}/{repo}/pulls/{pull_number}/files")
+async def api_list_pr_files(
+ owner: str = FPath(...),
+ repo: str = FPath(...),
+ pull_number: int = FPath(...),
+ authorization: Optional[str] = Header(None),
+):
+ """List files changed in a pull request."""
+ token = get_github_token(authorization)
+ return await github_pulls.list_pr_files(owner, repo, pull_number, token=token)
+
+
+# ============================================================================
+# Search Endpoints (v2 upgrade)
+# ============================================================================
+
+@app.get("/api/search/code")
+async def api_search_code(
+ q: str = Query(..., description="Search query"),
+ owner: Optional[str] = Query(None),
+ repo: Optional[str] = Query(None),
+ language: Optional[str] = Query(None),
+ per_page: int = Query(30, ge=1, le=100),
+ page: int = Query(1, ge=1),
+ authorization: Optional[str] = Header(None),
+):
+ """Search for code across GitHub."""
+ token = get_github_token(authorization)
+ return await github_search.search_code(
+ q, owner=owner, repo=repo, language=language,
+ per_page=per_page, page=page, token=token,
+ )
+
+
+@app.get("/api/search/issues")
+async def api_search_issues(
+ q: str = Query(..., description="Search query"),
+ owner: Optional[str] = Query(None),
+ repo: Optional[str] = Query(None),
+ state: Optional[str] = Query(None),
+ label: Optional[str] = Query(None),
+ per_page: int = Query(30, ge=1, le=100),
+ page: int = Query(1, ge=1),
+ authorization: Optional[str] = Header(None),
+):
+ """Search issues and pull requests."""
+ token = get_github_token(authorization)
+ return await github_search.search_issues(
+ q, owner=owner, repo=repo, state=state, label=label,
+ per_page=per_page, page=page, token=token,
+ )
+
+
+@app.get("/api/search/repositories")
+async def api_search_repositories(
+ q: str = Query(..., description="Search query"),
+ language: Optional[str] = Query(None),
+ sort: Optional[str] = Query(None),
+ per_page: int = Query(30, ge=1, le=100),
+ page: int = Query(1, ge=1),
+ authorization: Optional[str] = Header(None),
+):
+ """Search for repositories."""
+ token = get_github_token(authorization)
+ return await github_search.search_repositories(
+ q, language=language, sort=sort,
+ per_page=per_page, page=page, token=token,
+ )
+
+
+@app.get("/api/search/users")
+async def api_search_users(
+ q: str = Query(..., description="Search query"),
+ type_filter: Optional[str] = Query(None, alias="type"),
+ location: Optional[str] = Query(None),
+ language: Optional[str] = Query(None),
+ per_page: int = Query(30, ge=1, le=100),
+ page: int = Query(1, ge=1),
+ authorization: Optional[str] = Header(None),
+):
+ """Search for GitHub users and organizations."""
+ token = get_github_token(authorization)
+ return await github_search.search_users(
+ q, type_filter=type_filter, location=location, language=language,
+ per_page=per_page, page=page, token=token,
+ )
+
+
+# ============================================================================
+# Route Analysis Endpoint (v2 upgrade)
+# ============================================================================
+
+@app.post("/api/chat/route")
+async def api_chat_route(payload: dict):
+ """Preview how a message would be routed without executing it.
+
+ Useful for the frontend to display which agent(s) will handle the request.
+ """
+ message = payload.get("message", "")
+ if not message:
+ return JSONResponse({"error": "message is required"}, status_code=400)
+
+ workflow = route_request(message)
+ return {
+ "category": workflow.category.value,
+ "agents": [a.value for a in workflow.agents],
+ "description": workflow.description,
+ "requires_repo_context": workflow.requires_repo_context,
+ "entity_number": workflow.entity_number,
+ "metadata": workflow.metadata,
+ }
+
+
+# ============================================================================
+# Authentication Endpoints (Web Flow + Device Flow)
+# ============================================================================
+
+@app.get("/api/auth/url", response_model=AuthUrlResponse)
+async def api_get_auth_url():
+ """
+ Generate GitHub OAuth authorization URL (Web Flow).
+ Requires Client Secret to be configured.
+ """
+ auth_url, state = generate_authorization_url()
+ return AuthUrlResponse(authorization_url=auth_url, state=state)
+
+
+@app.post("/api/auth/callback", response_model=AuthSession)
+async def api_auth_callback(request: AuthCallbackRequest):
+ """
+ Handle GitHub OAuth callback (Web Flow).
+ Exchange the authorization code for an access token.
+ """
+ try:
+ session = await exchange_code_for_token(request.code, request.state)
+ return session
+ except ValueError as e:
+ return JSONResponse(
+ {"error": str(e)},
+ status_code=400,
+ )
+
+
+@app.post("/api/auth/validate", response_model=UserInfoResponse)
+async def api_validate_token(request: TokenValidationRequest):
+ """
+ Validate a GitHub access token and return user information.
+ """
+ user = await validate_token(request.access_token)
+ if user:
+ return UserInfoResponse(user=user, authenticated=True)
+ return UserInfoResponse(
+ user=GitHubUser(login="", id=0, avatar_url=""),
+ authenticated=False,
+ )
+
+
+@app.post("/api/auth/device/code")
+async def api_device_code():
+ """
+ Start the device login flow (Step 1).
+ Does NOT require a client secret.
+ """
+ try:
+ data = await initiate_device_flow()
+ return data
+ except Exception as e:
+ return JSONResponse({"error": str(e)}, status_code=500)
+
+
+@app.post("/api/auth/device/poll")
+async def api_device_poll(payload: dict):
+ """
+ Poll GitHub to check if user authorized the device (Step 2).
+ """
+ device_code = payload.get("device_code")
+ if not device_code:
+ return JSONResponse({"error": "Missing device_code"}, status_code=400)
+
+ try:
+ session = await poll_device_token(device_code)
+ if session:
+ return session
+
+ return JSONResponse({"status": "pending"}, status_code=202)
+ except ValueError as e:
+ return JSONResponse({"error": str(e)}, status_code=400)
+
+
+@app.get("/api/auth/status")
+async def api_auth_status():
+    """
+    Smart check: Do we have a secret (Web Flow) or just ID (Device Flow)?
+    This tells the frontend which UI to render.
+
+    Returns:
+        mode: "web" when a client secret is set, otherwise "device".
+        configured: whether a client ID is available.
+        oauth_configured: whether the web-flow client secret is set.
+        pat_configured: whether a server-side PAT env var is set.
+    """
+    has_secret = bool(os.getenv("GITHUB_CLIENT_SECRET"))
+    # NOTE(review): the non-empty fallback client ID makes has_id always
+    # True, so "configured" can never be False — confirm this is intentional.
+    has_id = bool(os.getenv("GITHUB_CLIENT_ID", "Iv23litmRp80Z6wmlyRn"))
+
+    return {
+        "mode": "web" if has_secret else "device",
+        "configured": has_id,
+        "oauth_configured": has_secret,
+        "pat_configured": bool(os.getenv("GITPILOT_GITHUB_TOKEN") or os.getenv("GITHUB_TOKEN")),
+    }
+
+
+@app.get("/api/auth/app-url")
+async def api_get_app_url():
+ """Get GitHub App installation URL."""
+ app_slug = os.getenv("GITHUB_APP_SLUG", "gitpilota")
+ app_url = f"https://github.com/apps/{app_slug}"
+ return {
+ "app_url": app_url,
+ "app_slug": app_slug,
+ }
+
+
+@app.get("/api/auth/installation-status")
+async def api_check_installation_status():
+ """Check if GitHub App is installed for the current user."""
+ pat_token = os.getenv("GITPILOT_GITHUB_TOKEN") or os.getenv("GITHUB_TOKEN")
+
+ if pat_token:
+ user = await validate_token(pat_token)
+ if user:
+ return {
+ "installed": True,
+ "access_token": pat_token,
+ "user": user,
+ "auth_type": "pat",
+ }
+
+ github_app_id = os.getenv("GITHUB_APP_ID", "2313985")
+ if not github_app_id:
+ return {
+ "installed": False,
+ "message": "GitHub authentication not configured.",
+ "auth_type": "none",
+ }
+
+ return {
+ "installed": False,
+ "message": "GitHub App not installed.",
+ "auth_type": "github_app",
+ }
+
+
+@app.get("/api/auth/repo-access", response_model=RepoAccessResponse)
+async def api_check_repo_access(
+ owner: str = Query(...),
+ repo: str = Query(...),
+ authorization: Optional[str] = Header(None),
+):
+ """
+ Check if we have write access to a repository via User token or GitHub App.
+
+ This endpoint helps the frontend determine if it should show
+ installation prompts or if the user already has sufficient permissions.
+ """
+ token = get_github_token(authorization)
+ access_info = await check_repo_write_access(owner, repo, user_token=token)
+
+ return RepoAccessResponse(
+ can_write=access_info["can_write"],
+ app_installed=access_info["app_installed"],
+ auth_type=access_info["auth_type"],
+ )
+
+
+# ============================================================================
+# Session Endpoints (Phase 1)
+# ============================================================================
+
+@app.get("/api/sessions")
+async def api_list_sessions():
+ """List all saved sessions."""
+ return {"sessions": _session_mgr.list_sessions()}
+
+
+@app.post("/api/sessions")
+async def api_create_session(payload: dict):
+ """Create a new session.
+
+ Accepts either legacy single-repo or multi-repo format:
+ Legacy: {"repo_full_name": "owner/repo", "branch": "main"}
+ Multi: {"repos": [{full_name, branch, mode}], "active_repo": "owner/repo"}
+ """
+ repo = payload.get("repo_full_name", "")
+ branch = payload.get("branch")
+ name = payload.get("name") # optional — derived from first user prompt
+ session = _session_mgr.create(repo_full_name=repo, branch=branch, name=name)
+
+ # Multi-repo context support
+ if payload.get("repos"):
+ session.repos = payload["repos"]
+ session.active_repo = payload.get("active_repo", repo)
+ elif repo:
+ session.repos = [{"full_name": repo, "branch": branch or "main", "mode": "write"}]
+ session.active_repo = repo
+
+ _session_mgr.save(session)
+ return {"session_id": session.id, "status": session.status}
+
+
+@app.get("/api/sessions/{session_id}")
+async def api_get_session(session_id: str):
+ """Get session details."""
+ session = _session_mgr.load(session_id)
+ if not session:
+ raise HTTPException(status_code=404, detail="Session not found")
+ return {
+ "id": session.id,
+ "status": session.status,
+ "repo_full_name": session.repo_full_name,
+ "branch": session.branch,
+ "created_at": session.created_at,
+ "message_count": len(session.messages),
+ "checkpoint_count": len(session.checkpoints),
+ "repos": session.repos,
+ "active_repo": session.active_repo,
+ }
+
+
+@app.delete("/api/sessions/{session_id}")
+async def api_delete_session(session_id: str):
+ """Delete a session."""
+ deleted = _session_mgr.delete(session_id)
+ if not deleted:
+ raise HTTPException(status_code=404, detail="Session not found")
+ return {"deleted": True}
+
+
+@app.patch("/api/sessions/{session_id}/context")
+async def api_update_session_context(session_id: str, payload: dict):
+    """Add, remove, or activate repos in a session's multi-repo context.
+
+    Actions:
+        {"action": "add", "repo_full_name": "owner/repo", "branch": "main"}
+        {"action": "remove", "repo_full_name": "owner/repo"}
+        {"action": "set_active", "repo_full_name": "owner/repo"}
+
+    Returns the updated repo list and active repo. Raises 404 for an
+    unknown session and 400 for a malformed payload or invalid action.
+    """
+    session = _session_mgr.load(session_id)
+    if not session:
+        raise HTTPException(status_code=404, detail="Session not found")
+
+    action = payload.get("action")
+    repo_name = payload.get("repo_full_name")
+    if not action or not repo_name:
+        raise HTTPException(status_code=400, detail="action and repo_full_name required")
+
+    if action == "add":
+        branch = payload.get("branch", "main")
+        # Adding is idempotent: a repo already in the context is left as-is.
+        if not any(r.get("full_name") == repo_name for r in session.repos):
+            session.repos.append({
+                "full_name": repo_name,
+                "branch": branch,
+                "mode": "read",  # new repos join read-only until activated
+            })
+        # Promote to active when no repo is active yet (even if it already existed).
+        if not session.active_repo:
+            session.active_repo = repo_name
+    elif action == "remove":
+        session.repos = [r for r in session.repos if r.get("full_name") != repo_name]
+        # If the active repo was removed, fall back to the first remaining one.
+        if session.active_repo == repo_name:
+            session.active_repo = session.repos[0]["full_name"] if session.repos else None
+    elif action == "set_active":
+        if any(r.get("full_name") == repo_name for r in session.repos):
+            # Update mode flags: only the active repo is writable.
+            for r in session.repos:
+                r["mode"] = "write" if r.get("full_name") == repo_name else "read"
+            session.active_repo = repo_name
+        else:
+            raise HTTPException(status_code=400, detail="Repo not in session context")
+    else:
+        raise HTTPException(status_code=400, detail=f"Unknown action: {action}")
+
+    _session_mgr.save(session)
+    return {
+        "repos": session.repos,
+        "active_repo": session.active_repo,
+    }
+
+
+@app.post("/api/sessions/{session_id}/checkpoint")
+async def api_create_checkpoint(session_id: str, payload: dict):
+ """Create a checkpoint for a session."""
+ session = _session_mgr.load(session_id)
+ if not session:
+ raise HTTPException(status_code=404, detail="Session not found")
+ label = payload.get("label", "checkpoint")
+ cp = _session_mgr.create_checkpoint(session, label=label)
+ return {"checkpoint_id": cp.id, "label": cp.label, "created_at": cp.created_at}
+
+
+# ============================================================================
+# Hooks Endpoints (Phase 1)
+# ============================================================================
+
+@app.get("/api/hooks")
+async def api_list_hooks():
+ """List registered hooks."""
+ return {"hooks": _hook_mgr.list_hooks()}
+
+
+@app.post("/api/hooks")
+async def api_register_hook(payload: dict):
+ """Register a new hook."""
+ from .hooks import HookDefinition
+ try:
+ hook = HookDefinition(
+ event=HookEvent(payload["event"]),
+ name=payload["name"],
+ command=payload.get("command"),
+ blocking=payload.get("blocking", False),
+ timeout=payload.get("timeout", 30),
+ )
+ _hook_mgr.register(hook)
+ return {"registered": True, "name": hook.name, "event": hook.event.value}
+ except (KeyError, ValueError) as e:
+ raise HTTPException(status_code=400, detail=str(e))
+
+
+@app.delete("/api/hooks/{event}/{name}")
+async def api_unregister_hook(event: str, name: str):
+ """Unregister a hook by event and name."""
+ try:
+ _hook_mgr.unregister(HookEvent(event), name)
+ return {"unregistered": True}
+ except ValueError as e:
+ raise HTTPException(status_code=400, detail=str(e))
+
+
+# ============================================================================
+# Permissions Endpoints (Phase 1)
+# ============================================================================
+
+@app.get("/api/permissions")
+async def api_get_permissions():
+ """Get current permission policy."""
+ return _perm_mgr.to_dict()
+
+
+@app.put("/api/permissions/mode")
+async def api_set_permission_mode(payload: dict):
+ """Set the permission mode (normal, plan, auto)."""
+ mode_str = payload.get("mode", "normal")
+ try:
+ _perm_mgr.policy.mode = PermissionMode(mode_str)
+ return {"mode": _perm_mgr.policy.mode.value}
+ except ValueError:
+ raise HTTPException(status_code=400, detail=f"Invalid mode: {mode_str}")
+
+
+# ============================================================================
+# Project Context / Memory Endpoints (Phase 1)
+# ============================================================================
+
+@app.get("/api/repos/{owner}/{repo}/context")
+async def api_get_project_context(
+ owner: str = FPath(...),
+ repo: str = FPath(...),
+):
+ """Get project conventions and memory for a repository workspace."""
+ from pathlib import Path as StdPath
+ workspace_path = StdPath.home() / ".gitpilot" / "workspaces" / owner / repo
+ if not workspace_path.exists():
+ return {"conventions": "", "rules": [], "auto_memory": {}, "system_prompt": ""}
+ mgr = MemoryManager(workspace_path)
+ ctx = mgr.load_context()
+ return {
+ "conventions": ctx.conventions,
+ "rules": ctx.rules,
+ "auto_memory": ctx.auto_memory,
+ "system_prompt": ctx.to_system_prompt(),
+ }
+
+
+@app.post("/api/repos/{owner}/{repo}/context/init")
+async def api_init_project_context(
+ owner: str = FPath(...),
+ repo: str = FPath(...),
+):
+ """Initialize .gitpilot/ directory with template GITPILOT.md."""
+ from pathlib import Path as StdPath
+ workspace_path = StdPath.home() / ".gitpilot" / "workspaces" / owner / repo
+ workspace_path.mkdir(parents=True, exist_ok=True)
+ mgr = MemoryManager(workspace_path)
+ md_path = mgr.init_project()
+ return {"initialized": True, "path": str(md_path)}
+
+
+@app.post("/api/repos/{owner}/{repo}/context/pattern")
+async def api_add_learned_pattern(
+ owner: str = FPath(...),
+ repo: str = FPath(...),
+ payload: dict = ...,
+):
+ """Add a learned pattern to auto-memory."""
+ from pathlib import Path as StdPath
+ pattern = payload.get("pattern", "")
+ if not pattern:
+ raise HTTPException(status_code=400, detail="pattern is required")
+ workspace_path = StdPath.home() / ".gitpilot" / "workspaces" / owner / repo
+ workspace_path.mkdir(parents=True, exist_ok=True)
+ mgr = MemoryManager(workspace_path)
+ mgr.add_learned_pattern(pattern)
+ return {"added": True, "pattern": pattern}
+
+
+# ============================================================================
+# Context Vault Endpoints (additive — Context + Use Case system)
+# ============================================================================
+
+def _workspace_path(owner: str, repo: str) -> Path:
+ """Resolve the local workspace path for a repo."""
+ return Path.home() / ".gitpilot" / "workspaces" / owner / repo
+
+
+@app.get("/api/repos/{owner}/{repo}/context/assets")
+async def api_list_context_assets(
+ owner: str = FPath(...),
+ repo: str = FPath(...),
+):
+ """List all uploaded context assets for a repository."""
+ vault = ContextVault(_workspace_path(owner, repo))
+ assets = vault.list_assets()
+ return {"assets": [a.to_dict() for a in assets]}
+
+
+@app.post("/api/repos/{owner}/{repo}/context/assets/upload")
+async def api_upload_context_asset(
+ owner: str = FPath(...),
+ repo: str = FPath(...),
+ file: UploadFile = File(...),
+):
+ """Upload a file to the project context vault."""
+ vault = ContextVault(_workspace_path(owner, repo))
+ content = await file.read()
+ mime = file.content_type or ""
+ filename = file.filename or "upload"
+
+ try:
+ meta = vault.upload_asset(filename, content, mime=mime)
+ return {"asset": meta.to_dict()}
+ except ValueError as e:
+ raise HTTPException(status_code=400, detail=str(e))
+
+
+@app.delete("/api/repos/{owner}/{repo}/context/assets/{asset_id}")
+async def api_delete_context_asset(
+ owner: str = FPath(...),
+ repo: str = FPath(...),
+ asset_id: str = FPath(...),
+):
+ """Delete a context asset."""
+ vault = ContextVault(_workspace_path(owner, repo))
+ vault.delete_asset(asset_id)
+ return {"deleted": True, "asset_id": asset_id}
+
+
+@app.get("/api/repos/{owner}/{repo}/context/assets/{asset_id}/download")
+async def api_download_context_asset(
+ owner: str = FPath(...),
+ repo: str = FPath(...),
+ asset_id: str = FPath(...),
+):
+ """Download a raw context asset file."""
+ vault = ContextVault(_workspace_path(owner, repo))
+ asset_path = vault.get_asset_path(asset_id)
+ if not asset_path:
+ raise HTTPException(status_code=404, detail="Asset not found")
+ filename = vault.get_asset_filename(asset_id)
+ return FileResponse(asset_path, filename=filename)
+
+
+# ============================================================================
+# Use Case Endpoints (additive — guided requirement clarification)
+# ============================================================================
+
+@app.get("/api/repos/{owner}/{repo}/use-cases")
+async def api_list_use_cases(
+ owner: str = FPath(...),
+ repo: str = FPath(...),
+):
+ """List all use cases for a repository."""
+ mgr = UseCaseManager(_workspace_path(owner, repo))
+ return {"use_cases": mgr.list_use_cases()}
+
+
+@app.post("/api/repos/{owner}/{repo}/use-cases")
+async def api_create_use_case(
+ owner: str = FPath(...),
+ repo: str = FPath(...),
+ payload: dict = ...,
+):
+ """Create a new use case."""
+ title = payload.get("title", "New Use Case")
+ initial_notes = payload.get("initial_notes", "")
+ mgr = UseCaseManager(_workspace_path(owner, repo))
+ uc = mgr.create_use_case(title=title, initial_notes=initial_notes)
+ return {"use_case": uc.to_dict()}
+
+
+@app.get("/api/repos/{owner}/{repo}/use-cases/{use_case_id}")
+async def api_get_use_case(
+ owner: str = FPath(...),
+ repo: str = FPath(...),
+ use_case_id: str = FPath(...),
+):
+ """Get a single use case with messages and spec."""
+ mgr = UseCaseManager(_workspace_path(owner, repo))
+ uc = mgr.get_use_case(use_case_id)
+ if not uc:
+ raise HTTPException(status_code=404, detail="Use case not found")
+ return {"use_case": uc.to_dict()}
+
+
+@app.post("/api/repos/{owner}/{repo}/use-cases/{use_case_id}/chat")
+async def api_use_case_chat(
+ owner: str = FPath(...),
+ repo: str = FPath(...),
+ use_case_id: str = FPath(...),
+ payload: dict = ...,
+):
+ """Send a guided chat message and get assistant response + updated spec."""
+ message = payload.get("message", "")
+ if not message:
+ raise HTTPException(status_code=400, detail="message is required")
+ mgr = UseCaseManager(_workspace_path(owner, repo))
+ uc = mgr.chat(use_case_id, message)
+ if not uc:
+ raise HTTPException(status_code=404, detail="Use case not found")
+ return {"use_case": uc.to_dict()}
+
+
+@app.post("/api/repos/{owner}/{repo}/use-cases/{use_case_id}/finalize")
+async def api_finalize_use_case(
+ owner: str = FPath(...),
+ repo: str = FPath(...),
+ use_case_id: str = FPath(...),
+):
+ """Finalize a use case: mark active, export markdown spec."""
+ mgr = UseCaseManager(_workspace_path(owner, repo))
+ uc = mgr.finalize(use_case_id)
+ if not uc:
+ raise HTTPException(status_code=404, detail="Use case not found")
+ return {"use_case": uc.to_dict()}
+
+
+# ============================================================================
+# MCP Endpoints (Phase 2)
+# ============================================================================
+
+@app.get("/api/mcp/servers")
+async def api_mcp_list_servers():
+ """List configured MCP servers and their connection status."""
+ return _mcp_client.to_dict()
+
+
+@app.post("/api/mcp/connect/{server_name}")
+async def api_mcp_connect(server_name: str):
+ """Connect to a named MCP server."""
+ try:
+ conn = await _mcp_client.connect(server_name)
+ return {
+ "connected": True,
+ "server": server_name,
+ "tools": [{"name": t.name, "description": t.description} for t in conn.tools],
+ }
+ except Exception as e:
+ raise HTTPException(status_code=400, detail=str(e))
+
+
+@app.post("/api/mcp/disconnect/{server_name}")
+async def api_mcp_disconnect(server_name: str):
+ """Disconnect from a named MCP server."""
+ await _mcp_client.disconnect(server_name)
+ return {"disconnected": True, "server": server_name}
+
+
+@app.post("/api/mcp/call")
+async def api_mcp_call_tool(payload: dict):
+ """Call a tool on a connected MCP server."""
+ server = payload.get("server", "")
+ tool_name = payload.get("tool", "")
+ params = payload.get("params", {})
+ if not server or not tool_name:
+ raise HTTPException(status_code=400, detail="server and tool are required")
+ conn = _mcp_client._connections.get(server)
+ if not conn:
+ raise HTTPException(status_code=404, detail=f"Not connected to server: {server}")
+ try:
+ result = await _mcp_client.call_tool(conn, tool_name, params)
+ return {"result": result}
+ except Exception as e:
+ raise HTTPException(status_code=500, detail=str(e))
+
+
+# ============================================================================
+# Plugin Endpoints (Phase 2)
+# ============================================================================
+
+@app.get("/api/plugins")
+async def api_list_plugins():
+ """List installed plugins."""
+ plugins = _plugin_mgr.list_installed()
+ return {"plugins": [p.to_dict() for p in plugins]}
+
+
+@app.post("/api/plugins/install")
+async def api_install_plugin(payload: dict):
+ """Install a plugin from a git URL or local path."""
+ source = payload.get("source", "")
+ if not source:
+ raise HTTPException(status_code=400, detail="source is required")
+ try:
+ info = _plugin_mgr.install(source)
+ return {"installed": True, "plugin": info.to_dict()}
+ except Exception as e:
+ raise HTTPException(status_code=400, detail=str(e))
+
+
+@app.delete("/api/plugins/{name}")
+async def api_uninstall_plugin(name: str):
+ """Uninstall a plugin by name."""
+ removed = _plugin_mgr.uninstall(name)
+ if not removed:
+ raise HTTPException(status_code=404, detail=f"Plugin not found: {name}")
+ return {"uninstalled": True, "name": name}
+
+
+# ============================================================================
+# Skills Endpoints (Phase 2)
+# ============================================================================
+
+@app.get("/api/skills")
+async def api_list_skills():
+ """List all available skills."""
+ return {"skills": _skill_mgr.list_skills()}
+
+
+@app.post("/api/skills/invoke")
+async def api_invoke_skill(payload: dict):
+ """Invoke a skill by name."""
+ name = payload.get("name", "")
+ context = payload.get("context", {})
+ if not name:
+ raise HTTPException(status_code=400, detail="name is required")
+ prompt = _skill_mgr.invoke(name, context)
+ if prompt is None:
+ raise HTTPException(status_code=404, detail=f"Skill not found: {name}")
+ return {"skill": name, "rendered_prompt": prompt}
+
+
+@app.post("/api/skills/reload")
+async def api_reload_skills():
+ """Reload skills from all sources."""
+ count = _skill_mgr.load_all()
+ return {"reloaded": True, "count": count}
+
+
+# ============================================================================
+# Vision Endpoints (Phase 2)
+# ============================================================================
+
+@app.post("/api/vision/analyze")
+async def api_vision_analyze(payload: dict):
+ """Analyze an image with a text prompt."""
+ from .vision import VisionAnalyzer
+ image_path = payload.get("image_path", "")
+ prompt = payload.get("prompt", "Describe this image.")
+ provider = payload.get("provider", "openai")
+ if not image_path:
+ raise HTTPException(status_code=400, detail="image_path is required")
+ try:
+ analyzer = VisionAnalyzer(provider=provider)
+ result = await analyzer.analyze_image(Path(image_path), prompt)
+ return result.to_dict()
+ except Exception as e:
+ raise HTTPException(status_code=400, detail=str(e))
+
+
+# ============================================================================
+# Model Router Endpoints (Phase 2)
+# ============================================================================
+
+@app.post("/api/model-router/select")
+async def api_model_select(payload: dict):
+ """Preview which model would be selected for a request."""
+ request = payload.get("request", "")
+ category = payload.get("category")
+ if not request:
+ raise HTTPException(status_code=400, detail="request is required")
+ selection = _model_router.select(request, category)
+ return {
+ "model": selection.model,
+ "tier": selection.tier.value,
+ "complexity": selection.complexity.value,
+ "provider": selection.provider,
+ "reason": selection.reason,
+ }
+
+
+@app.get("/api/model-router/usage")
+async def api_model_usage():
+ """Get model usage summary and budget status."""
+ return _model_router.get_usage_summary()
+
+
+# ============================================================================
+# Agent Teams Endpoints (Phase 3)
+# ============================================================================
+
+@app.post("/api/agent-teams/plan")
+async def api_team_plan(payload: dict):
+ """Split a complex task into parallel subtasks."""
+ task = payload.get("task", "")
+ if not task:
+ raise HTTPException(status_code=400, detail="task is required")
+ subtasks = _agent_team.plan_and_split(task)
+ return {"subtasks": [{"id": s.id, "title": s.title, "description": s.description} for s in subtasks]}
+
+
+@app.post("/api/agent-teams/execute")
+async def api_team_execute(payload: dict):
+ """Execute subtasks in parallel and merge results."""
+ task = payload.get("task", "")
+ if not task:
+ raise HTTPException(status_code=400, detail="task is required")
+ subtasks = _agent_team.plan_and_split(task)
+ result = await _agent_team.execute_parallel(subtasks)
+ return result.to_dict()
+
+
+# ============================================================================
+# Learning Engine Endpoints (Phase 3)
+# ============================================================================
+
+@app.post("/api/learning/evaluate")
+async def api_learning_evaluate(payload: dict):
+ """Evaluate an action outcome for learning."""
+ action = payload.get("action", "")
+ outcome = payload.get("outcome", {})
+ repo = payload.get("repo", "")
+ if not action:
+ raise HTTPException(status_code=400, detail="action is required")
+ evaluation = _learning_engine.evaluate_outcome(action, outcome, repo=repo)
+ return {
+ "action": evaluation.action,
+ "success": evaluation.success,
+ "score": evaluation.score,
+ "feedback": evaluation.feedback,
+ }
+
+
+@app.get("/api/learning/insights/{owner}/{repo}")
+async def api_learning_insights(owner: str = FPath(...), repo: str = FPath(...)):
+ """Get learned insights for a repository."""
+ repo_name = f"{owner}/{repo}"
+ insights = _learning_engine.get_repo_insights(repo_name)
+ return {
+ "repo": repo_name,
+ "patterns": insights.patterns,
+ "preferred_style": insights.preferred_style,
+ "success_rate": insights.success_rate,
+ "total_evaluations": insights.total_evaluations,
+ }
+
+
+@app.post("/api/learning/style")
+async def api_learning_set_style(payload: dict):
+ """Set preferred coding style for a repository."""
+ repo = payload.get("repo", "")
+ style = payload.get("style", {})
+ if not repo:
+ raise HTTPException(status_code=400, detail="repo is required")
+ _learning_engine.set_preferred_style(repo, style)
+ return {"repo": repo, "style": style}
+
+
+# ============================================================================
+# Cross-Repo Intelligence Endpoints (Phase 3)
+# ============================================================================
+
+@app.post("/api/cross-repo/dependencies")
+async def api_cross_repo_dependencies(payload: dict):
+ """Analyze dependencies from provided file contents."""
+ files = payload.get("files", {})
+ if not files:
+ raise HTTPException(status_code=400, detail="files dict is required (filename -> content)")
+ graph = _cross_repo.analyze_dependencies_from_files(files)
+ return graph.to_dict()
+
+
+@app.post("/api/cross-repo/impact")
+async def api_cross_repo_impact(payload: dict):
+ """Analyze impact of updating a package."""
+ files = payload.get("files", {})
+ package_name = payload.get("package", "")
+ new_version = payload.get("new_version")
+ if not package_name:
+ raise HTTPException(status_code=400, detail="package is required")
+ graph = _cross_repo.analyze_dependencies_from_files(files)
+ report = _cross_repo.impact_analysis(graph, package_name, new_version)
+ return report.to_dict()
+
+
+# ============================================================================
+# Predictions Endpoints (Phase 3)
+# ============================================================================
+
+@app.post("/api/predictions/suggest")
+async def api_predictions_suggest(payload: dict):
+ """Get proactive suggestions based on context."""
+ context = payload.get("context", "")
+ if not context:
+ raise HTTPException(status_code=400, detail="context is required")
+ suggestions = _predictive_engine.predict(context)
+ return {"suggestions": [s.to_dict() for s in suggestions]}
+
+
+@app.get("/api/predictions/rules")
+async def api_predictions_rules():
+ """List all prediction rules."""
+ return {"rules": _predictive_engine.list_rules()}
+
+
+# ============================================================================
+# Security Scanner Endpoints (Phase 3)
+# ============================================================================
+
@app.post("/api/security/scan-file")
async def api_security_scan_file(payload: dict):
    """Scan a single file for security issues."""
    target = payload.get("file_path", "")
    if not target:
        raise HTTPException(status_code=400, detail="file_path is required")
    issues = _security_scanner.scan_file(target)
    return {"findings": [issue.to_dict() for issue in issues], "count": len(issues)}
+
+
@app.post("/api/security/scan-directory")
async def api_security_scan_directory(payload: dict):
    """Recursively scan a directory for security issues."""
    target_dir = payload.get("directory", "")
    if not target_dir:
        raise HTTPException(status_code=400, detail="directory is required")
    scan_result = _security_scanner.scan_directory(target_dir)
    return scan_result.to_dict()
+
+
@app.post("/api/security/scan-diff")
async def api_security_scan_diff(payload: dict):
    """Scan a git diff for security issues in added lines."""
    patch = payload.get("diff", "")
    if not patch:
        raise HTTPException(status_code=400, detail="diff is required")
    issues = _security_scanner.scan_diff(patch)
    return {"findings": [issue.to_dict() for issue in issues], "count": len(issues)}
+
+
+# ============================================================================
+# Natural Language Database Endpoints (Phase 3)
+# ============================================================================
+
@app.post("/api/nl-database/translate")
async def api_nl_translate(payload: dict):
    """Translate natural language to SQL.

    Body:
        question: natural-language question (required)
        dialect:  SQL dialect name (default "postgresql")
        tables:   optional list of {name, columns, primary_key} schemas

    Returns the generated SQL plus a validation verdict. Unknown dialects
    and malformed table entries are reported as 400 client errors instead
    of leaking as 500s.
    """
    question = payload.get("question", "")
    dialect = payload.get("dialect", "postgresql")
    tables = payload.get("tables", [])
    if not question:
        raise HTTPException(status_code=400, detail="question is required")
    # Lazy import — nl_database pulls in SQL parsing libraries
    from .nl_database import NLQueryEngine, QueryDialect, TableSchema
    try:
        # An invalid dialect would otherwise surface as an unhandled
        # ValueError from the enum constructor (HTTP 500).
        engine = NLQueryEngine(dialect=QueryDialect(dialect))
    except ValueError:
        raise HTTPException(status_code=400, detail=f"unknown dialect: {dialect}")
    for t in tables:
        # A table entry without "name" previously raised KeyError (HTTP 500).
        if "name" not in t:
            raise HTTPException(status_code=400, detail="each table requires a name")
        engine.add_table(TableSchema(
            name=t["name"],
            columns=t.get("columns", []),
            primary_key=t.get("primary_key"),
        ))
    sql = engine.translate(question)
    error = engine.validate_query(sql)
    return {"question": question, "sql": sql, "valid": error is None, "error": error}
+
+
@app.post("/api/nl-database/explain")
async def api_nl_explain(payload: dict):
    """Explain what a SQL query does in plain English."""
    query_sql = payload.get("sql", "")
    if not query_sql:
        raise HTTPException(status_code=400, detail="sql is required")
    return {"sql": query_sql, "explanation": _nl_engine.explain(query_sql)}
+
+
+# ============================================================================
+# Branch Listing Endpoint (Claude-Code-on-Web Parity)
+# ============================================================================
+
class BranchInfo(BaseModel):
    # One branch entry in the branch-listing response.
    name: str
    is_default: bool = False  # True when this is the repository's default branch
    protected: bool = False   # GitHub branch-protection flag, as reported by the API
    commit_sha: Optional[str] = None  # SHA of the branch head commit, when available
+
+
class BranchListResponse(BaseModel):
    # Response envelope for GET /api/repos/{owner}/{repo}/branches.
    repository: str            # "owner/repo"
    default_branch: str
    page: int                  # page number the client requested
    per_page: int
    has_more: bool             # whether more branches exist beyond what was returned
    branches: List[BranchInfo]
+
+
@app.get("/api/repos/{owner}/{repo}/branches", response_model=BranchListResponse)
async def api_list_branches(
    owner: str = FPath(...),
    repo: str = FPath(...),
    page: int = Query(1, ge=1),
    per_page: int = Query(100, ge=1, le=100),
    query: Optional[str] = Query(None, description="Substring filter"),
    authorization: Optional[str] = Header(None),
):
    """List branches for a repository with optional search filtering.

    Fetches up to 10 GitHub pages (``per_page`` each) starting at ``page``,
    applies a case-insensitive substring filter, and returns branches sorted
    with the default branch first, then alphabetically.

    Raises 401 when no GitHub token is available and proxies GitHub error
    status codes for repo-lookup / branch-list failures.
    """
    import httpx as _httpx

    token = get_github_token(authorization)
    if not token:
        raise HTTPException(status_code=401, detail="GitHub token required")

    headers = {
        "Authorization": f"Bearer {token}",
        "Accept": "application/vnd.github+json",
        "User-Agent": "gitpilot",
    }
    timeout = _httpx.Timeout(connect=10.0, read=30.0, write=30.0, pool=10.0)

    async with _httpx.AsyncClient(
        base_url="https://api.github.com", headers=headers, timeout=timeout
    ) as client:
        # Fetch repo info for default_branch
        repo_resp = await client.get(f"/repos/{owner}/{repo}")
        if repo_resp.status_code >= 400:
            logging.warning(
                "branches: repo lookup failed %s/%s → %s %s",
                owner, repo, repo_resp.status_code, repo_resp.text[:200],
            )
            raise HTTPException(
                status_code=repo_resp.status_code,
                detail=f"Cannot access repository: {repo_resp.status_code}",
            )

        repo_data = repo_resp.json()
        default_branch_name = repo_data.get("default_branch", "main")

        # Fetch ALL branch pages (GitHub caps at 100 per page)
        all_raw = []
        truncated = False  # set when the safety cap stops us before GitHub runs out
        current_page = page
        while True:
            branch_resp = await client.get(
                f"/repos/{owner}/{repo}/branches",
                params={"page": current_page, "per_page": per_page},
            )
            if branch_resp.status_code >= 400:
                logging.warning(
                    "branches: list failed %s/%s page=%s → %s %s",
                    owner, repo, current_page, branch_resp.status_code, branch_resp.text[:200],
                )
                raise HTTPException(
                    status_code=branch_resp.status_code,
                    detail=f"Failed to list branches: {branch_resp.status_code}",
                )

            # Bug fix: parse the body once instead of calling .json() twice
            # (once for the isinstance check, once for the value).
            body = branch_resp.json()
            page_data = body if isinstance(body, list) else []
            all_raw.extend(page_data)

            # Check if there are more pages via the RFC 5988 Link header
            link_header = branch_resp.headers.get("Link", "") or ""
            if 'rel="next"' not in link_header or len(page_data) < per_page:
                break
            current_page += 1
            # Safety: cap at 10 pages (1000 branches)
            if current_page - page >= 10:
                truncated = True
                break

    q = (query or "").strip().lower()

    branches = []
    for b in all_raw:
        name = (b.get("name") or "").strip()
        if not name:
            continue
        if q and q not in name.lower():
            continue
        branches.append(BranchInfo(
            name=name,
            is_default=(name == default_branch_name),
            protected=bool(b.get("protected", False)),
            commit_sha=(b.get("commit") or {}).get("sha"),
        ))

    # Sort: default branch first, then alphabetical
    branches.sort(key=lambda x: (0 if x.is_default else 1, x.name.lower()))

    return BranchListResponse(
        repository=f"{owner}/{repo}",
        default_branch=default_branch_name,
        page=page,
        per_page=per_page,
        # Bug fix: previously hard-coded False — report True when the
        # 10-page safety cap truncated pagination with pages remaining.
        has_more=truncated,
        branches=branches,
    )
+
+
+# ============================================================================
+# Environment Configuration Endpoints (Claude-Code-on-Web Parity)
+# ============================================================================
+
import json as _json
# On-disk store for environment configurations: one <id>.json per environment.
_ENV_ROOT = Path.home() / ".gitpilot" / "environments"
+
+
class EnvironmentConfig(BaseModel):
    # Persisted environment settings; stored as <id>.json under _ENV_ROOT.
    id: Optional[str] = None  # server-assigned on create when omitted
    name: str = "Default"
    network_access: str = Field("limited", description="limited | full | none")
    env_vars: dict = Field(default_factory=dict)  # arbitrary name -> value pairs
+
+
class EnvironmentListResponse(BaseModel):
    # Response envelope for GET /api/environments.
    environments: List[EnvironmentConfig]
+
+
@app.get("/api/environments", response_model=EnvironmentListResponse)
async def api_list_environments():
    """List all environment configurations."""
    _ENV_ROOT.mkdir(parents=True, exist_ok=True)
    configs = []
    for cfg_path in sorted(_ENV_ROOT.glob("*.json")):
        try:
            configs.append(EnvironmentConfig(**_json.loads(cfg_path.read_text())))
        except Exception:
            # Skip unreadable/corrupt config files rather than failing the list.
            continue
    if not configs:
        # Always present at least one environment to the UI.
        configs.append(EnvironmentConfig(id="default", name="Default", network_access="limited"))
    return EnvironmentListResponse(environments=configs)
+
+
@app.post("/api/environments")
async def api_create_environment(config: EnvironmentConfig):
    """Create a new environment configuration."""
    import uuid
    _ENV_ROOT.mkdir(parents=True, exist_ok=True)
    # Assign a short random id when the client did not provide one.
    if not config.id:
        config.id = uuid.uuid4().hex[:12]
    target = _ENV_ROOT / f"{config.id}.json"
    target.write_text(_json.dumps(config.model_dump(), indent=2))
    return config.model_dump()
+
+
@app.put("/api/environments/{env_id}")
async def api_update_environment(env_id: str, config: EnvironmentConfig):
    """Update an environment configuration (upsert: writes the file whether or not it exists)."""
    _ENV_ROOT.mkdir(parents=True, exist_ok=True)
    # The path parameter is authoritative for the id, not the body.
    config.id = env_id
    target = _ENV_ROOT / f"{env_id}.json"
    target.write_text(_json.dumps(config.model_dump(), indent=2))
    return config.model_dump()
+
+
@app.delete("/api/environments/{env_id}")
async def api_delete_environment(env_id: str):
    """Delete an environment configuration."""
    target = _ENV_ROOT / f"{env_id}.json"
    if not target.exists():
        raise HTTPException(status_code=404, detail="Environment not found")
    target.unlink()
    return {"deleted": True}
+
+
+# ============================================================================
+# Session Messages + Diff Endpoints (Claude-Code-on-Web Parity)
+# ============================================================================
+
@app.post("/api/sessions/{session_id}/message")
async def api_add_session_message(session_id: str, payload: dict):
    """Add a message to a session's conversation history."""
    try:
        session = _session_mgr.load(session_id)
    except FileNotFoundError:
        raise HTTPException(status_code=404, detail="Session not found")
    session.add_message(
        payload.get("role", "user"),
        payload.get("content", ""),
        **payload.get("metadata", {}),
    )
    _session_mgr.save(session)
    return {"message_count": len(session.messages)}
+
+
@app.get("/api/sessions/{session_id}/messages")
async def api_get_session_messages(session_id: str):
    """Get all messages for a session."""
    try:
        session = _session_mgr.load(session_id)
    except FileNotFoundError:
        raise HTTPException(status_code=404, detail="Session not found")
    serialized = [
        {
            "role": m.role,
            "content": m.content,
            "timestamp": m.timestamp,
            "metadata": m.metadata,
        }
        for m in session.messages
    ]
    return {"session_id": session.id, "messages": serialized}
+
+
@app.get("/api/sessions/{session_id}/diff")
async def api_get_session_diff(session_id: str):
    """Get diff stats for a session (placeholder for sandbox integration)."""
    try:
        session = _session_mgr.load(session_id)
    except FileNotFoundError:
        raise HTTPException(status_code=404, detail="Session not found")
    # Default to an all-zero summary when the session has no recorded diff.
    empty_diff = {"files_changed": 0, "additions": 0, "deletions": 0, "files": []}
    return {"session_id": session.id, "diff": session.metadata.get("diff", empty_diff)}
+
+
@app.post("/api/sessions/{session_id}/status")
async def api_update_session_status(session_id: str, payload: dict):
    """Update session status (active, paused, completed, failed, waiting)."""
    try:
        session = _session_mgr.load(session_id)
    except FileNotFoundError:
        raise HTTPException(status_code=404, detail="Session not found")
    requested = payload.get("status", "active")
    allowed = ("active", "paused", "completed", "failed", "waiting")
    if requested not in allowed:
        raise HTTPException(status_code=400, detail="Invalid status")
    session.status = requested
    _session_mgr.save(session)
    return {"session_id": session.id, "status": session.status}
+
+
+# ============================================================================
+# WebSocket Streaming Endpoint (Claude-Code-on-Web Parity)
+# ============================================================================
+
+from fastapi import WebSocket, WebSocketDisconnect
+
+
async def _safe_ws_send_json(websocket: WebSocket, data: dict) -> bool:
    """Send JSON over a WebSocket, swallowing disconnect errors.

    Returns True on success and False when the client has gone away, so a
    mid-response close (Vite HMR reload, tab close, network drop) never
    crashes the handler.

    Best-practice pattern from Starlette docs:
    https://www.starlette.io/websockets/#disconnect
    """
    # Transport-layer disconnect exception class names, matched by name so we
    # don't have to import uvicorn/websockets internals.
    disconnect_names = ("ClientDisconnected", "ConnectionClosedError",
                        "ConnectionClosedOK", "WebSocketDisconnect")
    try:
        await websocket.send_json(data)
    except WebSocketDisconnect:
        return False
    except Exception as exc:
        if type(exc).__name__ in disconnect_names:
            return False
        # Re-raise unexpected errors so they show up in logs
        raise
    return True
+
+
@app.websocket("/ws/sessions/{session_id}")
async def session_websocket(websocket: WebSocket, session_id: str):
    """
    Real-time bidirectional communication for a coding session.

    Server events:
        { type: "agent_message", content: "..." }
        { type: "tool_use", tool: "bash", input: "npm test" }
        { type: "tool_result", tool: "bash", output: "All tests passed" }
        { type: "diff_update", stats: { additions: N, deletions: N, files: N } }
        { type: "status_change", status: "completed" }
        { type: "error", message: "..." }

    Client events:
        { type: "user_message", content: "..." }
        { type: "cancel" }
    """
    await websocket.accept()

    # Verify session exists
    try:
        session = _session_mgr.load(session_id)
    except FileNotFoundError:
        await _safe_ws_send_json(websocket, {"type": "error", "message": "Session not found"})
        try:
            await websocket.close()
        except Exception:
            # close() on an already-dead socket is harmless; ignore.
            pass
        return

    # Send session history on connect (may fail if client already gone)
    if not await _safe_ws_send_json(websocket, {
        "type": "session_restored",
        "session_id": session.id,
        "status": session.status,
        "message_count": len(session.messages),
    }):
        logger.info(f"WebSocket disconnected before handshake for session {session_id}")
        return

    try:
        while True:
            try:
                data = await websocket.receive_json()
            except WebSocketDisconnect:
                break

            event_type = data.get("type", "")

            if event_type == "user_message":
                content = data.get("content", "")
                # Persist the user message before any processing so the
                # history survives even if the agent call below fails.
                session.add_message("user", content)
                _session_mgr.save(session)

                # Acknowledge receipt
                if not await _safe_ws_send_json(websocket, {
                    "type": "message_received",
                    "message_index": len(session.messages) - 1,
                }):
                    break

                # Stream agent response (integration point for agentic.py)
                if not await _safe_ws_send_json(websocket, {
                    "type": "status_change",
                    "status": "active",
                }):
                    break

                # Agent processing hook — when the agent orchestrator is wired,
                # replace this with actual streaming from agentic.py
                try:
                    repo_full = session.repo_full_name or ""
                    parts = repo_full.split("/", 1)
                    if len(parts) == 2 and content.strip():
                        # Use canonical dispatcher signature
                        result = await dispatch_request(
                            user_request=content,
                            repo_full_name=f"{parts[0]}/{parts[1]}",
                            branch_name=session.branch,
                        )
                        # Dispatcher results vary by task type; probe the
                        # common answer-bearing keys in priority order.
                        answer = ""
                        if isinstance(result, dict):
                            answer = (
                                result.get("result")
                                or result.get("answer")
                                or result.get("message")
                                or result.get("summary")
                                or (result.get("plan", {}) or {}).get("summary")
                                or str(result)
                            )
                        else:
                            answer = str(result)

                        # Stream the response
                        if not await _safe_ws_send_json(websocket, {
                            "type": "agent_message",
                            "content": answer,
                        }):
                            # Client disconnected — still persist the answer for session history
                            session.add_message("assistant", answer)
                            _session_mgr.save(session)
                            break

                        session.add_message("assistant", answer)
                        _session_mgr.save(session)
                    else:
                        if not await _safe_ws_send_json(websocket, {
                            "type": "agent_message",
                            "content": "Session is not connected to a repository.",
                        }):
                            break
                except Exception as agent_err:
                    logger.error(f"Agent error in WS session {session_id}: {agent_err}")
                    err_str = str(agent_err)
                    # Friendly messages for common LLM errors
                    _q_kw = ["insufficient_quota", "exceeded your current quota", "rate_limit_exceeded", "429"]
                    if any(kw in err_str.lower() for kw in _q_kw):
                        err_str = (
                            "Your LLM provider credits have been exhausted or you've "
                            "hit a rate limit. Please check your billing details or "
                            "switch to a free local provider (Ollama / OllaBridge) in Settings."
                        )
                    elif "No valid task outputs" in err_str or "Invalid response from LLM call" in err_str:
                        err_str = (
                            "The LLM returned an empty response. This often happens "
                            "with small/reasoning models. Try a larger model or enable Lite Mode."
                        )
                    if not await _safe_ws_send_json(websocket, {
                        "type": "error",
                        "message": err_str,
                    }):
                        break

                # Return to idle so the client re-enables its input box.
                if not await _safe_ws_send_json(websocket, {
                    "type": "status_change",
                    "status": "waiting",
                }):
                    break

            elif event_type == "cancel":
                if not await _safe_ws_send_json(websocket, {
                    "type": "status_change",
                    "status": "waiting",
                }):
                    break

            elif event_type == "ping":
                # Client keepalive probe.
                if not await _safe_ws_send_json(websocket, {"type": "pong"}):
                    break

    except WebSocketDisconnect:
        logger.info(f"WebSocket disconnected for session {session_id}")
    except Exception as e:
        # Don't log as error if it's a disconnect-related exception
        exc_name = type(e).__name__
        if exc_name in ("ClientDisconnected", "ConnectionClosedError", "ConnectionClosedOK"):
            logger.info(f"WebSocket client disconnected for session {session_id}")
        else:
            logger.error(f"WebSocket error for session {session_id}: {e}")
            await _safe_ws_send_json(websocket, {"type": "error", "message": str(e)})
+
+
+# ─── Redesigned API Endpoints (Phase 1–4) ────────────────────────────────
+
+from gitpilot.models import (
+ ProviderTestRequest as _ProviderTestRequest,
+ StartSessionRequest as _StartSessionRequest,
+ ChatMessageRequest as _ChatMessageRequest,
+)
+
+
@app.get("/api/status")
async def api_status():
    """Normalized status endpoint for the redesigned extension/UI.

    Aggregates three summaries into a single StatusResponse: provider
    configuration, workspace capabilities, and GitHub connectivity. The
    GitHub probe is capped at 3 seconds so a slow network cannot stall the
    UI's first load; on timeout or error a default summary is returned.
    """
    # Bug fix: removed the unused ProviderHealth import.
    from gitpilot.models import (
        StatusResponse, ProviderStatusResponse, ProviderName,
        WorkspaceCapabilitySummary, GithubStatusSummary,
    )
    from gitpilot.settings import autoconfigure_local_provider
    from gitpilot.github_api import get_github_status_summary

    s = autoconfigure_local_provider()
    provider_summary = s.get_provider_summary()

    # Build provider status — name may arrive as an enum or a plain string.
    provider = ProviderStatusResponse(
        configured=provider_summary.configured,
        name=ProviderName(provider_summary.name.value if hasattr(provider_summary.name, 'value') else str(provider_summary.name)),
        source=provider_summary.source,
        model=provider_summary.model,
        base_url=provider_summary.base_url,
        connection_type=provider_summary.connection_type,
        has_api_key=provider_summary.has_api_key,
        health=provider_summary.health,
        models_available=provider_summary.models_available,
        warning=provider_summary.warning,
    )

    # Workspace capabilities
    workspace = WorkspaceCapabilitySummary(
        folder_mode_available=True,
        local_git_available=True,
        github_mode_available=False,  # flipped below once GitHub connectivity is confirmed
    )

    # GitHub status — wrap with timeout to prevent slow first-load
    # (GitHub API calls over WSL/slow networks can take 5-10s first time)
    github = GithubStatusSummary()
    try:
        github = await _asyncio.wait_for(get_github_status_summary(), timeout=3.0)
        workspace.github_mode_available = github.connected
    except _asyncio.TimeoutError:
        logger.warning("[api/status] GitHub status check timed out after 3s, returning cached/default")
    except Exception as exc:
        logger.debug("[api/status] GitHub status check failed: %s", exc)

    return StatusResponse(
        server_ready=True,
        provider=provider,
        workspace=workspace,
        github=github,
    )
+
+
@app.get("/api/providers/status")
async def api_providers_status():
    """Get detailed status for the active provider."""
    from gitpilot.settings import autoconfigure_local_provider
    from gitpilot.llm_provider import test_provider_connection

    settings = autoconfigure_local_provider()
    return await test_provider_connection(settings)
+
+
@app.post("/api/providers/test")
async def api_providers_test(req: _ProviderTestRequest):
    """Test a specific provider configuration.

    Deep-copies the active settings, overlays only the fields supplied in
    the request for the selected provider, and runs a live connection test
    against that temporary configuration. The active settings are never
    mutated.
    """
    from gitpilot.models import ProviderName, ProviderTestResponse
    # Bug fix: autoconfigure_local_provider is called below but was never
    # imported in this function's scope (only get_settings/AppSettings were,
    # and those went unused) — import the name that is actually needed.
    from gitpilot.settings import autoconfigure_local_provider
    from gitpilot.llm_provider import test_provider_connection
    import copy

    s = autoconfigure_local_provider()
    # Apply test overrides temporarily on a deep copy
    test_settings = copy.deepcopy(s)

    provider = req.provider
    if provider == ProviderName.openai and req.openai:
        if req.openai.api_key:
            test_settings.openai.api_key = req.openai.api_key
        if req.openai.base_url:
            test_settings.openai.base_url = req.openai.base_url
        if req.openai.model:
            test_settings.openai.model = req.openai.model
        test_settings.provider = test_settings.provider.__class__("openai")
    elif provider == ProviderName.claude and req.claude:
        if req.claude.api_key:
            test_settings.claude.api_key = req.claude.api_key
        if req.claude.base_url:
            test_settings.claude.base_url = req.claude.base_url
        if req.claude.model:
            test_settings.claude.model = req.claude.model
        test_settings.provider = test_settings.provider.__class__("claude")
    elif provider == ProviderName.watsonx and req.watsonx:
        if req.watsonx.api_key:
            test_settings.watsonx.api_key = req.watsonx.api_key
        if req.watsonx.project_id:
            test_settings.watsonx.project_id = req.watsonx.project_id
        if req.watsonx.base_url:
            test_settings.watsonx.base_url = req.watsonx.base_url
        if req.watsonx.model_id:
            test_settings.watsonx.model_id = req.watsonx.model_id
        test_settings.provider = test_settings.provider.__class__("watsonx")
    elif provider == ProviderName.ollama and req.ollama:
        if req.ollama.base_url:
            test_settings.ollama.base_url = req.ollama.base_url
        if req.ollama.model:
            test_settings.ollama.model = req.ollama.model
        test_settings.provider = test_settings.provider.__class__("ollama")
    elif provider == ProviderName.ollabridge and req.ollabridge:
        if req.ollabridge.base_url:
            test_settings.ollabridge.base_url = req.ollabridge.base_url
        if req.ollabridge.model:
            test_settings.ollabridge.model = req.ollabridge.model
        if req.ollabridge.api_key:
            test_settings.ollabridge.api_key = req.ollabridge.api_key
        test_settings.provider = test_settings.provider.__class__("ollabridge")

    summary = await test_provider_connection(test_settings)
    return ProviderTestResponse(
        configured=summary.configured,
        name=summary.name,
        source=summary.source,
        model=summary.model,
        base_url=summary.base_url,
        connection_type=summary.connection_type,
        has_api_key=summary.has_api_key,
        health=summary.health,
        models_available=summary.models_available,
        warning=summary.warning,
        details=f"Provider {provider.value} test completed",
    )
+
+
@app.post("/api/session/start")
async def api_session_start(req: _StartSessionRequest):
    """Start a new session by mode (folder, local_git, github).

    Validates the mode-specific required field, delegates to the matching
    SessionManager factory, and returns the new session's identifiers.
    Raises 422 for missing required fields and 400 for an unknown mode.
    """
    # Bug fix: dropped the unused StartSessionRequest import (the request
    # model is already bound module-wide as _StartSessionRequest).
    from gitpilot.models import StartSessionResponse, WorkspaceMode
    from gitpilot.session import SessionManager

    mgr = SessionManager()

    if req.mode == WorkspaceMode.folder:
        if not req.folder_path:
            raise HTTPException(status_code=422, detail="folder_path is required for folder mode")
        session = mgr.create_folder_session(req.folder_path)
    elif req.mode == WorkspaceMode.local_git:
        # folder_path is accepted as a fallback for repo_root.
        repo_root = req.repo_root or req.folder_path
        if not repo_root:
            raise HTTPException(status_code=422, detail="repo_root is required for local_git mode")
        session = mgr.create_local_git_session(repo_root, req.branch)
    elif req.mode == WorkspaceMode.github:
        if not req.repo_full_name:
            raise HTTPException(status_code=422, detail="repo_full_name is required for github mode")
        session = mgr.create_github_session(req.repo_full_name, req.branch)
    else:
        raise HTTPException(status_code=400, detail=f"Unknown mode: {req.mode}")

    return StartSessionResponse(
        session_id=session.id,
        mode=req.mode,
        title=session.name,
        folder_path=session.folder_path,
        repo_root=session.repo_root,
        repo_full_name=session.repo_full_name,
        branch=session.branch,
    )
+
+
@app.post("/api/chat/send")
async def api_chat_message_v2(req: _ChatMessageRequest):
    """Normalized chat message endpoint for the redesigned extension.

    GitHub-backed sessions route through the canonical dispatcher; folder
    sessions fall back to a direct LLM call. Provider failures are turned
    into friendly assistant-visible messages rather than HTTP errors, and
    both sides of the exchange are persisted to the session.
    """
    # Bug fix: dropped the unused ChatMessageRequest import (the request
    # model is already bound module-wide as _ChatMessageRequest).
    from gitpilot.models import ChatMessageResponse
    from gitpilot.session import SessionManager
    import uuid

    mgr = SessionManager()

    # Load session
    try:
        session = mgr.load(req.session_id)
    except Exception:
        raise HTTPException(status_code=404, detail=f"Session {req.session_id} not found")

    # Use the canonical dispatcher for chat
    answer = ""
    plan = None
    references = []

    repo_full = session.repo_full_name or ""
    try:
        if repo_full:
            result = await dispatch_request(
                user_request=req.message,
                repo_full_name=repo_full,
                branch_name=session.branch,
            )
            if isinstance(result, dict):
                # Dispatcher results vary by task type; probe the common
                # answer-bearing keys in priority order.
                answer = (
                    result.get("result")
                    or result.get("answer")
                    or result.get("message")
                    or result.get("summary")
                    or str(result)
                )
                plan = result.get("plan")
                references = result.get("references", [])
            else:
                answer = str(result)
        else:
            # Folder-mode: use LLM directly for simple chat
            from gitpilot.llm_provider import build_llm
            llm = build_llm()
            local_prompt = _build_local_repo_aware_prompt(req, session)
            answer = llm.call(
                [{"role": "user", "content": local_prompt}]
            )
    except Exception as e:
        err_str = str(e)
        # Friendly messages for common quota/rate-limit provider failures.
        _q_kw = ["insufficient_quota", "exceeded your current quota", "rate_limit_exceeded", "429"]
        if any(kw in err_str.lower() for kw in _q_kw):
            answer = (
                "Your LLM provider credits have been exhausted or you've hit a "
                "rate limit. Please check your billing details or switch to a "
                "free local provider (Ollama / OllaBridge) in Settings."
            )
        elif "No valid task outputs" in err_str:
            answer = (
                "The LLM returned an empty response. This often happens with "
                "small models. Try enabling Lite Mode in Settings."
            )
        else:
            answer = f"Error processing message: {err_str}"

    # Store message in session
    from gitpilot.session import Message
    session.messages.append(Message(role="user", content=req.message))
    session.messages.append(Message(role="assistant", content=answer))
    mgr.save(session)

    # Extract structured edits from the LLM answer so the VS Code
    # extension can offer an "Apply Patch" button for file creation.
    edits = _extract_edits_from_answer(answer) if answer else []

    return ChatMessageResponse(
        session_id=req.session_id,
        answer=answer,
        message_id=str(uuid.uuid4()),
        plan=plan,
        edits=edits,
        references=references,
    )
+
+
@app.get("/api/workspace/summary")
async def api_workspace_summary(folder_path: str = Query(default=".")):
    """Get workspace summary for UI display."""
    from gitpilot.workspace import summarize_workspace
    summary = await summarize_workspace(folder_path)
    return summary
+
+
@app.get("/api/security/scan-workspace")
async def api_security_scan_workspace(path: str = Query(default=".")):
    """Quick action security scan for workspace."""
    from gitpilot.security import scan_current_workspace
    report = scan_current_workspace(path)
    return report
+
+
+# ============================================================================
+# Static Files & Frontend Serving (SPA Support)
+# ============================================================================
+
# Frontend build output shipped alongside this module:
#   web/         — index.html plus top-level static files
#   web/assets/  — bundled assets served under /assets
STATIC_DIR = Path(__file__).resolve().parent / "web"
ASSETS_DIR = STATIC_DIR / "assets"

# Mount only when the directories exist so the API still starts without a
# built frontend (the SPA routes below then return a 500 JSON explanation).
if ASSETS_DIR.exists():
    app.mount("/assets", StaticFiles(directory=ASSETS_DIR), name="assets")

if STATIC_DIR.exists():
    app.mount("/static", StaticFiles(directory=STATIC_DIR), name="static")
+
+
@app.get("/api/ping")
async def ping():
    """Zero-dependency liveness ping.

    Used by the frontend's initApp() to detect when the backend is accepting
    requests. Touches no modules, settings, or external APIs, so it stays
    fast even during CrewAI warmup or GitHub API outages.
    """
    response = {"ok": True, "service": "gitpilot"}
    response["version"] = __version__
    return response
+
+
@app.get("/api/health")
async def health_check():
    """Lightweight health check — always fast, used by HF Spaces HEALTHCHECK."""
    return dict(status="healthy", service="gitpilot-backend")
+
+
@app.get("/api/health/deep")
async def deep_health():
    """Deep health check — verifies LLM provider connectivity and system status."""
    from .resilience import deep_health_check
    result = await deep_health_check()
    # Anything but "healthy" is reported as 503 so orchestrators can react.
    code = 200 if result["status"] == "healthy" else 503
    return JSONResponse(content=result, status_code=code)
+
+
@app.get("/healthz")
async def healthz():
    """Health check endpoint (Render/Kubernetes standard)."""
    return dict(status="healthy", service="gitpilot-backend")
+
+
@app.get("/", include_in_schema=False)
async def index():
    """Serve the React App entry point."""
    entry = STATIC_DIR / "index.html"
    if not entry.exists():
        return JSONResponse(
            {"message": "GitPilot UI not built. The static files directory is missing."},
            status_code=500,
        )
    return FileResponse(entry)
+
+
@app.get("/{full_path:path}", include_in_schema=False)
async def catch_all_spa_routes(full_path: str):
    """
    Catch-all route to serve index.html for frontend routing.
    Excludes '/api' paths to ensure genuine API 404s are returned as JSON.
    """
    if full_path.startswith("api/"):
        return JSONResponse({"detail": "Not Found"}, status_code=404)

    entry = STATIC_DIR / "index.html"
    if not entry.exists():
        return JSONResponse(
            {"message": "GitPilot UI not built. The static files directory is missing."},
            status_code=500,
        )
    return FileResponse(entry)
+
# ---------------------------------------------------------------------------
# OllaBridge Cloud Extension (additive, non-destructive)
# ---------------------------------------------------------------------------
# Optionally wires extra OllaBridge routes onto the app. The ImportError
# guard lets the server run in deployments that ship without this module.
try:
    from .api_ollabridge_ext import apply_ollabridge_extension as _apply_ob
    _apply_ob(app)
    del _apply_ob
except ImportError:
    pass  # Extension not available, skip gracefully
+
+
+# ============================================================================
+# V2 Streaming Agent Endpoints (additive, non-destructive)
+#
+# These endpoints use the unified AgentEventBus protocol so every client
+# (VS Code, React web, HF Spaces) receives the same JSON event shapes.
+#
+# Existing endpoints are NOT modified. These are /api/v2/ prefixed.
+# ============================================================================
+
+import asyncio as _asyncio
+from fastapi import Request as _Request
+from fastapi.responses import StreamingResponse as _StreamingResponse
+from gitpilot.agent_events import get_bus as _get_bus, remove_bus as _remove_bus, EventType as _EvType
+from gitpilot.agent_executor import StreamingAgentExecutor as _StreamingExecutor
+from gitpilot.approval_protocol import ApprovalGate as _ApprovalGate
+from gitpilot.workspace import WorkspaceManager as _V2WorkspaceManager
+
# Track active executors for cancellation
# Keyed by session id ("ephemeral" for sessionless requests); entries are
# removed in the SSE generator's finally block when a stream ends.
_active_executors: dict[str, _StreamingExecutor] = {}
+
+
@app.post("/api/v2/chat/stream", tags=["v2-streaming"])
async def v2_chat_stream(request: _Request):
    """
    Server-Sent Events endpoint for agent execution.

    Returns text/event-stream. Each line is:
        data: {"type": "text_delta", "text": "..."}\n\n
        data: {"type": "tool_start", "name": "read_file", ...}\n\n
        data: {"type": "done", ...}\n\n

    This is the PREFERRED endpoint for:
    - Hugging Face Spaces (SSE works through nginx/proxies)
    - VS Code extension (can consume SSE via fetch ReadableStream)
    - Any HTTP client that supports streaming
    """
    body = await request.json()
    user_message = body.get("message", "")
    session_id = body.get("session_id", "")
    permission_mode = body.get("permission_mode", "normal")

    if not user_message:
        return JSONResponse({"error": "message is required"}, status_code=400)

    # Load session (reuse existing session manager)
    session = None
    repo_full_name = ""
    branch = None
    token = body.get("token")

    if session_id:
        try:
            session = _session_mgr.load(session_id)
            repo_full_name = session.repo_full_name or ""
            branch = session.branch
        except FileNotFoundError:
            return JSONResponse({"error": "Session not found"}, status_code=404)

    # One event bus per session; sessionless calls share the "ephemeral" bus.
    bus = _get_bus(session_id or "ephemeral")
    gate = _ApprovalGate(bus, mode=permission_mode)

    # Resolve workspace (if session has a local workspace)
    workspace = None
    if session and repo_full_name:
        try:
            parts = repo_full_name.split("/", 1)
            if len(parts) == 2:
                ws_mgr = _V2WorkspaceManager()
                workspace = await ws_mgr.ensure_workspace(
                    owner=parts[0], repo=parts[1],
                    token=token, branch=branch,
                )
        except Exception as ws_err:
            # Workspace resolution is best-effort; the executor still runs
            # with workspace=None.
            logger.warning("Could not resolve workspace: %s", ws_err)

    executor = _StreamingExecutor(
        bus=bus, gate=gate, workspace=workspace,
        ws_manager=_V2WorkspaceManager(),
    )
    # Registered so cancellation endpoints can reach this run by key.
    _active_executors[session_id or "ephemeral"] = executor

    sub_id, _queue = bus.subscribe()

    async def event_generator():
        """Run agent in background, yield events as SSE."""
        # Start execution as a background task
        exec_task = _asyncio.create_task(
            executor.execute(
                user_message=user_message,
                repo_full_name=repo_full_name,
                branch=branch,
                token=token,
            )
        )

        try:
            async for event in bus.stream(sub_id):
                yield event.to_sse()
                if event.type in (_EvType.DONE, _EvType.ERROR):
                    break
        finally:
            # Runs on normal completion AND when the client disconnects
            # (generator close) — always release the subscription/executor.
            bus.unsubscribe(sub_id)
            _active_executors.pop(session_id or "ephemeral", None)

            # Ensure the task completes
            if not exec_task.done():
                exec_task.cancel()
                try:
                    await exec_task
                except (_asyncio.CancelledError, Exception):
                    # CancelledError is a BaseException subclass, so it must
                    # be listed explicitly alongside Exception.
                    pass

            # Save assistant message to session
            if session and exec_task.done() and not exec_task.cancelled():
                try:
                    result = exec_task.result()
                    if result:
                        summary = result.get("summary", "") if isinstance(result, dict) else str(result)
                        session.add_message("assistant", summary[:5000])
                        _session_mgr.save(session)
                except Exception:
                    pass

            _remove_bus(session_id or "ephemeral")

    return _StreamingResponse(
        event_generator(),
        media_type="text/event-stream",
        headers={
            "Cache-Control": "no-cache",
            "Connection": "keep-alive",
            # Disable nginx proxy buffering so SSE events flush immediately.
            "X-Accel-Buffering": "no",
        },
    )
+
+
+@app.post("/api/v2/approval/respond", tags=["v2-streaming"])
+async def v2_approval_respond(request: _Request):
+ """
+ Client sends approval/denial for a tool execution.
+ Used by all clients (web, VS Code, HF Spaces).
+ """
+ body = await request.json()
+ session_id = body.get("session_id", "ephemeral")
+ request_id = body.get("request_id", "")
+ approved = body.get("approved", False)
+ scope = body.get("scope", "once")
+
+ if not request_id:
+ return JSONResponse({"error": "request_id is required"}, status_code=400)
+
+ # The approval gate is created per-stream, so we emit an event
+ # that the gate's listener will pick up
+ bus = _get_bus(session_id)
+ from gitpilot.agent_events import approval_resolved
+ await bus.emit(approval_resolved(request_id, approved))
+
+ return {"status": "resolved", "request_id": request_id, "approved": approved}
+
+
+@app.post("/api/v2/agent/cancel", tags=["v2-streaming"])
+async def v2_agent_cancel(request: _Request):
+ """Cancel the running agent stream for a session."""
+ body = await request.json()
+ session_id = body.get("session_id", "ephemeral")
+
+ executor = _active_executors.get(session_id)
+ if executor:
+ executor.cancel()
+ return {"status": "cancelled", "session_id": session_id}
+
+ return JSONResponse({"error": "No active executor for this session"}, status_code=404)
+
+
+@app.websocket("/ws/v2/sessions/{session_id}")
+async def v2_session_websocket(websocket: WebSocket, session_id: str):
+    """
+    V2 WebSocket with full agent streaming protocol.
+
+    Same event types as SSE endpoint. Client can also send:
+      { type: "user_message", content: "..." }
+      { type: "approval_response", request_id: "...", approved: true, scope: "session" }
+      { type: "cancel" }
+      { type: "ping" }
+
+    Lifecycle: accept -> load session (error + close if missing) ->
+    "session_restored" handshake -> pump bus events to the socket while
+    handling inbound client messages -> tear everything down on exit.
+    """
+    await websocket.accept()
+
+    try:
+        session = _session_mgr.load(session_id)
+    except FileNotFoundError:
+        # Already accepted, so report the error in-band before closing.
+        await _safe_ws_send_json(websocket, {"type": "error", "message": "Session not found"})
+        try:
+            await websocket.close()
+        except Exception:
+            pass
+        return
+
+    # Handshake; a False return means the peer already disconnected.
+    if not await _safe_ws_send_json(websocket, {
+        "type": "session_restored",
+        "session_id": session.id,
+        "status": session.status,
+        "protocol": "v2",
+    }):
+        logger.info("V2 WebSocket disconnected before handshake for session %s", session_id)
+        return
+
+    bus = _get_bus(session_id)
+    gate = _ApprovalGate(bus)  # default permission mode
+    sub_id, _queue = bus.subscribe()
+
+    # Forward bus events -> WebSocket
+    async def forward_events():
+        try:
+            async for event in bus.stream(sub_id):
+                if not await _safe_ws_send_json(websocket, event.to_dict()):
+                    break  # socket went away; stop pumping
+        except Exception:
+            pass
+
+    forwarder = _asyncio.create_task(forward_events())
+
+    try:
+        while True:
+            try:
+                data = await websocket.receive_json()
+            except WebSocketDisconnect:
+                break
+            event_type = data.get("type", "")
+
+            if event_type == "user_message":
+                content = data.get("content", "")
+                if not content:
+                    continue
+
+                # Persist the user turn before the agent starts replying.
+                session.add_message("user", content)
+                _session_mgr.save(session)
+
+                # Resolve workspace (best-effort; the agent may run without one)
+                workspace = None
+                repo_full = session.repo_full_name or ""
+                parts = repo_full.split("/", 1)
+                if len(parts) == 2:
+                    try:
+                        ws_mgr = _V2WorkspaceManager()
+                        workspace = await ws_mgr.ensure_workspace(
+                            owner=parts[0], repo=parts[1],
+                            token=data.get("token"),
+                            branch=session.branch,
+                        )
+                    except Exception:
+                        pass
+
+                executor = _StreamingExecutor(
+                    bus=bus, gate=gate, workspace=workspace,
+                    ws_manager=_V2WorkspaceManager(),
+                )
+                _active_executors[session_id] = executor
+
+                # Run agent (non-blocking). NOTE(review): the task handle is
+                # discarded, so failures inside execute() surface only through
+                # bus events — confirm execute() reports its own errors.
+                _asyncio.create_task(executor.execute(
+                    user_message=content,
+                    repo_full_name=repo_full,
+                    branch=session.branch,
+                    token=data.get("token"),
+                ))
+
+            elif event_type == "approval_response":
+                # Direct path: releases the Future the gate is awaiting.
+                gate.resolve(
+                    request_id=data.get("request_id", ""),
+                    approved=data.get("approved", False),
+                    scope=data.get("scope", "once"),
+                )
+
+            elif event_type == "cancel":
+                executor = _active_executors.get(session_id)
+                if executor:
+                    executor.cancel()
+
+            elif event_type == "ping":
+                # Keep-alive; a failed pong means the peer is gone.
+                if not await _safe_ws_send_json(websocket, {"type": "pong"}):
+                    break
+
+    except WebSocketDisconnect:
+        logger.info("V2 WebSocket disconnected for session %s", session_id)
+    except Exception as e:
+        logger.error("V2 WebSocket error for session %s: %s", session_id, e)
+    finally:
+        # Teardown: stop the forwarder, drop the bus subscription, forget the
+        # executor, deny outstanding approvals, then retire the bus itself.
+        forwarder.cancel()
+        bus.unsubscribe(sub_id)
+        _active_executors.pop(session_id, None)
+        gate.cancel_all()
+        _remove_bus(session_id)
diff --git a/gitpilot/api_ollabridge_ext.py b/gitpilot/api_ollabridge_ext.py
new file mode 100644
index 0000000000000000000000000000000000000000..b0803a1fa5dd7007e3248796aadd6a12e2f82ee3
--- /dev/null
+++ b/gitpilot/api_ollabridge_ext.py
@@ -0,0 +1,106 @@
+"""OllaBridge Cloud integration extension for GitPilot API.
+
+This module patches the FastAPI app at import time to add:
+- OllaBridge as a first-class LLM provider in settings
+- /api/ollabridge/* proxy endpoints (pairing, models, health)
+
+Completely additive - does not modify api.py.
+Imported automatically via __init__.py or cli.py startup.
+"""
+from __future__ import annotations
+
+import logging
+
+from pydantic import BaseModel
+
+from .settings import (
+ AppSettings,
+ LLMProvider,
+ get_settings,
+ set_provider,
+ update_settings,
+)
+
+logger = logging.getLogger(__name__)
+
+
+# Extended SettingsResponse that includes ollabridge
+class SettingsResponseExt(BaseModel):
+    """Settings payload returned by the patched /api/settings endpoints.
+
+    Mirrors the original response shape and adds the ollabridge fields,
+    so existing clients keep working while new clients see the extras.
+    """
+
+    provider: LLMProvider           # currently active provider
+    providers: list[LLMProvider]    # every provider offered to the UI
+    openai: dict
+    claude: dict
+    watsonx: dict
+    ollama: dict
+    ollabridge: dict = {}
+    # "api_key" when a cloud key is set, else "local" (derived in
+    # _build_settings_response)
+    ollabridge_connection_type: str | None = None
+    langflow_url: str
+    has_langflow_plan_flow: bool
+
+
+# Providers exposed in the settings payload; ollabridge is the addition.
+ALL_PROVIDERS = [
+    LLMProvider.ollabridge,
+    LLMProvider.openai,
+    LLMProvider.claude,
+    LLMProvider.watsonx,
+    LLMProvider.ollama,
+]
+
+
+def _build_settings_response(s: AppSettings) -> SettingsResponseExt:
+ ollabridge_connection_type = "local"
+ if s.ollabridge.api_key:
+ ollabridge_connection_type = "api_key"
+
+ # Warn if user included /v1 in base_url
+ ob_base = s.ollabridge.base_url or ""
+ if ob_base.rstrip("/").endswith("/v1"):
+ # The response should carry a warning; we'll handle this in the settings response
+ pass
+
+ return SettingsResponseExt(
+ provider=s.provider,
+ providers=ALL_PROVIDERS,
+ openai=s.openai.model_dump(),
+ claude=s.claude.model_dump(),
+ watsonx=s.watsonx.model_dump(),
+ ollama=s.ollama.model_dump(),
+ ollabridge=s.ollabridge.model_dump(),
+ ollabridge_connection_type=ollabridge_connection_type,
+ langflow_url=s.langflow_url,
+ has_langflow_plan_flow=bool(s.langflow_plan_flow_id),
+ )
+
+
+def apply_ollabridge_extension(app):
+    """Apply OllaBridge integration to the FastAPI app.
+
+    Call this after the app is created but before it starts serving.
+    Adds/overrides the settings endpoints to include ollabridge,
+    and mounts the ollabridge proxy router.
+
+    NOTE(review): FastAPI/Starlette matches routes in registration order.
+    If api.py already registered /api/settings and friends, those earlier
+    routes win and these "overrides" are never reached — confirm the
+    originals are removed or that this runs before they are registered.
+    """
+    from .ollabridge_proxy import router as ollabridge_router
+
+    # Mount proxy routes
+    app.include_router(ollabridge_router)
+    logger.info("OllaBridge proxy mounted at /api/ollabridge/*")
+
+    # Override settings endpoints to include ollabridge
+    @app.get("/api/settings", response_model=SettingsResponseExt)
+    async def api_get_settings_ext():
+        return _build_settings_response(get_settings())
+
+    # Request body for switching the active provider.
+    class ProviderUpdate(BaseModel):
+        provider: LLMProvider
+
+    @app.post("/api/settings/provider", response_model=SettingsResponseExt)
+    async def api_set_provider_ext(update: ProviderUpdate):
+        s = set_provider(update.provider)
+        return _build_settings_response(s)
+
+    # Accepts a free-form dict; update_settings is responsible for validation.
+    @app.put("/api/settings/llm", response_model=SettingsResponseExt)
+    async def api_update_llm_settings_ext(updates: dict):
+        s = update_settings(updates)
+        return _build_settings_response(s)
+
+    logger.info("OllaBridge settings endpoints registered (overrides original)")
diff --git a/gitpilot/approval_protocol.py b/gitpilot/approval_protocol.py
new file mode 100644
index 0000000000000000000000000000000000000000..703922f14120590f824c10658c8b5928f8288176
--- /dev/null
+++ b/gitpilot/approval_protocol.py
@@ -0,0 +1,142 @@
+# gitpilot/approval_protocol.py
+"""
+Tool approval protocol for agent execution.
+
+When an agent wants to run a dangerous tool (write_file, run_command,
+git_commit), the ApprovalGate pauses execution and waits for the user
+to approve via WebSocket, SSE callback, or VS Code postMessage.
+
+Permission modes:
+ - "normal" Ask user before dangerous tools (default)
+ - "auto" Approve everything automatically
+ - "plan" Block all writes and commands (read-only)
+"""
+from __future__ import annotations
+
+import asyncio
+import logging
+from typing import Dict
+
+from .agent_events import AgentEventBus, approval_needed, approval_resolved
+
+logger = logging.getLogger(__name__)
+
+# Tool identifiers that require user approval before execution. Two naming
+# schemes coexist: snake_case ids from the VS Code local agent and the
+# human-readable display names used by CrewAI tools.
+DANGEROUS_TOOLS = frozenset({
+    # VS Code local agent tool names
+    "write_file",
+    "edit_file",
+    "run_command",
+    "git_commit",
+    # CrewAI tool display names
+    "Write local file",
+    "Delete local file",
+    "Run shell command",
+    "Git commit",
+})
+
+
+class ApprovalGate:
+ """
+ Async approval gate between the agent and tool execution.
+
+ Flow:
+ 1. Agent calls gate.check(tool, args, summary)
+ 2. Gate emits APPROVAL_NEEDED event to the bus
+ 3. Gate creates an asyncio.Future and waits
+ 4. Client sends approval response
+ 5. resolve() sets the Future result, agent proceeds or skips
+ """
+
+ def __init__(self, bus: AgentEventBus, mode: str = "normal") -> None:
+ self._bus = bus
+ self._mode = mode
+ self._pending: Dict[str, asyncio.Future] = {}
+ self._session_allowed: set[str] = set()
+
+ @property
+ def mode(self) -> str:
+ return self._mode
+
+ @mode.setter
+ def mode(self, value: str) -> None:
+ self._mode = value
+
+ async def check(
+ self,
+ tool_name: str,
+ tool_args: dict,
+ summary: str = "",
+ diff_preview: str | None = None,
+ ) -> bool:
+ """
+ Returns True if the tool may proceed. Blocks until user responds.
+ """
+ if tool_name not in DANGEROUS_TOOLS:
+ return True
+
+ if self._mode == "auto":
+ return True
+
+ if self._mode == "plan":
+ await self._bus.emit(
+ approval_resolved(f"denied-plan-{id(self)}", approved=False)
+ )
+ return False
+
+ if tool_name in self._session_allowed:
+ return True
+
+ # Normal mode: ask user
+ request_id = f"approval-{id(self)}-{len(self._pending)}"
+ future: asyncio.Future = asyncio.get_event_loop().create_future()
+ self._pending[request_id] = future
+
+ risk = "high" if tool_name in ("run_command", "Run shell command") else "medium"
+
+ await self._bus.emit(
+ approval_needed(
+ request_id=request_id,
+ tool=tool_name,
+ args=tool_args,
+ summary=summary
+ or f"{tool_name}({', '.join(f'{k}={v!r}' for k, v in list(tool_args.items())[:3])})",
+ diff_preview=diff_preview,
+ risk=risk,
+ )
+ )
+
+ try:
+ result = await asyncio.wait_for(future, timeout=120.0)
+ except asyncio.TimeoutError:
+ logger.warning(
+ "Approval timed out for %s (request %s)", tool_name, request_id
+ )
+ self._pending.pop(request_id, None)
+ return False
+
+ self._pending.pop(request_id, None)
+
+ approved = result.get("approved", False)
+ scope = result.get("scope", "once")
+
+ if approved and scope == "session":
+ self._session_allowed.add(tool_name)
+
+ await self._bus.emit(approval_resolved(request_id, approved))
+ return approved
+
+ def resolve(
+ self, request_id: str, approved: bool, scope: str = "once"
+ ) -> None:
+ """Called by the transport layer when the user responds."""
+ future = self._pending.get(request_id)
+ if future and not future.done():
+ future.set_result({"approved": approved, "scope": scope})
+
+ def cancel_all(self) -> None:
+ """Deny all pending approvals (e.g., on session close)."""
+ for future in self._pending.values():
+ if not future.done():
+ future.set_result({"approved": False, "scope": "once"})
+ self._pending.clear()
+ self._session_allowed.clear()
diff --git a/gitpilot/checkpoints.py b/gitpilot/checkpoints.py
new file mode 100644
index 0000000000000000000000000000000000000000..04725b02d13c6e021f90d87b6b14f0685487f485
--- /dev/null
+++ b/gitpilot/checkpoints.py
@@ -0,0 +1,275 @@
+# gitpilot/checkpoints.py
+"""Project checkpointing via a shadow git repository.
+
+A checkpoint is a three-part snapshot taken before a mutating tool
+call:
+
+1. A git commit in a shadow repo at
+ ``~/.gitpilot/history/``. This commit contains a
+ copy of all tracked files (plus untracked, ignoring ``.git/``).
+2. The conversation transcript up to that point, serialised as JSON.
+3. A descriptor of the tool call that was about to run.
+
+Restoring a checkpoint copies the snapshot files back into the
+workspace and re-emits the saved transcript so the conversation can be
+resumed deterministically.
+
+The module is opt-in and side-effect-free until :meth:`CheckpointStore.snapshot`
+is called. It deliberately shells out to the ``git`` CLI rather than using a
+Python git library, to keep dependencies minimal.
+"""
+from __future__ import annotations
+
+import hashlib
+import json
+import logging
+import shutil
+import subprocess
+import time
+from dataclasses import asdict, dataclass, field
+from pathlib import Path
+from typing import Any, Dict, List, Optional
+
+logger = logging.getLogger(__name__)
+
+# Root directory for all workspace histories (one subdir per workspace hash).
+HISTORY_ROOT = Path.home() / ".gitpilot" / "history"
+META_DIR = "meta"          # per-checkpoint metadata subdirectory name
+SNAP_DIR = "snapshot"      # shadow git repo subdirectory name
+TRANSCRIPT_FILE = "transcript.json"
+DESCRIPTOR_FILE = "tool_call.json"
+
+
+@dataclass
+class CheckpointRecord:
+    """Lightweight checkpoint summary returned to callers."""
+
+    id: str                            # checkpoint id (see _format_id)
+    timestamp: float                   # epoch seconds when the snapshot was taken
+    tool_name: str                     # tool that was about to run
+    target_path: Optional[str] = None  # file the tool targeted, if any
+    note: str = ""                     # free-form note from the descriptor
+    files_changed: int = 0             # files copied into the snapshot mirror
+    commit_sha: Optional[str] = None   # shadow-repo commit; None if commit failed
+
+    def to_dict(self) -> Dict[str, Any]:
+        # asdict() yields a plain, JSON-serialisable dict.
+        return asdict(self)
+
+
+@dataclass
+class ToolCallDescriptor:
+    """The tool call that was about to run when the checkpoint was made."""
+
+    name: str                          # tool name (used in the checkpoint id)
+    arguments: Dict[str, Any] = field(default_factory=dict)  # raw tool kwargs
+    target_path: Optional[str] = None  # primary file the call touches, if any
+    note: str = ""                     # optional human-readable context
+
+    def to_dict(self) -> Dict[str, Any]:
+        return asdict(self)
+
+
+class CheckpointStore:
+ """Manage checkpoints for a single workspace."""
+
+ def __init__(self, workspace_path: Path, history_root: Optional[Path] = None) -> None:
+ self.workspace_path = workspace_path.resolve()
+ root = history_root or HISTORY_ROOT
+ self.history_dir = root / _workspace_hash(self.workspace_path)
+
+ # ------------------------------------------------------------------
+ # Lifecycle
+ # ------------------------------------------------------------------
+ def init(self) -> None:
+ self.history_dir.mkdir(parents=True, exist_ok=True)
+ snap = self.history_dir / SNAP_DIR
+ snap.mkdir(exist_ok=True)
+ if not (snap / ".git").exists():
+ self._git(snap, "init", "-q")
+ self._git(snap, "config", "user.email", "checkpoints@gitpilot.local")
+ self._git(snap, "config", "user.name", "GitPilot Checkpoints")
+ (self.history_dir / META_DIR).mkdir(exist_ok=True)
+
+ # ------------------------------------------------------------------
+ # Snapshot / restore
+ # ------------------------------------------------------------------
+ def snapshot(
+ self,
+ descriptor: ToolCallDescriptor,
+ transcript: Optional[List[Dict[str, Any]]] = None,
+ ) -> CheckpointRecord:
+ """Capture the workspace + transcript + tool call descriptor."""
+ self.init()
+ snap = self.history_dir / SNAP_DIR
+ files_changed = _mirror_workspace(self.workspace_path, snap)
+ ts = time.time()
+ ckpt_id = _format_id(ts, descriptor)
+ meta_dir = self.history_dir / META_DIR / ckpt_id
+ meta_dir.mkdir(parents=True, exist_ok=True)
+ (meta_dir / TRANSCRIPT_FILE).write_text(
+ json.dumps(transcript or [], indent=2), encoding="utf-8"
+ )
+ (meta_dir / DESCRIPTOR_FILE).write_text(
+ json.dumps(descriptor.to_dict(), indent=2), encoding="utf-8"
+ )
+ commit_sha: Optional[str] = None
+ try:
+ self._git(snap, "add", "-A")
+ res = self._git(snap, "commit", "-q", "--allow-empty", "-m", ckpt_id, capture=True)
+ commit_sha = self._git(snap, "rev-parse", "HEAD", capture=True).strip() or None
+ _ = res
+ except Exception as e:
+ logger.warning("checkpoint commit failed: %s", e)
+ record = CheckpointRecord(
+ id=ckpt_id,
+ timestamp=ts,
+ tool_name=descriptor.name,
+ target_path=descriptor.target_path,
+ note=descriptor.note,
+ files_changed=files_changed,
+ commit_sha=commit_sha,
+ )
+ (meta_dir / "record.json").write_text(
+ json.dumps(record.to_dict(), indent=2), encoding="utf-8"
+ )
+ return record
+
+ def list(self) -> List[CheckpointRecord]:
+ out: List[CheckpointRecord] = []
+ meta_root = self.history_dir / META_DIR
+ if not meta_root.exists():
+ return out
+ for child in sorted(meta_root.iterdir(), reverse=True):
+ record_file = child / "record.json"
+ if not record_file.exists():
+ continue
+ try:
+ data = json.loads(record_file.read_text(encoding="utf-8"))
+ out.append(CheckpointRecord(**data))
+ except Exception as e:
+ logger.debug("could not load checkpoint %s: %s", child, e)
+ return out
+
+ def restore(self, checkpoint_id: str) -> Dict[str, Any]:
+ """Restore files for ``checkpoint_id`` and return the transcript."""
+ meta_dir = self.history_dir / META_DIR / checkpoint_id
+ if not meta_dir.exists():
+ raise FileNotFoundError(f"unknown checkpoint: {checkpoint_id}")
+ snap = self.history_dir / SNAP_DIR
+ record_path = meta_dir / "record.json"
+ if not record_path.exists():
+ raise FileNotFoundError("missing record.json")
+ record = json.loads(record_path.read_text(encoding="utf-8"))
+ sha = record.get("commit_sha")
+ if sha:
+ try:
+ self._git(snap, "checkout", "-q", sha, "--", ".")
+ except Exception as e:
+ logger.warning("checkout of %s failed: %s", sha, e)
+ # Mirror snapshot files back into the workspace (additive only —
+ # we never delete files the user may have created since).
+ _restore_workspace(snap, self.workspace_path)
+ transcript_path = meta_dir / TRANSCRIPT_FILE
+ descriptor_path = meta_dir / DESCRIPTOR_FILE
+ return {
+ "record": record,
+ "transcript": json.loads(transcript_path.read_text(encoding="utf-8"))
+ if transcript_path.exists() else [],
+ "tool_call": json.loads(descriptor_path.read_text(encoding="utf-8"))
+ if descriptor_path.exists() else {},
+ }
+
+ # ------------------------------------------------------------------
+ # Maintenance
+ # ------------------------------------------------------------------
+ def prune(self, keep_last: int = 50) -> int:
+ records = self.list()
+ if len(records) <= keep_last:
+ return 0
+ removed = 0
+ for record in records[keep_last:]:
+ target = self.history_dir / META_DIR / record.id
+ if target.exists():
+ shutil.rmtree(target, ignore_errors=True)
+ removed += 1
+ return removed
+
+ # ------------------------------------------------------------------
+ # Internals
+ # ------------------------------------------------------------------
+ def _git(self, cwd: Path, *args: str, capture: bool = False) -> str:
+ proc = subprocess.run(
+ ["git", *args],
+ cwd=str(cwd),
+ check=False,
+ capture_output=True,
+ text=True,
+ timeout=30,
+ )
+ if proc.returncode != 0:
+ raise RuntimeError(proc.stderr.strip() or f"git {args[0]} failed")
+ return proc.stdout if capture else ""
+
+
+# ----------------------------------------------------------------------
+# Helpers
+# ----------------------------------------------------------------------
+
+_DEFAULT_IGNORES = {".git", ".gitpilot", "__pycache__", "node_modules", ".venv", ".tox"}
+
+
+def _workspace_hash(workspace: Path) -> str:
+ return hashlib.sha1(str(workspace).encode("utf-8")).hexdigest()[:12]
+
+
+def _format_id(ts: float, descriptor: ToolCallDescriptor) -> str:
+ iso = time.strftime("%Y%m%dT%H%M%SZ", time.gmtime(ts))
+ tool = descriptor.name.replace("/", "_")
+ suffix = f"-{Path(descriptor.target_path).name}" if descriptor.target_path else ""
+ return f"{iso}-{tool}{suffix}"[:120]
+
+
+def _mirror_workspace(src: Path, dst: Path) -> int:
+    """Copy ``src`` into ``dst`` (overwriting), skipping ignored paths.
+
+    Returns the number of files copied. ``dst`` must already exist
+    (CheckpointStore.init creates it); its ``.git/`` is preserved so the
+    shadow repo's history survives the wipe.
+    """
+    count = 0
+    # Wipe existing snapshot content (but keep its .git/).
+    for entry in list(dst.iterdir()):
+        if entry.name == ".git":
+            continue
+        if entry.is_dir():
+            shutil.rmtree(entry, ignore_errors=True)
+        else:
+            try:
+                entry.unlink()
+            except OSError:
+                pass  # best-effort: leave undeletable files in place
+    for path in src.rglob("*"):
+        rel = path.relative_to(src)
+        # Skip anything under an ignored directory at any depth.
+        if any(part in _DEFAULT_IGNORES for part in rel.parts):
+            continue
+        target = dst / rel
+        if path.is_dir():
+            target.mkdir(parents=True, exist_ok=True)
+            continue
+        try:
+            target.parent.mkdir(parents=True, exist_ok=True)
+            # NOTE(review): copy2 follows symlinks, so a link pointing outside
+            # the workspace is copied by content — confirm that is intended.
+            shutil.copy2(path, target)
+            count += 1
+        except OSError:
+            continue  # unreadable/special files are skipped silently
+    return count
+
+
+def _restore_workspace(src: Path, dst: Path) -> None:
+ for path in src.rglob("*"):
+ rel = path.relative_to(src)
+ if rel.parts and rel.parts[0] == ".git":
+ continue
+ target = dst / rel
+ if path.is_dir():
+ target.mkdir(parents=True, exist_ok=True)
+ continue
+ try:
+ target.parent.mkdir(parents=True, exist_ok=True)
+ shutil.copy2(path, target)
+ except OSError:
+ continue
diff --git a/gitpilot/cli.py b/gitpilot/cli.py
new file mode 100644
index 0000000000000000000000000000000000000000..a0c860a996bf5c0bb79dca68b3d7eeab72a5994c
--- /dev/null
+++ b/gitpilot/cli.py
@@ -0,0 +1,847 @@
+from __future__ import annotations
+
+import os
+import sys
+import threading
+import time
+import webbrowser
+from pathlib import Path
+
+import typer
+import uvicorn
+from rich.console import Console
+from rich.panel import Panel
+from rich.table import Table
+
+from .version import __version__
+from .settings import get_settings, LLMProvider
+from .model_catalog import list_models_for_provider
+
+
+# Typer application and shared Rich console used by every command below.
+cli = typer.Typer(add_completion=False, help="GitPilot - Agentic AI assistant for GitHub")
+console = Console()
+
+
+def _check_configuration():
+ """Check and display configuration status."""
+ issues = []
+ warnings = []
+
+ # Check for .env file
+ env_file = Path.cwd() / ".env"
+ has_env = env_file.exists()
+
+ # Check GitHub token
+ github_token = os.getenv("GITPILOT_GITHUB_TOKEN") or os.getenv("GITHUB_TOKEN")
+ if not github_token:
+ issues.append("❌ GitHub token not found")
+ warnings.append(" Set GITPILOT_GITHUB_TOKEN or GITHUB_TOKEN in .env")
+ warnings.append(" Get token at: https://github.com/settings/tokens")
+
+ # Check LLM provider configuration
+ settings = get_settings()
+ provider = settings.provider
+
+ provider_configured = False
+ if provider == LLMProvider.openai:
+ api_key = settings.openai.api_key or os.getenv("OPENAI_API_KEY")
+ provider_configured = bool(api_key)
+ elif provider == LLMProvider.claude:
+ api_key = settings.claude.api_key or os.getenv("ANTHROPIC_API_KEY")
+ provider_configured = bool(api_key)
+ elif provider == LLMProvider.watsonx:
+ api_key = settings.watsonx.api_key or os.getenv("WATSONX_API_KEY")
+ provider_configured = bool(api_key)
+ elif provider == LLMProvider.ollama:
+ # Ollama doesn't require API key, just needs to be running
+ provider_configured = True
+
+ if not provider_configured:
+ issues.append(f"❌ {provider.value.upper()} API key not configured")
+ warnings.append(f" Configure in Admin UI or set environment variable")
+
+ return has_env, github_token is not None, provider_configured, issues, warnings
+
+
+def _display_startup_banner(host: str, port: int):
+    """Display a professional startup banner with configuration status.
+
+    Purely cosmetic: prints Rich panels/tables to the console; returns None.
+    """
+    console.print()
+
+    # Header
+    console.print(Panel.fit(
+        f"[bold cyan]GitPilot[/bold cyan] [dim]v{__version__}[/dim]\n"
+        "[white]Agentic AI Assistant for GitHub Repositories[/white]",
+        border_style="cyan"
+    ))
+
+    # Check configuration
+    has_env, has_github, has_llm, issues, warnings = _check_configuration()
+    settings = get_settings()
+
+    # Configuration table (no header row; key/value pairs only)
+    table = Table(show_header=False, box=None, padding=(0, 2))
+    table.add_column("Key", style="cyan")
+    table.add_column("Value", style="white")
+
+    # Environment file status
+    env_status = "✅ Found" if has_env else "⚠️ Not found (using defaults)"
+    table.add_row("Environment File", env_status)
+
+    # GitHub token status
+    github_status = "✅ Configured" if has_github else "❌ Not configured"
+    table.add_row("GitHub Token", github_status)
+
+    # LLM Provider status
+    provider_name = settings.provider.value.upper()
+    llm_status = f"✅ {provider_name}" if has_llm else f"⚠️ {provider_name} (not configured)"
+    table.add_row("LLM Provider", llm_status)
+
+    # Server info
+    table.add_row("Server", f"http://{host}:{port}")
+
+    console.print(table)
+    console.print()
+
+    # Display issues and warnings (warnings are the dimmed follow-up hints
+    # produced alongside issues by _check_configuration)
+    if issues:
+        console.print("[bold yellow]⚠️ Configuration Issues:[/bold yellow]")
+        for issue in issues:
+            console.print(f" {issue}")
+        for warning in warnings:
+            console.print(f" [dim]{warning}[/dim]")
+        console.print()
+
+    # Setup instructions if needed
+    if not has_env and (not has_github or not has_llm):
+        console.print(Panel(
+            "[bold]Quick Setup:[/bold]\n\n"
+            "1. Copy .env.template to .env:\n"
+            " [cyan]cp .env.template .env[/cyan]\n\n"
+            "2. Edit .env and add your credentials\n\n"
+            "3. Or configure via Admin UI in your browser\n\n"
+            "[dim]See README.md for detailed setup instructions[/dim]",
+            title="[yellow]Setup Required[/yellow]",
+            border_style="yellow"
+        ))
+    else:
+        console.print("[bold green]✓[/bold green] GitPilot is ready!")
+        console.print()
+        console.print("[bold]Next Steps:[/bold]")
+        console.print(" • Open the Admin UI to configure LLM providers")
+        console.print(" • Select a repository in the Workspace tab")
+        console.print(" • Start chatting with your AI coding assistant")
+
+    console.print()
+    console.print("[dim]Press Ctrl+C to stop the server[/dim]")
+    console.print()
+
+
+def _run_server(host: str, port: int, reload: bool = False):
+ """Run the FastAPI server."""
+ uvicorn.run(
+ "gitpilot.api:app",
+ host=host,
+ port=port,
+ reload=reload,
+ log_level="info",
+ )
+
+
+def _maybe_bootstrap_workspace(workspace: Path) -> None:
+    """Silently run the first-run wizard when the workspace is fresh.
+
+    Triggers only when *all* of these are true:
+
+    - ``.env`` does not exist
+    - ``.gitpilot/`` does not exist
+    - ``AGENTS.md`` does not exist
+
+    Picks a sensible non-interactive default for the model provider:
+
+    - if ``OPENAI_API_KEY`` / ``ANTHROPIC_API_KEY`` / ``WATSONX_API_KEY``
+      is already set in the environment, use that provider;
+    - otherwise default to Ollama (which needs no key) so the user
+      can keep going without picking up extra credentials.
+
+    Errors are logged and swallowed — bootstrapping must never block
+    ``gitpilot serve``.
+    """
+    try:
+        env_file = workspace / ".env"
+        gitpilot_dir = workspace / ".gitpilot"
+        agents_md = workspace / "AGENTS.md"
+        if env_file.exists() or gitpilot_dir.exists() or agents_md.exists():
+            return  # workspace already configured, leave it alone
+
+        # Pick a provider that won't fail on missing credentials.
+        # Checked in order: Anthropic, OpenAI, watsonx — first env var wins.
+        provider = "ollama"
+        api_key = None
+        for env_var, name in (
+            ("ANTHROPIC_API_KEY", "anthropic"),
+            ("OPENAI_API_KEY", "openai"),
+            ("WATSONX_API_KEY", "watsonx"),
+        ):
+            value = os.environ.get(env_var)
+            if value:
+                provider = name
+                api_key = value
+                break
+
+        # Turn the flag on locally; the wizard rejects calls otherwise.
+        from . import flags as _flags
+        from .init_wizard import (
+            FLAG_INIT_WIZARD,
+            WizardAnswers,
+            run_wizard,
+        )
+
+        previous = _flags.is_on(FLAG_INIT_WIZARD)
+        _flags.set_override(FLAG_INIT_WIZARD, True)
+        try:
+            result = run_wizard(
+                workspace,
+                presets=WizardAnswers(
+                    provider=provider,
+                    api_key=api_key,
+                    mode_slug="coder",
+                    workspace_trust=True,
+                ),
+            )
+        finally:
+            # Always restore the prior flag value, even if the wizard raised.
+            _flags.set_override(FLAG_INIT_WIZARD, previous)
+
+        if result.aborted:
+            return
+        console.print(
+            f"[green]✓[/green] First-run bootstrap: wrote "
+            f"{len(result.files_written)} file(s), provider={provider} "
+            f"(re-run with --skip-init to disable)."
+        )
+    except Exception:
+        # Never block serve startup on a bootstrap hiccup.
+        import logging
+        logging.getLogger(__name__).debug("workspace bootstrap failed", exc_info=True)
+
+
+@cli.command()
+def serve(
+    host: str = typer.Option("127.0.0.1", "--host", "-h", help="Host to bind"),
+    port: int = typer.Option(8000, "--port", "-p", help="Port to bind"),
+    reload: bool = typer.Option(False, "--reload", help="Enable auto-reload"),
+    open_browser: bool = typer.Option(True, "--open/--no-open", help="Open browser"),
+    skip_init: bool = typer.Option(
+        False, "--skip-init",
+        help="Do not auto-run the first-run wizard when the workspace is fresh.",
+    ),
+):
+    """Start the GitPilot server with web UI.
+
+    First-run convenience: when the current workspace has no ``.env``,
+    no ``.gitpilot/`` directory, and no ``AGENTS.md``, we silently
+    bootstrap a minimal config with sensible defaults (Ollama if no
+    provider env var is set; otherwise the matching provider). The
+    user gets a two-command onboarding — ``pip install`` then
+    ``gitpilot serve`` — without giving up the explicit-flag flow.
+    Pass ``--skip-init`` to opt out.
+    """
+    if not skip_init:
+        _maybe_bootstrap_workspace(Path.cwd())
+
+    # Check if port is already in use (prevent double-start).
+    # NOTE(review): this connect-probe is inherently racy (TOCTOU) and treats
+    # ANY listener on the port as "GitPilot already running".
+    import socket
+    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
+        if s.connect_ex((host, port)) == 0:
+            console.print(
+                f"[yellow]⚠[/yellow] Port {port} is already in use. "
+                f"GitPilot may already be running."
+            )
+            console.print(
+                f"[dim]Run 'make stop' or kill the process on port {port} first.[/dim]"
+            )
+            sys.exit(1)
+
+    # Display startup banner
+    _display_startup_banner(host, port)
+
+    # Start server in background thread (non-daemon so the process stays
+    # alive until the server exits).
+    # NOTE(review): uvicorn's reload mode normally requires the main thread
+    # (signal handling) — confirm reload=True works when run from here.
+    thread = threading.Thread(
+        target=_run_server,
+        kwargs={"host": host, "port": port, "reload": reload},
+        daemon=False,
+    )
+    thread.start()
+
+    # Open browser after a short delay so the server has time to bind.
+    if open_browser:
+        time.sleep(1.5)
+        try:
+            webbrowser.open(f"http://{host}:{port}")
+            console.print(f"[green]✓[/green] Browser opened at http://{host}:{port}")
+        except Exception:
+            console.print(f"[yellow]![/yellow] Please open http://{host}:{port} in your browser")
+
+    # Wait for server thread
+    try:
+        thread.join()
+    except KeyboardInterrupt:
+        console.print("\n[yellow]Shutting down GitPilot...[/yellow]")
+        sys.exit(0)
+
+
@cli.command()
def config():
    """Show current configuration.

    Renders a table of the active provider, GitHub token status, and the
    provider-specific credentials/model. Secrets themselves are never
    printed — only "Configured" / "Not set".
    """
    console.print()
    console.print(Panel.fit(
        "[bold cyan]GitPilot Configuration[/bold cyan]",
        border_style="cyan"
    ))

    settings = get_settings()

    # Configuration details
    table = Table(title="Settings", show_header=True, header_style="bold cyan")
    table.add_column("Setting", style="cyan")
    table.add_column("Value", style="white")
    table.add_column("Source", style="dim")

    # Provider: an env var override wins over the settings file.
    env_provider = os.getenv("GITPILOT_PROVIDER")
    provider_source = "Environment" if env_provider else "Settings file"
    table.add_row("Active Provider", settings.provider.value, provider_source)

    # GitHub token: either env var is accepted.
    github_token = os.getenv("GITPILOT_GITHUB_TOKEN") or os.getenv("GITHUB_TOKEN")
    github_status = "Configured" if github_token else "Not set"
    github_source = "Environment" if github_token else "N/A"
    table.add_row("GitHub Token", github_status, github_source)

    def _api_key_rows(label, cfg, env_var, model_value):
        # Shared shape for the cloud providers: one key row + one model row.
        # Env var takes precedence for "Source" only when actually set.
        api_key = cfg.api_key or os.getenv(env_var)
        key_status = "Configured" if api_key else "Not set"
        key_source = "Environment" if os.getenv(env_var) else ("Settings" if cfg.api_key else "N/A")
        table.add_row(f"{label} API Key", key_status, key_source)
        table.add_row(f"{label} Model", model_value, "Settings")

    # Provider-specific config
    if settings.provider == LLMProvider.openai:
        _api_key_rows("OpenAI", settings.openai, "OPENAI_API_KEY",
                      settings.openai.model or "gpt-4o-mini")

    elif settings.provider == LLMProvider.claude:
        _api_key_rows("Claude", settings.claude, "ANTHROPIC_API_KEY",
                      settings.claude.model)

    elif settings.provider == LLMProvider.watsonx:
        _api_key_rows("Watsonx", settings.watsonx, "WATSONX_API_KEY",
                      settings.watsonx.model_id)

    elif settings.provider == LLMProvider.ollama:
        # Ollama is local: no API key, just a base URL and model.
        table.add_row("Ollama URL", settings.ollama.base_url, "Settings")
        table.add_row("Ollama Model", settings.ollama.model, "Settings")

    console.print(table)
    console.print()
    console.print("[dim]Settings file: ~/.gitpilot/settings.json[/dim]")
    console.print()
+
+
@cli.command()
def version():
    """Show GitPilot version."""
    console.print("GitPilot [cyan]v" + __version__ + "[/cyan]")
+
+
+# ---------------------------------------------------------------------------
+# Batch P1-E — `gitpilot doctor` health-check sub-command. Additive.
+# Removal is a one-line revert.
+# ---------------------------------------------------------------------------
@cli.command("doctor", help="Run install / environment health checks.")
def doctor_command(
    # Default is a literal "." resolved at invocation time. A Path.cwd()
    # default would be evaluated once at import time and pinned to
    # whatever directory the process happened to start in.
    workspace: Path = typer.Option(Path("."), "--workspace", "-w", help="Workspace directory"),
    offline: bool = typer.Option(False, "--offline", help="Skip every network probe"),
    json_out: bool = typer.Option(False, "--json", help="Emit machine-readable JSON"),
) -> None:
    """Run health checks and exit with the report's status code."""
    from .doctor import render_json, render_text, run_checks

    report = run_checks(workspace.resolve(), offline=offline)
    console.print(render_json(report) if json_out else render_text(report))
    raise typer.Exit(code=report.exit_code)
+
+
def main():
    """Main entry point - run server by default."""
    if len(sys.argv) > 1:
        # Arguments present: dispatch to the CLI sub-commands.
        cli()
        return

    # No arguments: start the server with defaults, refusing a double-start.
    import socket
    probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        port_busy = probe.connect_ex(("127.0.0.1", 8000)) == 0
    finally:
        probe.close()
    if port_busy:
        console.print(
            "[yellow]⚠[/yellow] Port 8000 is already in use. "
            "GitPilot may already be running."
        )
        sys.exit(1)
    _display_startup_banner("127.0.0.1", 8000)
    try:
        _run_server("127.0.0.1", 8000, reload=False)
    except KeyboardInterrupt:
        console.print("\n[yellow]Shutting down GitPilot...[/yellow]")
        sys.exit(0)
+
+
def serve_only():
    """Entry point for gitpilot-api command."""
    host, port = "127.0.0.1", 8000
    console.print("[cyan]GitPilot API Server[/cyan]")
    console.print(f"[dim]Starting on http://{host}:{port}[/dim]\n")
    try:
        _run_server(host, port, reload=False)
    except KeyboardInterrupt:
        console.print("\n[yellow]Shutting down...[/yellow]")
        sys.exit(0)
@cli.command()
def run(
    repo: str = typer.Option(..., "--repo", "-r", help="Repository as owner/repo"),
    message: str = typer.Option("", "--message", "-m", help="User request message"),
    branch: str = typer.Option(None, "--branch", "-b", help="Target branch"),
    auto_pr: bool = typer.Option(False, "--auto-pr", help="Create PR after execution"),
    from_pr: int = typer.Option(None, "--from-pr", help="Fetch context from PR number"),
    headless: bool = typer.Option(False, "--headless", help="Non-interactive JSON output"),
):
    """Run GitPilot non-interactively (headless mode for CI/CD).

    The request may come from ``--message`` or be piped on stdin. Exits
    0 on success and 1 on failure so it composes with CI pipelines.
    """
    import asyncio

    # Allow `echo "fix the tests" | gitpilot run -r owner/repo` in scripts.
    # (module-level `sys` is already imported; no local re-import needed)
    if not message and not sys.stdin.isatty():
        message = sys.stdin.read().strip()

    if not message:
        console.print("[red]Error:[/red] --message is required (or pipe via stdin)")
        raise typer.Exit(code=1)

    token = os.getenv("GITPILOT_GITHUB_TOKEN") or os.getenv("GITHUB_TOKEN")
    if not token:
        console.print("[red]Error:[/red] GITPILOT_GITHUB_TOKEN or GITHUB_TOKEN must be set")
        raise typer.Exit(code=1)

    from .headless import run_headless

    result = asyncio.run(run_headless(
        repo_full_name=repo,
        message=message,
        token=token,
        branch=branch,
        auto_pr=auto_pr,
        from_pr=from_pr,
    ))

    if headless:
        # Pure JSON for CI/CD consumption
        console.print(result.to_json())
    else:
        if result.success:
            # Truncate long outputs for terminal readability.
            console.print(f"[green]Success:[/green] {result.output[:500]}")
        else:
            console.print(f"[red]Failed:[/red] {result.error}")
        if result.pr_url:
            console.print(f"[cyan]PR:[/cyan] {result.pr_url}")

    raise typer.Exit(code=0 if result.success else 1)
+
+
@cli.command("init")
def init_project(
    path: str = typer.Argument(".", help="Project directory to initialise"),
    wizard: bool = typer.Option(
        False, "--wizard",
        help="Run the interactive first-run wizard (provider, key, mode, trust).",
    ),
    provider: str = typer.Option(
        None, "--provider",
        help="Wizard preset: anthropic | openai | watsonx | ollama (non-interactive).",
    ),
    mode_slug: str = typer.Option(
        None, "--mode",
        help="Wizard preset: coder | planner | reviewer (non-interactive).",
    ),
    api_key: str = typer.Option(
        None, "--api-key",
        help="Wizard preset: API key for the chosen provider (non-interactive).",
    ),
    no_trust: bool = typer.Option(
        False, "--no-trust",
        help="Wizard preset: skip recording workspace trust.",
    ),
    overwrite: bool = typer.Option(
        False, "--overwrite",
        help="Wizard: overwrite existing .env / .gitpilot/modes.yaml / AGENTS.md.",
    ),
):
    """Initialize .gitpilot/ directory with template GITPILOT.md.

    Default behaviour is unchanged. Pass ``--wizard`` for the
    Batch P3-G first-run flow that also writes a provider-aware ``.env``,
    a starter ``.gitpilot/modes.yaml``, and a trust entry. Provider /
    mode / key can be pre-supplied for CI use; missing prompts are
    asked interactively.
    """
    from pathlib import Path as StdPath
    from .memory import MemoryManager

    workspace = StdPath(path).resolve()

    if wizard:
        # Wizard path is gated behind the init_wizard feature flag;
        # when the flag is off we print guidance and exit with code 2.
        from . import flags as _flags
        from .init_wizard import (
            FLAG_INIT_WIZARD,
            WizardAnswers,
            WizardError,
            run_wizard,
        )
        if not _flags.is_on(FLAG_INIT_WIZARD):
            console.print(
                "[yellow]The init_wizard flag is off.[/yellow] "
                "Enable it with [bold]GITPILOT_FLAGS=\"init_wizard=1\"[/bold] "
                "and re-run, or omit --wizard for the legacy init."
            )
            raise typer.Exit(code=2)
        # NOTE(review): defaults ("anthropic" / "coder") are substituted when
        # --provider / --mode are omitted — confirm run_wizard still prompts
        # interactively in that case, as the docstring promises.
        presets = WizardAnswers(
            provider=provider or "anthropic",
            api_key=api_key,
            mode_slug=mode_slug or "coder",
            workspace_trust=not no_trust,
            overwrite_env=overwrite,
            overwrite_modes=overwrite,
            overwrite_agents_md=overwrite,
        )
        # Force non-interactive mode only when all required answers are present.
        try:
            result = run_wizard(workspace, presets=presets)
        except WizardError as err:
            console.print(f"[red]Wizard error:[/red] {err}")
            raise typer.Exit(code=1) from err

        # Render the outcome. Secrets are never printed.
        for written in result.files_written:
            console.print(f"[green]wrote[/green] {written}")
        for skipped, why in result.files_skipped:
            console.print(f"[yellow]skipped[/yellow] {skipped} ({why})")
        if result.trust_recorded:
            console.print("[green]trusted[/green] workspace recorded in ~/.gitpilot/trusted.json")
        if result.aborted:
            # Aborted wizard still exits non-zero after showing the reason.
            console.print(f"[red]aborted[/red] {result.reason}")
            raise typer.Exit(code=1)
        console.print(f"[dim]done in {result.duration_ms} ms[/dim]")
        return

    # Legacy (non-wizard) path: scaffold .gitpilot/GITPILOT.md only.
    mgr = MemoryManager(workspace)
    md_path = mgr.init_project()
    console.print(f"[green]Initialized:[/green] {md_path}")
    console.print("[dim]Edit .gitpilot/GITPILOT.md to add your project conventions.[/dim]")
+
+
@cli.command("plugin")
def plugin_cmd(
    action: str = typer.Argument(..., help="install | uninstall | list"),
    source: str = typer.Argument(None, help="Git URL, local path, or plugin name"),
):
    """Manage GitPilot plugins."""
    from .plugins import PluginManager

    mgr = PluginManager()

    if action == "list":
        installed = mgr.list_installed()
        if not installed:
            console.print("[dim]No plugins installed.[/dim]")
            return
        table = Table(show_header=True, header_style="bold cyan")
        for heading in ("Name", "Version", "Description"):
            table.add_column(heading)
        for plugin in installed:
            table.add_row(plugin.name, plugin.version, plugin.description)
        console.print(table)
        return

    if action == "install":
        if not source:
            console.print("[red]Error:[/red] source is required for install")
            raise typer.Exit(code=1)
        try:
            info = mgr.install(source)
            console.print(f"[green]Installed:[/green] {info.name} v{info.version}")
        except Exception as e:
            console.print(f"[red]Error:[/red] {e}")
            raise typer.Exit(code=1)
        return

    if action == "uninstall":
        if not source:
            console.print("[red]Error:[/red] plugin name is required")
            raise typer.Exit(code=1)
        if mgr.uninstall(source):
            console.print(f"[green]Uninstalled:[/green] {source}")
        else:
            console.print(f"[yellow]Not found:[/yellow] {source}")
        return

    console.print(f"[red]Unknown action:[/red] {action}. Use: install, uninstall, list")
    raise typer.Exit(code=1)
+
+
@cli.command("skill")
def skill_cmd(
    name: str = typer.Argument(None, help="Skill name to invoke (or 'list')"),
):
    """List or invoke skills."""
    from .skills import SkillManager

    mgr = SkillManager(workspace_path=Path.cwd())
    mgr.load_all()

    # An explicit skill name (anything except the literal "list") invokes it.
    if name and name != "list":
        prompt = mgr.invoke(name)
        if prompt is None:
            console.print(f"[red]Skill not found:[/red] {name}")
            raise typer.Exit(code=1)
        console.print(f"[cyan]/{name}[/cyan]")
        console.print(prompt)
        return

    # Otherwise render the skill catalogue as a table.
    skills = mgr.list_skills()
    if not skills:
        console.print("[dim]No skills found.[/dim]")
        console.print("[dim]Create .gitpilot/skills/*.md to add skills.[/dim]")
        return
    table = Table(show_header=True, header_style="bold cyan")
    for heading in ("Name", "Description", "Auto"):
        table.add_column(heading)
    for entry in skills:
        table.add_row(entry["name"], entry["description"], str(entry.get("auto_trigger", False)))
    console.print(table)
+
+
@cli.command("scan")
def scan_cmd(
    path: str = typer.Argument(".", help="Directory or file to scan"),
    min_confidence: float = typer.Option(0.5, "--min-confidence", help="Minimum confidence threshold"),
):
    """Run AI-powered security scan on a directory or file."""
    from .security import SecurityScanner

    def _findings_table(findings):
        # Shared renderer: one row per finding. Previously duplicated
        # verbatim for the file and directory branches.
        table = Table(show_header=True, header_style="bold red")
        table.add_column("Severity")
        table.add_column("Rule")
        table.add_column("Title")
        table.add_column("Line")
        table.add_column("File")
        for f in findings:
            table.add_row(f.severity.value, f.rule_id, f.title, str(f.line_number), f.file_path)
        return table

    scanner = SecurityScanner(min_confidence=min_confidence)
    target = Path(path).resolve()

    if target.is_file():
        findings = scanner.scan_file(str(target))
        if not findings:
            console.print("[green]No security issues found.[/green]")
            return
        console.print(_findings_table(findings))
    else:
        result = scanner.scan_directory(str(target))
        console.print(f"[cyan]Scanned:[/cyan] {result.files_scanned} files in {result.scan_duration_ms:.0f}ms")
        if not result.findings:
            console.print("[green]No security issues found.[/green]")
            return
        console.print(f"[yellow]Found {len(result.findings)} issues:[/yellow]")
        # Per-severity counts, colour-coded by severity class.
        for sev, count in sorted(result.summary.items()):
            color = "red" if sev in ("critical", "high") else "yellow" if sev == "medium" else "dim"
            console.print(f" [{color}]{sev}: {count}[/{color}]")
        console.print()
        # Cap the table at 50 rows to keep terminal output usable.
        console.print(_findings_table(result.findings[:50]))
        if len(result.findings) > 50:
            console.print(f"[dim]... and {len(result.findings) - 50} more[/dim]")
+
+
@cli.command("predict")
def predict_cmd(
    context: str = typer.Argument(..., help="Context string to get predictions for"),
):
    """Get proactive suggestions based on context."""
    from .predictions import PredictiveEngine

    suggestions = PredictiveEngine().predict(context)

    if not suggestions:
        console.print("[dim]No suggestions for this context.[/dim]")
        return

    for s in suggestions:
        # Colour by relevance band: >=80% green, >=60% yellow, else dim.
        if s.relevance_score >= 0.8:
            score_color = "green"
        elif s.relevance_score >= 0.6:
            score_color = "yellow"
        else:
            score_color = "dim"
        console.print(f" [{score_color}][{s.relevance_score:.0%}][/{score_color}] [bold]{s.title}[/bold]")
        console.print(f" {s.description}")
        console.print(f" [cyan]Prompt:[/cyan] {s.prompt}")
        console.print()
+
+
@cli.command("list-models")
def list_models_cmd(
    provider: str = typer.Option(
        None,
        "--provider",
        "-p",
        help="LLM provider (openai, claude, watsonx, ollama). Defaults to active provider.",
    )
):
    """List LLM models available for the configured provider."""
    settings = get_settings()

    if provider is None:
        # No flag: fall back to the currently configured provider.
        target = settings.provider
    else:
        try:
            target = LLMProvider(provider)
        except ValueError:
            console.print(f"[red]Unknown provider:[/red] {provider}")
            raise typer.Exit(code=1)

    models, error = list_models_for_provider(target, settings)

    console.print()
    console.print(
        Panel.fit(
            f"[bold cyan]Models for provider[/bold cyan] [white]{target.value}[/white]",
            border_style="cyan",
        )
    )

    # A warning and an (empty) model list can co-occur; show both.
    if error:
        console.print(f"[yellow]Warning:[/yellow] {error}")
    if not models:
        console.print("No models found.")
        return

    table = Table(show_header=True, header_style="bold cyan")
    table.add_column("#", style="dim", justify="right")
    table.add_column("Model ID", style="white")
    for index, model_id in enumerate(models, start=1):
        table.add_row(str(index), model_id)

    console.print(table)
    console.print()
+
+
@cli.command("generate")
def generate_cmd(
    message: str = typer.Option(..., "--message", "-m", help="What to generate (e.g. 'Flask hello world app')"),
    output_dir: str = typer.Option(".", "--output", "-o", help="Directory to write generated files into"),
    dry_run: bool = typer.Option(False, "--dry-run", help="Show what would be generated without writing files"),
):
    """Generate code locally using the configured LLM.

    Creates files on disk from a natural-language prompt. No GitHub
    required — works with any LLM provider (Ollama, OpenAI, Claude).
    Paths returned by the LLM are treated as untrusted: anything that
    would escape the output directory is skipped.

    Examples::

        gitpilot generate -m "Create a Flask hello world app"
        gitpilot generate -m "Python CLI with click" -o my-project
        gitpilot generate -m "React component for a todo list" --dry-run
    """
    settings = get_settings()
    provider_name = settings.provider.value
    # Best-effort model name for the banner; "default" when not configured.
    model_name = "default"
    provider_settings = getattr(settings, provider_name, None)
    if provider_settings:
        model_name = getattr(provider_settings, "model", None) or "default"

    console.print(f"[dim]Provider:[/dim] {provider_name} · [dim]Model:[/dim] {model_name}")
    console.print(f"[dim]Output:[/dim] {os.path.abspath(output_dir)}")
    console.print()

    prompt = (
        "You are GitPilot, a code generation assistant.\n"
        "Generate the requested project files. For EACH file, output it in this EXACT format:\n"
        "\n"
        "```language filepath\n"
        "...complete file content...\n"
        "```\n"
        "\n"
        "Example:\n"
        "```python app.py\n"
        "from flask import Flask\n"
        "app = Flask(__name__)\n"
        "```\n"
        "\n"
        "Rules:\n"
        "- Opening fence: triple backticks + language + space + relative filepath\n"
        "- Output COMPLETE file content, not snippets\n"
        "- Generate ALL files needed for a working project\n"
        "- Include a README.md if appropriate\n"
        "\n"
        f"User request: {message}\n"
    )

    console.print("[bold]Generating...[/bold]", end="")

    try:
        from .llm_provider import build_llm
        llm = build_llm()
        answer = llm.call([{"role": "user", "content": prompt}])
    except Exception as e:
        console.print(f"\n[red]LLM error:[/red] {e}")
        raise typer.Exit(code=1)

    console.print(" [green]done[/green]\n")

    # Extract structured edits using the same extractor as the API
    from .api import _extract_edits_from_answer

    edits = _extract_edits_from_answer(answer)

    if not edits:
        console.print("[yellow]No files extracted from LLM response.[/yellow]")
        console.print("[dim]Raw response (first 2000 chars):[/dim]")
        console.print(answer[:2000])
        raise typer.Exit(code=1)

    # Security: LLM output is untrusted. Normalise each path and refuse
    # anything resolving outside output_dir (absolute paths, ".." hops).
    out_root = os.path.abspath(output_dir)

    files_written = []
    for edit in edits:
        safe_path = edit["file"]
        if not safe_path:
            continue

        full_path = os.path.abspath(os.path.join(out_root, safe_path))
        if full_path != out_root and not full_path.startswith(out_root + os.sep):
            console.print(f" [yellow]Skipped unsafe path:[/yellow] {safe_path}")
            continue

        content = edit.get("content", "").rstrip()

        if dry_run:
            console.print(f" [cyan]Would create:[/cyan] {safe_path} ({len(content)} bytes)")
        else:
            os.makedirs(os.path.dirname(full_path) or ".", exist_ok=True)
            with open(full_path, "w", encoding="utf-8") as f:
                f.write(content + "\n")
            files_written.append(safe_path)
            console.print(f" [green]Created:[/green] {safe_path} ({len(content)} bytes)")

    console.print()
    if dry_run:
        console.print(f"[dim]Dry run: {len(edits)} file(s) would be created.[/dim]")
    else:
        console.print(f"[green]Generated {len(files_written)} file(s) in {os.path.abspath(output_dir)}[/green]")
+
diff --git a/gitpilot/context_budget.py b/gitpilot/context_budget.py
new file mode 100644
index 0000000000000000000000000000000000000000..3ce7417b1d67ac5a5c06620f7890efbdff28f17a
--- /dev/null
+++ b/gitpilot/context_budget.py
@@ -0,0 +1,246 @@
+# gitpilot/context_budget.py
+"""Conversation context budgeting and auto-condensation.
+
+Strategy (additive — opt-in via :class:`BudgetPolicy` or the global default):
+
+* Maintain a running token total per session.
+* When the total crosses ``condense_at`` (default 70 % of ``max_tokens``)
+ fold the oldest non-essential messages into a single summary block,
+ preserving:
+ - system instructions
+ - tool definitions
+ - the AGENTS.md block
+ - the last N turns
+* Drop oversize tool outputs first — they're the cheapest to lose and the
+ costliest to keep.
+* Provide a stable :class:`ContextStats` snapshot that the API surfaces as
+ ``{prompt_tokens, max_tokens, ratio}`` so the web UI can render a live
+ token counter.
+
+The token estimator is best-effort: it uses ``tiktoken`` when available
+and falls back to a ``len(text) / 4`` heuristic. Counts do not need to
+be exact — they only steer condensation timing.
+"""
+from __future__ import annotations
+
+import logging
+from dataclasses import dataclass, field
+from typing import Any, Callable, Dict, List, Literal, Optional
+
+logger = logging.getLogger(__name__)
+
+Role = Literal["system", "user", "assistant", "tool"]
+Importance = Literal["pinned", "normal", "drop-first"]
+
+
+# ----------------------------------------------------------------------
+# Token estimation
+# ----------------------------------------------------------------------
+
+_TIKTOKEN: Any = None
+try: # pragma: no cover - depends on environment
+ import tiktoken
+
+ _TIKTOKEN = tiktoken.get_encoding("cl100k_base")
+except Exception: # pragma: no cover - tiktoken optional
+ _TIKTOKEN = None
+
+
def estimate_tokens(text: str) -> int:
    """Best-effort token count for ``text`` (0 for empty input)."""
    if not text:
        return 0
    encoder = _TIKTOKEN
    if encoder is not None:
        try:
            return len(encoder.encode(text))
        except Exception:
            pass
    # No tokenizer available: ~4 chars per token is close enough to
    # steer condensation thresholds.
    return max(1, len(text) // 4)
+
+
+# ----------------------------------------------------------------------
+# Data model
+# ----------------------------------------------------------------------
+
@dataclass
class Message:
    """One conversation turn or fragment tracked by the budget."""

    role: Role
    content: str
    importance: Importance = "normal"
    # 0 (the default) means "estimate from content on construction".
    tokens: int = 0
    meta: Dict[str, str] = field(default_factory=dict)

    def __post_init__(self) -> None:
        # Only estimate when the caller did not supply a count.
        if self.tokens == 0:
            self.tokens = estimate_tokens(self.content)
+
+
@dataclass
class BudgetPolicy:
    """Tunable thresholds for context budgeting."""

    max_tokens: int = 200_000
    condense_at_ratio: float = 0.70
    keep_recent_turns: int = 6
    large_tool_output_tokens: int = 4_000
    summary_label: str = "Conversation summary (older turns condensed)"

    @property
    def condense_at(self) -> int:
        """Absolute token count at which condensation is triggered."""
        return int(self.condense_at_ratio * self.max_tokens)
+
+
@dataclass
class ContextStats:
    """Read-only usage snapshot surfaced to the chat UI."""

    prompt_tokens: int
    max_tokens: int
    ratio: float
    condensations: int

    def to_dict(self) -> Dict[str, object]:
        """JSON-friendly form; ratio rounded to 4 places for display."""
        payload: Dict[str, object] = {
            "prompt_tokens": self.prompt_tokens,
            "max_tokens": self.max_tokens,
            "ratio": round(self.ratio, 4),
            "condensations": self.condensations,
        }
        return payload
+
+
+# ----------------------------------------------------------------------
+# Budget manager
+# ----------------------------------------------------------------------
+
+SummariseFn = Callable[[List[Message]], str]
+
+
+def _default_summariser(messages: List[Message]) -> str:
+ """Deterministic, dependency-free fallback summariser.
+
+ Produces a compact bulleted recap. Production deployments can pass a
+ smarter summariser that delegates to an LLM.
+ """
+ bullets: List[str] = []
+ for m in messages:
+ first_line = m.content.strip().splitlines()[0] if m.content.strip() else ""
+ if not first_line:
+ continue
+ truncated = first_line[:140] + ("…" if len(first_line) > 140 else "")
+ bullets.append(f"- ({m.role}) {truncated}")
+ if len(bullets) >= 40:
+ break
+ return "\n".join(bullets) or "_no older content to summarise_"
+
+
class ContextBudgetManager:
    """Track token usage and condense history when the budget is tight.

    Holds one session's message list plus a count of condensation
    passes. No internal locking — confine an instance to one session.
    """

    def __init__(
        self,
        policy: Optional[BudgetPolicy] = None,
        summariser: Optional[SummariseFn] = None,
    ) -> None:
        # policy: thresholds/knobs; defaults to a fresh BudgetPolicy().
        # summariser: folds older Messages into summary text; the
        # dependency-free _default_summariser is used when omitted.
        self.policy = policy or BudgetPolicy()
        self._summariser = summariser or _default_summariser
        self._messages: List[Message] = []
        self._condensations = 0

    # ------------------------------------------------------------------
    # Mutation API
    # ------------------------------------------------------------------
    def add(self, message: Message) -> None:
        """Append one message to the session history."""
        self._messages.append(message)

    def add_text(self, role: Role, content: str, **kwargs: Any) -> None:
        """Convenience: build a Message from raw text and append it."""
        self.add(Message(role=role, content=content, **kwargs))

    def extend(self, messages: List[Message]) -> None:
        """Append many messages at once."""
        self._messages.extend(messages)

    def clear(self) -> None:
        """Drop all history and reset the condensation counter."""
        self._messages.clear()
        self._condensations = 0

    # ------------------------------------------------------------------
    # Inspection
    # ------------------------------------------------------------------
    def total_tokens(self) -> int:
        """Sum of per-message token estimates currently held."""
        return sum(m.tokens for m in self._messages)

    def stats(self) -> ContextStats:
        """Build the ``{prompt_tokens, max_tokens, ratio}`` snapshot."""
        total = self.total_tokens()
        return ContextStats(
            prompt_tokens=total,
            max_tokens=self.policy.max_tokens,
            # Guard against a zero budget to avoid ZeroDivisionError.
            ratio=total / self.policy.max_tokens if self.policy.max_tokens else 0.0,
            condensations=self._condensations,
        )

    def messages(self) -> List[Message]:
        """Return a shallow copy of the history (list is copied, Messages are shared)."""
        return list(self._messages)

    # ------------------------------------------------------------------
    # Condensation
    # ------------------------------------------------------------------
    def needs_condense(self) -> bool:
        """True once the running total reaches the condense threshold."""
        return self.total_tokens() >= self.policy.condense_at

    def condense(self) -> int:
        """Fold older non-essential messages into a single summary entry.

        Returns the number of tokens removed. A no-op when nothing
        eligible is found, which leaves the running total unchanged.
        """
        if not self._messages:
            return 0

        before = self.total_tokens()

        # 1. Drop oversize tool outputs first.
        #    (Mutates matching messages in place; pinned tool output is kept.)
        for m in self._messages:
            if (
                m.role == "tool"
                and m.importance != "pinned"
                and m.tokens >= self.policy.large_tool_output_tokens
            ):
                replacement = "_tool output dropped to free context budget_"
                m.content = replacement
                m.tokens = estimate_tokens(replacement)
                m.meta = {**m.meta, "condensed": "1"}

        # NOTE(review): _condensations is incremented below even when the
        # loop above dropped nothing — confirm counting a no-op pass as a
        # "condensation" is intended.
        if self.total_tokens() < self.policy.condense_at:
            self._condensations += 1
            return before - self.total_tokens()

        # 2. Split keep-recent vs. condensable.
        pinned: List[Message] = [m for m in self._messages if m.importance == "pinned"]
        rest: List[Message] = [m for m in self._messages if m.importance != "pinned"]
        keep_n = max(0, self.policy.keep_recent_turns)
        condensable = rest[:-keep_n] if keep_n else rest
        kept_recent = rest[-keep_n:] if keep_n else []

        if not condensable:
            self._condensations += 1
            return before - self.total_tokens()

        summary_text = self._summariser(condensable)
        summary_msg = Message(
            role="system",
            content=f"## {self.policy.summary_label}\n\n{summary_text}",
            importance="pinned",
            meta={"summary": "1"},
        )

        # NOTE(review): the rebuild moves all pinned messages ahead of the
        # kept-recent turns regardless of their original interleaving —
        # confirm downstream consumers don't rely on relative order.
        self._messages = pinned + [summary_msg] + kept_recent
        self._condensations += 1
        return before - self.total_tokens()

    def maybe_condense(self) -> int:
        """Condense iff the budget is over the threshold."""
        if self.needs_condense():
            return self.condense()
        return 0
diff --git a/gitpilot/context_cache.py b/gitpilot/context_cache.py
new file mode 100644
index 0000000000000000000000000000000000000000..4993a47a300513a186011b8e5919407a8dec1fd3
--- /dev/null
+++ b/gitpilot/context_cache.py
@@ -0,0 +1,257 @@
+# gitpilot/context_cache.py
+"""In-process LRU memoisation for the workspace context pack.
+
+Batch P2-C — additive. :func:`gitpilot.context_pack.build_context_pack`
+re-scans the workspace on every turn, which is the right behaviour for
+correctness but wasteful when nothing has changed: most turns reuse
+the same conventions, the same active use case, and the same vault
+chunks.
+
+``build_cached`` wraps the original builder with an LRU keyed on the
+workspace path, the active mode slug, the query string, and a digest
+of the *mtimes* of the files that contribute to the pack. Because the
+key incorporates mtimes, edits to the relevant files invalidate the
+cache automatically. Callers must not edit files via the cache layer
+itself — touching ``AGENTS.md`` or ``.gitpilot/*`` is enough.
+
+Behaviour matrix
+----------------
+* ``context_cache`` flag off (default) → straight passthrough to
+ :func:`gitpilot.context_pack.build_context_pack`. Zero new state.
+* Flag on → memoised; cache size capped to keep memory bounded.
+
+The cache is *strict per workspace*: cross-workspace contamination is
+impossible because the workspace path is part of the key.
+"""
+from __future__ import annotations
+
+import hashlib
+import logging
+import threading
+import time
+from collections import OrderedDict
+from dataclasses import dataclass
+from pathlib import Path
+from typing import Any, Dict, Iterable, Optional, Tuple
+
+from . import flags
+
+logger = logging.getLogger(__name__)
+
# Feature-flag name checked via flags.is_on() in build_cached().
FLAG_CONTEXT_CACHE = "context_cache"
# Maximum entries the global _LRUCache holds before evicting.
DEFAULT_CACHE_SIZE = 32

# Files that contribute to the cache key — touching any of these
# invalidates the entry on the next call.
_FINGERPRINT_FILES: Tuple[str, ...] = (
    "AGENTS.md",
    ".gitpilot/AGENTS.md",
    ".gitpilot/GITPILOT.md",
    ".gitpilot/modes.yaml",
    ".gitpilotrules",
)
# Directories whose files (recursively) also feed the mtime digest.
_FINGERPRINT_DIRS: Tuple[str, ...] = (
    ".gitpilot/rules",
    ".gitpilot/skills",
    ".gitpilot/uploads",
)
+
+
+# ----------------------------------------------------------------------
+# Stats
+# ----------------------------------------------------------------------
+
@dataclass
class CacheStats:
    """Immutable snapshot of the in-process cache counters."""

    size: int
    capacity: int
    hits: int
    misses: int

    @property
    def hit_ratio(self) -> float:
        """Fraction of lookups served from cache; 0.0 when untouched."""
        lookups = self.hits + self.misses
        if lookups == 0:
            return 0.0
        return self.hits / lookups

    def to_dict(self) -> Dict[str, Any]:
        """JSON-friendly form; ratio rounded to 4 places for display."""
        return {
            "size": self.size,
            "capacity": self.capacity,
            "hits": self.hits,
            "misses": self.misses,
            "hit_ratio": round(self.hit_ratio, 4),
        }
+
+
+# ----------------------------------------------------------------------
+# Cache
+# ----------------------------------------------------------------------
+
class _LRUCache:
    """Tiny LRU keyed on ``(workspace, mode, query, mtime_digest)``.

    Private attributes (``_store``, ``_lock``, ``_hits``, ``_misses``,
    ``_capacity``) are relied on by :func:`set_capacity` and must keep
    their names.
    """

    def __init__(self, capacity: int = DEFAULT_CACHE_SIZE) -> None:
        # Clamp so a zero/negative request still yields a working cache.
        self._capacity = max(1, int(capacity))
        self._store: "OrderedDict[Tuple[str, Optional[str], str, str], str]" = OrderedDict()
        self._lock = threading.RLock()
        self._hits = 0
        self._misses = 0

    def get(self, key: Tuple[str, Optional[str], str, str]) -> Optional[str]:
        """Return the cached value (refreshing recency) or ``None``."""
        with self._lock:
            if key not in self._store:
                self._misses += 1
                return None
            self._hits += 1
            self._store.move_to_end(key)
            return self._store[key]

    def put(self, key: Tuple[str, Optional[str], str, str], value: str) -> None:
        """Insert or refresh an entry, evicting the oldest past capacity."""
        with self._lock:
            self._store[key] = value
            self._store.move_to_end(key)
            while len(self._store) > self._capacity:
                self._store.popitem(last=False)

    def clear(self) -> None:
        """Drop all entries and reset the hit/miss counters."""
        with self._lock:
            self._store.clear()
            self._hits = 0
            self._misses = 0

    def stats(self) -> CacheStats:
        """Snapshot the counters under the lock."""
        with self._lock:
            return CacheStats(
                size=len(self._store),
                capacity=self._capacity,
                hits=self._hits,
                misses=self._misses,
            )
+
+
# Module-global cache shared by build_cached(); resized via set_capacity().
_CACHE = _LRUCache()


def get_cache_stats() -> CacheStats:
    """Return a snapshot of the global cache state."""
    return _CACHE.stats()


def clear_cache() -> None:
    """Drop every cached entry. Useful for tests and ``/admin`` hooks."""
    _CACHE.clear()
+
+
def set_capacity(capacity: int) -> None:
    """Resize the global cache, keeping the most recent entries.

    ``capacity`` is clamped to at least 1, mirroring ``_LRUCache.__init__``.
    Previously a non-positive value sliced ``[-0:]`` — i.e. *every* entry —
    into a capacity-1 cache, churning through evictions. Hit/miss counters
    carry over. The snapshot is taken under the old cache's lock; the
    module-global rebind itself is not atomic with respect to concurrent
    ``build_cached`` callers, which may briefly keep using the old cache —
    acceptable for a best-effort memoisation layer.
    """
    global _CACHE
    cap = max(1, int(capacity))
    new_cache = _LRUCache(capacity=cap)
    with _CACHE._lock:
        # Preserve the most recent entries up to the new capacity.
        recent = list(_CACHE._store.items())[-cap:]
        hits, misses = _CACHE._hits, _CACHE._misses
    for key, value in recent:
        new_cache.put(key, value)
    new_cache._hits = hits
    new_cache._misses = misses
    _CACHE = new_cache
+
+
+# ----------------------------------------------------------------------
+# Public builder
+# ----------------------------------------------------------------------
+
def build_cached(
    workspace_path: Path,
    query: str = "",
    *,
    mode_slug: Optional[str] = None,
    enabled: Optional[bool] = None,
    **builder_kwargs: object,
) -> str:
    """Memoised wrapper around :func:`context_pack.build_context_pack`.

    With the ``context_cache`` flag off (or ``enabled=False``) this is a
    pure passthrough — nothing is cached. Otherwise results are keyed on
    ``(workspace, mode_slug, query, mtime_digest)``; the mtime digest
    means edits to fingerprint files invalidate entries automatically.
    """
    from .context_pack import build_context_pack  # local import (avoid cycle)

    use_cache = flags.is_on(FLAG_CONTEXT_CACHE) if enabled is None else enabled
    if not use_cache:
        return build_context_pack(workspace_path, query=query, **builder_kwargs)  # type: ignore[arg-type]

    resolved = workspace_path.resolve()
    key = (str(resolved), mode_slug, query, _mtimes_digest(resolved))
    cached = _CACHE.get(key)
    if cached is not None:
        return cached
    fresh = build_context_pack(resolved, query=query, **builder_kwargs)  # type: ignore[arg-type]
    _CACHE.put(key, fresh)
    return fresh
+
+
+# ----------------------------------------------------------------------
+# Mtime digest
+# ----------------------------------------------------------------------
+
def _mtimes_digest(workspace_path: Path) -> str:
    """SHA-256 (truncated to 32 hex chars) over the fingerprint set.

    Feeds NUL-terminated (path, mtime_ns[, size]) fields into the hash so
    a change to any contributing file flips the digest and invalidates
    cache keys built on it.
    """
    hasher = hashlib.sha256()

    def _feed(*fields: str) -> None:
        # NUL-terminate each field so adjacent values cannot collide.
        for text in fields:
            hasher.update(text.encode("utf-8"))
            hasher.update(b"\0")

    for rel in _FINGERPRINT_FILES:
        candidate = workspace_path / rel
        if not candidate.is_file():
            continue
        try:
            info = candidate.stat()
        except OSError:
            continue
        _feed(rel, str(info.st_mtime_ns), str(info.st_size))

    for rel in _FINGERPRINT_DIRS:
        directory = workspace_path / rel
        if not directory.is_dir():
            continue
        for child in sorted(_walk_files(directory)):
            try:
                info = child.stat()
            except OSError:
                continue
            _feed(str(child), str(info.st_mtime_ns))

    return hasher.hexdigest()[:32]
+
+
+def _walk_files(directory: Path) -> Iterable[Path]:
+ for child in directory.rglob("*"):
+ if child.is_file():
+ yield child
+
+
+# ----------------------------------------------------------------------
+# Maintenance utilities (mostly for tests / admin)
+# ----------------------------------------------------------------------
+
def warm(workspace_path: Path, queries: Iterable[str], *, mode_slug: Optional[str] = None) -> int:
    """Pre-populate the cache for a list of common queries.

    Returns how many new entries were inserted; queries whose results
    were already cached do not count.
    """
    inserted = 0
    for query in queries:
        before = _CACHE.stats().size
        build_cached(workspace_path, query, mode_slug=mode_slug, enabled=True)
        if _CACHE.stats().size != before:
            inserted += 1
    return inserted
+
+
def now() -> float:
    """Monotonic timestamp helper used by tests.

    Note: returns ``time.monotonic()`` — a monotonic clock, not
    wall-clock time; only differences between calls are meaningful.
    """
    return time.monotonic()
diff --git a/gitpilot/context_meter.py b/gitpilot/context_meter.py
new file mode 100644
index 0000000000000000000000000000000000000000..f565b6abeeb5f4ad170b230933114fa08ad9fd64
--- /dev/null
+++ b/gitpilot/context_meter.py
@@ -0,0 +1,385 @@
+"""Context-window usage meter — read-only snapshot for the chat UI.
+
+Computes the active LLM's context-window utilisation: provider, model,
+token budget, what's currently occupying that budget, and a short
+human-readable description of the agent topology in use.
+
+Token counting is best-effort. When :mod:`tiktoken` is available we use
+it (cl100k_base — accurate for OpenAI/Anthropic). For local providers
+without a published tokenizer we fall back to a ``len(text) // 4``
+heuristic; callers can recognise that case via ``is_estimate=True`` and
+the UI prefixes the numbers with ``≈`` to flag the imprecision.
+
+Pure, side-effect-free, no I/O beyond reading settings — safe to call
+from a hot endpoint on every popover open.
+"""
+from __future__ import annotations
+
+from dataclasses import dataclass, field
+from typing import Dict, Iterable, Mapping, Optional
+
+from .context_budget import _TIKTOKEN, estimate_tokens
+from .settings import AppSettings, LLMProvider
+
+FLAG_CONTEXT_METER = "context_meter"
+
+# ----------------------------------------------------------------------
+# Context-window catalogue
+# ----------------------------------------------------------------------
+# Conservative values — when in doubt round DOWN. We'd rather show a
+# user "94% full" against a 7 800-token estimate than claim "47% full"
+# against a 16 000 number the provider won't actually honour.
+
+_DEFAULT_CONTEXT_WINDOW = 8_192
+
+_OPENAI_WINDOWS: Mapping[str, int] = {
+ "gpt-4o": 128_000,
+ "gpt-4o-mini": 128_000,
+ "gpt-4-turbo": 128_000,
+ "gpt-4": 8_192,
+ "gpt-3.5-turbo": 16_385,
+ "o1": 200_000,
+ "o1-mini": 128_000,
+ "o3-mini": 200_000,
+}
+
+_CLAUDE_WINDOWS: Mapping[str, int] = {
+ "claude-opus-4-7": 200_000,
+ "claude-sonnet-4-6": 200_000,
+ "claude-sonnet-4-5": 200_000,
+ "claude-haiku-4-5": 200_000,
+ "claude-3-7-sonnet": 200_000,
+ "claude-3-5-sonnet": 200_000,
+ "claude-3-5-haiku": 200_000,
+ "claude-3-opus": 200_000,
+ "claude-3-sonnet": 200_000,
+ "claude-3-haiku": 200_000,
+}
+
+_WATSONX_WINDOWS: Mapping[str, int] = {
+ "meta-llama/llama-3-3-70b-instruct": 131_072,
+ "meta-llama/llama-3-1-70b-instruct": 131_072,
+ "meta-llama/llama-3-1-8b-instruct": 131_072,
+ "ibm/granite-3-8b-instruct": 4_096,
+}
+
+# Ollama / OllaBridge — keyed on the *family* prefix. Anything not
+# matched falls back to the conservative 8 k default. These are the
+# advertised values; users running with a smaller ``num_ctx`` will see
+# the bar fill faster than expected, which is the safe direction.
+_OLLAMA_FAMILY_WINDOWS: Mapping[str, int] = {
+ "llama3": 8_192,
+ "llama3.1": 131_072,
+ "llama3.2": 131_072,
+ "llama2": 4_096,
+ "qwen2.5": 32_768,
+ "qwen2": 32_768,
+ "mistral": 32_768,
+ "mixtral": 32_768,
+ "phi3": 4_096,
+ "phi": 2_048,
+ "gemma2": 8_192,
+ "gemma": 8_192,
+ "codellama": 16_384,
+ "deepseek-coder": 16_384,
+}
+
+
def _ollama_window(model: str) -> int:
    """Look up the context window for an Ollama model tag (e.g. ``llama3:8b``).

    The family (text before the first ``:``) is matched exactly first.
    Otherwise the LONGEST matching prefix wins, so a family like
    ``llama3.1-instruct`` resolves to the ``llama3.1`` window (131k)
    rather than whichever shorter key (``llama3`` → 8k) happens to come
    first in dict iteration order. Unknown families fall back to the
    conservative default.
    """
    family = model.split(":", 1)[0].lower()
    exact = _OLLAMA_FAMILY_WINDOWS.get(family)
    if exact is not None:
        return exact
    # Longest-prefix match for variant tags not listed verbatim.
    matches = [
        (len(prefix), window)
        for prefix, window in _OLLAMA_FAMILY_WINDOWS.items()
        if family.startswith(prefix)
    ]
    if matches:
        return max(matches)[1]
    return _DEFAULT_CONTEXT_WINDOW
+
+
+# ----------------------------------------------------------------------
+# Public dataclass
+# ----------------------------------------------------------------------
+
@dataclass
class ContextUsage:
    """Snapshot of the active model's context-window utilisation."""

    provider: str           # display name, e.g. "OpenAI"
    model: str              # model identifier
    context_window: int     # advertised token budget
    used: int               # tokens currently occupied
    reserved_response: int  # headroom kept for the reply
    topology: str           # one-line agent-topology description
    tool_count: int         # tools visible to the LLM
    breakdown: Dict[str, int] = field(default_factory=dict)
    # True when token counts come from the chars/4 heuristic rather than
    # a real tokenizer; the UI prefixes such numbers with "≈".
    is_estimate: bool = False

    @property
    def free(self) -> int:
        """Tokens still available after usage and the reserved reply."""
        remaining = self.context_window - self.used - self.reserved_response
        return remaining if remaining > 0 else 0

    @property
    def percent_used(self) -> float:
        """Utilisation as a percentage, rounded to one decimal place."""
        if self.context_window <= 0:
            return 0.0
        return round(100.0 * self.used / self.context_window, 1)

    def to_dict(self) -> Dict[str, object]:
        """JSON-safe representation for the API layer (copies breakdown)."""
        payload: Dict[str, object] = {
            "provider": self.provider,
            "model": self.model,
            "context_window": self.context_window,
            "used": self.used,
            "reserved_response": self.reserved_response,
            "free": self.free,
            "percent_used": self.percent_used,
            "topology": self.topology,
            "tool_count": self.tool_count,
            "breakdown": dict(self.breakdown),
            "is_estimate": self.is_estimate,
        }
        return payload
+
+
+# ----------------------------------------------------------------------
+# Resolvers
+# ----------------------------------------------------------------------
+
def resolve_provider_model(settings: AppSettings) -> tuple[str, str]:
    """Return ``(provider_display_name, model_id)`` for the active config."""
    # Per-provider resolvers; only the matching lambda is evaluated, so
    # absent sub-configs for other providers are never touched.
    resolvers = {
        LLMProvider.openai: lambda s: ("OpenAI", s.openai.model or "gpt-4o-mini"),
        LLMProvider.claude: lambda s: ("Anthropic", s.claude.model or "claude-sonnet-4-5"),
        LLMProvider.watsonx: lambda s: ("watsonx", s.watsonx.model_id or ""),
        LLMProvider.ollama: lambda s: ("Ollama", s.ollama.model or "llama3"),
        LLMProvider.ollabridge: lambda s: ("OllaBridge", s.ollabridge.model or ""),
    }
    resolve = resolvers.get(settings.provider)
    if resolve is None:
        return (str(settings.provider), "")
    return resolve(settings)
+
+
def resolve_context_window(settings: AppSettings) -> int:
    """Return the advertised context-window size for the active model."""
    # Lazily-evaluated per-provider lookups; unknown providers get the
    # conservative default.
    lookups = {
        LLMProvider.openai: lambda s: _OPENAI_WINDOWS.get(s.openai.model, _DEFAULT_CONTEXT_WINDOW),
        LLMProvider.claude: lambda s: _CLAUDE_WINDOWS.get(s.claude.model, 200_000),
        LLMProvider.watsonx: lambda s: _WATSONX_WINDOWS.get(s.watsonx.model_id, _DEFAULT_CONTEXT_WINDOW),
        LLMProvider.ollama: lambda s: _ollama_window(s.ollama.model),
        LLMProvider.ollabridge: lambda s: _ollama_window(s.ollabridge.model),
    }
    lookup = lookups.get(settings.provider)
    if lookup is None:
        return _DEFAULT_CONTEXT_WINDOW
    return lookup(settings)
+
+
def has_real_tokenizer(settings: AppSettings) -> bool:
    """True when token counts will come from a real tokenizer rather
    than the chars/4 heuristic. ``cl100k_base`` is a reasonable
    approximation for OpenAI and Anthropic; local model tokenizers are
    not bundled, so Ollama/OllaBridge falls back to the estimate."""
    tokenizer_backed = (LLMProvider.openai, LLMProvider.claude)
    return _TIKTOKEN is not None and settings.provider in tokenizer_backed
+
+
+# ----------------------------------------------------------------------
+# Topology string
+# ----------------------------------------------------------------------
+
def describe_topology(
    *,
    lite_mode: bool,
    tool_count: int,
    extra_tools: int = 0,
) -> str:
    """Build the one-line topology description shown in the popover.

    ``extra_tools`` covers MCP / plugin tools registered at runtime — the
    caller passes it in so this module stays import-free of those
    optional subsystems. Lite mode ignores the tool counts entirely.
    """
    if lite_mode:
        return "lite · prompt-only · 0 tools · no repo I/O"
    total = tool_count + extra_tools
    return f"single-agent · CrewAI ReAct · {total} tools"
+
+
+# ----------------------------------------------------------------------
+# Token-count helpers
+# ----------------------------------------------------------------------
+
def count_tokens(text: str) -> int:
    """Thin wrapper around :func:`context_budget.estimate_tokens` so
    callers don't have to know about the fallback hierarchy (tiktoken
    when available, otherwise the chars/4 heuristic)."""
    return estimate_tokens(text)
+
+
def sum_tokens(texts: Iterable[str]) -> int:
    """Total estimated tokens across *texts*, skipping falsy entries."""
    total = 0
    for text in texts:
        if text:
            total += count_tokens(text)
    return total
+
+
+# ----------------------------------------------------------------------
+# Real breakdown sources
+# ----------------------------------------------------------------------
+
+# Snapshot of the planner / executor / explorer persona strings that go
+# into every LLM call. We pin them here as constants (rather than
+# importing from ``agentic``) so this module stays import-light and the
+# token math is deterministic in tests. When those personae change in
+# ``agentic.py``, update these strings.
+_PLANNER_BACKSTORY = (
+ "You are an experienced staff engineer who creates plans based on FACTS, not assumptions. "
+ "You have received a complete exploration report of the repository. "
+ "You ONLY create plans for files that actually exist in the exploration report. "
+ "You are extremely careful with DELETE actions - you verify the file exists "
+ "and that it's not on the 'keep' list before marking it for deletion. "
+ "When users ask to delete files, you delete individual FILES, not directory names. "
+ "When users ask to ANALYZE files and GENERATE new content (code, docs, examples), "
+ "you create plans that READ existing files and CREATE new files with generated content. "
+ "You understand that 'analyze X and create Y' means: use tools to read X, then plan to CREATE Y. "
+ "You never make changes yourself, only create detailed plans."
+)
+
+_PLANNER_ROLE = "Repository Refactor Planner"
+_PLANNER_GOAL = (
+ "Design safe, step-by-step refactor plans based on ACTUAL repository state "
+ "discovered during exploration"
+)
+
+_EXPLORER_ROLE = "Repository Explorer"
+_EXPLORER_GOAL = (
+ "Thoroughly explore the repository structure, identify key files, and report findings"
+)
+_EXPLORER_BACKSTORY = (
+ "You are a meticulous code archaeologist. You use the available tools to "
+ "list files, read content, and build a complete picture of the repository "
+ "before any change is planned."
+)
+
+_LITE_ROLE = "GitPilot Lite"
+_LITE_GOAL = "Help the user with their repository"
+_LITE_BACKSTORY = "You are a helpful coding assistant. Be concise."
+
+
def system_prompt_text(*, lite_mode: bool) -> str:
    """Return the persona text that the active topology will inject into
    every LLM call. Used for the ``system_prompt`` breakdown row."""
    if lite_mode:
        persona_parts = (_LITE_ROLE, _LITE_GOAL, _LITE_BACKSTORY)
    else:
        # Full topology: explorer persona followed by planner persona.
        persona_parts = (
            _EXPLORER_ROLE,
            _EXPLORER_GOAL,
            _EXPLORER_BACKSTORY,
            _PLANNER_ROLE,
            _PLANNER_GOAL,
            _PLANNER_BACKSTORY,
        )
    return " ".join(persona_parts)
+
+
def count_system_prompt_tokens(*, lite_mode: bool) -> int:
    """Estimated token count of the active topology's persona text
    (see :func:`system_prompt_text`)."""
    return count_tokens(system_prompt_text(lite_mode=lite_mode))
+
+
def count_messages_tokens(messages: Iterable[object]) -> int:
    """Sum estimated tokens over an iterable of message-like objects.

    Accepts any object exposing a ``.content`` attribute (matches the
    :class:`gitpilot.session.Message` dataclass) or a ``"content"``
    mapping key. Other shapes are ignored, which is the safe default
    for partially-typed history records.
    """
    def _content_of(message: object):
        if message is None:
            return None
        if isinstance(message, dict):
            return message.get("content")
        return getattr(message, "content", None)

    total = 0
    for message in messages:
        content = _content_of(message)
        if isinstance(content, str) and content:
            total += count_tokens(content)
    return total
+
+
def count_tool_schema_tokens(tool_lists: Iterable[Iterable[object]]) -> int:
    """Sum tokens over every tool's ``name`` + ``description`` + JSON
    schema across the supplied tool lists. This approximates what the
    LLM sees in its function/tool-calling preamble.

    Tools that don't expose name/description are skipped silently —
    we're not the place to enforce CrewAI tool contracts.
    """
    import json as _json

    def _schema_text(schema: object) -> str:
        # Pydantic v2 model class — model_json_schema() is cheap.
        if schema is None:
            return ""
        to_json_schema = getattr(schema, "model_json_schema", None)
        if callable(to_json_schema):
            try:
                return _json.dumps(to_json_schema())
            except Exception:  # pragma: no cover - defensive
                return ""
        return str(schema)

    total = 0
    for group in tool_lists:
        if not group:
            continue
        for tool in group:
            name = getattr(tool, "name", "") or ""
            description = getattr(tool, "description", "") or ""
            schema_text = _schema_text(getattr(tool, "args_schema", None))
            total += count_tokens(f"{name} {description} {schema_text}")
    return total
+
+
+# ----------------------------------------------------------------------
+# Top-level builder
+# ----------------------------------------------------------------------
+
+# Reserved-for-response budget: the LLM needs headroom to actually emit
+# an answer. 4 k is a sane fixed value across providers — small enough
+# not to crowd Ollama's 8 k window, large enough for a reasonable plan.
+RESERVED_RESPONSE_TOKENS = 4_096
+
+
def build_usage(
    settings: AppSettings,
    *,
    breakdown: Mapping[str, int],
    tool_count: int,
    lite_mode: bool,
    extra_tools: int = 0,
    reserved_response: Optional[int] = None,
) -> ContextUsage:
    """Assemble a :class:`ContextUsage` from the inputs the API endpoint
    can cheaply collect. All token counts come from the caller — this
    function only does arithmetic and lookup, so it's trivially testable."""
    provider_name, model_id = resolve_provider_model(settings)
    reserved = reserved_response if reserved_response is not None else RESERVED_RESPONSE_TOKENS

    # Falsy rows (0 / None) contribute nothing to the used total.
    used_total = 0
    for value in breakdown.values():
        if value:
            used_total += int(value)

    return ContextUsage(
        provider=provider_name,
        model=model_id,
        context_window=resolve_context_window(settings),
        used=used_total,
        reserved_response=reserved,
        topology=describe_topology(
            lite_mode=lite_mode, tool_count=tool_count, extra_tools=extra_tools
        ),
        tool_count=tool_count + extra_tools,
        breakdown=dict(breakdown),
        is_estimate=not has_real_tokenizer(settings),
    )
diff --git a/gitpilot/context_pack.py b/gitpilot/context_pack.py
new file mode 100644
index 0000000000000000000000000000000000000000..86dc9cc361f12ec704e50acb1765cad146e02633
--- /dev/null
+++ b/gitpilot/context_pack.py
@@ -0,0 +1,151 @@
+# gitpilot/context_pack.py
+"""Context Pack — compose a bounded, token-safe context injection for agents.
+
+Non-destructive, additive feature. If no context assets or use cases exist
+the pack is empty and agents behave exactly as before.
+
+Usage in agentic.py / agent builders:
+ from .context_pack import build_context_pack
+ pack = build_context_pack(workspace_path, query=goal)
+ # Prepend ``pack`` to agent backstory or system prompt.
+"""
+from __future__ import annotations
+
+import logging
+from pathlib import Path
+from typing import Optional
+
+logger = logging.getLogger(__name__)
+
+# ---------------------------------------------------------------------------
+# Limits (keep total injection well under 8 K chars to avoid token blowups)
+# ---------------------------------------------------------------------------
+MAX_CONVENTIONS_CHARS = 2_000
+MAX_USE_CASE_CHARS = 2_000
+MAX_CHUNKS_CHARS = 3_000
+MAX_CHUNKS = 8
+
+
def build_context_pack(
    workspace_path: Path,
    query: str = "",
    *,
    include_conventions: bool = True,
    include_use_case: bool = True,
    include_assets: bool = True,
    max_total_chars: int = 7_000,
) -> str:
    """Build a markdown context pack for agent prompt injection.

    Sections are added in priority order (conventions → use case →
    asset chunks) and a section that would push the pack past
    ``max_total_chars`` is dropped entirely.

    Returns an empty string when nothing is available (zero overhead).
    """
    parts: list[str] = []
    total = 0

    # 1) Conventions / Rules (existing MemoryManager)
    if include_conventions:
        section = _conventions_section(workspace_path)
        if section and total + len(section) <= max_total_chars:
            parts.append(section)
            total += len(section)

    # 2) Active Use Case
    if include_use_case:
        section = _use_case_section(workspace_path)
        if section and total + len(section) <= max_total_chars:
            parts.append(section)
            total += len(section)

    # 3) Relevant context chunks from uploaded assets.
    # Guard: when the budget is already spent, ``remaining`` would be
    # <= 0 and flow into _assets_section as a negative max_chars —
    # skip the vault lookup entirely instead.
    if include_assets and query:
        remaining = max_total_chars - total
        if remaining > 0:
            section = _assets_section(
                workspace_path, query, max_chars=min(remaining, MAX_CHUNKS_CHARS)
            )
            if section:
                parts.append(section)

    if not parts:
        return ""

    return "## Project Context Pack (auto)\n\n" + "\n\n".join(parts)
+
+
+# ---------------------------------------------------------------------------
+# Section builders
+# ---------------------------------------------------------------------------
def _conventions_section(workspace_path: Path) -> str:
    """Render remembered repo conventions as markdown, or "" when unavailable."""
    try:
        from .memory import MemoryManager

        prompt = MemoryManager(workspace_path).load_context().to_system_prompt()
        if not prompt:
            return ""
        return "### Conventions\n\n" + prompt[:MAX_CONVENTIONS_CHARS]
    except Exception:
        # Best-effort: a broken memory store must not break the pack.
        logger.debug("Could not load conventions for context pack", exc_info=True)
        return ""
+
+
def _use_case_section(workspace_path: Path) -> str:
    """Render the active use case as a markdown bullet list, or ""."""
    try:
        from .use_case import UseCaseManager

        active = UseCaseManager(workspace_path).get_active_use_case()
        if not active:
            return ""
        spec = active.spec

        lines = ["### Active Use Case"]
        # Scalar fields first, in fixed display order.
        for label, value in (
            ("Title", spec.title),
            ("Summary", spec.summary),
            ("Problem", spec.problem),
            ("Users", spec.users),
        ):
            if value:
                lines.append(f"- **{label}:** {value}")
        # List fields, each capped to keep the section bounded.
        for label, items, cap in (
            ("Requirements", spec.requirements, 10),
            ("Acceptance Criteria", spec.acceptance_criteria, 10),
            ("Constraints", spec.constraints, 5),
        ):
            if items:
                lines.append(f"- **{label}:**")
                lines.extend(f"  - {item}" for item in items[:cap])

        return "\n".join(lines)[:MAX_USE_CASE_CHARS]
    except Exception:
        logger.debug("Could not load active use case for context pack", exc_info=True)
        return ""
+
+
def _assets_section(
    workspace_path: Path,
    query: str,
    max_chars: int = MAX_CHUNKS_CHARS,
) -> str:
    """Render the top context-vault chunks matching *query*, or ""."""
    try:
        from .context_vault import ContextVault

        matches = ContextVault(workspace_path).search_chunks(
            query, max_chunks=MAX_CHUNKS, max_chars=max_chars
        )
        if not matches:
            return ""

        blocks = [f"### Relevant References (Top {len(matches)})"]
        blocks.extend(
            f"[Asset: {match.filename} | chunk {match.chunk_index}]\n{match.text}"
            for match in matches
        )
        return "\n\n".join(blocks)[:max_chars]
    except Exception:
        logger.debug("Could not search context vault for context pack", exc_info=True)
        return ""
diff --git a/gitpilot/context_vault.py b/gitpilot/context_vault.py
new file mode 100644
index 0000000000000000000000000000000000000000..1fb08f450dc9f393bb3d12985e07a2747e8123e9
--- /dev/null
+++ b/gitpilot/context_vault.py
@@ -0,0 +1,532 @@
+# gitpilot/context_vault.py
+"""Context Vault — upload, extract, index, and retrieve project context assets.
+
+Non-destructive, additive feature. Stores everything under:
+ ~/.gitpilot/workspaces/{owner}/{repo}/.gitpilot/context/
+
+Directory layout:
+ context/
+ assets/ raw uploaded files
+ extracted/ extracted text + metadata JSON
+ index/ SQLite metadata + chunk index
+ use_cases/ structured use-case JSON + markdown exports
+
+This module handles asset lifecycle (upload, extract, index, delete)
+and chunk retrieval for context-pack injection into agent prompts.
+"""
+from __future__ import annotations
+
+import json
+import logging
+import os
+import re
+import shutil
+import sqlite3
+import time
+import uuid
+from dataclasses import asdict, dataclass, field
+from pathlib import Path
+from typing import Any, Dict, List, Optional
+
+logger = logging.getLogger(__name__)
+
+# ---------------------------------------------------------------------------
+# Limits
+# ---------------------------------------------------------------------------
+MAX_UPLOAD_BYTES = 200 * 1024 * 1024 # 200 MB default
+MAX_EXTRACT_CHARS = 500_000
+CHUNK_SIZE = 800 # chars per chunk (approx)
+CHUNK_OVERLAP = 100
+MAX_RETRIEVAL_CHUNKS = 8
+MAX_RETRIEVAL_CHARS = 6_000
+
+
+# ---------------------------------------------------------------------------
+# Data classes
+# ---------------------------------------------------------------------------
@dataclass
class AssetMeta:
    """Summary metadata for one uploaded context asset."""

    asset_id: str             # short unique id (uuid4 hex prefix)
    filename: str             # original upload filename
    mime: str                 # detected/declared MIME type
    size_bytes: int           # raw upload size in bytes
    created_at: str           # ISO-8601 UTC timestamp
    extracted_chars: int = 0  # length of extracted plain text
    indexed_chunks: int = 0   # chunk rows written to the index
    notes: str = ""           # free-form annotation

    def to_dict(self) -> dict:
        """Plain-dict form for JSON serialisation (one key per field)."""
        return {
            "asset_id": self.asset_id,
            "filename": self.filename,
            "mime": self.mime,
            "size_bytes": self.size_bytes,
            "created_at": self.created_at,
            "extracted_chars": self.extracted_chars,
            "indexed_chunks": self.indexed_chunks,
            "notes": self.notes,
        }
+
+
@dataclass
class ExtractedAsset:
    """Full extraction record for an asset, including the text itself."""

    asset_id: str                 # matches AssetMeta.asset_id
    filename: str                 # original upload filename
    mime: str                     # MIME type used for extraction routing
    extracted_text: str           # extracted plain text (may be empty)
    pages: Optional[int] = None   # page count, when the extractor reports one
    created_at: str = ""          # ISO-8601 UTC timestamp
    notes: str = ""               # free-form annotation
+
+
@dataclass
class ChunkResult:
    """One indexed text chunk returned by ContextVault.search_chunks."""

    asset_id: str      # owning asset's id
    filename: str      # owning asset's original filename
    chunk_index: int   # 0-based position within the asset's chunk list
    text: str          # chunk contents
    score: float = 0.0 # keyword-match relevance (higher is better)
+
+
+# ---------------------------------------------------------------------------
+# Vault manager
+# ---------------------------------------------------------------------------
class ContextVault:
    """Manages the per-repo context vault under ``.gitpilot/context/``.

    Owns the asset lifecycle: upload → extract text → chunk → index
    (SQLite) → retrieve → delete. All state lives on disk; directories
    are created lazily, so constructing the vault performs no I/O.
    """

    def __init__(self, workspace_path: Path):
        self.workspace_path = workspace_path
        self.vault_dir = workspace_path / ".gitpilot" / "context"
        self.assets_dir = self.vault_dir / "assets"
        self.extracted_dir = self.vault_dir / "extracted"
        self.index_dir = self.vault_dir / "index"
        self.use_cases_dir = self.vault_dir / "use_cases"

    # ------------------------------------------------------------------
    # Init & safety
    # ------------------------------------------------------------------
    def _ensure_dirs(self):
        """Create the vault directory tree if missing (idempotent)."""
        for d in (self.assets_dir, self.extracted_dir, self.index_dir, self.use_cases_dir):
            d.mkdir(parents=True, exist_ok=True)

    def _safe_resolve(self, base: Path, name: str) -> Path:
        """Resolve ``name`` under ``base``, refusing path-traversal escapes.

        Raises:
            PermissionError: when the resolved path lands outside ``base``.
        """
        base_resolved = base.resolve()
        full = (base_resolved / name).resolve()
        # A plain str.startswith() prefix check would wrongly accept
        # sibling directories (e.g. "/vault/assets2" passes for base
        # "/vault/assets"); relative_to() compares whole path components.
        try:
            full.relative_to(base_resolved)
        except ValueError:
            raise PermissionError(f"Path traversal blocked: {name}")
        return full

    # ------------------------------------------------------------------
    # Asset CRUD
    # ------------------------------------------------------------------
    def list_assets(self) -> List[AssetMeta]:
        """Return metadata for all uploaded assets (corrupt records skipped)."""
        self._ensure_dirs()
        results: List[AssetMeta] = []
        for ext_file in sorted(self.extracted_dir.glob("*.json")):
            try:
                data = json.loads(ext_file.read_text(encoding="utf-8"))
                results.append(AssetMeta(
                    asset_id=data.get("asset_id", ext_file.stem),
                    filename=data.get("filename", ""),
                    mime=data.get("mime", ""),
                    size_bytes=data.get("size_bytes", 0),
                    created_at=data.get("created_at", ""),
                    extracted_chars=len(data.get("extracted_text", "")),
                    indexed_chunks=data.get("indexed_chunks", 0),
                    notes=data.get("notes", ""),
                ))
            except Exception:
                logger.warning("Skipping corrupt metadata: %s", ext_file)
        return results

    def upload_asset(self, filename: str, content: bytes, mime: str = "") -> AssetMeta:
        """Store a raw asset and run extraction + indexing.

        Raises:
            ValueError: when ``content`` exceeds ``MAX_UPLOAD_BYTES``.
        """
        self._ensure_dirs()

        if len(content) > MAX_UPLOAD_BYTES:
            raise ValueError(
                f"File too large ({len(content)} bytes). Max is {MAX_UPLOAD_BYTES}."
            )

        asset_id = uuid.uuid4().hex[:12]
        # Sanitise the filename so the stored name is filesystem-safe.
        safe_name = re.sub(r"[^\w.\-]", "_", filename)
        stored_name = f"{asset_id}_{safe_name}"

        asset_path = self._safe_resolve(self.assets_dir, stored_name)
        asset_path.write_bytes(content)

        if not mime:
            mime = _guess_mime(filename)

        now = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())

        # Extract text (best-effort; empty for binary/media assets).
        extracted_text = _extract_text(asset_path, mime)

        # Chunk + index for keyword retrieval.
        chunks = _chunk_text(extracted_text, CHUNK_SIZE, CHUNK_OVERLAP)
        indexed_count = self._index_chunks(asset_id, filename, chunks)

        # Persist the extraction record (text capped at MAX_EXTRACT_CHARS).
        meta_data = {
            "asset_id": asset_id,
            "filename": filename,
            "stored_name": stored_name,
            "mime": mime,
            "size_bytes": len(content),
            "extracted_text": extracted_text[:MAX_EXTRACT_CHARS],
            "pages": None,
            "created_at": now,
            "indexed_chunks": indexed_count,
            "notes": "",
        }
        meta_path = self._safe_resolve(self.extracted_dir, f"{asset_id}.json")
        meta_path.write_text(json.dumps(meta_data, indent=2), encoding="utf-8")

        return AssetMeta(
            asset_id=asset_id,
            filename=filename,
            mime=mime,
            size_bytes=len(content),
            created_at=now,
            extracted_chars=len(extracted_text),
            indexed_chunks=indexed_count,
        )

    def delete_asset(self, asset_id: str) -> bool:
        """Remove asset, extracted data, and index entries (best-effort).

        Always returns True; missing pieces are silently skipped.
        """
        self._ensure_dirs()

        # Remove extracted metadata, remembering the stored filename.
        meta_path = self.extracted_dir / f"{asset_id}.json"
        stored_name = None
        if meta_path.exists():
            try:
                data = json.loads(meta_path.read_text(encoding="utf-8"))
                stored_name = data.get("stored_name")
            except Exception:
                pass
            meta_path.unlink()

        # Remove the raw asset file.
        if stored_name:
            asset_path = self.assets_dir / stored_name
            if asset_path.exists():
                asset_path.unlink()
        else:
            # Fallback: locate the stored file by its asset-id prefix.
            for f in self.assets_dir.iterdir():
                if f.name.startswith(asset_id):
                    f.unlink()
                    break

        # Remove chunk rows from the index.
        self._remove_from_index(asset_id)

        return True

    def get_asset_path(self, asset_id: str) -> Optional[Path]:
        """Return the raw asset path for download, or None when missing."""
        self._ensure_dirs()
        meta_path = self.extracted_dir / f"{asset_id}.json"
        if meta_path.exists():
            try:
                data = json.loads(meta_path.read_text(encoding="utf-8"))
                stored_name = data.get("stored_name", "")
                if stored_name:
                    p = self.assets_dir / stored_name
                    if p.exists():
                        return p
            except Exception:
                pass

        # Fallback: prefix scan when metadata is missing or corrupt.
        for f in self.assets_dir.iterdir():
            if f.name.startswith(asset_id):
                return f
        return None

    def get_asset_filename(self, asset_id: str) -> str:
        """Return the original filename for an asset ("unknown" if lost)."""
        meta_path = self.extracted_dir / f"{asset_id}.json"
        if meta_path.exists():
            try:
                data = json.loads(meta_path.read_text(encoding="utf-8"))
                return data.get("filename", "unknown")
            except Exception:
                pass
        return "unknown"

    # ------------------------------------------------------------------
    # Indexing (SQLite-backed)
    # ------------------------------------------------------------------
    def _get_db(self) -> sqlite3.Connection:
        """Open the chunk-index database, creating the schema if needed.

        Caller is responsible for closing the returned connection.
        """
        self._ensure_dirs()
        db_path = self.index_dir / "context.sqlite"
        conn = sqlite3.connect(str(db_path))
        conn.execute("""
            CREATE TABLE IF NOT EXISTS chunks (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                asset_id TEXT NOT NULL,
                filename TEXT NOT NULL,
                chunk_index INTEGER NOT NULL,
                text TEXT NOT NULL
            )
        """)
        conn.execute("""
            CREATE INDEX IF NOT EXISTS idx_chunks_asset ON chunks(asset_id)
        """)
        conn.commit()
        return conn

    def _index_chunks(self, asset_id: str, filename: str, chunks: List[str]) -> int:
        """Replace the asset's chunk rows with ``chunks``; returns the count."""
        conn = self._get_db()
        try:
            # Remove old entries for this asset (re-index).
            conn.execute("DELETE FROM chunks WHERE asset_id = ?", (asset_id,))
            for i, chunk_text in enumerate(chunks):
                conn.execute(
                    "INSERT INTO chunks (asset_id, filename, chunk_index, text) VALUES (?, ?, ?, ?)",
                    (asset_id, filename, i, chunk_text),
                )
            conn.commit()
            return len(chunks)
        finally:
            conn.close()

    def _remove_from_index(self, asset_id: str):
        """Best-effort removal of an asset's chunk rows; never raises."""
        try:
            conn = self._get_db()
            try:
                conn.execute("DELETE FROM chunks WHERE asset_id = ?", (asset_id,))
                conn.commit()
            finally:
                # Close in a finally so a failed DELETE doesn't leak the
                # connection (the original leaked it on error).
                conn.close()
        except Exception:
            # Asset deletion must not fail because the index is broken.
            logger.debug("Index cleanup failed for asset %s", asset_id, exc_info=True)

    # ------------------------------------------------------------------
    # Retrieval
    # ------------------------------------------------------------------
    def search_chunks(
        self,
        query: str,
        max_chunks: int = MAX_RETRIEVAL_CHUNKS,
        max_chars: int = MAX_RETRIEVAL_CHARS,
    ) -> List[ChunkResult]:
        """Simple keyword-based retrieval (BM25-like scoring).

        Phase 1: naive keyword matching. Phase 2 can add embeddings.
        Returns at most ``max_chunks`` results totalling ``max_chars``,
        best-scoring first; empty list on any index error.
        """
        if not query.strip():
            return []

        keywords = _extract_keywords(query)
        if not keywords:
            return []

        try:
            conn = self._get_db()
        except Exception:
            return []

        try:
            rows = conn.execute(
                "SELECT asset_id, filename, chunk_index, text FROM chunks"
            ).fetchall()
        finally:
            conn.close()

        scored: List[ChunkResult] = []
        for asset_id, filename, chunk_index, text in rows:
            text_lower = text.lower()
            score = 0.0
            for kw in keywords:
                count = text_lower.count(kw.lower())
                if count > 0:
                    # Simple TF score, weighted towards longer keywords.
                    score += count * (1.0 + len(kw) * 0.1)
            if score > 0:
                scored.append(ChunkResult(
                    asset_id=asset_id,
                    filename=filename,
                    chunk_index=chunk_index,
                    text=text,
                    score=score,
                ))

        scored.sort(key=lambda c: c.score, reverse=True)

        # Enforce both the chunk-count and character budgets.
        results: List[ChunkResult] = []
        total_chars = 0
        for chunk in scored[:max_chunks * 2]:  # over-fetch then trim
            if len(results) >= max_chunks:
                break
            if total_chars + len(chunk.text) > max_chars:
                break
            results.append(chunk)
            total_chars += len(chunk.text)

        return results
+
+
+# ---------------------------------------------------------------------------
+# Text extraction helpers
+# ---------------------------------------------------------------------------
+def _guess_mime(filename: str) -> str:
+ ext = Path(filename).suffix.lower()
+ mime_map = {
+ ".txt": "text/plain",
+ ".md": "text/markdown",
+ ".csv": "text/csv",
+ ".json": "application/json",
+ ".pdf": "application/pdf",
+ ".docx": "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
+ ".doc": "application/msword",
+ ".png": "image/png",
+ ".jpg": "image/jpeg",
+ ".jpeg": "image/jpeg",
+ ".gif": "image/gif",
+ ".svg": "image/svg+xml",
+ ".mp4": "video/mp4",
+ ".mov": "video/quicktime",
+ ".mp3": "audio/mpeg",
+ ".wav": "audio/wav",
+ ".vtt": "text/vtt",
+ ".srt": "text/srt",
+ ".py": "text/x-python",
+ ".js": "text/javascript",
+ ".ts": "text/typescript",
+ ".jsx": "text/jsx",
+ ".tsx": "text/tsx",
+ ".html": "text/html",
+ ".css": "text/css",
+ ".yaml": "text/yaml",
+ ".yml": "text/yaml",
+ ".toml": "text/toml",
+ ".xml": "text/xml",
+ ".sh": "text/x-shellscript",
+ ".go": "text/x-go",
+ ".rs": "text/x-rust",
+ ".java": "text/x-java",
+ ".rb": "text/x-ruby",
+ }
+ return mime_map.get(ext, "application/octet-stream")
+
+
def _extract_text(path: Path, mime: str) -> str:
    """Best-effort text extraction from a file, routed by MIME type."""
    # Text-like types that don't carry a "text/" prefix still get read
    # directly as UTF-8 (with replacement for undecodable bytes).
    textual = {
        "application/json",
        "text/markdown",
        "text/csv",
        "text/vtt",
        "text/srt",
        "text/toml",
        "text/yaml",
    }
    if mime.startswith("text/") or mime in textual:
        try:
            return path.read_text(encoding="utf-8", errors="replace")[:MAX_EXTRACT_CHARS]
        except Exception:
            return ""

    if mime == "application/pdf":
        return _extract_pdf(path)

    if "wordprocessingml" in mime or mime == "application/msword":
        return _extract_docx(path)

    # Binary/media — nothing to extract; the raw asset is still stored.
    return ""
+
+
def _extract_pdf(path: Path) -> str:
    """Extract text from a PDF; "" when no library is available or parsing fails.

    Tries ``pypdf`` first, then the legacy ``PyPDF2``. Both expose the
    same ``PdfReader`` API, so the duplicated per-library branches of the
    original are collapsed into one loop. As before, a library that
    imports but fails to parse ends the attempt (no fallback to the next
    library), returning "".
    """
    import importlib

    for module_name in ("pypdf", "PyPDF2"):
        try:
            pdf_module = importlib.import_module(module_name)
        except ImportError:
            continue
        try:
            reader = pdf_module.PdfReader(str(path))
            pages = []
            for page in reader.pages:
                text = page.extract_text()
                if text:
                    pages.append(text)
            return "\n\n".join(pages)[:MAX_EXTRACT_CHARS]
        except Exception as e:
            logger.warning("PDF extraction failed with %s: %s", module_name, e)
            return ""

    logger.info("PDF extraction unavailable (install pypdf or PyPDF2). Storing PDF without text.")
    return ""
+
def _extract_docx(path: Path) -> str:
    """Extract paragraph text from a DOCX file; "" when the library is
    missing or parsing fails."""
    try:
        import docx

        document = docx.Document(str(path))
        texts = [para.text for para in document.paragraphs if para.text.strip()]
        return "\n\n".join(texts)[:MAX_EXTRACT_CHARS]
    except ImportError:
        logger.info("DOCX extraction unavailable (install python-docx). Storing without text.")
        return ""
    except Exception as e:
        logger.warning("DOCX extraction failed: %s", e)
        return ""
+
+
+# ---------------------------------------------------------------------------
+# Chunking
+# ---------------------------------------------------------------------------
def _chunk_text(text: str, chunk_size: int = CHUNK_SIZE, overlap: int = CHUNK_OVERLAP) -> List[str]:
    """Split *text* into overlapping, stripped chunks.

    Args:
        text: Source text; empty input yields ``[]``.
        chunk_size: Target characters per chunk.
        overlap: Characters shared between consecutive chunks.

    Returns:
        Non-empty (post-strip) chunks in document order.

    The window advances by ``chunk_size - overlap`` each step. The
    original version never terminated when callers passed
    ``overlap >= chunk_size`` (step <= 0), so the step is clamped to at
    least 1 character.
    """
    if not text:
        return []

    step = max(chunk_size - overlap, 1)  # clamp guards the infinite loop
    chunks: List[str] = []
    for start in range(0, len(text), step):
        piece = text[start:start + chunk_size].strip()
        if piece:
            chunks.append(piece)
    return chunks
+
+
+def _extract_keywords(query: str) -> List[str]:
+ """Extract meaningful keywords from a query string."""
+ # Remove common stop words
+ stop_words = {
+ "the", "a", "an", "is", "are", "was", "were", "be", "been", "being",
+ "have", "has", "had", "do", "does", "did", "will", "would", "could",
+ "should", "may", "might", "shall", "can", "need", "dare", "ought",
+ "used", "to", "of", "in", "for", "on", "with", "at", "by", "from",
+ "as", "into", "through", "during", "before", "after", "above",
+ "below", "between", "out", "off", "over", "under", "again",
+ "further", "then", "once", "here", "there", "when", "where", "why",
+ "how", "all", "both", "each", "few", "more", "most", "other",
+ "some", "such", "no", "nor", "not", "only", "own", "same", "so",
+ "than", "too", "very", "just", "because", "but", "and", "or", "if",
+ "while", "what", "which", "who", "whom", "this", "that", "these",
+ "those", "i", "me", "my", "we", "our", "you", "your", "he", "him",
+ "she", "her", "it", "its", "they", "them", "their",
+ }
+
+ words = re.findall(r"\w+", query.lower())
+ keywords = [w for w in words if w not in stop_words and len(w) > 1]
+ return keywords
diff --git a/gitpilot/cross_repo.py b/gitpilot/cross_repo.py
new file mode 100644
index 0000000000000000000000000000000000000000..45ccb790864d0f87e1dcc39ea9ce6894e0147988
--- /dev/null
+++ b/gitpilot/cross_repo.py
@@ -0,0 +1,351 @@
+# gitpilot/cross_repo.py
+"""Cross-repository intelligence — dependency graphs and impact analysis.
+
+Analyses patterns across multiple repositories to provide:
+- Dependency graphs (repo A depends on repo B)
+- Impact analysis (change in lib affects services)
+- Shared convention detection
+- Migration planning across repos
+
+Draws on the concept of *software ecosystems analysis* from research
+on large-scale dependency management (Decan et al., 2019).
+"""
+from __future__ import annotations
+
+import json
+import logging
+import re
+from dataclasses import dataclass, field
+from pathlib import Path
+from typing import Any, Dict, List, Optional, Set
+
+logger = logging.getLogger(__name__)
+
# Common dependency file patterns.
# Maps a manifest filename to the package ecosystem it belongs to; only
# filenames listed here are parsed by CrossRepoAnalyzer (and only npm, pip,
# pipenv, pyproject and go currently have parsers — see _parse_dependencies).
_DEP_FILES = {
    "package.json": "npm",
    "requirements.txt": "pip",
    "Pipfile": "pipenv",
    "pyproject.toml": "pyproject",
    "Cargo.toml": "cargo",
    "go.mod": "go",
    "Gemfile": "bundler",
    "pom.xml": "maven",
    "build.gradle": "gradle",
    "composer.json": "composer",
}
+
+
@dataclass
class Dependency:
    """One directed dependency edge: ``source`` requires ``target``."""

    source: str  # dependent entity, e.g. "owner/repo-a"
    target: str  # dependency, e.g. "owner/repo-b" or a package name
    dep_type: str = "runtime"  # one of: runtime | dev | peer | optional
    version: str = ""  # version constraint as written in the manifest
    ecosystem: str = ""  # package ecosystem: npm, pip, cargo, ...

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a plain JSON-compatible dict."""
        return dict(
            source=self.source,
            target=self.target,
            dep_type=self.dep_type,
            version=self.version,
            ecosystem=self.ecosystem,
        )
+
+
@dataclass
class DependencyGraph:
    """Dependency edges collected across a set of repositories."""

    repos: List[str] = field(default_factory=list)  # repo names analysed
    dependencies: List[Dependency] = field(default_factory=list)  # edges
    ecosystems: List[str] = field(default_factory=list)  # ecosystems seen

    @property
    def node_count(self) -> int:
        """Number of distinct entities appearing as a source or target."""
        endpoints: Set[str] = {edge.source for edge in self.dependencies}
        endpoints.update(edge.target for edge in self.dependencies)
        return len(endpoints)

    @property
    def edge_count(self) -> int:
        """Number of dependency edges."""
        return len(self.dependencies)

    def to_dict(self) -> Dict[str, Any]:
        """Serialize the graph, including the derived node/edge counts."""
        payload: Dict[str, Any] = {
            "repos": self.repos,
            "dependencies": [edge.to_dict() for edge in self.dependencies],
            "ecosystems": self.ecosystems,
        }
        payload["node_count"] = self.node_count
        payload["edge_count"] = self.edge_count
        return payload
+
+
@dataclass
class ImpactReport:
    """Impact analysis report for a change in a repository."""

    source_repo: str  # repo where the change originates
    change_description: str  # human description of the change
    affected_repos: List[str] = field(default_factory=list)  # downstream repos
    risk_level: str = "low"  # low | medium | high | critical
    details: List[str] = field(default_factory=list)  # per-edge explanations

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a plain JSON-compatible dict."""
        return dict(
            source_repo=self.source_repo,
            change_description=self.change_description,
            affected_repos=self.affected_repos,
            risk_level=self.risk_level,
            details=self.details,
        )
+
+
@dataclass
class MigrationPlan:
    """Plan for migrating a pattern across repositories."""

    target_pattern: str  # the pattern/convention being rolled out
    repos: List[str] = field(default_factory=list)  # repos in rollout order
    steps: List[Dict[str, str]] = field(default_factory=list)  # ordered steps
    estimated_effort: str = "unknown"  # low | medium | high

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a plain JSON-compatible dict."""
        return dict(
            target_pattern=self.target_pattern,
            repos=self.repos,
            steps=self.steps,
            estimated_effort=self.estimated_effort,
        )
+
+
class CrossRepoAnalyzer:
    """Analyze patterns and dependencies across multiple repositories.

    Usage::

        analyzer = CrossRepoAnalyzer()
        graph = analyzer.analyze_dependencies_from_files({
            "owner/repo-a": {"package.json": '{"dependencies": {"lodash": "^4"}}'},
            "owner/repo-b": {"requirements.txt": "requests>=2.28\\nflask>=3.0"},
        })
        impact = analyzer.impact_analysis(graph, "owner/repo-a", "Breaking change in API v2")
    """

    def analyze_dependencies_from_files(
        self,
        repo_files: Dict[str, Dict[str, str]],
    ) -> DependencyGraph:
        """Build a dependency graph from dependency files.

        Args:
            repo_files: Mapping of repo name → {filename: content}.

        Returns:
            A :class:`DependencyGraph` whose edges carry the owning repo
            as ``source``; filenames not listed in ``_DEP_FILES`` are ignored.
        """
        graph = DependencyGraph(repos=list(repo_files.keys()))
        ecosystems: Set[str] = set()

        for repo, files in repo_files.items():
            for filename, content in files.items():
                ecosystem = _DEP_FILES.get(filename)
                if not ecosystem:
                    continue  # not a recognised manifest file
                ecosystems.add(ecosystem)
                for dep in self._parse_dependencies(filename, content, ecosystem):
                    dep.source = repo  # parsers leave source blank
                    graph.dependencies.append(dep)

        graph.ecosystems = sorted(ecosystems)
        return graph

    def impact_analysis(
        self,
        graph: DependencyGraph,
        source_repo: str,
        change_description: str,
    ) -> ImpactReport:
        """Analyze the impact of a change in one repo on others.

        Walks the dependency graph to find repos that depend (directly
        or transitively) on the source repo.

        Args:
            graph: Graph produced by :meth:`analyze_dependencies_from_files`.
            source_repo: Repo (or package name) where the change originates.
            change_description: Human description recorded in the report.
        """
        # Reverse adjacency: target → [repos that depend on it].
        dependents: Dict[str, List[str]] = {}
        for dep in graph.dependencies:
            dependents.setdefault(dep.target, []).append(dep.source)

        # BFS from source_repo over the reverse edges.
        affected: Set[str] = set()
        queue = [source_repo]
        visited: Set[str] = set()

        while queue:
            current = queue.pop(0)
            if current in visited:
                continue
            visited.add(current)
            for dependent in dependents.get(current, []):
                if dependent != source_repo:
                    affected.add(dependent)
                    queue.append(dependent)

        # Heuristic risk thresholds based on the blast radius.
        if len(affected) == 0:
            risk = "low"
        elif len(affected) <= 3:
            risk = "medium"
        elif len(affected) <= 10:
            risk = "high"
        else:
            risk = "critical"

        # One detail line per direct edge from an affected repo to the source.
        details = []
        for repo in sorted(affected):
            deps_on_source = [
                d for d in graph.dependencies
                if d.source == repo and d.target == source_repo
            ]
            for d in deps_on_source:
                details.append(f"{repo} depends on {source_repo} ({d.dep_type}, {d.version})")

        return ImpactReport(
            source_repo=source_repo,
            change_description=change_description,
            affected_repos=sorted(affected),
            risk_level=risk,
            details=details,
        )

    def detect_shared_conventions(
        self,
        repo_files: Dict[str, Dict[str, str]],
    ) -> Dict[str, List[str]]:
        """Detect shared conventions across repos.

        Looks for common config files, linters, formatters, CI configs, etc.

        Returns:
            Mapping of convention file → repos that carry it.  Each repo is
            listed at most once per convention (previously a repo with e.g.
            several workflow files was counted repeatedly).
        """
        conventions: Dict[str, List[str]] = {}

        convention_files = [
            ".eslintrc", ".eslintrc.json", ".prettierrc",
            "ruff.toml", "pyproject.toml", ".flake8",
            ".github/workflows", "Makefile", "Dockerfile",
            "tsconfig.json", "jest.config",
        ]

        for repo, files in repo_files.items():
            for cf in convention_files:
                # Substring match so ".github/workflows/ci.yml" matches the
                # ".github/workflows" convention; append the repo only once.
                if any(cf in filename for filename in files):
                    conventions.setdefault(cf, []).append(repo)

        return conventions

    def suggest_migration(
        self,
        repos: List[str],
        target_pattern: str,
    ) -> MigrationPlan:
        """Suggest a migration plan for applying a pattern across repos.

        Produces one pending step per repo, in the given order, with a
        coarse effort estimate based on repo count.
        """
        steps = []
        for i, repo in enumerate(repos):
            steps.append({
                "order": str(i + 1),
                "repo": repo,
                "action": f"Apply {target_pattern} to {repo}",
                "status": "pending",
            })

        effort = "low" if len(repos) <= 3 else ("medium" if len(repos) <= 10 else "high")

        return MigrationPlan(
            target_pattern=target_pattern,
            repos=repos,
            steps=steps,
            estimated_effort=effort,
        )

    # ------------------------------------------------------------------
    # Dependency parsers (each returns edges with ``source`` left blank)
    # ------------------------------------------------------------------

    def _parse_dependencies(
        self, filename: str, content: str, ecosystem: str,
    ) -> List[Dependency]:
        """Dispatch to the parser for ``ecosystem``; unknown ones yield []."""
        if ecosystem == "npm":
            return self._parse_npm(content)
        if ecosystem in ("pip", "pipenv"):
            return self._parse_pip(content)
        if ecosystem == "pyproject":
            return self._parse_pyproject(content)
        if ecosystem == "go":
            return self._parse_gomod(content)
        return []

    def _parse_npm(self, content: str) -> List[Dependency]:
        """Parse a package.json; malformed JSON yields an empty list."""
        deps = []
        try:
            data = json.loads(content)
            for section, dep_type in [
                ("dependencies", "runtime"),
                ("devDependencies", "dev"),
                ("peerDependencies", "peer"),
            ]:
                for name, version in data.get(section, {}).items():
                    deps.append(Dependency(
                        source="", target=name,
                        dep_type=dep_type, version=version, ecosystem="npm",
                    ))
        except json.JSONDecodeError:
            pass
        return deps

    def _parse_pip(self, content: str) -> List[Dependency]:
        """Parse a requirements.txt (one PEP 508-style specifier per line).

        Skips blanks, comments and pip options (``-r``, ``-e``, ``--hash``).
        Handles dotted names (``zope.interface``), extras
        (``requests[security]``) and strips environment markers — the
        previous ``[a-zA-Z0-9_-]+`` pattern truncated dotted names.
        """
        deps = []
        for line in content.strip().split("\n"):
            line = line.strip()
            if not line or line.startswith("#") or line.startswith("-"):
                continue
            # Drop environment markers ("; python_version < ...") and
            # trailing comments before matching the specifier.
            line = line.split(";", 1)[0].split(" #", 1)[0].strip()
            m = re.match(r"([A-Za-z0-9][A-Za-z0-9._-]*)\s*(?:\[[^\]]*\])?\s*([><=!~]+.+)?", line)
            if m:
                deps.append(Dependency(
                    source="", target=m.group(1),
                    dep_type="runtime", version=(m.group(2) or "").strip(), ecosystem="pip",
                ))
        return deps

    def _parse_pyproject(self, content: str) -> List[Dependency]:
        """Parse the ``dependencies`` array of a pyproject.toml (best-effort).

        Handles both the multi-line list form and the single-line
        ``dependencies = ["a>=1", "b"]`` form, which the previous
        implementation silently skipped.
        """
        deps: List[Dependency] = []

        def _add(name: str) -> None:
            deps.append(Dependency(
                source="", target=name,
                dep_type="runtime", ecosystem="pyproject",
            ))

        in_deps = False
        for line in content.split("\n"):
            stripped = line.strip()
            if stripped.startswith("dependencies"):
                if "[" in stripped and "]" in stripped:
                    # Inline form: everything is on this one line.
                    for name in re.findall(r'"([A-Za-z0-9][A-Za-z0-9._-]*)', stripped):
                        _add(name)
                else:
                    in_deps = True
                continue
            if in_deps:
                if stripped.startswith("]"):
                    in_deps = False
                    continue
                m = re.match(r'"([A-Za-z0-9][A-Za-z0-9._-]*)', stripped)
                if m:
                    _add(m.group(1))
        return deps

    def _parse_gomod(self, content: str) -> List[Dependency]:
        """Parse a go.mod, capturing indented ``module version`` pairs.

        Only lines with leading whitespace match (i.e. entries inside a
        ``require ( ... )`` block); ``//`` comment lines are skipped.
        """
        deps = []
        for line in content.split("\n"):
            m = re.match(r"\s+(\S+)\s+(\S+)", line)
            if m and not line.strip().startswith("//"):
                deps.append(Dependency(
                    source="", target=m.group(1),
                    dep_type="runtime", version=m.group(2), ecosystem="go",
                ))
        return deps
diff --git a/gitpilot/diagnostics_runner.py b/gitpilot/diagnostics_runner.py
new file mode 100644
index 0000000000000000000000000000000000000000..2e69bf64119969d1002c22fdefcbc414a28282a5
--- /dev/null
+++ b/gitpilot/diagnostics_runner.py
@@ -0,0 +1,129 @@
+# gitpilot/diagnostics_runner.py
+"""
+Run linters and type-checkers server-side.
+
+For web/HF Spaces where VS Code language services aren't available,
+this module detects and runs the appropriate linter, then parses
+the output into structured diagnostic entries.
+"""
+from __future__ import annotations
+
+import logging
+from pathlib import Path
+from typing import List, Optional
+
+from .terminal import TerminalExecutor, TerminalSession, CommandResult
+
+logger = logging.getLogger(__name__)
+
# (marker file, linter name, command).
# Scanned in order by detect_linter(); the FIRST marker present in the
# workspace wins, so more specific tools are listed before generic ones.
# Every command redirects stderr into stdout so parse_diagnostics() sees
# the full output.
LINTER_MARKERS = [
    ("tsconfig.json", "tsc", "npx tsc --noEmit 2>&1"),
    ("eslint.config.js", "eslint", "npx eslint . --format compact 2>&1"),
    ("eslint.config.mjs", "eslint", "npx eslint . --format compact 2>&1"),
    (".eslintrc.json", "eslint", "npx eslint . --format compact 2>&1"),
    (".eslintrc.js", "eslint", "npx eslint . --format compact 2>&1"),
    (".eslintrc.yml", "eslint", "npx eslint . --format compact 2>&1"),
    ("biome.json", "biome", "npx biome check . 2>&1"),
    # NOTE(review): recent ruff versions replaced `--output-format text`
    # with `concise`/`full` — confirm against the ruff version in use.
    ("pyproject.toml", "ruff", "ruff check . --output-format text 2>&1"),
    ("setup.cfg", "flake8", "flake8 . 2>&1"),
    (".flake8", "flake8", "flake8 . 2>&1"),
    ("Cargo.toml", "cargo", "cargo check --message-format short 2>&1"),
    ("go.mod", "go", "go vet ./... 2>&1"),
    ("clippy.toml", "clippy", "cargo clippy 2>&1"),
]
+
+
async def detect_linter(workspace_path: Path) -> Optional[tuple[str, str]]:
    """Detect linter from project files. Returns (name, command) or None."""
    for marker, linter_name, linter_cmd in LINTER_MARKERS:
        marker_path = workspace_path / marker
        if not marker_path.exists():
            continue
        logger.info("Detected linter: %s (via %s)", linter_name, marker)
        return linter_name, linter_cmd
    return None
+
+
async def run_linter(
    workspace_path: Path,
    executor: Optional[TerminalExecutor] = None,
    timeout: int = 60,
) -> Optional[CommandResult]:
    """Detect the linter and run it. Returns CommandResult or None.

    Args:
        workspace_path: Project root scanned for linter marker files.
        executor: Optional pre-built executor; a fresh one is created when
            omitted.
        timeout: Seconds allotted to the linter command.

    Returns:
        The executed command's result, or ``None`` when no known linter
        marker file is present in the workspace.
    """
    detection = await detect_linter(workspace_path)
    if not detection:
        return None

    name, command = detection
    # NOTE(review): a fresh TerminalSession is created per call — assumes
    # sessions are cheap and stateless here; confirm against terminal.py.
    executor = executor or TerminalExecutor()
    session = TerminalSession(workspace_path=workspace_path)

    logger.info("Running linter %s: %s", name, command)
    return await executor.execute(session, command, timeout=timeout)
+
+
def parse_diagnostics(output: str) -> List[dict]:
    """
    Parse linter output into structured entries (best-effort).

    Handles common formats:
      - file:line:col: severity: message   (gcc, tsc, eslint compact)
      - file:line: severity: message
      - file(line,col): error TS1234: message   (tsc)

    Only the first 200 lines of output are considered.  Each entry is a
    dict with ``file``, ``line``, ``severity`` and ``message`` keys.
    """

    def _from_tsc(text: str) -> Optional[dict]:
        # tsc shape: file(line,col): error TSxxxx: message
        if "): error " not in text and "): warning " not in text:
            return None
        try:
            open_at = text.index("(")
            close_at = text.index(")")
            pos_parts = text[open_at + 1 : close_at].split(",")
            lineno = int(pos_parts[0]) if pos_parts else 0
            remainder = text[close_at + 2 :].strip()
        except (ValueError, IndexError):
            return None  # fall through to the colon-delimited attempt
        return {
            "file": text[:open_at].strip(),
            "line": lineno,
            "severity": "error" if remainder.startswith("error") else "warning",
            "message": remainder.split(":", 1)[-1].strip() if ":" in remainder else remainder,
        }

    def _from_colon_format(text: str) -> Optional[dict]:
        # gcc/eslint-compact shape: file:line[:col]: severity: message
        pieces = text.split(":", 4)
        if len(pieces) < 4:
            return None
        try:
            lineno = int(pieces[1].strip())
        except (ValueError, IndexError):
            return None
        remainder = ":".join(pieces[2:]).strip()
        return {
            "file": pieces[0].strip(),
            "line": lineno,
            "severity": "error" if "error" in remainder.lower() else "warning",
            "message": remainder,
        }

    def _fallback(text: str) -> Optional[dict]:
        # Unstructured line: keep it only if it smells like a diagnostic.
        lowered = text.lower()
        if not any(word in lowered for word in ("error", "warning", "fail")):
            return None
        return {
            "file": "",
            "line": 0,
            "severity": "error" if "error" in lowered else "warning",
            "message": text,
        }

    entries: List[dict] = []
    for raw in output.splitlines()[:200]:
        stripped = raw.strip()
        if not stripped:
            continue
        entry = _from_tsc(stripped) or _from_colon_format(stripped) or _fallback(stripped)
        if entry is not None:
            entries.append(entry)
    return entries
diff --git a/gitpilot/doctor.py b/gitpilot/doctor.py
new file mode 100644
index 0000000000000000000000000000000000000000..b9151242b339d2c11749cf535baa8cc630b1b776
--- /dev/null
+++ b/gitpilot/doctor.py
@@ -0,0 +1,370 @@
+# gitpilot/doctor.py
+"""``gitpilot doctor`` — install + environment health check.
+
+Reports a green / amber / red status for each prerequisite GitPilot needs.
+Built to halve install-time support load: a single command tells the user
+what's missing and how to fix it.
+
+The implementation is pure-stdlib + optional ``rich`` for pretty output.
+``--offline`` skips every network probe so the command stays under the
+2-second budget on a healthy machine. ``--json`` emits a machine-readable
+payload for CI use.
+
+This module is invoked through :mod:`gitpilot.cli` but works standalone::
+
+ python -m gitpilot.doctor --json
+"""
+from __future__ import annotations
+
+import dataclasses
+import json
+import os
+import platform
+import shutil
+import subprocess
+import sys
+import time
+from dataclasses import dataclass, field
+from pathlib import Path
+from typing import Any, Callable, Dict, List, Optional, Sequence
+
+
+# ----------------------------------------------------------------------
+# Status model
+# ----------------------------------------------------------------------
+
+LEVELS = ("green", "amber", "red")
+
+
@dataclass
class CheckResult:
    """Outcome of a single health check."""

    name: str  # stable check identifier, e.g. "python"
    level: str  # "green" | "amber" | "red"
    summary: str  # one-line human-readable status
    hint: Optional[str] = None  # suggested remedy when not green
    detail: Optional[str] = None  # extra context, e.g. exception text

    def to_dict(self) -> Dict[str, Any]:
        """Serialize every field to a plain dict."""
        return {
            "name": self.name,
            "level": self.level,
            "summary": self.summary,
            "hint": self.hint,
            "detail": self.detail,
        }
+
+
@dataclass
class DoctorReport:
    """Aggregate report for one run."""

    results: List[CheckResult] = field(default_factory=list)  # check outcomes
    duration_ms: int = 0  # wall-clock time of the run
    offline: bool = False  # whether network probes were skipped

    @property
    def worst_level(self) -> str:
        """Most severe level across results; "green" when there are none.

        Unknown level strings rank alongside green; the first occurrence
        of the maximal rank wins (matching ``max`` semantics).
        """
        severity = {"green": 0, "amber": 1, "red": 2}
        worst: Optional[str] = None
        for result in self.results:
            if worst is None or severity.get(result.level, 0) > severity.get(worst, 0):
                worst = result.level
        return worst if worst is not None else "green"

    @property
    def exit_code(self) -> int:
        """0 for green/amber, 1 for red or any unrecognised level."""
        return 0 if self.worst_level in ("green", "amber") else 1

    def to_dict(self) -> Dict[str, Any]:
        """Serialize the report, including the derived summary fields."""
        payload: Dict[str, Any] = {
            "results": [result.to_dict() for result in self.results],
            "duration_ms": self.duration_ms,
            "offline": self.offline,
        }
        payload["worst_level"] = self.worst_level
        payload["exit_code"] = self.exit_code
        return payload
+
+
+# ----------------------------------------------------------------------
+# Individual checks (each returns a CheckResult)
+# ----------------------------------------------------------------------
+
def check_python() -> CheckResult:
    """Verify the running interpreter is Python >= 3.11."""
    version = sys.version_info
    if version.major == 3 and version.minor >= 11:
        summary = f"Python {version.major}.{version.minor} ({platform.python_implementation()})"
        return CheckResult("python", "green", summary)
    return CheckResult(
        "python", "red",
        f"Python {version.major}.{version.minor} is too old",
        hint="GitPilot requires Python >= 3.11. Install via uv: `uv python install 3.11`.",
    )
+
+
def check_node() -> CheckResult:
    """Report whether Node.js is on PATH (optional — frontend tooling only)."""
    path = shutil.which("node")
    if not path:
        # Missing node is only amber: the backend runs without it.
        return CheckResult(
            "node", "amber",
            "node not found on PATH",
            hint="Optional for the frontend. Install via nvm or your package manager.",
        )
    try:
        # check=False: a non-zero exit still yields stdout; the 2s timeout
        # keeps doctor within its time budget.
        out = subprocess.run([path, "--version"], capture_output=True, text=True, timeout=2, check=False)
        version = out.stdout.strip() or "unknown"
    except Exception as exc:  # pragma: no cover - defensive
        return CheckResult("node", "amber", f"node failed to run: {exc}")
    return CheckResult("node", "green", f"node {version}")
+
+
def check_uv() -> CheckResult:
    """Report whether the ``uv`` package manager is on PATH (recommended)."""
    path = shutil.which("uv")
    if not path:
        # Missing uv is only amber: pip works as a fallback.
        return CheckResult(
            "uv", "amber",
            "uv not found on PATH",
            hint="Optional but recommended. Install via `pip install uv` or the official installer.",
        )
    try:
        # check=False + 2s timeout: mirror check_node's defensive probe.
        out = subprocess.run([path, "--version"], capture_output=True, text=True, timeout=2, check=False)
        version = out.stdout.strip() or "unknown"
    except Exception as exc:  # pragma: no cover - defensive
        return CheckResult("uv", "amber", f"uv failed to run: {exc}")
    return CheckResult("uv", "green", version)
+
+
def check_workspace_files(workspace: Path) -> CheckResult:
    """Report presence of AGENTS.md and .gitpilot/modes.yaml in the workspace.

    A missing AGENTS.md downgrades to amber; a missing modes.yaml is noted
    but does not change the level.
    """
    workspace = workspace.resolve()
    parts: List[str] = []
    level = "green"
    hint: Optional[str] = None

    if (workspace / "AGENTS.md").exists():
        parts.append("AGENTS.md ✓")
    else:
        parts.append("AGENTS.md missing")
        level = "amber"
        hint = "Run `gitpilot init` to generate a starter AGENTS.md."

    if (workspace / ".gitpilot" / "modes.yaml").exists():
        parts.append(".gitpilot/modes.yaml ✓")
    else:
        parts.append(".gitpilot/modes.yaml missing")

    return CheckResult("workspace", level, ", ".join(parts), hint=hint)
+
+
def check_modes_parses(workspace: Path) -> CheckResult:
    """Validate that the workspace's .gitpilot/modes.yaml loads via ModeRegistry."""
    path = workspace / ".gitpilot" / "modes.yaml"
    if not path.exists():
        # Absence is amber, not red: per-workspace modes are optional.
        return CheckResult("modes.yaml", "amber", "no modes.yaml in this workspace")
    try:
        from gitpilot.modes import ModeRegistry  # local import to keep doctor light
        registry = ModeRegistry()
        count = registry.load(workspace_path=workspace)
        return CheckResult("modes.yaml", "green", f"parsed {count} mode(s)")
    except Exception as exc:
        # Any load failure (YAML syntax or schema) is red, with the raw
        # error preserved in ``detail`` for debugging.
        return CheckResult(
            "modes.yaml", "red",
            "modes.yaml did not parse",
            hint="Open the file and check for YAML syntax errors.",
            detail=str(exc),
        )
+
+
def check_sandbox_reachable(*, offline: bool) -> CheckResult:
    """Probe the configured sandbox backend.

    The subprocess backend is always green; the matrixlab backend is probed
    over the network with a 2-second timeout unless ``offline`` is set, in
    which case the probe is skipped and amber is reported.
    """
    from gitpilot.sandbox import (  # local import
        BACKEND_MATRIXLAB,
        BACKEND_OFF,
        BACKEND_SUBPROCESS,
        get_sandbox,
    )
    sb = get_sandbox()
    backend = sb.backend
    if backend == BACKEND_OFF:
        return CheckResult(
            "sandbox", "amber",
            "sandbox disabled (BACKEND_OFF)",
            hint="Set GITPILOT_SANDBOX=subprocess (default) or matrixlab.",
        )
    if backend == BACKEND_SUBPROCESS:
        return CheckResult("sandbox", "green", "subprocess backend ready")
    if backend == BACKEND_MATRIXLAB:
        if offline:
            return CheckResult("sandbox", "amber", "matrixlab backend (skipped probe — offline)")
        import asyncio
        import contextlib
        try:
            # Bounded probe so doctor stays under its 2-second budget.
            health = asyncio.run(asyncio.wait_for(sb.health(), timeout=2))
        except Exception as exc:
            return CheckResult(
                "sandbox", "red",
                "matrixlab backend not reachable",
                hint="Start the runner or set GITPILOT_MATRIXLAB_URL.",
                detail=str(exc),
            )
        finally:
            # Always close the client (when it exposes aclose) so the probe
            # leaves no open connections; close errors are suppressed.
            close = getattr(sb, "aclose", None)
            if callable(close):  # pragma: no branch
                with contextlib.suppress(Exception):
                    asyncio.run(close())
        if health.get("ok"):
            return CheckResult("sandbox", "green", "matrixlab runner reachable")
        return CheckResult(
            "sandbox", "red",
            "matrixlab runner unhealthy",
            detail=str(health.get("error", "")),
        )
    return CheckResult("sandbox", "amber", f"unknown backend: {backend}")
+
+
def check_mcp_config(workspace: Path) -> CheckResult:
    """Check project- and user-level mcp.json files and count named servers.

    Both locations are merged; a parse failure in either file is red.
    """
    project = workspace / ".gitpilot" / "mcp.json"
    user = Path.home() / ".gitpilot" / "mcp.json"
    files = [p for p in (project, user) if p.exists()]
    if not files:
        return CheckResult("mcp", "amber", "no mcp.json found (project or user)")
    try:
        servers: List[str] = []
        for path in files:
            data = json.loads(path.read_text(encoding="utf-8"))
            # Tolerate non-dict top levels and entries without a "name".
            for entry in data.get("servers", []) if isinstance(data, dict) else []:
                if isinstance(entry, dict) and entry.get("name"):
                    servers.append(str(entry["name"]))
        return CheckResult("mcp", "green", f"{len(servers)} MCP server(s) configured: {', '.join(sorted(set(servers))) or '(none)'}")
    except Exception as exc:
        return CheckResult("mcp", "red", "mcp.json did not parse", detail=str(exc))
+
+
# Human-readable remediation hints per provider, shown when the credential
# check fails (see check_model_credentials).
_API_KEY_HINTS = {
    "openai": "Set OPENAI_API_KEY",
    "anthropic": "Set ANTHROPIC_API_KEY",
    "watsonx": "Set WATSONX_API_KEY (and WATSONX_PROJECT_ID)",
    "ollama": "Run `ollama serve` locally; no key needed",
}

# Environment variable expected to carry each provider's API key.
# ollama is intentionally absent — it needs no key.
_API_KEY_ENVS = {
    "openai": "OPENAI_API_KEY",
    "anthropic": "ANTHROPIC_API_KEY",
    "watsonx": "WATSONX_API_KEY",
}
+
+
def check_model_credentials() -> CheckResult:
    """Check that the configured LLM provider has a credential available.

    With no explicit provider, any known API key in the environment counts
    as green; ollama never needs a key.
    """
    provider = (os.environ.get("GITPILOT_LLM_PROVIDER") or "").lower()

    if not provider:
        # No explicit provider: best-effort scan of the known key env vars.
        found = [name for name, env in _API_KEY_ENVS.items() if os.environ.get(env)]
        if found:
            return CheckResult("model", "green", f"credential(s) present: {', '.join(found)}")
        return CheckResult(
            "model", "amber",
            "no GITPILOT_LLM_PROVIDER set and no provider API key in env",
            hint="Set GITPILOT_LLM_PROVIDER and the matching API key, or use ollama locally.",
        )

    if provider == "ollama":
        return CheckResult("model", "green", "provider=ollama (no API key needed)")

    env_name = _API_KEY_ENVS.get(provider)
    if env_name and os.environ.get(env_name):
        return CheckResult("model", "green", f"provider={provider} ({env_name} set)")

    return CheckResult(
        "model", "red",
        f"provider={provider} but credential is missing",
        hint=_API_KEY_HINTS.get(provider, f"Set the API key env var for {provider}"),
    )
+
+
def check_frontend_bundle() -> CheckResult:
    """Check whether the packaged static frontend bundle ships with the module."""
    bundle_dir = Path(__file__).parent / "web"
    if not bundle_dir.exists():
        # A missing bundle only degrades the web UI, hence amber.
        return CheckResult(
            "frontend", "amber",
            "frontend bundle not packaged",
            hint="Run `make frontend-build` to produce the static bundle.",
        )
    if not (bundle_dir / "index.html").exists():
        return CheckResult(
            "frontend", "amber",
            "frontend bundle present but index.html missing",
        )
    return CheckResult("frontend", "green", f"bundle at {bundle_dir}")
+
+
+# ----------------------------------------------------------------------
+# Orchestrator
+# ----------------------------------------------------------------------
+
# Signature shared by every check: a zero-argument callable returning a
# CheckResult.
CheckFn = Callable[[], CheckResult]


def _build_checks(workspace: Path, *, offline: bool) -> Sequence[CheckFn]:
    """Assemble the ordered tuple of checks for one doctor run.

    Workspace-dependent checks are bound via lambdas so every entry
    conforms to the zero-argument ``CheckFn`` signature.
    """
    return (
        check_python,
        check_node,
        check_uv,
        lambda: check_workspace_files(workspace),
        lambda: check_modes_parses(workspace),
        lambda: check_sandbox_reachable(offline=offline),
        lambda: check_mcp_config(workspace),
        check_model_credentials,
        check_frontend_bundle,
    )
+
+
def run_checks(
    workspace: Optional[Path] = None,
    *,
    offline: bool = False,
) -> DoctorReport:
    """Execute every check and return a :class:`DoctorReport`.

    A crashing check is recorded as a red result rather than aborting
    the run, so the report always covers every check.
    """
    root = (workspace or Path.cwd()).resolve()
    report = DoctorReport(offline=offline)
    began = time.monotonic()
    for check in _build_checks(root, offline=offline):
        try:
            outcome = check()
        except Exception as exc:  # pragma: no cover - defensive
            outcome = CheckResult(
                getattr(check, "__name__", "check"), "red", "check failed", detail=str(exc)
            )
        report.results.append(outcome)
    report.duration_ms = int((time.monotonic() - began) * 1000)
    return report
+
+
+# ----------------------------------------------------------------------
+# Renderers
+# ----------------------------------------------------------------------
+
+_LEVEL_GLYPHS = {"green": "✅", "amber": "⚠️ ", "red": "❌"}
+
+
def render_text(report: DoctorReport) -> str:
    """Render a plain-text table. Used by both Typer and ``python -m``."""
    name_width = max((len(result.name) for result in report.results), default=8)
    out = ["gitpilot doctor", "-" * 60]
    for result in report.results:
        marker = _LEVEL_GLYPHS.get(result.level, "?")
        out.append(f"{marker} {result.name.ljust(name_width)} {result.summary}")
        if result.hint:
            out.append(f" ↳ {result.hint}")
    out.append("-" * 60)
    out.append(f"worst: {report.worst_level} duration: {report.duration_ms} ms")
    return "\n".join(out)
+
+
+def render_json(report: DoctorReport) -> str:
+ """Render a :class:`DoctorReport` as indented JSON for CI consumption."""
+ return json.dumps(report.to_dict(), indent=2)
+
+
+# ----------------------------------------------------------------------
+# Module-level CLI ``python -m gitpilot.doctor``
+# ----------------------------------------------------------------------
+
def _module_main(argv: Optional[Sequence[str]] = None) -> int:
    """argparse entry point for ``python -m gitpilot.doctor``.

    Args:
        argv: Optional argument list (defaults to ``sys.argv[1:]``).

    Returns:
        Process exit code from the report (0 for green/amber, 1 for red).
    """
    import argparse

    parser = argparse.ArgumentParser(prog="gitpilot.doctor")
    parser.add_argument("--workspace", type=Path, default=Path.cwd())
    parser.add_argument("--offline", action="store_true")
    parser.add_argument("--json", action="store_true")
    args = parser.parse_args(argv)
    report = run_checks(args.workspace, offline=args.offline)
    print(render_json(report) if args.json else render_text(report))
    return report.exit_code
+
+
+if __name__ == "__main__": # pragma: no cover - manual entry
+ raise SystemExit(_module_main())
diff --git a/gitpilot/errors.py b/gitpilot/errors.py
new file mode 100644
index 0000000000000000000000000000000000000000..1913ef48e71ed2f5d452938d3d1c4f49f25abaee
--- /dev/null
+++ b/gitpilot/errors.py
@@ -0,0 +1,205 @@
+# gitpilot/errors.py
+"""Structured error envelope — Batch P1-D.
+
+Lets every backend endpoint return a uniform error shape that the UI can
+render as a friendly block::
+
+ {
+ "error": {
+ "code": "sandbox.unreachable",
+ "message": "MatrixLab runner did not respond",
+ "hint": "Set GITPILOT_MATRIXLAB_URL or start the runner.",
+ "doc_url": "https://docs.gitpilot.dev/errors/sandbox-unreachable"
+ },
+ "trace_id": "…"
+ }
+
+The envelope is opt-in via the ``error_envelope`` feature flag and the
+:func:`wrap_errors_envelope` decorator. When the flag is off (the
+legacy default) the decorator is a passthrough — uncaught exceptions
+bubble up to FastAPI exactly as before so existing clients see no
+change.
+"""
+from __future__ import annotations
+
+import functools
+import logging
+import traceback
+import uuid
+from dataclasses import dataclass
+from typing import Any, Awaitable, Callable, Dict, Optional, TypeVar, cast
+
+from . import flags
+
+logger = logging.getLogger(__name__)
+
+FLAG_ERROR_ENVELOPE = "error_envelope"
+DEFAULT_DOC_BASE = "https://docs.gitpilot.dev/errors"
+
+F = TypeVar("F", bound=Callable[..., Awaitable[Any]])
+
+
+# ----------------------------------------------------------------------
+# Public exception type
+# ----------------------------------------------------------------------
+
@dataclass
class GitPilotError(Exception):
    """Base error carrying structured fields for the envelope.

    ``code`` should be a dotted, stable identifier (``sandbox.unreachable``)
    that the UI can branch on; ``message`` is human-readable; ``hint``
    suggests a remedy; ``doc_url`` deep-links to documentation.
    """

    code: str  # stable dotted identifier, e.g. "sandbox.unreachable"
    message: str  # human-readable description
    hint: Optional[str] = None  # suggested remedy
    doc_url: Optional[str] = None  # documentation deep link
    status_code: int = 500  # HTTP status when rendered as a response

    def __post_init__(self) -> None:
        # Initialise Exception with the message so str()/args behave normally.
        super().__init__(self.message)

    def to_envelope(self, trace_id: Optional[str] = None) -> Dict[str, Any]:
        """Render this error as the canonical envelope payload."""
        payload = error_envelope(self, trace_id=trace_id)
        return payload
+
+
+# Convenience subclasses for common categories.
+
class ValidationError(GitPilotError):
    """Raised when a request fails input validation (HTTP 400).

    Args:
        message: Human-readable description of what failed validation.
        hint: Optional remedy surfaced to the client.
    """

    def __init__(self, message: str, *, hint: Optional[str] = None) -> None:
        super().__init__(
            code="request.invalid",
            message=message,
            hint=hint,
            doc_url=f"{DEFAULT_DOC_BASE}/request-invalid",
            status_code=400,
        )
+
+
class NotFoundError(GitPilotError):
    """Raised when a requested resource is missing (HTTP 404).

    Args:
        message: Human-readable description of the missing resource.
        hint: Optional remedy surfaced to the client.
    """

    def __init__(self, message: str, *, hint: Optional[str] = None) -> None:
        super().__init__(
            code="resource.not_found",
            message=message,
            hint=hint,
            doc_url=f"{DEFAULT_DOC_BASE}/resource-not-found",
            status_code=404,
        )
+
+
class UpstreamError(GitPilotError):
    """Raised when an upstream provider (LLM, MCP, GitHub) returns an
    unrecoverable error (HTTP 502).

    Args:
        message: Human-readable description of the upstream failure.
        hint: Optional remedy surfaced to the client.
        code: Dotted identifier; override for provider-specific codes.
    """

    def __init__(self, message: str, *, hint: Optional[str] = None, code: str = "upstream.failure") -> None:
        super().__init__(
            code=code,
            message=message,
            hint=hint,
            doc_url=f"{DEFAULT_DOC_BASE}/upstream-failure",
            status_code=502,
        )
+
+
+# ----------------------------------------------------------------------
+# Envelope construction
+# ----------------------------------------------------------------------
+
def error_envelope(
    err: BaseException,
    *,
    trace_id: Optional[str] = None,
    fallback_code: str = "internal.unexpected",
) -> Dict[str, Any]:
    """Render an exception as the canonical error payload.

    Structured :class:`GitPilotError` instances contribute their own
    fields; any other exception is wrapped under ``fallback_code`` with a
    generic hint.  A ``trace_id`` is generated when none is supplied.
    """
    if isinstance(err, GitPilotError):
        payload: Dict[str, Any] = {"code": err.code, "message": err.message}
        if err.hint:
            payload["hint"] = err.hint
        if err.doc_url:
            payload["doc_url"] = err.doc_url
    else:
        payload = {
            "code": fallback_code,
            "message": str(err) or err.__class__.__name__,
            "hint": "Re-run with GITPILOT_DEBUG=1 for a traceback in the server log.",
            "doc_url": f"{DEFAULT_DOC_BASE}/internal-unexpected",
        }
    return {"error": payload, "trace_id": trace_id or _new_trace_id()}
+
+
def error_envelope_response(err: BaseException, *, trace_id: Optional[str] = None) -> Any:
    """Return a FastAPI ``JSONResponse`` carrying the envelope.

    Imports the FastAPI types lazily so the module remains importable in
    contexts where FastAPI isn't installed (CLI, tests).

    Args:
        err: The exception to render; a GitPilotError supplies its own
            HTTP status, everything else maps to 500.
        trace_id: Optional correlation id propagated into the envelope.
    """
    from fastapi.responses import JSONResponse

    status = err.status_code if isinstance(err, GitPilotError) else 500
    return JSONResponse(status_code=status, content=error_envelope(err, trace_id=trace_id))
+
+
+# ----------------------------------------------------------------------
+# Endpoint decorator
+# ----------------------------------------------------------------------
+
def wrap_errors_envelope(func: F) -> F:
    """Decorate an async FastAPI handler to emit the envelope.

    When the ``error_envelope`` flag is **off** the wrapper re-raises so
    the legacy FastAPI behaviour (default ``{detail: …}`` body or
    framework traceback) applies. When the flag is **on** every
    uncaught exception is translated into the structured payload.

    The decorator is a no-op for handlers that return normally.
    """

    @functools.wraps(func)
    async def wrapper(*args: Any, **kwargs: Any) -> Any:
        try:
            return await func(*args, **kwargs)
        except GitPilotError as err:
            # Structured errors are expected: log at warning, not exception.
            if flags.is_on(FLAG_ERROR_ENVELOPE):
                trace_id = _new_trace_id()
                logger.warning(
                    "GitPilotError code=%s trace_id=%s msg=%s",
                    err.code, trace_id, err.message,
                )
                return error_envelope_response(err, trace_id=trace_id)
            raise
        except Exception as err:  # noqa: BLE001 — top-of-stack adapter
            # Unexpected errors: full traceback goes to the server log; the
            # client only sees the generic envelope plus the trace id.
            if flags.is_on(FLAG_ERROR_ENVELOPE):
                trace_id = _new_trace_id()
                logger.exception(
                    "unhandled exception in %s (trace_id=%s)", func.__name__, trace_id,
                )
                return error_envelope_response(err, trace_id=trace_id)
            raise

    return cast(F, wrapper)
+
+
+# ----------------------------------------------------------------------
+# Helpers
+# ----------------------------------------------------------------------
+
+def _new_trace_id() -> str:
+ return uuid.uuid4().hex[:16]
+
+
def render_traceback_for_log(err: BaseException) -> str:
    """Return a short traceback suitable for structured logs."""
    parts = traceback.format_exception(type(err), err, err.__traceback__)
    return "".join(parts).strip()
diff --git a/gitpilot/flags.py b/gitpilot/flags.py
new file mode 100644
index 0000000000000000000000000000000000000000..4a7f172e5bd66627f833a3bbc46cca340e4914d0
--- /dev/null
+++ b/gitpilot/flags.py
@@ -0,0 +1,161 @@
+# gitpilot/flags.py
+"""Feature-flag service — single source of truth for opt-in code paths.
+
+Lookup precedence (first hit wins): explicit override → ``GITPILOT_FLAGS``
+env (``name=1,other=0``) → ``/.gitpilot/flags.json`` →
+``~/.gitpilot/flags.json`` → call-site default. Lazy, cached, RLock-safe,
+zero third-party deps.
+"""
+from __future__ import annotations
+
+import json
+import logging
+import os
+import threading
+from pathlib import Path
+from typing import Any, Dict, Iterator, Mapping, Optional
+
+logger = logging.getLogger(__name__)
+
+ENV_VAR = "GITPILOT_FLAGS"
+PROJECT_FLAGS_REL = Path(".gitpilot") / "flags.json"
+USER_FLAGS_PATH = Path.home() / ".gitpilot" / "flags.json"
+
+_TRUE = {"1", "true", "yes", "on", "y", "t"}
+_FALSE = {"0", "false", "no", "off", "n", "f"}
+
+_lock = threading.RLock()
+_overrides: Dict[str, bool] = {}
+_cache: Optional[Dict[str, bool]] = None
+_workspace: Optional[Path] = None
+
+
+def _coerce(value: Any) -> Optional[bool]:
+ if isinstance(value, bool):
+ return value
+ if isinstance(value, (int, float)):
+ return bool(value)
+ if isinstance(value, str):
+ v = value.strip().lower()
+ if v in _TRUE:
+ return True
+ if v in _FALSE:
+ return False
+ return None
+
+
+def _parse_env(raw: str) -> Dict[str, bool]:
+ out: Dict[str, bool] = {}
+ for piece in raw.split(","):
+ piece = piece.strip()
+ if not piece:
+ continue
+ if "=" in piece:
+ name, _, value = piece.partition("=")
+ parsed = _coerce(value)
+ else:
+ name, parsed = piece, True
+ name = name.strip()
+ if not name or parsed is None:
+ continue
+ out[name] = parsed
+ return out
+
+
def _load_file(path: Path) -> Dict[str, bool]:
    """Read a ``flags.json`` file and return its coerced flag map.

    Missing files, unparseable JSON and non-mapping payloads all yield an
    empty dict (parse failures are logged at warning level). Values are
    coerced via ``_coerce``; non-string keys and unparseable values are
    dropped, and keys are whitespace-stripped.
    """
    if not path.exists():
        return {}
    try:
        data = json.loads(path.read_text(encoding="utf-8"))
    except Exception as exc:  # pragma: no cover - logged, returns empty
        logger.warning("could not parse %s: %s", path, exc)
        return {}
    if not isinstance(data, Mapping):
        return {}
    out: Dict[str, bool] = {}
    for key, value in data.items():
        parsed = _coerce(value)
        if parsed is not None and isinstance(key, str):
            out[key.strip()] = parsed
    return out
+
+
def _build_cache() -> Dict[str, bool]:
    """Merge all flag sources, lowest precedence first.

    Order (later update wins): user file -> workspace file -> GITPILOT_FLAGS
    env string -> runtime overrides. Callers are expected to hold ``_lock``.
    """
    merged: Dict[str, bool] = {}
    merged.update(_load_file(USER_FLAGS_PATH))
    if _workspace is not None:
        merged.update(_load_file(_workspace / PROJECT_FLAGS_REL))
    env_raw = os.environ.get(ENV_VAR, "")
    if env_raw:
        merged.update(_parse_env(env_raw))
    merged.update(_overrides)
    return merged


def _ensure_cache() -> Dict[str, bool]:
    """Return the merged flag map, building it lazily on first use."""
    global _cache
    if _cache is None:
        _cache = _build_cache()
    return _cache
+
+
+# --- public API --------------------------------------------------------
+
def set_workspace(workspace: Optional[Path]) -> None:
    """Register the active workspace so its ``.gitpilot/flags.json`` loads.

    Passing ``None`` detaches any workspace. Invalidates the cached map so
    the next read re-merges all sources.
    """
    global _workspace
    with _lock:
        _workspace = workspace.resolve() if workspace is not None else None
        _invalidate()


def is_on(name: str, default: bool = False) -> bool:
    """Return whether feature flag *name* is enabled (lazy cache build)."""
    with _lock:
        return _ensure_cache().get(name, default)


def enabled_flags() -> Dict[str, bool]:
    """Return a snapshot (copy) of the currently merged flag map."""
    with _lock:
        return dict(_ensure_cache())


def set_override(name: str, value: bool) -> None:
    """Set a runtime override that beats every other source (tests, REPL)."""
    with _lock:
        _overrides[name] = bool(value)
        _invalidate()


def clear_override(name: str) -> None:
    """Remove a previously registered override (no-op if absent)."""
    with _lock:
        _overrides.pop(name, None)
        _invalidate()


def clear_all_overrides() -> None:
    """Drop every runtime override. Mostly useful for test teardown."""
    with _lock:
        _overrides.clear()
        _invalidate()


def reload() -> Dict[str, bool]:
    """Reread environment + files. Returns the new merged map (a copy)."""
    with _lock:
        _invalidate()
        return dict(_ensure_cache())


def iter_known(defaults: Mapping[str, bool]) -> Iterator[tuple[str, bool, bool]]:
    """Yield ``(name, current, default)`` for every flag in *defaults*.

    Works from a single snapshot taken up front, so iteration is consistent
    even if flags change mid-loop.
    """
    snapshot = enabled_flags()
    for name, default in defaults.items():
        yield name, snapshot.get(name, default), default


def _invalidate() -> None:
    """Drop the memoised map; it is rebuilt lazily on the next read."""
    global _cache
    _cache = None
diff --git a/gitpilot/github_api.py b/gitpilot/github_api.py
new file mode 100644
index 0000000000000000000000000000000000000000..d4111e73852a719107984c408032fb66fcfcda01
--- /dev/null
+++ b/gitpilot/github_api.py
@@ -0,0 +1,582 @@
+# gitpilot/github_api.py
+from __future__ import annotations
+
+import contextvars
+import logging
+import os
+import re
+from base64 import b64decode, b64encode
+from contextlib import contextmanager
+from typing import Any
+
+import httpx
+from fastapi import HTTPException
+
+from gitpilot.models import GithubStatusSummary
+
+GITHUB_API_BASE = "https://api.github.com"
+
+# Context variable to store the GitHub token for the current request/execution scope
+_request_token: contextvars.ContextVar[str | None] = contextvars.ContextVar(
+ "request_token", default=None
+)
+
+# Git SHA (40-hex) validator
+_SHA_RE = re.compile(r"^[0-9a-fA-F]{40}$")
+
+# add near _request_token
+_request_ref: contextvars.ContextVar[str | None] = contextvars.ContextVar(
+ "request_ref", default=None
+)
+
+
@contextmanager
def execution_context(token: str | None, ref: str | None = None):
    """Bind a GitHub token (and optional ref) to the current execution scope.

    Request handlers use this so nested helpers can pick up the caller's
    credentials via ``_github_token`` / ``_github_ref`` without threading
    them through every signature. Previous values are restored on exit.
    """
    token_var = _request_token.set(token)
    ref_var = _request_ref.set(ref)
    try:
        yield
    finally:
        # Restore prior context values even if the body raised.
        _request_token.reset(token_var)
        _request_ref.reset(ref_var)


def _github_ref(provided_ref: str | None = None) -> str | None:
    """Return *provided_ref* if truthy, else the context-bound ref (or None)."""
    if provided_ref:
        return provided_ref
    return _request_ref.get()
+
+
def _github_token(provided_token: str | None = None) -> str:
    """
    Get GitHub token from (first hit wins):
    1. Explicit argument
    2. Request context (set via ``execution_context``)
    3. Environment variables GITPILOT_GITHUB_TOKEN / GITHUB_TOKEN (fallback)

    Raises:
        HTTPException: 401 when no token can be found from any source.
    """
    if provided_token:
        return provided_token

    ctx_token = _request_token.get()
    if ctx_token:
        return ctx_token

    token = os.getenv("GITPILOT_GITHUB_TOKEN") or os.getenv("GITHUB_TOKEN")
    if not token:
        raise HTTPException(
            status_code=401,
            detail=(
                "GitHub authentication required. "
                "Please log in via the UI or set GITPILOT_GITHUB_TOKEN in your environment."
            ),
        )
    return token
+
+
async def github_request(
    path: str,
    *,
    method: str = "GET",
    json: dict[str, Any] | None = None,
    params: dict[str, Any] | None = None,
    token: str | None = None,
) -> Any:
    """
    Core GitHub request helper.

    Sends one authenticated request to ``GITHUB_API_BASE + path`` and returns
    the decoded JSON body, or ``None`` for 204 / empty-body responses.

    Raises:
        HTTPException: mirroring GitHub's status code, with GitHub's error
            message as the detail. 401 gets a friendlier "token expired" hint.
    """
    github_token = _github_token(token)

    headers = {
        "Authorization": f"Bearer {github_token}",
        "Accept": "application/vnd.github+json",
        "User-Agent": "gitpilot",
    }

    # Generous read timeout: repo-wide endpoints (trees, contents) can be slow.
    timeout = httpx.Timeout(connect=15.0, read=45.0, write=30.0, pool=15.0)

    async with httpx.AsyncClient(
        base_url=GITHUB_API_BASE, headers=headers, timeout=timeout
    ) as client:
        resp = await client.request(method, path, json=json, params=params)

        if resp.status_code >= 400:
            # Prefer GitHub's structured "message" field; fall back to raw text.
            try:
                data = resp.json()
                msg = data.get("message") or resp.text
            except Exception:
                msg = resp.text

            if resp.status_code == 401:
                msg = "GitHub Token Expired or Invalid. Please refresh your login."

            raise HTTPException(status_code=resp.status_code, detail=msg)

        if resp.status_code == 204:
            return None

        # Some GitHub endpoints return 200 with empty body
        if not resp.content:
            return None

        return resp.json()
+
+
+# -----------------------------------------------------------------------------
+# Repos listing (legacy + pagination/search)
+# -----------------------------------------------------------------------------
+
async def list_user_repos(
    query: str | None = None, token: str | None = None
) -> list[dict[str, Any]]:
    """
    Legacy function - fetches first 100 repos.
    (Retro-compatible with older GitPilot versions.)

    Returns slim repo dicts (id, name, full_name, private, owner,
    default_branch). *query*, when given, filters client-side by
    case-insensitive substring match on full_name.
    """
    params = {
        "per_page": 100,
        "affiliation": "owner,collaborator,organization_member",
        "sort": "updated",
        "direction": "desc",
    }
    data = await github_request("/user/repos", params=params, token=token)

    # Map to slim dicts; default_branch is required by branch-aware callers.
    repos = [
        {
            "id": r["id"],
            "name": r["name"],
            "full_name": r["full_name"],
            "private": r["private"],
            "owner": r["owner"]["login"],
            "default_branch": r.get("default_branch", "main"),  # Critical Fix
        }
        for r in data
    ]

    if query:
        q = query.lower()
        repos = [r for r in repos if q in r["full_name"].lower()]
    return repos
+
+
async def list_user_repos_paginated(
    page: int = 1,
    per_page: int = 100,
    token: str | None = None,
) -> dict[str, Any]:
    """
    Fetch user repositories with pagination support.

    Talks to httpx directly (instead of ``github_request``) because it needs
    the ``Link`` response header to compute ``has_more``.

    Returns:
        {
            "repositories": [...],   # slim repo dicts incl. default_branch
            "page": int,
            "per_page": int,
            "has_more": bool,
        }
    """
    per_page = min(per_page, 100)  # GitHub caps per_page at 100
    params = {
        "page": page,
        "per_page": per_page,
        "affiliation": "owner,collaborator,organization_member",
        "sort": "updated",
        "direction": "desc",
    }

    github_token = _github_token(token)
    headers = {
        "Authorization": f"Bearer {github_token}",
        "Accept": "application/vnd.github+json",
        "User-Agent": "gitpilot",
    }

    timeout = httpx.Timeout(connect=10.0, read=30.0, write=30.0, pool=10.0)

    async with httpx.AsyncClient(
        base_url=GITHUB_API_BASE, headers=headers, timeout=timeout
    ) as client:
        resp = await client.get("/user/repos", params=params)

        if resp.status_code >= 400:
            raise HTTPException(status_code=resp.status_code, detail=resp.text)

        data = resp.json()

        # Map to slim dicts; default_branch is required by branch-aware callers.
        repos = [
            {
                "id": r["id"],
                "name": r["name"],
                "full_name": r["full_name"],
                "private": r["private"],
                "owner": r["owner"]["login"],
                "default_branch": r.get("default_branch", "main"),  # Critical Fix
            }
            for r in data
        ]

        # GitHub signals further pages via the Link header's rel="next" entry.
        link_header = resp.headers.get("Link", "") or ""
        has_more = 'rel="next"' in link_header

        return {
            "repositories": repos,
            "page": page,
            "per_page": per_page,
            "has_more": has_more,
        }
+
+
async def search_user_repos(
    query: str,
    page: int = 1,
    per_page: int = 100,
    token: str | None = None,
) -> dict[str, Any]:
    """Search the user's repositories by substring, with client-side paging.

    Fetches up to ``max_pages`` pages of the user's repos, filters locally on
    name / full_name (case-insensitive), then slices out the requested page.

    Returns the same envelope as ``list_user_repos_paginated`` plus
    ``total_count`` (count of matches). A timeout while fetching degrades to
    an empty result set rather than an error.
    """
    all_repos: list[dict[str, Any]] = []
    fetch_page = 1
    max_pages = 15  # hard cap: at most 1500 repos scanned per search

    try:
        while fetch_page <= max_pages:
            result = await list_user_repos_paginated(
                page=fetch_page,
                per_page=100,
                token=token,
            )
            all_repos.extend(result["repositories"])

            if not result["has_more"]:
                break

            fetch_page += 1

    except httpx.TimeoutException:
        # Best-effort: a slow GitHub answer degrades to "no matches".
        return {
            "repositories": [],
            "page": page,
            "per_page": per_page,
            "total_count": 0,
            "has_more": False,
        }

    q = query.lower()
    filtered = [
        r for r in all_repos
        if q in r["name"].lower() or q in r["full_name"].lower()
    ]

    total_count = len(filtered)
    start = (page - 1) * per_page
    end = start + per_page

    return {
        "repositories": filtered[start:end],
        "page": page,
        "per_page": per_page,
        "total_count": total_count,
        "has_more": end < total_count,
    }
+
+# -----------------------------------------------------------------------------
+# Repo + Ref resolution helpers (fixes "No commit found for SHA: main")
+# -----------------------------------------------------------------------------
+
async def get_repo(owner: str, repo: str, token: str | None = None) -> dict[str, Any]:
    """
    Get repository information including default_branch.
    """
    return await github_request(f"/repos/{owner}/{repo}", token=token)


async def _resolve_head_ref(owner: str, repo: str, token: str | None) -> str:
    """Return the repo's default branch name ("main" when GitHub omits it)."""
    repo_data = await get_repo(owner, repo, token=token)
    return repo_data.get("default_branch", "main")
+
+
async def _resolve_ref_to_commit_sha(
    owner: str,
    repo: str,
    ref: str | None,
    token: str | None,
) -> str:
    """
    Resolve a ref (branch/tag/commit SHA/"HEAD"/None) to a commit SHA.

    Resolution order:
      1. ``None``/"HEAD" -> the repository's default branch, then continue.
      2. A 40-hex string -> returned as-is (lowercased), no API call.
      3. Branch ref      -> /git/ref/heads/{ref}
      4. Tag ref         -> /git/ref/tags/{ref} (annotated tags dereferenced)
      5. Fallback        -> /commits/{ref} (GitHub resolves most ref spellings)

    Raises:
        HTTPException: 404 when no strategy can resolve the ref.
    """
    if not ref or ref == "HEAD":
        ref = await _resolve_head_ref(owner, repo, token)

    if _SHA_RE.match(ref):
        return ref.lower()

    # Branch ref
    try:
        data = await github_request(
            f"/repos/{owner}/{repo}/git/ref/heads/{ref}",
            token=token,
        )
        return data["object"]["sha"]
    except HTTPException:
        pass  # not a branch; try tags next

    # Tag ref (lightweight or annotated).
    # FIX: the original raised HTTPExceptions for missing SHAs inside a try
    # whose own `except HTTPException: pass` immediately swallowed them — the
    # error details were dead code. Fall through to the /commits fallback
    # explicitly instead.
    try:
        data = await github_request(
            f"/repos/{owner}/{repo}/git/ref/tags/{ref}",
            token=token,
        )
    except HTTPException:
        data = None

    if data is not None:
        obj = data.get("object") or {}
        sha = obj.get("sha")
        if sha:
            if obj.get("type") == "tag":
                # Annotated tag -> dereference the tag object to its commit.
                try:
                    tag_obj = await github_request(
                        f"/repos/{owner}/{repo}/git/tags/{sha}",
                        token=token,
                    )
                except HTTPException:
                    tag_obj = None
                target_sha = ((tag_obj or {}).get("object") or {}).get("sha")
                if target_sha:
                    return target_sha
            else:
                # Lightweight tag points directly at a commit SHA.
                return sha

    # Fallback: commits endpoint resolves branch/tag names to a commit
    try:
        commit = await github_request(
            f"/repos/{owner}/{repo}/commits/{ref}",
            token=token,
        )
    except HTTPException as e:
        raise HTTPException(status_code=404, detail=f"Ref not found: {ref}") from e

    sha = commit.get("sha")
    if not sha:
        raise HTTPException(status_code=404, detail=f"Ref not found: {ref}")
    return sha
+
+
async def _commit_sha_to_tree_sha(
    owner: str,
    repo: str,
    commit_sha: str,
    token: str | None,
) -> str:
    """
    Convert commit SHA -> tree SHA using /git/commits/{sha}.

    Raises:
        HTTPException: 500 when the commit payload lacks a tree SHA.
    """
    commit = await github_request(
        f"/repos/{owner}/{repo}/git/commits/{commit_sha}",
        token=token,
    )
    tree = commit.get("tree") or {}
    tree_sha = tree.get("sha")
    if not tree_sha:
        raise HTTPException(status_code=500, detail="Failed to resolve tree SHA from commit.")
    return tree_sha
+
+
+# -----------------------------------------------------------------------------
+# Branch creation
+# -----------------------------------------------------------------------------
+
async def create_branch(
    owner: str,
    repo: str,
    new_branch: str,
    from_ref: str = "HEAD",
    token: str | None = None,
) -> str:
    """
    Create a new branch from a ref (default: HEAD = default branch).

    Returns:
        The fully-qualified ref of the new branch (``refs/heads/<name>``).

    Raises:
        HTTPException: propagated from GitHub (e.g. when *from_ref* cannot
            be resolved, or branch creation is rejected).
    """
    base_commit_sha = await _resolve_ref_to_commit_sha(owner, repo, from_ref, token)

    body = {"ref": f"refs/heads/{new_branch}", "sha": base_commit_sha}
    new_ref = await github_request(
        f"/repos/{owner}/{repo}/git/refs",
        method="POST",
        json=body,
        token=token,
    )
    return new_ref["ref"]
+
+
+# -----------------------------------------------------------------------------
+# Tree + File APIs (branch-aware)
+# -----------------------------------------------------------------------------
+
async def get_repo_tree(
    owner: str,
    repo: str,
    token: str | None = None,
    ref: str = "HEAD",
) -> list[dict[str, str]]:
    """List every blob (file) in the repo tree at *ref*, recursively.

    Returns ``[{"path": ..., "type": "blob"}, ...]``; tree (directory)
    entries are filtered out.
    """
    # Only fall back to the context-bound ref when the caller did NOT
    # provide a specific ref (i.e. ref is missing/empty or literally "HEAD").
    ctx_ref = _github_ref(None)
    if (not ref or ref == "HEAD") and ctx_ref:
        ref = ctx_ref

    commit_sha = await _resolve_ref_to_commit_sha(owner, repo, ref, token)
    tree_sha = await _commit_sha_to_tree_sha(owner, repo, commit_sha, token)

    tree_data = await github_request(
        f"/repos/{owner}/{repo}/git/trees/{tree_sha}",
        params={"recursive": 1},
        token=token,
    )

    return [
        {"path": item["path"], "type": item["type"]}
        for item in tree_data.get("tree", [])
        if item.get("type") == "blob"
    ]
+
+
async def get_file(
    owner: str,
    repo: str,
    path: str,
    token: str | None = None,
    ref: str | None = None,
) -> str:
    """Fetch a file's text content via the contents API.

    The context-bound ref is used only when the caller passed no ref (or
    "HEAD"). Content is base64-decoded then UTF-8 decoded with
    ``errors="replace"``, so binary files come back with replacement
    characters rather than raising.
    """
    ctx_ref = _github_ref(None)
    if (not ref or ref == "HEAD") and ctx_ref:
        ref = ctx_ref

    params = {"ref": ref} if ref else None
    data = await github_request(
        f"/repos/{owner}/{repo}/contents/{path}",
        params=params,
        token=token,
    )
    content_b64 = data.get("content") or ""
    return b64decode(content_b64.encode("utf-8")).decode("utf-8", errors="replace")
+
+
async def put_file(
    owner: str,
    repo: str,
    path: str,
    content: str,
    message: str,
    token: str | None = None,
    branch: str | None = None,
) -> dict[str, Any]:
    """
    Create or update a file in the repository on a specific branch.
    (Retro-compatible signature with older GitPilot versions.)

    GitHub's contents API requires the current blob SHA when updating, so we
    look it up first and treat any HTTPException (typically 404) as "file
    does not exist yet", i.e. a create.

    Returns:
        dict with ``path``, ``commit_sha`` and ``commit_url`` of the commit.
    """
    sha: str | None = None
    try:
        params = {"ref": branch} if branch else None
        existing = await github_request(
            f"/repos/{owner}/{repo}/contents/{path}",
            params=params,
            token=token,
        )
        sha = existing.get("sha")
    except HTTPException:
        # File missing (or unreadable) -> create instead of update.
        sha = None

    body: dict[str, Any] = {
        "message": message,
        "content": b64encode(content.encode("utf-8")).decode("utf-8"),
    }
    if sha:
        body["sha"] = sha
    if branch:
        body["branch"] = branch

    result = await github_request(
        f"/repos/{owner}/{repo}/contents/{path}",
        method="PUT",
        json=body,
        token=token,
    )
    # Defensive: result may be None/non-dict for empty response bodies.
    commit = (result or {}).get("commit", {}) if isinstance(result, dict) else {}
    return {
        "path": path,
        "commit_sha": commit.get("sha", ""),
        "commit_url": commit.get("html_url"),
    }
+
+
async def delete_file(
    owner: str,
    repo: str,
    path: str,
    message: str,
    token: str | None = None,
    branch: str | None = None,
) -> dict[str, Any]:
    """
    Delete a file from the repository on a specific branch.
    (Retro-compatible signature with older GitPilot versions.)

    Unlike ``put_file``, the preliminary SHA lookup is NOT wrapped: a missing
    file propagates GitHub's 404 to the caller.

    Returns:
        dict with ``path``, ``commit_sha`` and ``commit_url`` of the
        deletion commit.
    """
    params = {"ref": branch} if branch else None
    existing = await github_request(
        f"/repos/{owner}/{repo}/contents/{path}",
        params=params,
        token=token,
    )
    sha = existing.get("sha")
    if not sha:
        raise HTTPException(status_code=404, detail=f"File {path} not found or has no SHA")

    body: dict[str, Any] = {"message": message, "sha": sha}
    if branch:
        body["branch"] = branch

    result = await github_request(
        f"/repos/{owner}/{repo}/contents/{path}",
        method="DELETE",
        json=body,
        token=token,
    )
    # Defensive: result may be None/non-dict for empty response bodies.
    commit = (result or {}).get("commit", {}) if isinstance(result, dict) else {}
    return {
        "path": path,
        "commit_sha": commit.get("sha", ""),
        "commit_url": commit.get("html_url"),
    }
+
+
async def get_github_status_summary() -> GithubStatusSummary:
    """Return GitHub connection status for the redesigned UI.

    Only environment tokens are considered here (not the per-request context
    token) — this reports the server's own configuration. ``connected`` is
    True only when ``/user`` answers successfully with a login.
    """
    token = (
        os.environ.get("GITPILOT_GITHUB_TOKEN")
        or os.environ.get("GITHUB_TOKEN")
        or None
    )
    token_configured = bool(token)

    summary = GithubStatusSummary(
        connected=False,
        token_configured=token_configured,
    )

    if not token_configured:
        return summary

    # Try to get authenticated user
    try:
        data = await github_request("/user", token=token)
        if data and "login" in data:
            summary.connected = True
            summary.username = data["login"]
    except Exception:
        # NOTE(review): logs via the root logger; a module-level
        # ``logging.getLogger(__name__)`` would be more consistent.
        logging.debug("GitHub connection check failed", exc_info=True)

    return summary
diff --git a/gitpilot/github_app.py b/gitpilot/github_app.py
new file mode 100644
index 0000000000000000000000000000000000000000..8b134a33785683420ba97ae31d0d1404bc25ee7c
--- /dev/null
+++ b/gitpilot/github_app.py
@@ -0,0 +1,232 @@
+"""
+GitHub App Installation Management - PROPER FIX
+
+This checks which repositories ACTUALLY have the GitHub App installed
+by querying the user's app installations.
+"""
+from __future__ import annotations
+
+import logging
+import os
+from typing import Optional, Dict, Any, Set
+
+import httpx
+
+logger = logging.getLogger("gitpilot.github_app")
+
+# Cache for installed repositories
+_installed_repos_cache: Dict[str, Set[str]] = {}
+_cache_timestamp: Dict[str, float] = {}
+CACHE_TTL_SECONDS = 300 # 5 minutes
+
+
class GitHubAppConfig:
    """Configuration for GitHub App."""

    def __init__(self):
        # NOTE(review): concrete app identifiers are hardcoded as fallback
        # defaults — confirm these should ship rather than failing fast when
        # the env vars are unset.
        self.app_id = os.getenv("GITHUB_APP_ID", "2313985")
        self.client_id = os.getenv("GITHUB_CLIENT_ID", "Iv23litmRp80Z6wmlyRn")
        self.app_slug = os.getenv("GITHUB_APP_SLUG", "gitpilota")

    @property
    def is_configured(self) -> bool:
        """Check if GitHub App is configured (non-empty app id and client id)."""
        return bool(self.app_id and self.client_id)


def get_app_config() -> GitHubAppConfig:
    """Get GitHub App configuration (fresh instance; re-reads env each call)."""
    return GitHubAppConfig()
+
+
async def get_installed_repositories(user_token: str) -> Set[str]:
    """
    Get list of repositories where the GitHub App is installed.

    Uses /user/installations endpoint to get all installations,
    then fetches repositories for each installation. Results are cached
    **per user token** for CACHE_TTL_SECONDS.

    Args:
        user_token: The user's OAuth access token.

    Returns:
        Set of repository full names (e.g., "owner/repo"); empty set on error.
    """
    import hashlib
    import time

    # FIX: the cache must be keyed per token. The previous constant key
    # ("installed_repos") served one user's installation list to every other
    # user of this process — a cross-user data leak.
    cache_key = hashlib.sha256(user_token.encode("utf-8")).hexdigest()

    # Check cache
    if cache_key in _installed_repos_cache:
        if time.time() - _cache_timestamp.get(cache_key, 0) < CACHE_TTL_SECONDS:
            logger.debug(f"Using cached installed repositories ({len(_installed_repos_cache[cache_key])} repos)")
            return _installed_repos_cache[cache_key]

    installed_repos: Set[str] = set()

    headers = {
        "Authorization": f"Bearer {user_token}",
        "Accept": "application/vnd.github+json",
        "User-Agent": "gitpilot",
    }

    try:
        async with httpx.AsyncClient(timeout=10.0) as client:
            # Get user's app installations
            installations_response = await client.get(
                "https://api.github.com/user/installations",
                headers=headers,
            )

            if installations_response.status_code != 200:
                logger.warning(f"Failed to get installations: {installations_response.status_code}")
                return installed_repos

            installations_data = installations_response.json()
            installations = installations_data.get("installations", [])

            logger.info(f"Found {len(installations)} app installations")

            # For each installation, get the repositories it grants access to.
            for installation in installations:
                installation_id = installation.get("id")

                repos_response = await client.get(
                    f"https://api.github.com/user/installations/{installation_id}/repositories",
                    headers=headers,
                )

                if repos_response.status_code != 200:
                    continue

                repositories = repos_response.json().get("repositories", [])
                for repo in repositories:
                    full_name = repo.get("full_name")  # e.g., "owner/repo"
                    if full_name:
                        installed_repos.add(full_name)
                        logger.debug(f"  ✓ App installed on: {full_name}")

        logger.info(f"GitHub App is installed on {len(installed_repos)} repositories")

        # Cache the results under this token's key only.
        _installed_repos_cache[cache_key] = installed_repos
        _cache_timestamp[cache_key] = time.time()

        return installed_repos

    except Exception as e:
        logger.error(f"Error getting installed repositories: {e}")
        return installed_repos
+
+
async def check_repo_write_access(
    owner: str,
    repo: str,
    user_token: Optional[str] = None
) -> Dict[str, Any]:
    """
    Check if user has write access to a repository.

    Checks BOTH:
    1. User has push permissions (repo GET -> ``permissions.push``)
    2. GitHub App is ACTUALLY installed on this specific repository

    Args:
        owner: Repository owner
        repo: Repository name
        user_token: User's OAuth token

    Returns:
        Dict with 'can_write', 'app_installed', 'auth_type', 'reason'.
        Never raises — failures are reported via 'reason'.
    """
    result = {
        "can_write": False,
        "app_installed": False,
        "auth_type": "none",
        "reason": "No token provided",
    }

    if not user_token:
        return result

    full_repo_name = f"{owner}/{repo}"

    try:
        # Step 1: Check user's push permissions
        async with httpx.AsyncClient(timeout=5.0) as client:
            response = await client.get(
                f"https://api.github.com/repos/{owner}/{repo}",
                headers={
                    "Authorization": f"Bearer {user_token}",
                    "Accept": "application/vnd.github+json",
                    "User-Agent": "gitpilot",
                },
            )

            if response.status_code != 200:
                result["reason"] = f"Cannot access repository (status: {response.status_code})"
                logger.warning(f"❌ {full_repo_name}: {result['reason']}")
                return result

            repo_data = response.json()
            permissions = repo_data.get("permissions", {})
            has_push = permissions.get("push", False)

            # Step 2: Check if GitHub App is installed on this repo
            installed_repos = await get_installed_repositories(user_token)
            app_installed = full_repo_name in installed_repos

            # Step 3: Determine write access
            if app_installed:
                # App IS installed - agent can write!
                result["can_write"] = True
                result["app_installed"] = True
                result["auth_type"] = "github_app"
                result["reason"] = "GitHub App installed with write access"
                logger.info(f"✅ {full_repo_name}: App installed (agent can write)")
            elif has_push:
                # User has push but App NOT installed - agent operations will FAIL
                result["can_write"] = False
                result["app_installed"] = False
                result["auth_type"] = "user_only"
                result["reason"] = "User has push access but GitHub App NOT installed (install app for agent operations)"
                logger.warning(f"⚠️ {full_repo_name}: User can push but app NOT installed - agent will get 403 errors")
            else:
                # User has no push and App NOT installed
                result["can_write"] = False
                result["app_installed"] = False
                result["auth_type"] = "read_only"
                result["reason"] = "No push access and GitHub App not installed"
                logger.info(f"ℹ️ {full_repo_name}: Read-only access")

    except Exception as e:
        # Network / parsing failures degrade to "no access" with a reason.
        result["reason"] = f"Error checking access: {str(e)}"
        logger.error(f"❌ Error checking {full_repo_name}: {e}")

    return result
+
+
def clear_cache():
    """Clear all caches (installed-repo sets and their timestamps)."""
    _installed_repos_cache.clear()
    _cache_timestamp.clear()
    logger.info("Cleared installation cache")


async def check_installation_for_repo(
    owner: str,
    repo: str,
    user_token: str
) -> Optional[Dict[str, Any]]:
    """
    Legacy function - kept for compatibility.

    Returns:
        ``{"installed": True, "owner": ..., "repo": ...}`` when the GitHub
        App is installed on the repo, else ``None``.
    """
    result = await check_repo_write_access(owner, repo, user_token)
    if result["app_installed"]:
        return {
            "installed": True,
            "owner": owner,
            "repo": repo,
        }
    return None
\ No newline at end of file
diff --git a/gitpilot/github_issues.py b/gitpilot/github_issues.py
new file mode 100644
index 0000000000000000000000000000000000000000..c78b758cef584c1b468a31f800ab17ed5417a025
--- /dev/null
+++ b/gitpilot/github_issues.py
@@ -0,0 +1,224 @@
+# gitpilot/github_issues.py
+"""GitHub Issues API wrapper.
+
+Provides async functions for creating, reading, updating, and managing
+GitHub issues including labels, assignees, milestones, and comments.
+"""
+from __future__ import annotations
+
+from typing import Any, Dict, List, Optional
+
+from .github_api import github_request
+
+
+# ---------------------------------------------------------------------------
+# Issue CRUD
+# ---------------------------------------------------------------------------
+
async def list_issues(
    owner: str,
    repo: str,
    *,
    state: str = "open",
    labels: Optional[str] = None,
    assignee: Optional[str] = None,
    milestone: Optional[str] = None,
    sort: str = "created",
    direction: str = "desc",
    per_page: int = 30,
    page: int = 1,
    token: Optional[str] = None,
) -> List[Dict[str, Any]]:
    """List issues for a repository with optional filters.

    ``per_page`` is clamped to GitHub's maximum of 100; ``labels`` is a
    comma-separated string as expected by the API. Pull requests (which
    GitHub's issues endpoint also returns) are filtered out of the result.
    """
    params: Dict[str, Any] = {
        "state": state,
        "sort": sort,
        "direction": direction,
        "per_page": min(per_page, 100),
        "page": page,
    }
    if labels:
        params["labels"] = labels
    if assignee:
        params["assignee"] = assignee
    if milestone:
        params["milestone"] = milestone

    data = await github_request(
        f"/repos/{owner}/{repo}/issues",
        params=params,
        token=token,
    )
    # GitHub's issues endpoint also returns PRs; filter them out
    return [i for i in (data or []) if "pull_request" not in i]
+
+
async def get_issue(
    owner: str,
    repo: str,
    issue_number: int,
    token: Optional[str] = None,
) -> Dict[str, Any]:
    """Get a single issue by number (404 propagates via HTTPException)."""
    return await github_request(
        f"/repos/{owner}/{repo}/issues/{issue_number}",
        token=token,
    )


async def create_issue(
    owner: str,
    repo: str,
    title: str,
    *,
    body: Optional[str] = None,
    labels: Optional[List[str]] = None,
    assignees: Optional[List[str]] = None,
    milestone: Optional[int] = None,
    token: Optional[str] = None,
) -> Dict[str, Any]:
    """Create a new issue.

    Only non-None / non-empty optional fields are included in the payload;
    ``milestone`` is the milestone *number*, not its title.
    """
    payload: Dict[str, Any] = {"title": title}
    if body is not None:
        payload["body"] = body
    if labels:
        payload["labels"] = labels
    if assignees:
        payload["assignees"] = assignees
    if milestone is not None:
        payload["milestone"] = milestone

    return await github_request(
        f"/repos/{owner}/{repo}/issues",
        method="POST",
        json=payload,
        token=token,
    )
+
+
async def update_issue(
    owner: str,
    repo: str,
    issue_number: int,
    *,
    title: Optional[str] = None,
    body: Optional[str] = None,
    state: Optional[str] = None,
    labels: Optional[List[str]] = None,
    assignees: Optional[List[str]] = None,
    milestone: Optional[int] = None,
    token: Optional[str] = None,
) -> Dict[str, Any]:
    """Update an existing issue (title, body, state, labels, assignees, milestone).

    Only fields passed as non-None are sent (PATCH semantics). Calling with
    no fields at all is a read-only no-op returning the current issue.
    """
    payload: Dict[str, Any] = {}
    if title is not None:
        payload["title"] = title
    if body is not None:
        payload["body"] = body
    if state is not None:
        payload["state"] = state
    if labels is not None:
        payload["labels"] = labels
    if assignees is not None:
        payload["assignees"] = assignees
    if milestone is not None:
        payload["milestone"] = milestone

    if not payload:
        return await get_issue(owner, repo, issue_number, token=token)

    return await github_request(
        f"/repos/{owner}/{repo}/issues/{issue_number}",
        method="PATCH",
        json=payload,
        token=token,
    )
+
+
+# ---------------------------------------------------------------------------
+# Comments
+# ---------------------------------------------------------------------------
+
async def list_issue_comments(
    owner: str,
    repo: str,
    issue_number: int,
    *,
    per_page: int = 30,
    page: int = 1,
    token: Optional[str] = None,
) -> List[Dict[str, Any]]:
    """List comments on an issue (``per_page`` clamped to GitHub's max 100).

    Empty responses normalise to ``[]``.
    """
    return await github_request(
        f"/repos/{owner}/{repo}/issues/{issue_number}/comments",
        params={"per_page": min(per_page, 100), "page": page},
        token=token,
    ) or []


async def add_issue_comment(
    owner: str,
    repo: str,
    issue_number: int,
    body: str,
    token: Optional[str] = None,
) -> Dict[str, Any]:
    """Add a comment to an issue; returns the created comment payload."""
    return await github_request(
        f"/repos/{owner}/{repo}/issues/{issue_number}/comments",
        method="POST",
        json={"body": body},
        token=token,
    )
+
+
+# ---------------------------------------------------------------------------
+# Labels
+# ---------------------------------------------------------------------------
+
async def add_labels(
    owner: str,
    repo: str,
    issue_number: int,
    labels: List[str],
    token: Optional[str] = None,
) -> List[Dict[str, Any]]:
    """Add labels to an issue; returns the issue's full label list (or [])."""
    return await github_request(
        f"/repos/{owner}/{repo}/issues/{issue_number}/labels",
        method="POST",
        json={"labels": labels},
        token=token,
    ) or []
+
+
async def remove_label(
    owner: str,
    repo: str,
    issue_number: int,
    label: str,
    token: Optional[str] = None,
) -> None:
    """Remove a single label from an issue.

    Args:
        owner: Repository owner login.
        repo: Repository name.
        issue_number: Issue number within the repository.
        label: Label name to remove (may contain spaces/unicode, e.g.
            "help wanted").
        token: Optional explicit GitHub token.
    """
    from urllib.parse import quote

    # FIX: label names may contain spaces or other URL-unsafe characters;
    # interpolating them raw corrupted the request path. Percent-encode the
    # whole segment (safe="" also escapes "/").
    await github_request(
        f"/repos/{owner}/{repo}/issues/{issue_number}/labels/{quote(label, safe='')}",
        method="DELETE",
        token=token,
    )
+
+
+# ---------------------------------------------------------------------------
+# Assignees
+# ---------------------------------------------------------------------------
+
async def set_assignees(
    owner: str,
    repo: str,
    issue_number: int,
    assignees: List[str],
    token: Optional[str] = None,
) -> Dict[str, Any]:
    """Replace assignees on an issue (delegates to ``update_issue``'s PATCH)."""
    return await update_issue(
        owner, repo, issue_number, assignees=assignees, token=token,
    )
+ )
diff --git a/gitpilot/github_oauth.py b/gitpilot/github_oauth.py
new file mode 100644
index 0000000000000000000000000000000000000000..a87718115fc04704250004ac850f27bf7944d009
--- /dev/null
+++ b/gitpilot/github_oauth.py
@@ -0,0 +1,285 @@
+# gitpilot/github_oauth.py
+
+"""GitHub OAuth 2.0 authentication flow implementation (Web + Device Flow)."""
+from __future__ import annotations
+
+import logging
+import os
+import secrets
+import time
+from typing import Optional, Dict, Any
+from urllib.parse import urlencode
+
+import httpx
+from pydantic import BaseModel
+
+# Configure logging
+logger = logging.getLogger("gitpilot.auth")
+
class OAuthConfig(BaseModel):
    """GitHub OAuth App configuration.

    The secret is optional because the Device Flow only needs a
    client_id; the Web Flow additionally requires client_secret.
    """
    # OAuth/GitHub App client ID.
    client_id: str
    # Secret is now optional to allow Device Flow.
    client_secret: Optional[str] = None
+
class OAuthState(BaseModel):
    """OAuth state management (one entry per in-flight Web Flow login)."""
    # Random CSRF token; echoed back by GitHub and validated on callback.
    state: str
    # PKCE verifier. NOTE(review): generated and stored, but never sent
    # to GitHub (no code_challenge is built) — PKCE is not enforced.
    code_verifier: str
    # time.time() at creation; entries older than 600s are purged/rejected.
    timestamp: float
+
class GitHubUser(BaseModel):
    """GitHub user information, mapped from the /user API response."""
    # Required fields — the API always returns these for /user.
    login: str
    id: int
    avatar_url: str
    # Optional profile fields; GitHub returns null when unset.
    name: Optional[str] = None
    email: Optional[str] = None
    bio: Optional[str] = None
    html_url: Optional[str] = None
+
class AuthSession(BaseModel):
    """Authenticated user session (token + resolved user profile)."""
    # OAuth access token used for subsequent GitHub API calls.
    access_token: str
    # Token type reported by GitHub (defaults to "bearer").
    token_type: str = "bearer"
    # Space-separated granted scopes as reported by GitHub.
    scope: str = ""
    user: GitHubUser
+
# In-memory OAuth state storage (For Web Flow).
# NOTE(review): per-process only — states vanish on restart and are not
# shared across workers; multi-replica deployments need external storage.
_oauth_states: dict[str, OAuthState] = {}
+
+
def get_oauth_config() -> OAuthConfig:
    """Load OAuth configuration from environment variables.

    Reads GITHUB_CLIENT_ID (falling back to the built-in app ID) and
    GITHUB_CLIENT_SECRET. An unset or empty secret becomes None, which
    limits the app to the Device Flow.

    NOTE: Ensure "Device Flow" is enabled in your GitHub App settings.
    """
    secret = os.getenv("GITHUB_CLIENT_SECRET", "") or None
    return OAuthConfig(
        client_id=os.getenv("GITHUB_CLIENT_ID", "Iv23litmRp80Z6wmlyRn"),
        client_secret=secret,
    )
+
+# ============================================================================
+# WEB FLOW (Standard OAuth2 - Requires Client Secret)
+# ============================================================================
+
def generate_authorization_url() -> tuple[str, str]:
    """
    Generate GitHub OAuth authorization URL with PKCE (Web Flow).

    NOTE(review): a code_verifier is created and stored, but no
    code_challenge/code_challenge_method parameter is added to the URL,
    and the verifier is never sent during the token exchange — so PKCE
    is not actually in effect. Only the `state` CSRF check protects
    this flow. Confirm whether PKCE should be wired through or removed.

    Returns: (authorization_url, state)
    """
    config = get_oauth_config()

    # 1. State for CSRF protection
    state = secrets.token_urlsafe(32)
    code_verifier = secrets.token_urlsafe(32)

    # 2. Store state keyed by the CSRF token; exchange_code_for_token
    #    pops it and rejects entries older than 10 minutes.
    _oauth_states[state] = OAuthState(
        state=state,
        code_verifier=code_verifier,
        timestamp=time.time(),
    )
    _cleanup_old_states()

    # 3. Build URL ("repo" grants full repository access; "user:email"
    #    lets us read the primary email for the profile).
    params = {
        "client_id": config.client_id,
        "scope": "repo user:email",
        "state": state,
        "allow_signup": "true",
    }

    auth_url = f"https://github.com/login/oauth/authorize?{urlencode(params)}"
    return auth_url, state
+
+
async def exchange_code_for_token(code: str, state: str) -> AuthSession:
    """
    Exchange authorization code for access token (Web Flow).

    Requires GITHUB_CLIENT_SECRET to be set.

    Args:
        code: The authorization code from GitHub's callback redirect.
        state: The CSRF state originally issued by
            generate_authorization_url; consumed (single-use) here.

    Raises:
        ValueError: on missing secret, unknown/expired state, transport
            failure, or an error response from GitHub.
    """
    config = get_oauth_config()

    if not config.client_secret:
        raise ValueError(
            "Web Flow requires GITHUB_CLIENT_SECRET. "
            "Please use Device Flow or configure the secret."
        )

    # 1. Validate state (CSRF). Pop so each state is single-use.
    if state not in _oauth_states:
        # Lazy %-formatting: only rendered if the record is emitted.
        logger.error("State mismatch or expiration. Received: %s", state)
        raise ValueError("Invalid OAuth state. The session may have expired. Please try again.")

    oauth_state = _oauth_states.pop(state)
    if time.time() - oauth_state.timestamp > 600:
        raise ValueError("OAuth interaction timed out.")

    # 2. Exchange the code for an access token.
    async with httpx.AsyncClient(timeout=10.0) as client:
        try:
            token_response = await client.post(
                "https://github.com/login/oauth/access_token",
                data={
                    "client_id": config.client_id,
                    "client_secret": config.client_secret,
                    "code": code,
                },
                headers={"Accept": "application/json"},
            )
            token_response.raise_for_status()
            token_data = token_response.json()
        except httpx.HTTPError as e:
            logger.error("HTTP Error contacting GitHub: %s", e)
            # Chain the original exception so the transport failure is
            # preserved in the traceback (previously it was dropped).
            raise ValueError("Failed to contact GitHub authentication server.") from e

        if "error" in token_data:
            raise ValueError(f"GitHub refused the connection: {token_data.get('error_description')}")

        access_token = token_data.get("access_token")
        if not access_token:
            raise ValueError("No access_token returned from GitHub.")

        # 3. Fetch the user profile while the client is still open.
        user = await _fetch_user_profile(client, access_token)

    return AuthSession(
        access_token=access_token,
        token_type=token_data.get("token_type", "bearer"),
        scope=token_data.get("scope", ""),
        user=user,
    )
+
+# ============================================================================
+# DEVICE FLOW (No Secret Required)
+# ============================================================================
+
async def initiate_device_flow() -> Dict[str, Any]:
    """
    Step 1 of the Device Flow: request a device code from GitHub.

    Returns GitHub's JSON payload unchanged; raises
    httpx.HTTPStatusError on a non-2xx response.
    """
    config = get_oauth_config()
    request_body = {
        "client_id": config.client_id,
        "scope": "repo user:email",
    }

    async with httpx.AsyncClient(timeout=10.0) as client:
        response = await client.post(
            "https://github.com/login/device/code",
            data=request_body,
            headers={"Accept": "application/json"},
        )
        response.raise_for_status()
        return response.json()
+
+
async def poll_device_token(device_code: str) -> Optional[AuthSession]:
    """
    Step 2 of the Device Flow: exchange the device code for a token.

    Intended to be called repeatedly while the user authorizes.

    Returns:
        AuthSession on success; None while authorization is still
        pending ('authorization_pending' / 'slow_down') or when no
        token is present yet.

    Raises:
        ValueError: if the code expired, the user denied access, or
            GitHub reported any other error.
    """
    config = get_oauth_config()

    async with httpx.AsyncClient(timeout=10.0) as client:
        response = await client.post(
            "https://github.com/login/oauth/access_token",
            data={
                "client_id": config.client_id,
                "device_code": device_code,
                "grant_type": "urn:ietf:params:oauth:grant-type:device_code",
            },
            headers={"Accept": "application/json"},
        )
        data = response.json()

        if "error" in data:
            error_code = data["error"]
            # Expected while the user has not finished authorizing.
            if error_code in ["authorization_pending", "slow_down"]:
                return None

            # Anything else is a hard failure.
            desc = data.get("error_description", error_code)
            if error_code == "expired_token":
                raise ValueError("The device code has expired. Please try again.")
            if error_code == "access_denied":
                raise ValueError("Access denied by user.")
            raise ValueError(f"GitHub Auth Error: {desc}")

        access_token = data.get("access_token")
        if not access_token:
            return None

        # Success — resolve the user behind the token with the same client.
        user = await _fetch_user_profile(client, access_token)

    return AuthSession(
        access_token=access_token,
        token_type=data.get("token_type", "bearer"),
        scope=data.get("scope", ""),
        user=user,
    )
+
+# ============================================================================
+# SHARED HELPERS
+# ============================================================================
+
async def _fetch_user_profile(client: httpx.AsyncClient, token: str) -> GitHubUser:
    """Fetch /user from the GitHub API and map it onto GitHubUser.

    Reuses the caller's open client; raises httpx.HTTPStatusError on a
    non-2xx response.
    """
    auth_headers = {
        "Authorization": f"Bearer {token}",
        "Accept": "application/json",
    }
    response = await client.get("https://api.github.com/user", headers=auth_headers)
    response.raise_for_status()
    payload = response.json()

    return GitHubUser(
        login=payload["login"],
        id=payload["id"],
        avatar_url=payload["avatar_url"],
        name=payload.get("name"),
        email=payload.get("email"),
        bio=payload.get("bio"),
        html_url=payload.get("html_url"),
    )
+
+
async def validate_token(access_token: str) -> Optional[GitHubUser]:
    """
    Check whether a stored GitHub access token is still usable.

    Returns the token's user on success, or None for an empty token or
    any failure (network error, revoked/expired token, etc.).
    """
    if not access_token:
        return None

    try:
        async with httpx.AsyncClient(timeout=5.0) as client:
            return await _fetch_user_profile(client, access_token)
    except Exception as exc:
        logger.debug("Token validation failed: %s", exc)
        return None
+
+
def _cleanup_old_states():
    """Purge Web Flow states older than 10 minutes so the in-memory
    store cannot grow without bound."""
    cutoff = time.time() - 600
    stale_keys = [key for key, entry in _oauth_states.items() if entry.timestamp < cutoff]
    for key in stale_keys:
        _oauth_states.pop(key, None)
\ No newline at end of file
diff --git a/gitpilot/github_pulls.py b/gitpilot/github_pulls.py
new file mode 100644
index 0000000000000000000000000000000000000000..e47aea832776f9f13fb028711af948672db39302
--- /dev/null
+++ b/gitpilot/github_pulls.py
@@ -0,0 +1,230 @@
+# gitpilot/github_pulls.py
+"""GitHub Pull Requests API wrapper.
+
+Provides async functions for creating, listing, reviewing, and merging
+pull requests.
+"""
+from __future__ import annotations
+
+from typing import Any, Dict, List, Optional
+
+from .github_api import github_request
+
+
+# ---------------------------------------------------------------------------
+# PR CRUD
+# ---------------------------------------------------------------------------
+
async def list_pull_requests(
    owner: str,
    repo: str,
    *,
    state: str = "open",
    sort: str = "created",
    direction: str = "desc",
    head: Optional[str] = None,
    base: Optional[str] = None,
    per_page: int = 30,
    page: int = 1,
    token: Optional[str] = None,
) -> List[Dict[str, Any]]:
    """List pull requests for a repository.

    head/base filters are only sent when provided; per_page is capped at
    the API maximum of 100. Returns [] when the API yields nothing.
    """
    params: Dict[str, Any] = {
        "state": state,
        "sort": sort,
        "direction": direction,
        "per_page": min(per_page, 100),
        "page": page,
    }
    # Include optional qualifiers only when set.
    optional_filters = {"head": head, "base": base}
    params.update({key: value for key, value in optional_filters.items() if value})

    result = await github_request(
        f"/repos/{owner}/{repo}/pulls",
        params=params,
        token=token,
    )
    return result or []
+
+
async def get_pull_request(
    owner: str,
    repo: str,
    pull_number: int,
    token: Optional[str] = None,
) -> Dict[str, Any]:
    """Fetch a single pull request by number."""
    endpoint = f"/repos/{owner}/{repo}/pulls/{pull_number}"
    return await github_request(endpoint, token=token)
+
+
async def create_pull_request(
    owner: str,
    repo: str,
    *,
    title: str,
    head: str,
    base: str,
    body: Optional[str] = None,
    draft: bool = False,
    token: Optional[str] = None,
) -> Dict[str, Any]:
    """Open a new pull request merging `head` into `base`.

    A None body is omitted from the payload entirely rather than sent
    as null.
    """
    payload: Dict[str, Any] = {
        "title": title,
        "head": head,
        "base": base,
        "draft": draft,
    }
    if body is not None:
        payload["body"] = body

    endpoint = f"/repos/{owner}/{repo}/pulls"
    return await github_request(endpoint, method="POST", json=payload, token=token)
+
+
async def update_pull_request(
    owner: str,
    repo: str,
    pull_number: int,
    *,
    title: Optional[str] = None,
    body: Optional[str] = None,
    state: Optional[str] = None,
    base: Optional[str] = None,
    token: Optional[str] = None,
) -> Dict[str, Any]:
    """Patch selected fields of an existing pull request.

    Only non-None arguments are sent. When there is nothing to change,
    the PR is simply fetched and returned (no PATCH is issued).
    """
    candidates = {"title": title, "body": body, "state": state, "base": base}
    payload: Dict[str, Any] = {k: v for k, v in candidates.items() if v is not None}

    if not payload:
        return await get_pull_request(owner, repo, pull_number, token=token)

    return await github_request(
        f"/repos/{owner}/{repo}/pulls/{pull_number}",
        method="PATCH",
        json=payload,
        token=token,
    )
+
+
+# ---------------------------------------------------------------------------
+# Merge
+# ---------------------------------------------------------------------------
+
async def merge_pull_request(
    owner: str,
    repo: str,
    pull_number: int,
    *,
    commit_title: Optional[str] = None,
    commit_message: Optional[str] = None,
    merge_method: str = "merge",
    token: Optional[str] = None,
) -> Dict[str, Any]:
    """Merge a pull request.

    merge_method: one of 'merge', 'squash', 'rebase'. Falsy title and
    message are omitted so GitHub generates its defaults.
    """
    payload: Dict[str, Any] = {"merge_method": merge_method}
    for key, value in (("commit_title", commit_title), ("commit_message", commit_message)):
        if value:
            payload[key] = value

    return await github_request(
        f"/repos/{owner}/{repo}/pulls/{pull_number}/merge",
        method="PUT",
        json=payload,
        token=token,
    )
+
+
+# ---------------------------------------------------------------------------
+# PR Files & Diff
+# ---------------------------------------------------------------------------
+
async def list_pr_files(
    owner: str,
    repo: str,
    pull_number: int,
    *,
    per_page: int = 100,
    page: int = 1,
    token: Optional[str] = None,
) -> List[Dict[str, Any]]:
    """Return one page of files changed by a pull request."""
    query = {"per_page": min(per_page, 100), "page": page}
    files = await github_request(
        f"/repos/{owner}/{repo}/pulls/{pull_number}/files",
        params=query,
        token=token,
    )
    return files or []
+
+
+# ---------------------------------------------------------------------------
+# PR Reviews & Comments
+# ---------------------------------------------------------------------------
+
async def list_pr_reviews(
    owner: str,
    repo: str,
    pull_number: int,
    token: Optional[str] = None,
) -> List[Dict[str, Any]]:
    """Return all reviews submitted on a pull request."""
    reviews = await github_request(
        f"/repos/{owner}/{repo}/pulls/{pull_number}/reviews",
        token=token,
    )
    return reviews or []
+
+
async def create_pr_review(
    owner: str,
    repo: str,
    pull_number: int,
    *,
    body: str,
    event: str = "COMMENT",
    token: Optional[str] = None,
) -> Dict[str, Any]:
    """Submit a review on a pull request.

    event: one of 'APPROVE', 'REQUEST_CHANGES', 'COMMENT'.
    """
    endpoint = f"/repos/{owner}/{repo}/pulls/{pull_number}/reviews"
    review_payload = {"body": body, "event": event}
    return await github_request(endpoint, method="POST", json=review_payload, token=token)
+
+
async def add_pr_comment(
    owner: str,
    repo: str,
    pull_number: int,
    body: str,
    token: Optional[str] = None,
) -> Dict[str, Any]:
    """Post a general (non-review) comment on a pull request.

    Pull requests are issues under the hood, so this goes through the
    issues comments endpoint.
    """
    endpoint = f"/repos/{owner}/{repo}/issues/{pull_number}/comments"
    return await github_request(endpoint, method="POST", json={"body": body}, token=token)
diff --git a/gitpilot/github_search.py b/gitpilot/github_search.py
new file mode 100644
index 0000000000000000000000000000000000000000..0e3f9de16320b4127bc9d13f22f7d2987b03bbb4
--- /dev/null
+++ b/gitpilot/github_search.py
@@ -0,0 +1,157 @@
+# gitpilot/github_search.py
+"""GitHub Search API wrapper.
+
+Provides async functions for searching code, repositories, issues, and users
+via GitHub's Search API.
+"""
+from __future__ import annotations
+
+from typing import Any, Dict, List, Optional
+
+from .github_api import github_request
+
+
async def search_code(
    query: str,
    *,
    owner: Optional[str] = None,
    repo: Optional[str] = None,
    language: Optional[str] = None,
    path: Optional[str] = None,
    per_page: int = 30,
    page: int = 1,
    token: Optional[str] = None,
) -> Dict[str, Any]:
    """Search for code across GitHub repositories.

    Search qualifiers (repo:/user:/language:/path:) are appended to the
    free-text query; owner+repo narrows to a single repository, owner
    alone to one user's repositories.

    Returns: {total_count, incomplete_results, items[...]}.
    """
    qualifiers: List[str] = []
    if owner and repo:
        qualifiers.append(f"repo:{owner}/{repo}")
    elif owner:
        qualifiers.append(f"user:{owner}")
    if language:
        qualifiers.append(f"language:{language}")
    if path:
        qualifiers.append(f"path:{path}")

    q = " ".join([query, *qualifiers])

    result = await github_request(
        "/search/code",
        params={"q": q, "per_page": min(per_page, 100), "page": page},
        token=token,
    )
    return _normalise_search_result(result)
+
+
async def search_issues(
    query: str,
    *,
    owner: Optional[str] = None,
    repo: Optional[str] = None,
    state: Optional[str] = None,
    label: Optional[str] = None,
    is_pr: Optional[bool] = None,
    per_page: int = 30,
    page: int = 1,
    token: Optional[str] = None,
) -> Dict[str, Any]:
    """Search issues and pull requests.

    is_pr=True limits results to PRs, is_pr=False to issues, and None
    searches both.
    """
    qualifiers: List[str] = []
    if owner and repo:
        qualifiers.append(f"repo:{owner}/{repo}")
    elif owner:
        qualifiers.append(f"user:{owner}")
    if state:
        qualifiers.append(f"state:{state}")
    if label:
        qualifiers.append(f"label:{label}")
    if is_pr is not None:
        qualifiers.append("type:pr" if is_pr else "type:issue")

    q = " ".join([query, *qualifiers])

    result = await github_request(
        "/search/issues",
        params={"q": q, "per_page": min(per_page, 100), "page": page},
        token=token,
    )
    return _normalise_search_result(result)
+
+
async def search_repositories(
    query: str,
    *,
    language: Optional[str] = None,
    sort: Optional[str] = None,
    order: str = "desc",
    per_page: int = 30,
    page: int = 1,
    token: Optional[str] = None,
) -> Dict[str, Any]:
    """Search for repositories.

    `sort` (e.g. stars/forks) is only sent when provided; GitHub then
    applies `order` to it.
    """
    q = f"{query} language:{language}" if language else query

    params: Dict[str, Any] = {
        "q": q,
        "per_page": min(per_page, 100),
        "page": page,
        "order": order,
    }
    if sort:
        params["sort"] = sort

    result = await github_request("/search/repositories", params=params, token=token)
    return _normalise_search_result(result)
+
+
async def search_users(
    query: str,
    *,
    type_filter: Optional[str] = None,
    location: Optional[str] = None,
    language: Optional[str] = None,
    per_page: int = 30,
    page: int = 1,
    token: Optional[str] = None,
) -> Dict[str, Any]:
    """Search for users and organizations.

    type_filter: 'user' or 'org' to narrow results.
    """
    qualifiers = [
        f"{name}:{value}"
        for name, value in (
            ("type", type_filter),
            ("location", location),
            ("language", language),
        )
        if value
    ]
    q = " ".join([query, *qualifiers])

    result = await github_request(
        "/search/users",
        params={"q": q, "per_page": min(per_page, 100), "page": page},
        token=token,
    )
    return _normalise_search_result(result)
+
+
+def _normalise_search_result(result: Any) -> Dict[str, Any]:
+ """Ensure consistent shape even if GitHub returns None."""
+ if not isinstance(result, dict):
+ return {"total_count": 0, "incomplete_results": False, "items": []}
+ return {
+ "total_count": result.get("total_count", 0),
+ "incomplete_results": result.get("incomplete_results", False),
+ "items": result.get("items", []),
+ }
diff --git a/gitpilot/headless.py b/gitpilot/headless.py
new file mode 100644
index 0000000000000000000000000000000000000000..72f4be6e531c4a8cb6f370f29976725d21588454
--- /dev/null
+++ b/gitpilot/headless.py
@@ -0,0 +1,96 @@
+# gitpilot/headless.py
+"""Headless execution mode for CI/CD pipelines.
+
+Runs GitPilot non-interactively from the command line, GitHub Actions,
+or GitLab CI, returning structured JSON output.
+
+Usage examples::
+
+ gitpilot run --headless -r owner/repo -m "fix the login bug"
+ gitpilot run --headless -r owner/repo --from-pr 42
+ echo "add tests for auth module" | gitpilot run --headless -r owner/repo
+"""
+from __future__ import annotations
+
+import json
+import logging
+from dataclasses import dataclass, field
+from typing import Any, Dict, Optional
+
+from .agent_tools import set_repo_context
+from .agentic import dispatch_request
+
+logger = logging.getLogger(__name__)
+
+
@dataclass
class HeadlessResult:
    """Result of a headless execution.

    Attributes:
        success: True when the dispatched request completed.
        output: Human-readable result text (empty string on failure).
        session_id: Session identifier, when one was created.
        pr_url: URL of a created pull request, if any.
        plan: Structured plan produced by the agent, if any.
        error: Error description when success is False.
    """

    success: bool
    output: str
    session_id: Optional[str] = None
    pr_url: Optional[str] = None
    plan: Optional[Dict[str, Any]] = None
    error: Optional[str] = None

    def to_json(self) -> str:
        """Serialize the result as pretty-printed JSON for CI consumers.

        Includes `plan` — previously declared on the dataclass but
        silently omitted from the serialized output (additive change,
        existing keys are unchanged).
        """
        return json.dumps(
            {
                "success": self.success,
                "output": self.output,
                "session_id": self.session_id,
                "pr_url": self.pr_url,
                "plan": self.plan,
                "error": self.error,
            },
            indent=2,
        )
+
+
async def run_headless(
    repo_full_name: str,
    message: str,
    token: str,
    branch: Optional[str] = None,
    auto_pr: bool = False,
    from_pr: Optional[int] = None,
) -> HeadlessResult:
    """Execute a request non-interactively.

    Args:
        repo_full_name: "owner/repo" slug; split on the first "/".
        message: Natural-language request passed to the agent.
        token: GitHub token used for repo context and API calls.
        branch: Working branch; repo context falls back to "main" when None.
        auto_pr: NOTE(review): accepted but unused in this body — confirm
            whether PR creation is handled downstream or this is dead.
        from_pr: If set, the PR's title/body are prefixed to the message.

    Returns:
        HeadlessResult with success/output, or error details on failure.
    """
    owner, repo = repo_full_name.split("/", 1)
    set_repo_context(owner, repo, token=token, branch=branch or "main")

    # If from_pr, fetch PR context (best effort — a fetch failure only
    # logs a warning and the original message is used unchanged).
    if from_pr:
        try:
            from .github_pulls import get_pull_request

            pr = await get_pull_request(owner, repo, from_pr, token=token)
            message = (
                f"PR #{from_pr}: {pr.get('title', '')}\n"
                f"{pr.get('body', '')}\n\n"
                f"User request: {message}"
            )
        except Exception as e:
            logger.warning("Could not fetch PR #%s: %s", from_pr, e)

    try:
        result = await dispatch_request(
            user_request=message,
            repo_full_name=repo_full_name,
            token=token,
            # NOTE(review): receives the raw `branch` (possibly None) while
            # set_repo_context above defaulted to "main" — confirm
            # dispatch_request applies the same default.
            branch_name=branch,
        )

        # dispatch_request may return a dict or any other value; only a
        # dict's "result" key is treated as the output text.
        output = result.get("result", "") if isinstance(result, dict) else str(result)

        return HeadlessResult(
            success=True,
            output=output,
        )
    except Exception as e:
        logger.exception("Headless execution failed")
        return HeadlessResult(
            success=False,
            output="",
            error=str(e),
        )
diff --git a/gitpilot/hf_space_tools.py b/gitpilot/hf_space_tools.py
new file mode 100644
index 0000000000000000000000000000000000000000..c9d1dc2f39e70d5f5d63a439558a1e4f649cd471
--- /dev/null
+++ b/gitpilot/hf_space_tools.py
@@ -0,0 +1,407 @@
+"""HuggingFace Space management tools for GitPilot.
+
+Provides CrewAI-compatible tools for:
+- Cloning HF Spaces
+- Analyzing Space health (SDK, deps, dead patterns)
+- Generating fixes via OllaBridge LLM
+- Pushing fixes to HF repos
+- Managing ZeroGPU hardware allocation
+
+Designed to work with GitPilot's multi-agent architecture
+and OllaBridge Cloud as the LLM backend.
+"""
+from __future__ import annotations
+
+import json
+import logging
+import os
+import re
+import subprocess
+import tempfile
+from pathlib import Path
+from typing import Any
+
+logger = logging.getLogger(__name__)
+
+# Dead/deprecated patterns to scan for
+DEAD_PATTERNS: list[tuple[str, str]] = [
+ (r'st\.secrets\[.*BACKEND_SERVER.*\]', 'Dead backend server dependency'),
+ (r'api-inference\.huggingface\.co', 'Deprecated HF Inference API endpoint'),
+ (r'from\s+dalle_mini', 'Deprecated dalle-mini imports'),
+ (r'from\s+min_dalle', 'Deprecated min-dalle imports'),
+ (r'from\s+transformers\.file_utils', 'Removed transformers.file_utils'),
+ (r'jax\.experimental\.PartitionSpec', 'Moved JAX PartitionSpec API'),
+ (r'gr\.inputs\.', 'Deprecated Gradio inputs API'),
+ (r'gr\.outputs\.', 'Deprecated Gradio outputs API'),
+]
+
+
def clone_hf_space(space_id: str, token: str | None = None) -> dict[str, Any]:
    """Clone a HuggingFace Space repository to a temp directory.

    Args:
        space_id: Full Space ID (e.g. 'user/space-name').
        token: Optional HF token for private repos. When given it is
            embedded in the remote URL (so later pushes authenticate),
            but it is redacted from any error text returned here —
            git echoes the remote URL on failure, which previously
            leaked the credential into logs/responses.

    Returns:
        Dict with 'path' (str), 'success' (bool), 'error' (str|None).
    """
    tmpdir = tempfile.mkdtemp(prefix="gitpilot_hf_")
    name = space_id.split("/")[-1]
    repo_dir = os.path.join(tmpdir, name)

    clone_url = f"https://huggingface.co/spaces/{space_id}"
    if token:
        clone_url = f"https://user:{token}@huggingface.co/spaces/{space_id}"

    def _redact(text: str) -> str:
        # Strip the token from any message that may contain the URL.
        return text.replace(token, "***") if token else text

    try:
        result = subprocess.run(
            ["git", "clone", "--depth=1", clone_url, repo_dir],
            capture_output=True, text=True, timeout=120,
        )
        if result.returncode != 0:
            return {"path": "", "success": False, "error": _redact(result.stderr.strip())}
        return {"path": repo_dir, "success": True, "error": None}
    except subprocess.TimeoutExpired:
        return {"path": "", "success": False, "error": "Clone timed out (120s)"}
    except Exception as exc:
        return {"path": "", "success": False, "error": _redact(str(exc))}
+
+
def analyze_hf_space(repo_dir: str) -> dict[str, Any]:
    """Analyze a cloned HuggingFace Space for issues.

    Checks README front matter (sdk / app_file), entry-point existence,
    requirements.txt, deprecated/dead code patterns, and GPU indicators.

    Returns a diagnosis dict with:
        sdk, app_file, issues, dead_patterns, needs_gpu,
        needs_rebuild, severity, recommendations, files.
    """
    path = Path(repo_dir)
    diag: dict[str, Any] = {
        "sdk": "unknown",
        "app_file": "app.py",
        "issues": [],
        "dead_patterns": [],
        "needs_gpu": False,
        "needs_rebuild": False,
        "severity": "info",
        "recommendations": [],
        "files": [],
    }

    # Parse README front matter for sdk / app_file declarations.
    readme = path / "README.md"
    if readme.exists():
        text = readme.read_text(errors="replace")
        sdk_match = re.search(r'^sdk:\s*(\S+)', text, re.MULTILINE)
        app_match = re.search(r'^app_file:\s*(\S+)', text, re.MULTILINE)
        if sdk_match:
            diag["sdk"] = sdk_match.group(1)
        if app_match:
            diag["app_file"] = app_match.group(1)
    else:
        diag["issues"].append("Missing README.md")

    # The declared entry point must exist; missing means the Space can't boot.
    app_path = path / diag["app_file"]
    if not app_path.exists():
        diag["issues"].append(f"app_file '{diag['app_file']}' does not exist")
        diag["severity"] = "critical"
        diag["needs_rebuild"] = True

    # requirements.txt must exist and be non-empty.
    req = path / "requirements.txt"
    if not req.exists():
        diag["issues"].append("Missing requirements.txt")
    elif not req.read_text(errors="replace").strip():
        diag["issues"].append("Empty requirements.txt")

    # Single pass over .py files for both dead patterns and GPU indicators
    # (previously the tree was walked twice; results are identical).
    gpu_indicators = [
        "torch", "diffusers", "transformers", "accelerate",
        "spaces.GPU", "@spaces.GPU", "cuda", ".to(\"cuda\")",
    ]
    for py_file in path.rglob("*.py"):
        try:
            content = py_file.read_text(errors="replace")
        except OSError:
            continue
        for pattern, desc in DEAD_PATTERNS:
            if re.search(pattern, content):
                rel = str(py_file.relative_to(path))
                diag["dead_patterns"].append(f"{rel}: {desc}")
                diag["issues"].append(f"Dead pattern in {rel}: {desc}")
                diag["severity"] = "critical"
                diag["needs_rebuild"] = True
        # NOTE: plain substring match — "cuda" inside a comment also counts.
        if not diag["needs_gpu"] and any(ind in content for ind in gpu_indicators):
            diag["needs_gpu"] = True

    # File listing (skips git internals).
    for p in sorted(path.rglob("*")):
        if p.is_file() and ".git" not in p.parts:
            diag["files"].append(str(p.relative_to(path)))

    # Build recommendations from the collected findings.
    if diag["needs_rebuild"]:
        diag["recommendations"].append("Rebuild app.py with modern dependencies")
    if diag["sdk"] == "streamlit":
        diag["recommendations"].append("Consider migrating to Gradio SDK")
    if diag["dead_patterns"]:
        diag["recommendations"].append("Remove deprecated API calls")
    if diag["needs_gpu"]:
        diag["recommendations"].append("Request ZeroGPU (zero-a10g) hardware")

    return diag
+
+
def generate_space_fix(
    space_id: str,
    diagnosis: dict[str, Any],
    app_content: str = "",
    ollabridge_url: str | None = None,
    ollabridge_model: str = "qwen2.5:1.5b",
    ollabridge_key: str | None = None,
) -> dict[str, Any]:
    """Generate a fix for a broken HF Space.

    If ollabridge_url is provided, attempts an LLM-generated fix first;
    any failure along that path (transport error, non-200 status, or
    unparsable LLM output) silently falls back to the template fix, so
    this function always returns a usable result.

    Returns dict with 'files' (dict of filename->content), 'explanation' (str).
    """
    # Try LLM-powered fix via OllaBridge (OpenAI-compatible chat endpoint).
    if ollabridge_url:
        try:
            import httpx
            prompt = _build_repair_prompt(space_id, diagnosis, app_content)
            payload = {
                "model": ollabridge_model,
                "messages": [
                    {"role": "system", "content": "You are an expert HuggingFace Spaces developer. Output valid JSON."},
                    {"role": "user", "content": prompt},
                ],
                # Low temperature: prefer deterministic, conservative fixes.
                "temperature": 0.3,
                "max_tokens": 4096,
            }
            headers = {"Content-Type": "application/json"}
            if ollabridge_key:
                headers["Authorization"] = f"Bearer {ollabridge_key}"

            resp = httpx.post(
                f"{ollabridge_url.rstrip('/')}/v1/chat/completions",
                json=payload, headers=headers, timeout=120.0,
            )
            # Non-200 responses are not raised — they drop through to the
            # template fallback below.
            if resp.status_code == 200:
                content = resp.json()["choices"][0]["message"]["content"]
                fix = _parse_llm_fix(content)
                if fix:
                    return fix
        except Exception as exc:
            logger.warning("OllaBridge fix generation failed: %s", exc)

    # Template fallback (also used when the LLM path returned nothing usable).
    return _generate_template_fix(space_id, diagnosis)
+
+
def push_space_fix(
    repo_dir: str,
    fix: dict[str, Any],
    commit_message: str = "fix: auto-repair by GitPilot + RepoGuardian",
) -> dict[str, Any]:
    """Write the fix files into the cloned repo and push them upstream.

    Args:
        repo_dir: Path to the cloned Space repo.
        fix: Fix dict with a 'files' mapping (filename -> content).
        commit_message: Git commit message.

    Returns:
        Dict with 'success' (bool), 'changed_files' (list), 'error' (str|None).
    """
    root = Path(repo_dir)

    # Materialize every fix file, creating parent directories as needed.
    changed: list[str] = []
    for filename, content in fix.get("files", {}).items():
        target = root / filename
        target.parent.mkdir(parents=True, exist_ok=True)
        target.write_text(content)
        changed.append(filename)

    if not changed:
        return {"success": False, "changed_files": [], "error": "No files to write"}

    # Stage, commit, and push; stop at the first failing git command.
    for cmd in (
        ["git", "add", "-A"],
        ["git", "commit", "-m", commit_message],
        ["git", "push", "origin", "main"],
    ):
        proc = subprocess.run(cmd, cwd=repo_dir, capture_output=True, text=True, timeout=60)
        if proc.returncode != 0:
            return {
                "success": False,
                "changed_files": changed,
                "error": f"Command '{' '.join(cmd)}' failed: {proc.stderr.strip()}",
            }

    return {"success": True, "changed_files": changed, "error": None}
+
+
def manage_space_hardware(
    space_id: str,
    token: str,
    hardware: str = "zero-a10g",
    auto_free: bool = True,
) -> dict[str, Any]:
    """Request hardware for a HuggingFace Space.

    If the direct request fails with a quota-style error and auto_free
    is True, looks for another paused/sleeping ZeroGPU Space in the same
    namespace, downgrades it to cpu-basic, and retries.

    Returns dict with 'success', 'hardware', 'freed_slot', 'error'.
    """
    try:
        from huggingface_hub import HfApi
        api = HfApi(token=token)

        # Try direct request first.
        try:
            api.request_space_hardware(space_id, hardware)
            return {"success": True, "hardware": hardware, "freed_slot": None, "error": None}
        except Exception as exc:
            # NOTE(review): "limited to" is a substring match on the error
            # message to detect the quota case — fragile; confirm against
            # the hub's actual error text/types.
            if "limited to" not in str(exc).lower():
                return {"success": False, "hardware": None, "freed_slot": None, "error": str(exc)}

        if not auto_free:
            return {"success": False, "hardware": None, "freed_slot": None, "error": "Slots full, auto_free disabled"}

        # Find and downgrade a paused Space in the same namespace to free
        # a ZeroGPU slot; per-Space failures are ignored and the scan
        # continues with the next candidate.
        namespace = space_id.split("/")[0]
        spaces = list(api.list_spaces(author=namespace))
        for s in spaces:
            try:
                info = api.space_info(s.id)
                if not info.runtime:
                    continue
                raw_hw = info.runtime.raw.get("hardware", {})
                req_hw = raw_hw.get("requested", "")
                stage = info.runtime.stage
                # Only reclaim ZeroGPU Spaces that are idle, and never the
                # Space we are trying to upgrade.
                if "zero" in str(req_hw).lower() and stage in ("PAUSED", "SLEEPING") and s.id != space_id:
                    api.request_space_hardware(s.id, "cpu-basic")
                    # Retry the original request
                    api.request_space_hardware(space_id, hardware)
                    return {
                        "success": True,
                        "hardware": hardware,
                        "freed_slot": s.id,
                        "error": None,
                    }
            except Exception:
                continue

        return {"success": False, "hardware": None, "freed_slot": None, "error": "No paused Spaces to free"}

    except ImportError:
        return {"success": False, "hardware": None, "freed_slot": None, "error": "huggingface_hub not installed"}
+
+
+def get_space_runtime_info(space_id: str, token: str | None = None) -> dict[str, Any]:
+ """Fetch runtime info for a HuggingFace Space.
+
+ Returns dict with sdk, stage, hardware, domain, etc.
+ """
+ try:
+ from huggingface_hub import HfApi
+ api = HfApi(token=token)
+ info = api.space_info(space_id)
+ result: dict[str, Any] = {
+ "space_id": space_id,
+ "sdk": info.sdk,
+ "success": True,
+ }
+ if info.runtime:
+ result["stage"] = info.runtime.stage
+ hw = info.runtime.raw.get("hardware", {})
+ result["current_hardware"] = hw.get("current")
+ result["requested_hardware"] = hw.get("requested")
+ domains = info.runtime.raw.get("domains", [])
+ if domains:
+ result["domain"] = domains[0].get("domain")
+ return result
+ except Exception as exc:
+ return {"space_id": space_id, "success": False, "error": str(exc)}
+
+
+# ---- Internal helpers ----
+
def _build_repair_prompt(space_id: str, diagnosis: dict[str, Any], app_content: str) -> str:
    """Build the LLM repair prompt from a Space diagnosis.

    app_content is truncated to 5000 characters to bound prompt size.
    NOTE(review): the prompt header says "first 150 lines" but the
    truncation is by character count — the texts may disagree.
    chr(10) is '\\n', used because f-string expressions could not
    contain backslashes before Python 3.12.
    """
    return f"""A HuggingFace Space is broken and needs repair.

## Space: {space_id}
- SDK: {diagnosis.get('sdk', 'unknown')}
- app_file: {diagnosis.get('app_file', 'app.py')}

## Issues
{chr(10).join('- ' + i for i in diagnosis.get('issues', []))}

## Dead Patterns
{chr(10).join('- ' + p for p in diagnosis.get('dead_patterns', []))}

## Current app.py (first 150 lines)
{app_content[:5000]}

Generate a complete fix as JSON:
{{
  "files": {{
    "app.py": "",
    "requirements.txt": "",
    "README.md": ""
  }},
  "explanation": ""
}}"""
+
+
+def _parse_llm_fix(response: str) -> dict[str, Any] | None:
+ try:
+ return json.loads(response)
+ except json.JSONDecodeError:
+ pass
+ match = re.search(r'```(?:json)?\s*\n(.+?)\n```', response, re.DOTALL)
+ if match:
+ try:
+ return json.loads(match.group(1))
+ except json.JSONDecodeError:
+ pass
+ return None
+
+
def _generate_template_fix(space_id: str, diagnosis: dict[str, Any]) -> dict[str, Any]:
    """Fallback fix: replace the app with a minimal known-good Gradio app.

    Picks a GPU-aware template (guarded spaces.GPU wrapping plus torch
    in requirements) when the diagnosis flagged GPU indicators,
    otherwise a plain CPU template. Also regenerates README front
    matter declaring sdk: gradio.

    Returns {'files': {app.py, requirements.txt, README.md}, 'explanation'}.
    """
    name = space_id.split("/")[-1]
    # Derive a display title from the repo name ("my-app" -> "My App").
    title = name.replace("-", " ").replace("_", " ").title()
    needs_gpu = diagnosis.get("needs_gpu", False)

    if needs_gpu:
        app = f'''"""\n{title} - Auto-repaired by GitPilot + RepoGuardian\n"""\nimport gradio as gr\nimport numpy as np\n\ntry:\n    import spaces\n    GPU = True\nexcept ImportError:\n    GPU = False\n\ndef process(prompt: str, progress=gr.Progress(track_tqdm=True)):\n    if not prompt.strip():\n        raise gr.Error("Please enter a prompt.")\n    return f"Output for: {{prompt}}"\n\nif GPU:\n    process = spaces.GPU(process)\n\nwith gr.Blocks(theme=gr.themes.Soft(), title="{title}") as demo:\n    gr.Markdown("# {title}")\n    with gr.Row():\n        inp = gr.Textbox(label="Prompt", lines=3)\n        out = gr.Textbox(label="Output", lines=5)\n    gr.Button("Generate", variant="primary").click(process, [inp], [out])\n\nif __name__ == "__main__":\n    demo.launch()\n'''
        reqs = "gradio>=4.0.0\ntorch>=2.0.0\nnumpy>=1.24.0\n"
    else:
        app = f'''"""\n{title} - Auto-repaired by GitPilot + RepoGuardian\n"""\nimport gradio as gr\n\ndef process(text: str):\n    if not text.strip():\n        raise gr.Error("Please enter text.")\n    return f"Processed: {{text}}"\n\nwith gr.Blocks(theme=gr.themes.Soft(), title="{title}") as demo:\n    gr.Markdown("# {title}")\n    with gr.Row():\n        inp = gr.Textbox(label="Input", lines=3)\n        out = gr.Textbox(label="Output", lines=3)\n    gr.Button("Process", variant="primary").click(process, [inp], [out])\n\nif __name__ == "__main__":\n    demo.launch()\n'''
        reqs = "gradio>=4.0.0\n"

    readme = f"""---\ntitle: {title}\nemoji: \U0001f680\ncolorFrom: blue\ncolorTo: purple\nsdk: gradio\nsdk_version: 5.23.0\napp_file: app.py\npinned: false\nlicense: apache-2.0\n---\n\n# {title}\n\nAuto-repaired by [GitPilot](https://github.com/ruslanmv/gitpilot) + [RepoGuardian](https://github.com/ruslanmv/RepoGuardian).\n"""

    return {
        "files": {"app.py": app, "requirements.txt": reqs, "README.md": readme},
        "explanation": "Template fix: replaced broken app with working Gradio placeholder",
    }
diff --git a/gitpilot/hooks.py b/gitpilot/hooks.py
new file mode 100644
index 0000000000000000000000000000000000000000..18027fbeea9e0dc54a3d1794b6071de4d6f225fc
--- /dev/null
+++ b/gitpilot/hooks.py
@@ -0,0 +1,195 @@
+# gitpilot/hooks.py
+"""Event hook system for workflow automation.
+
+Allows users to register shell commands or Python callables that fire
+on specific lifecycle events. Hooks are defined in .gitpilot/hooks.json
+or programmatically via the API.
+
+Events
+------
+- session_start Session begins
+- session_end Session ends
+- pre_tool_use Before a tool runs (blocking hooks can cancel)
+- post_tool_use After a tool completes
+- pre_edit Before file edit (blocking hooks can cancel)
+- post_edit After file edit
+- pre_commit Before git commit (blocking hooks can cancel)
+- post_commit After git commit
+- pre_push Before git push (blocking hooks can cancel)
+- user_message When the user sends a message
+"""
+from __future__ import annotations
+
+import asyncio
+import json
+import logging
+import os
+from dataclasses import dataclass, field
+from enum import Enum
+from pathlib import Path
+from typing import Any, Callable, Dict, List, Optional
+
+logger = logging.getLogger(__name__)
+
+
+class HookEvent(str, Enum):
+    """Lifecycle events hooks can subscribe to (values match hooks.json)."""
+
+    SESSION_START = "session_start"
+    SESSION_END = "session_end"
+    PRE_TOOL_USE = "pre_tool_use"
+    POST_TOOL_USE = "post_tool_use"
+    PRE_EDIT = "pre_edit"
+    POST_EDIT = "post_edit"
+    PRE_COMMIT = "pre_commit"
+    POST_COMMIT = "post_commit"
+    PRE_PUSH = "pre_push"
+    USER_MESSAGE = "user_message"
+
+
+@dataclass
+class HookDefinition:
+    """One registered hook.
+
+    Either ``command`` (shell string) or ``handler`` (in-process callable
+    taking the context dict) should be set; ``command`` takes precedence
+    when both are present (see ``HookManager._run_hook``).
+    """
+
+    event: HookEvent
+    name: str
+    command: Optional[str] = None   # shell command run via asyncio subprocess
+    handler: Optional[Callable] = None  # callable(context) -> output
+    blocking: bool = False          # failure cancels remaining hooks for the event
+    timeout: int = 30               # seconds before a command hook is killed
+
+
+@dataclass
+class HookResult:
+    """Outcome of a single hook invocation."""
+
+    hook_name: str
+    event: HookEvent
+    success: bool
+    output: str = ""       # captured stdout/stderr, handler return value, or error text
+    blocked: bool = False  # True when a failed blocking hook cancelled the event
+
+
+class HookManager:
+ """Register and fire lifecycle hooks."""
+
+ def __init__(self):
+ self._hooks: Dict[HookEvent, List[HookDefinition]] = {
+ e: [] for e in HookEvent
+ }
+
+ def register(self, hook: HookDefinition):
+ self._hooks[hook.event].append(hook)
+ logger.info("Registered hook '%s' for event '%s'", hook.name, hook.event)
+
+ def unregister(self, event: HookEvent, name: str):
+ self._hooks[event] = [h for h in self._hooks[event] if h.name != name]
+
+ def list_hooks(self) -> List[Dict[str, Any]]:
+ result = []
+ for event, hooks in self._hooks.items():
+ for h in hooks:
+ result.append({
+ "event": event.value,
+ "name": h.name,
+ "command": h.command,
+ "blocking": h.blocking,
+ "timeout": h.timeout,
+ })
+ return result
+
+ def load_from_file(self, path: Path):
+ """Load hooks from a JSON config file.
+
+ Format::
+
+ [
+ {"event": "post_edit", "name": "lint", "command": "ruff check ."},
+ {"event": "pre_commit", "name": "test", "command": "pytest", "blocking": true}
+ ]
+ """
+ if not path.exists():
+ return
+ try:
+ hooks = json.loads(path.read_text())
+ for h in hooks:
+ self.register(HookDefinition(
+ event=HookEvent(h["event"]),
+ name=h["name"],
+ command=h.get("command"),
+ blocking=h.get("blocking", False),
+ timeout=h.get("timeout", 30),
+ ))
+ except Exception as e:
+ logger.warning("Failed to load hooks from %s: %s", path, e)
+
+ async def fire(
+ self,
+ event: HookEvent,
+ context: Optional[Dict[str, Any]] = None,
+ cwd: Optional[Path] = None,
+ ) -> List[HookResult]:
+ results = []
+ for hook in self._hooks.get(event, []):
+ result = await self._run_hook(hook, context, cwd)
+ results.append(result)
+ if hook.blocking and not result.success:
+ result.blocked = True
+ break
+ return results
+
+ def is_blocked(self, results: List[HookResult]) -> bool:
+ return any(r.blocked for r in results)
+
+ async def _run_hook(
+ self,
+ hook: HookDefinition,
+ context: Optional[Dict[str, Any]],
+ cwd: Optional[Path],
+ ) -> HookResult:
+ try:
+ if hook.command:
+ return await self._run_command_hook(hook, context, cwd)
+ if hook.handler:
+ output = hook.handler(context or {})
+ return HookResult(
+ hook_name=hook.name, event=hook.event,
+ success=True, output=str(output),
+ )
+ return HookResult(
+ hook_name=hook.name, event=hook.event,
+ success=True, output="No action",
+ )
+ except Exception as e:
+ return HookResult(
+ hook_name=hook.name, event=hook.event,
+ success=False, output=str(e),
+ )
+
+ async def _run_command_hook(
+ self,
+ hook: HookDefinition,
+ context: Optional[Dict[str, Any]],
+ cwd: Optional[Path],
+ ) -> HookResult:
+ env = {**os.environ}
+ if context:
+ for k, v in context.items():
+ env[f"GITPILOT_HOOK_{k.upper()}"] = str(v)
+
+ proc = await asyncio.create_subprocess_shell(
+ hook.command,
+ cwd=str(cwd) if cwd else None,
+ stdout=asyncio.subprocess.PIPE,
+ stderr=asyncio.subprocess.STDOUT,
+ env=env,
+ )
+ try:
+ stdout, _ = await asyncio.wait_for(
+ proc.communicate(), timeout=hook.timeout,
+ )
+ return HookResult(
+ hook_name=hook.name, event=hook.event,
+ success=proc.returncode == 0,
+ output=stdout.decode("utf-8", errors="replace"),
+ )
+ except asyncio.TimeoutError:
+ proc.kill()
+ return HookResult(
+ hook_name=hook.name, event=hook.event,
+ success=False, output="Hook timed out",
+ )
diff --git a/gitpilot/init_wizard.py b/gitpilot/init_wizard.py
new file mode 100644
index 0000000000000000000000000000000000000000..8cfdb736290669dd102e07a91ab1893b60f61b13
--- /dev/null
+++ b/gitpilot/init_wizard.py
@@ -0,0 +1,653 @@
+# gitpilot/init_wizard.py
+"""First-run wizard — Batch P3-G.
+
+Walks a new user through the four decisions that previously required
+reading three pages of documentation:
+
+1. Pick a model provider (Anthropic, OpenAI, Ollama, Watsonx).
+2. Supply the matching API key (skipped for local-only providers).
+3. Pick a starter mode (``coder``, ``planner``, ``reviewer``).
+4. Trust the current workspace (records it in
+ :class:`gitpilot.trusted_folders.TrustStore`).
+
+Output artefacts, all written atomically:
+
+* ``.env`` — only the keys the user actually picked
+* ``.gitpilot/modes.yaml`` — one starter mode for the selection
+* ``AGENTS.md`` — via :func:`gitpilot.agents_md.run_init`
+* trust entry in ``~/.gitpilot/trusted.json``
+
+Design rules
+------------
+* **Atomic** — every file is written to a sibling temp file, fsynced,
+ then renamed. An abort (Ctrl-C, KeyboardInterrupt, validation
+ error) leaves the workspace untouched.
+* **Secret-safe** — the wizard never echoes the API key back to stdout.
+ Confirmation messages report ``set`` / ``not set`` only.
+* **Idempotent** — re-running the wizard with the same answers
+ produces a byte-identical ``.env`` and ``.gitpilot/modes.yaml``. An
+ existing file is preserved by default; ``overwrite=True`` is opt-in.
+* **Non-interactive friendly** — every prompt can be pre-answered via
+ the :class:`WizardAnswers` dataclass so the wizard runs in CI and
+ scripts without TTY access.
+* **Flag-gated** — public entry points consult ``init_wizard``. With
+ the flag off the function refuses to run, leaving manual ``init``
+ intact.
+"""
+from __future__ import annotations
+
+import dataclasses
+import logging
+import os
+import re
+import stat
+import tempfile
+import time
+from contextlib import contextmanager
+from dataclasses import dataclass, field
+from pathlib import Path
+from typing import (
+ Any,
+ Callable,
+ Dict,
+ Iterable,
+ Iterator,
+ List,
+ Mapping,
+ Optional,
+ Tuple,
+)
+
+from . import flags
+from .agents_md import run_init as run_agents_md_init
+from .trusted_folders import TrustStore
+
+logger = logging.getLogger(__name__)
+
+FLAG_INIT_WIZARD = "init_wizard"
+SECRET_REDACTED = "***"
+
+
+# ----------------------------------------------------------------------
+# Catalog of providers
+# ----------------------------------------------------------------------
+
+@dataclass(frozen=True)
+class _ProviderSpec:
+    """Immutable description of one selectable model provider."""
+
+    slug: str               # canonical lowercase id
+    label: str              # display name
+    env_key: Optional[str]  # secret env var (None for local providers)
+    default_model: str      # model written as GITPILOT_DEFAULT_MODEL
+    notes: str              # extra hint shown to the user / reader
+
+    @property
+    def needs_key(self) -> bool:
+        """True when this provider requires an API key in the environment."""
+        return self.env_key is not None
+
+
+# Registry of providers the wizard offers, in menu display order.
+SUPPORTED_PROVIDERS: Tuple[_ProviderSpec, ...] = (
+    _ProviderSpec("anthropic", "Anthropic Claude", "ANTHROPIC_API_KEY",
+                  "claude-sonnet-4-5", "Default for hosted use."),
+    _ProviderSpec("openai", "OpenAI", "OPENAI_API_KEY",
+                  "gpt-4o-mini", ""),
+    _ProviderSpec("watsonx", "IBM watsonx", "WATSONX_API_KEY",
+                  "meta-llama/llama-3-1-8b-instruct",
+                  "Set WATSONX_PROJECT_ID separately."),
+    _ProviderSpec("ollama", "Ollama (local)", None,
+                  "llama3.1", "Runs locally; no key needed."),
+)
+
+
+def provider_by_slug(slug: str) -> Optional[_ProviderSpec]:
+ s = slug.strip().lower()
+ for prov in SUPPORTED_PROVIDERS:
+ if prov.slug == s:
+ return prov
+ return None
+
+
+# ----------------------------------------------------------------------
+# Starter modes
+# ----------------------------------------------------------------------
+
+@dataclass(frozen=True)
+class _ModeSpec:
+    """Immutable description of one starter mode written to modes.yaml."""
+
+    slug: str    # yaml `slug` field and selection key
+    label: str   # display name
+    role: str    # roleDefinition body
+    when: str    # whenToUse body
+    groups: Tuple[Any, ...]  # permission groups granted to the mode
+
+
+# The three starter modes the wizard can write; slugs match .gitpilot/modes.yaml.
+STARTER_MODES: Tuple[_ModeSpec, ...] = (
+    _ModeSpec(
+        slug="coder",
+        label="Coder",
+        role=("You write code, run tests, and self-correct on failure. "
+              "Keep changes small and reversible."),
+        when="Use to implement features and fix bugs.",
+        groups=("read", "edit", "command"),
+    ),
+    _ModeSpec(
+        slug="planner",
+        label="Planner",
+        role=("You explore the repo and draft step-by-step plans with risks "
+              "and acceptance criteria. You never write code yourself."),
+        when="Use before implementing a complex change.",
+        groups=("read",),
+    ),
+    _ModeSpec(
+        slug="reviewer",
+        label="Reviewer",
+        role=("You audit diffs, suggest improvements, and draft commit "
+              "messages. You never modify the working tree."),
+        when="Use after a change is ready, before commit.",
+        groups=("read",),
+    ),
+)
+
+
+def mode_by_slug(slug: str) -> Optional[_ModeSpec]:
+ s = slug.strip().lower()
+ for mode in STARTER_MODES:
+ if mode.slug == s:
+ return mode
+ return None
+
+
+# ----------------------------------------------------------------------
+# Answers + result
+# ----------------------------------------------------------------------
+
+@dataclass
+class WizardAnswers:
+    """Inputs collected from the user (or supplied non-interactively)."""
+
+    provider: str = "anthropic"          # slug from SUPPORTED_PROVIDERS
+    api_key: Optional[str] = None        # ``None`` for providers without a key
+    mode_slug: str = "coder"             # slug from STARTER_MODES
+    workspace_trust: bool = True         # record workspace in the trust store
+    overwrite_env: bool = False          # replace an existing .env
+    overwrite_modes: bool = False        # replace an existing modes.yaml
+    overwrite_agents_md: bool = False    # replace an existing AGENTS.md
+
+
+@dataclass
+class WizardResult:
+    """Outcome of one wizard run."""
+
+    workspace: Path
+    files_written: List[Path] = field(default_factory=list)
+    # Each skipped entry is a (path, reason) pair, e.g. (Path(".env"), "exists").
+    files_skipped: List[Tuple[Path, str]] = field(default_factory=list)
+    trust_recorded: bool = False
+    provider: str = ""
+    mode_slug: str = ""
+    duration_ms: int = 0
+    aborted: bool = False          # user abort, invalid presets, or write failure
+    reason: Optional[str] = None   # human-readable abort reason, if aborted
+
+    def to_dict(self) -> Dict[str, Any]:
+        """Serialise to JSON-friendly builtins (Paths become strings)."""
+        return {
+            "workspace": str(self.workspace),
+            "files_written": [str(p) for p in self.files_written],
+            "files_skipped": [(str(p), why) for p, why in self.files_skipped],
+            "trust_recorded": self.trust_recorded,
+            "provider": self.provider,
+            "mode_slug": self.mode_slug,
+            "duration_ms": self.duration_ms,
+            "aborted": self.aborted,
+            "reason": self.reason,
+        }
+
+
+class WizardError(Exception):
+    """Validation failure surfaced before any file is written (safe to re-run)."""
+
+
+# ----------------------------------------------------------------------
+# Prompt protocols (so tests can drive the wizard without a TTY)
+# ----------------------------------------------------------------------
+
+class Prompter:
+    """Tiny abstraction over Typer prompts. Implementations are simple
+    enough to swap for a recorded transcript in tests."""
+
+    def text(self, message: str, *, default: Optional[str] = None) -> str:
+        """Free-form text prompt; returns the raw answer."""
+        raise NotImplementedError
+
+    def secret(self, message: str) -> str:
+        """Hidden-input prompt for secrets; the answer is never echoed."""
+        raise NotImplementedError
+
+    def select(self, message: str, options: List[str], *, default: int = 0) -> int:
+        """Menu prompt; returns the chosen zero-based option index."""
+        raise NotImplementedError
+
+    def confirm(self, message: str, *, default: bool = True) -> bool:
+        """Yes/no prompt."""
+        raise NotImplementedError
+
+    def echo(self, message: str = "") -> None:
+        """Write one line of output."""
+        raise NotImplementedError
+
+
+class _TyperPrompter(Prompter):
+ """Real prompts backed by Typer / Rich. Imported lazily."""
+
+ def __init__(self) -> None:
+ import typer # local
+ self._typer = typer
+
+ def text(self, message: str, *, default: Optional[str] = None) -> str:
+ return str(self._typer.prompt(message, default=default or ""))
+
+ def secret(self, message: str) -> str:
+ return str(self._typer.prompt(
+ message, hide_input=True, default="", show_default=False,
+ ))
+
+ def select(self, message: str, options: List[str], *, default: int = 0) -> int:
+ self.echo(message)
+ for i, opt in enumerate(options):
+ self.echo(f" [{i + 1}] {opt}")
+ while True:
+ raw = self._typer.prompt("Choose", default=str(default + 1))
+ try:
+ idx = int(raw) - 1
+ if 0 <= idx < len(options):
+ return idx
+ except ValueError:
+ pass
+ self.echo(f"Please enter a number between 1 and {len(options)}.")
+
+ def confirm(self, message: str, *, default: bool = True) -> bool:
+ return self._typer.confirm(message, default=default)
+
+ def echo(self, message: str = "") -> None:
+ self._typer.echo(message)
+
+
+@dataclass
+class ScriptedPrompter(Prompter):
+ """Prompter driven by a list of pre-recorded answers. Test-only."""
+
+ answers: List[Any]
+ echoed: List[str] = field(default_factory=list)
+ _cursor: int = 0
+
+ def _pop(self) -> Any:
+ if self._cursor >= len(self.answers):
+ raise WizardError("scripted prompter ran out of answers")
+ value = self.answers[self._cursor]
+ self._cursor += 1
+ return value
+
+ def text(self, message: str, *, default: Optional[str] = None) -> str:
+ return str(self._pop())
+
+ def secret(self, message: str) -> str:
+ return str(self._pop())
+
+ def select(self, message: str, options: List[str], *, default: int = 0) -> int:
+ value = self._pop()
+ if isinstance(value, int):
+ return value
+ # Strings can pass either the slug or the label
+ s = str(value).strip().lower()
+ for i, opt in enumerate(options):
+ if opt.lower() == s:
+ return i
+ raise WizardError(f"scripted option {value!r} not in {options}")
+
+ def confirm(self, message: str, *, default: bool = True) -> bool:
+ return bool(self._pop())
+
+ def echo(self, message: str = "") -> None:
+ self.echoed.append(message)
+
+
+# ----------------------------------------------------------------------
+# Core runner
+# ----------------------------------------------------------------------
+
+def collect_answers(
+    *,
+    prompter: Prompter,
+    presets: Optional[WizardAnswers] = None,
+) -> WizardAnswers:
+    """Drive the interactive prompts. ``presets`` short-circuits any
+    field that is already set (anything not ``None``).
+
+    Raises:
+        WizardError: when the chosen provider requires an API key and the
+            user submits an empty one.
+    """
+    presets = presets or WizardAnswers()
+    prompter.echo("== GitPilot first-run wizard ==")
+
+    # 1. Provider — the preset slug only selects the menu *default*;
+    #    the user can still pick a different provider.
+    options = [f"{p.label}" for p in SUPPORTED_PROVIDERS]
+    chosen_idx = next(
+        (i for i, p in enumerate(SUPPORTED_PROVIDERS) if p.slug == presets.provider),
+        0,
+    )
+    idx = prompter.select("Which model provider?", options, default=chosen_idx)
+    provider_spec = SUPPORTED_PROVIDERS[idx]
+
+    # 2. API key (if needed and not pre-supplied). A preset key skips the
+    #    secret prompt entirely; the key is never echoed back.
+    api_key: Optional[str] = presets.api_key
+    if provider_spec.needs_key:
+        if api_key is None:
+            api_key = prompter.secret(f"Paste your {provider_spec.env_key}").strip()
+        if not api_key:
+            raise WizardError(
+                f"{provider_spec.env_key} is required for the {provider_spec.label} provider."
+            )
+
+    # 3. Starter mode — same default-selection pattern as the provider menu.
+    mode_options = [f"{m.label} — {m.when}" for m in STARTER_MODES]
+    mode_idx = next(
+        (i for i, m in enumerate(STARTER_MODES) if m.slug == presets.mode_slug),
+        0,
+    )
+    selected_mode = prompter.select(
+        "Starter mode?", mode_options, default=mode_idx,
+    )
+    mode_spec = STARTER_MODES[selected_mode]
+
+    # 4. Workspace trust
+    workspace_trust = prompter.confirm(
+        "Trust this workspace (allow tool execution)?",
+        default=presets.workspace_trust,
+    )
+
+    # Overwrite flags are never prompted for; they pass through from presets.
+    return WizardAnswers(
+        provider=provider_spec.slug,
+        api_key=api_key,
+        mode_slug=mode_spec.slug,
+        workspace_trust=workspace_trust,
+        overwrite_env=presets.overwrite_env,
+        overwrite_modes=presets.overwrite_modes,
+        overwrite_agents_md=presets.overwrite_agents_md,
+    )
+
+
+def run_wizard(
+    workspace: Path,
+    *,
+    prompter: Optional[Prompter] = None,
+    presets: Optional[WizardAnswers] = None,
+    trust_store: Optional[TrustStore] = None,
+    enabled: Optional[bool] = None,
+) -> WizardResult:
+    """Run the full wizard end-to-end and return a :class:`WizardResult`.
+
+    Raises :class:`WizardError` for validation failures *before* any
+    file is touched. Mid-run aborts (Ctrl-C, partial writes) leave the
+    workspace untouched thanks to :func:`_atomic_write`.
+
+    ``enabled`` overrides the ``init_wizard`` feature flag; when the flag
+    is off a :class:`WizardError` is raised immediately.
+    """
+    flag_on = enabled if enabled is not None else flags.is_on(FLAG_INIT_WIZARD)
+    if not flag_on:
+        raise WizardError(
+            "init_wizard flag is off; run `gitpilot init` for the legacy flow."
+        )
+
+    start = time.monotonic()
+    workspace = workspace.resolve()
+    workspace.mkdir(parents=True, exist_ok=True)
+    result = WizardResult(workspace=workspace)
+
+    # Phase 0 — validate any presets that we *can* validate before
+    # touching prompts. A typed-but-unknown provider/mode in the
+    # presets is a clean abort, not a fall-through to prompts.
+    if presets is not None:
+        if provider_by_slug(presets.provider) is None:
+            result.aborted = True
+            result.reason = f"unsupported provider: {presets.provider!r}"
+            result.duration_ms = int((time.monotonic() - start) * 1000)
+            return result
+        if mode_by_slug(presets.mode_slug) is None:
+            result.aborted = True
+            result.reason = f"unsupported mode: {presets.mode_slug!r}"
+            result.duration_ms = int((time.monotonic() - start) * 1000)
+            return result
+
+    # Phase 1 — collect (no writes yet). Complete presets skip prompts
+    # entirely, which is what CI / `python -m` invocations rely on.
+    try:
+        prompter = prompter or _TyperPrompter()
+        if presets and _is_complete(presets):
+            answers = presets
+        else:
+            answers = collect_answers(prompter=prompter, presets=presets)
+    except KeyboardInterrupt:
+        result.aborted = True
+        result.reason = "user aborted"
+        result.duration_ms = int((time.monotonic() - start) * 1000)
+        return result
+
+    result.provider = answers.provider
+    result.mode_slug = answers.mode_slug
+
+    # Phase 2 — render in-memory artefacts (may raise WizardError;
+    # nothing has been written yet at this point).
+    env_text = _render_env(answers)
+    modes_text = _render_modes(answers)
+
+    # Phase 3 — write atomically (rollback any partial writes on failure)
+    rollback_handlers: List[Callable[[], None]] = []
+    try:
+        env_path = workspace / ".env"
+        if env_path.exists() and not answers.overwrite_env:
+            result.files_skipped.append((env_path, "exists"))
+        else:
+            # .env may contain a secret — owner read/write only (0o600).
+            _atomic_write(env_path, env_text, mode=0o600,
+                          rollback=rollback_handlers)
+            result.files_written.append(env_path)
+
+        gitpilot_dir = workspace / ".gitpilot"
+        gitpilot_dir.mkdir(exist_ok=True)
+        modes_path = gitpilot_dir / "modes.yaml"
+        if modes_path.exists() and not answers.overwrite_modes:
+            result.files_skipped.append((modes_path, "exists"))
+        else:
+            _atomic_write(modes_path, modes_text, mode=0o644,
+                          rollback=rollback_handlers)
+            result.files_written.append(modes_path)
+
+        agents_md_path = workspace / "AGENTS.md"
+        if agents_md_path.exists() and not answers.overwrite_agents_md:
+            result.files_skipped.append((agents_md_path, "exists"))
+        else:
+            # AGENTS.md is produced by the agents_md module; we only track
+            # the outcome and register a rollback for the created file.
+            report = run_agents_md_init(workspace, overwrite=answers.overwrite_agents_md)
+            if report.created:
+                result.files_written.append(agents_md_path)
+
+                def _agents_rollback(p: Path = agents_md_path) -> None:
+                    _unlink_quiet(p)
+
+                rollback_handlers.append(_agents_rollback)
+            else:
+                result.files_skipped.append((agents_md_path, report.skipped_reason or "exists"))
+
+        if answers.workspace_trust:
+            store = trust_store or TrustStore.default()
+            store.trust(workspace, note="set up via wizard")
+            result.trust_recorded = True
+
+    except Exception as exc:
+        # Atomic rollback — undo any successful writes so the user can
+        # safely re-run. We log the error rather than re-raise so the
+        # WizardResult always describes what happened.
+        for fn in reversed(rollback_handlers):
+            try:
+                fn()
+            except Exception:
+                logger.exception("rollback handler failed")
+        result.aborted = True
+        result.reason = str(exc) or exc.__class__.__name__
+        result.files_written = []
+        logger.exception("wizard failed")
+
+    result.duration_ms = int((time.monotonic() - start) * 1000)
+    return result
+
+
+# ----------------------------------------------------------------------
+# Renderers — pure functions, easy to snapshot in tests
+# ----------------------------------------------------------------------
+
+def _render_env(answers: WizardAnswers) -> str:
+ spec = provider_by_slug(answers.provider)
+ if spec is None:
+ raise WizardError(f"unsupported provider: {answers.provider!r}")
+ lines: List[str] = [
+ "# GitPilot environment — generated by `gitpilot init --wizard`.",
+ "# Only the keys you actually need are listed; add more as required.",
+ f"GITPILOT_LLM_PROVIDER={spec.slug}",
+ f"GITPILOT_DEFAULT_MODEL={spec.default_model}",
+ ]
+ if spec.needs_key:
+ if not answers.api_key:
+ raise WizardError(f"{spec.env_key} is required")
+ _validate_env_value(answers.api_key)
+ lines.append(f"{spec.env_key}={answers.api_key}")
+ return "\n".join(lines) + "\n"
+
+
+def _render_modes(answers: WizardAnswers) -> str:
+    """Render the ``.gitpilot/modes.yaml`` body for the selected starter mode.
+
+    Raises:
+        WizardError: when ``answers.mode_slug`` is not a known starter mode.
+    """
+    spec = mode_by_slug(answers.mode_slug)
+    if spec is None:
+        raise WizardError(f"unsupported mode: {answers.mode_slug!r}")
+    groups_yaml = "\n".join(f"      - {g}" for g in spec.groups)
+    # NOTE(review): role/when are emitted on a single indented line under a
+    # `|` block scalar — the STARTER_MODES strings contain no newlines, so
+    # this stays valid YAML; multi-line roles would need re-indentation.
+    return (
+        "# GitPilot modes — generated by `gitpilot init --wizard`.\n"
+        "# Edit freely; new modes can be added under customModes.\n"
+        "customModes:\n"
+        f"  - slug: {spec.slug}\n"
+        f"    name: {spec.label}\n"
+        f"    description: {spec.label} starter mode\n"
+        f"    roleDefinition: |\n"
+        f"      {spec.role}\n"
+        f"    whenToUse: |\n"
+        f"      {spec.when}\n"
+        "    groups:\n"
+        f"{groups_yaml}\n"
+    )
+
+
+# ----------------------------------------------------------------------
+# Safety helpers
+# ----------------------------------------------------------------------
+
+# CR, LF, and NUL would let a value smuggle extra lines into the .env file.
+_FORBIDDEN_ENV_CHARS = re.compile(r"[\r\n\x00]")
+
+
+def _validate_env_value(value: str) -> None:
+    """Reject newlines and NULs so the secret can't break out of the file."""
+    if _FORBIDDEN_ENV_CHARS.search(value):
+        raise WizardError("API key contains forbidden control characters")
+
+
+def _atomic_write(
+ path: Path,
+ text: str,
+ *,
+ mode: int = 0o644,
+ rollback: List[Callable[[], None]],
+) -> None:
+ """Write *text* to *path* atomically.
+
+ The file is written to a sibling temp file in the same directory,
+ fsynced for durability, then renamed over the target. A rollback
+ handler that deletes the renamed file is appended to *rollback*
+ so the wizard can undo all writes on a later failure.
+ """
+ path.parent.mkdir(parents=True, exist_ok=True)
+ fd, tmp_name = tempfile.mkstemp(
+ prefix=f".{path.name}.", suffix=".tmp", dir=str(path.parent),
+ )
+ tmp_path = Path(tmp_name)
+ try:
+ with os.fdopen(fd, "w", encoding="utf-8") as handle:
+ handle.write(text)
+ handle.flush()
+ try:
+ os.fsync(handle.fileno())
+ except OSError:
+ pass
+ os.chmod(tmp_path, mode)
+ os.replace(tmp_path, path)
+ except Exception:
+ _unlink_quiet(tmp_path)
+ raise
+
+ def _undo(p: Path = path) -> None:
+ _unlink_quiet(p)
+
+ rollback.append(_undo)
+
+
+def _unlink_quiet(path: Path) -> None:
+ try:
+ path.unlink()
+ except FileNotFoundError:
+ return
+ except OSError:
+ logger.exception("could not unlink %s", path)
+
+
+def _is_complete(answers: WizardAnswers) -> bool:
+ """True if presets cover every prompt — wizard runs non-interactively."""
+ spec = provider_by_slug(answers.provider)
+ if spec is None:
+ return False
+ if spec.needs_key and not answers.api_key:
+ return False
+ return mode_by_slug(answers.mode_slug) is not None
+
+
+# ----------------------------------------------------------------------
+# Rendering helpers exported for tests
+# ----------------------------------------------------------------------
+
+def render_env(answers: WizardAnswers) -> str:
+    """Public render helper for snapshot tests; delegates to :func:`_render_env`."""
+    return _render_env(answers)
+
+
+def render_modes(answers: WizardAnswers) -> str:
+    """Public render helper for snapshot tests; delegates to :func:`_render_modes`."""
+    return _render_modes(answers)
+
+
+def supported_provider_slugs() -> List[str]:
+    """Return the canonical slug for each provider the wizard supports."""
+    return [p.slug for p in SUPPORTED_PROVIDERS]
+
+
+def starter_mode_slugs() -> List[str]:
+    """Return the slug for each starter mode the wizard can write."""
+    return [m.slug for m in STARTER_MODES]
+
+
+# ----------------------------------------------------------------------
+# Module-level entry — ``python -m gitpilot.init_wizard --provider …``
+# ----------------------------------------------------------------------
+
+def _module_main(argv: Optional[List[str]] = None) -> int:  # pragma: no cover - manual
+    """CLI entry point: run the wizard non-interactively from argparse flags.
+
+    Returns a process exit code: 0 on success, 1 when the run aborted.
+    """
+    import argparse
+
+    parser = argparse.ArgumentParser(prog="gitpilot.init_wizard")
+    parser.add_argument("--workspace", type=Path, default=Path.cwd())
+    parser.add_argument("--provider", default="anthropic")
+    parser.add_argument("--api-key", default=None)
+    parser.add_argument("--mode", default="coder")
+    parser.add_argument("--no-trust", action="store_true")
+    args = parser.parse_args(argv)
+    presets = WizardAnswers(
+        provider=args.provider,
+        api_key=args.api_key,
+        mode_slug=args.mode,
+        workspace_trust=not args.no_trust,
+    )
+    # Module invocation implies the user wants the wizard: force the flag on.
+    flags.set_override(FLAG_INIT_WIZARD, True)
+    # A ScriptedPrompter with no answers means any prompt (i.e. incomplete
+    # presets) raises WizardError instead of hanging waiting for a TTY.
+    result = run_wizard(args.workspace, presets=presets,
+                        prompter=ScriptedPrompter(answers=[]))
+    import json
+    print(json.dumps(result.to_dict(), indent=2))
+    return 0 if not result.aborted else 1
+
+
+if __name__ == "__main__": # pragma: no cover
+ raise SystemExit(_module_main())
diff --git a/gitpilot/issue_tools.py b/gitpilot/issue_tools.py
new file mode 100644
index 0000000000000000000000000000000000000000..38506746709e8d2b787a33c5ddc044dead0f36b7
--- /dev/null
+++ b/gitpilot/issue_tools.py
@@ -0,0 +1,161 @@
+"""CrewAI tools for GitHub Issue management.
+
+These tools allow agents to create, list, update, and comment on GitHub issues.
+They reuse the repo context mechanism from agent_tools.
+"""
+import asyncio
+import json
+from typing import Optional
+
+from crewai.tools import tool
+
+from .agent_tools import get_repo_context
+from . import github_issues as gi
+
+
+def _run_async(coro):
+ """Run an async coroutine from a sync CrewAI tool."""
+ loop = asyncio.new_event_loop()
+ asyncio.set_event_loop(loop)
+ try:
+ return loop.run_until_complete(coro)
+ finally:
+ loop.close()
+
+
+def _fmt_issue(issue: dict) -> str:
+ labels = ", ".join(l.get("name", "") for l in issue.get("labels", []))
+ assignees = ", ".join(a.get("login", "") for a in issue.get("assignees", []))
+ return (
+ f"#{issue.get('number')} [{issue.get('state', 'open')}] "
+ f"{issue.get('title', '')}\n"
+ f" Labels: {labels or 'none'} | Assignees: {assignees or 'none'}\n"
+ f" URL: {issue.get('html_url', '')}"
+ )
+
+
+@tool("List repository issues")
+def list_issues(
+ state: str = "open",
+ labels: Optional[str] = None,
+ per_page: int = 20,
+) -> str:
+ """Lists issues in the current repository. Optional filters: state (open/closed/all), labels (comma-separated), per_page."""
+ try:
+ owner, repo, token, _branch = get_repo_context()
+ issues = _run_async(
+ gi.list_issues(owner, repo, state=state, labels=labels, per_page=per_page, token=token)
+ )
+ if not issues:
+ return f"No {state} issues found in {owner}/{repo}."
+ header = f"Issues in {owner}/{repo} (state={state}):\n"
+ return header + "\n".join(_fmt_issue(i) for i in issues)
+ except Exception as e:
+ return f"Error listing issues: {e}"
+
+
+@tool("Get issue details")
+def get_issue(issue_number: int) -> str:
+ """Gets full details of a specific issue by number."""
+ try:
+ owner, repo, token, _branch = get_repo_context()
+ issue = _run_async(gi.get_issue(owner, repo, issue_number, token=token))
+ body = (issue.get("body") or "")[:500]
+ return (
+ f"Issue #{issue.get('number')}: {issue.get('title')}\n"
+ f"State: {issue.get('state')} | Created: {issue.get('created_at')}\n"
+ f"Author: {issue.get('user', {}).get('login', 'unknown')}\n"
+ f"Body:\n{body}\n"
+ f"URL: {issue.get('html_url', '')}"
+ )
+ except Exception as e:
+ return f"Error getting issue: {e}"
+
+
+@tool("Create a new issue")
+def create_issue(
+ title: str,
+ body: str = "",
+ labels: str = "",
+ assignees: str = "",
+) -> str:
+ """Creates a new GitHub issue. labels and assignees are comma-separated strings."""
+ try:
+ owner, repo, token, _branch = get_repo_context()
+ label_list = [l.strip() for l in labels.split(",") if l.strip()] if labels else None
+ assignee_list = [a.strip() for a in assignees.split(",") if a.strip()] if assignees else None
+ issue = _run_async(
+ gi.create_issue(owner, repo, title, body=body or None, labels=label_list, assignees=assignee_list, token=token)
+ )
+ return f"Created issue #{issue.get('number')}: {issue.get('title')}\nURL: {issue.get('html_url', '')}"
+ except Exception as e:
+ return f"Error creating issue: {e}"
+
+
+@tool("Update an issue")
+def update_issue(
+ issue_number: int,
+ title: str = "",
+ body: str = "",
+ state: str = "",
+ labels: str = "",
+ assignees: str = "",
+) -> str:
+ """Updates an existing issue. Only non-empty fields are changed. labels/assignees are comma-separated."""
+ try:
+ owner, repo, token, _branch = get_repo_context()
+ kwargs: dict = {}
+ if title:
+ kwargs["title"] = title
+ if body:
+ kwargs["body"] = body
+ if state:
+ kwargs["state"] = state
+ if labels:
+ kwargs["labels"] = [l.strip() for l in labels.split(",") if l.strip()]
+ if assignees:
+ kwargs["assignees"] = [a.strip() for a in assignees.split(",") if a.strip()]
+ issue = _run_async(gi.update_issue(owner, repo, issue_number, token=token, **kwargs))
+ return f"Updated issue #{issue.get('number')}: {issue.get('title')}\nState: {issue.get('state')}"
+ except Exception as e:
+ return f"Error updating issue: {e}"
+
+
+@tool("Add a comment to an issue")
+def add_issue_comment(issue_number: int, body: str) -> str:
+ """Adds a comment to an existing issue."""
+ try:
+ owner, repo, token, _branch = get_repo_context()
+ comment = _run_async(gi.add_issue_comment(owner, repo, issue_number, body, token=token))
+ return f"Comment added to issue #{issue_number}\nURL: {comment.get('html_url', '')}"
+ except Exception as e:
+ return f"Error adding comment: {e}"
+
+
+@tool("List issue comments")
+def list_issue_comments(issue_number: int) -> str:
+ """Lists all comments on an issue."""
+ try:
+ owner, repo, token, _branch = get_repo_context()
+ comments = _run_async(gi.list_issue_comments(owner, repo, issue_number, token=token))
+ if not comments:
+ return f"No comments on issue #{issue_number}."
+ lines = [f"Comments on issue #{issue_number}:"]
+ for c in comments:
+ author = c.get("user", {}).get("login", "unknown")
+ body_preview = (c.get("body") or "")[:200]
+ lines.append(f" [{author}] {body_preview}")
+ return "\n".join(lines)
+ except Exception as e:
+ return f"Error listing comments: {e}"
+
+
+# Export all issue tools
+ISSUE_TOOLS = [
+ list_issues,
+ get_issue,
+ create_issue,
+ update_issue,
+ add_issue_comment,
+ list_issue_comments,
+]
diff --git a/gitpilot/langflow_client.py b/gitpilot/langflow_client.py
new file mode 100644
index 0000000000000000000000000000000000000000..e271a9966dfcb03acdb74272f5489e48718487a5
--- /dev/null
+++ b/gitpilot/langflow_client.py
@@ -0,0 +1,50 @@
+from __future__ import annotations
+
+from typing import Any, Dict, Optional
+
+import httpx
+from fastapi import HTTPException
+
+from .settings import get_settings
+
+
+async def run_langflow_flow(
+ flow_id: str,
+ input_value: str,
+ *,
+ session_id: str = "gitpilot-session",
+ tweaks: Optional[Dict[str, Any]] = None,
+) -> str:
+ """Run a LangFlow flow and return the first chat-like output as text."""
+ settings = get_settings()
+ url = f"{settings.langflow_url.rstrip('/')}/api/v1/run/{flow_id}"
+ headers = {"Content-Type": "application/json"}
+ if settings.langflow_api_key:
+ headers["x-api-key"] = settings.langflow_api_key
+
+ payload: Dict[str, Any] = {
+ "input_value": input_value,
+ "session_id": session_id,
+ "input_type": "chat",
+ "output_type": "chat",
+ "output_component": "",
+ "tweaks": tweaks or {},
+ }
+
+ async with httpx.AsyncClient() as client:
+ resp = await client.post(url, headers=headers, json=payload)
+
+ if resp.status_code >= 400:
+ raise HTTPException(resp.status_code, resp.text)
+
+ data = resp.json()
+ try:
+ outputs = data["outputs"][0]["outputs"][0]["results"]
+ if isinstance(outputs, dict):
+ for key in ("message", "text", "output_text"):
+ if key in outputs:
+ return str(outputs[key])
+ except Exception:
+ pass
+
+ return str(data)
diff --git a/gitpilot/learning.py b/gitpilot/learning.py
new file mode 100644
index 0000000000000000000000000000000000000000..b5fc5a2b8e9e2df2882a1a5801edb3c0f90b1f74
--- /dev/null
+++ b/gitpilot/learning.py
@@ -0,0 +1,251 @@
+# gitpilot/learning.py
+"""Self-improving agent learning engine.
+
+After each task execution, evaluates outcomes, extracts patterns,
+and stores them in the project's auto-memory. Over time, GitPilot
+becomes specialised to each project's patterns and conventions.
+
+Inspired by reinforcement learning from human feedback (RLHF) principles
+and the experience-replay mechanism from DeepMind's DQN (Mnih et al., 2015),
+adapted for a software engineering context.
+
+Learning loop::
+
+ Execute task → Evaluate outcome → Extract patterns → Store in memory
+ ↓
+ Future tasks ← Agent reads patterns from memory ← Memory updated
+"""
+from __future__ import annotations
+
+import json
+import logging
+from dataclasses import dataclass, field
+from datetime import datetime, timezone
+from pathlib import Path
+from typing import Any, Dict, List, Optional
+
+logger = logging.getLogger(__name__)
+
+# Subdirectory of the storage dir where per-repo insight files live.
+LEARNING_DIR = "learning"
+# Hard cap on stored patterns per repository; older entries are dropped.
+MAX_INSIGHTS_PER_REPO = 200
+# Known insight categories. NOTE(review): currently informational only —
+# nothing in this module enforces membership; confirm intended use.
+INSIGHT_CATEGORIES = [
+    "code_style",
+    "testing",
+    "architecture",
+    "workflow",
+    "error_pattern",
+    "performance",
+    "security",
+]
+
+
+@dataclass
+class Evaluation:
+ """Result of evaluating a task outcome."""
+
+ task_description: str
+ success: bool
+ outcome_type: str = "" # tests_passed, pr_approved, error_fixed, etc.
+ details: str = ""
+ confidence: float = 0.8 # 0.0 - 1.0
+ timestamp: str = field(
+ default_factory=lambda: datetime.now(timezone.utc).isoformat()
+ )
+
+ def to_dict(self) -> Dict[str, Any]:
+ return {
+ "task_description": self.task_description,
+ "success": self.success,
+ "outcome_type": self.outcome_type,
+ "details": self.details,
+ "confidence": self.confidence,
+ "timestamp": self.timestamp,
+ }
+
+
+@dataclass
+class RepoInsights:
+ """Accumulated insights for a repository."""
+
+ repo: str
+ patterns: List[str] = field(default_factory=list)
+ preferred_style: Dict[str, str] = field(default_factory=dict)
+ common_errors: List[str] = field(default_factory=list)
+ success_rate: float = 0.0
+ total_tasks: int = 0
+ successful_tasks: int = 0
+
+ def to_dict(self) -> Dict[str, Any]:
+ return {
+ "repo": self.repo,
+ "patterns": self.patterns,
+ "preferred_style": self.preferred_style,
+ "common_errors": self.common_errors,
+ "success_rate": self.success_rate,
+ "total_tasks": self.total_tasks,
+ "successful_tasks": self.successful_tasks,
+ }
+
+
+class LearningEngine:
+ """Learn from task execution outcomes and improve over time.
+
+ Usage::
+
+ engine = LearningEngine(storage_dir=Path("~/.gitpilot"))
+ evaluation = engine.evaluate_outcome(
+ task="Fix login bug",
+ result={"tests_passed": True, "pr_approved": True},
+ )
+ patterns = engine.extract_patterns(evaluation)
+ engine.update_strategies("owner/repo", patterns)
+ insights = engine.get_repo_insights("owner/repo")
+ """
+
+ def __init__(self, storage_dir: Optional[Path] = None) -> None:
+ self.storage_dir = storage_dir or (Path.home() / ".gitpilot")
+ self._learning_dir = self.storage_dir / LEARNING_DIR
+ self._learning_dir.mkdir(parents=True, exist_ok=True)
+
+ def evaluate_outcome(
+ self,
+ task: str,
+ result: Optional[Dict[str, Any]] = None,
+ ) -> Evaluation:
+ """Evaluate a task outcome based on result signals.
+
+ Checks for success signals like:
+ - tests_passed: True
+ - pr_approved: True
+ - error_fixed: True
+ - build_success: True
+ """
+ result = result or {}
+ success_signals = [
+ result.get("tests_passed", False),
+ result.get("pr_approved", False),
+ result.get("error_fixed", False),
+ result.get("build_success", False),
+ ]
+ explicit_success = result.get("success")
+
+ if explicit_success is not None:
+ success = bool(explicit_success)
+ else:
+ success = any(success_signals)
+
+ # Determine outcome type
+ if result.get("tests_passed"):
+ outcome_type = "tests_passed"
+ elif result.get("pr_approved"):
+ outcome_type = "pr_approved"
+ elif result.get("error_fixed"):
+ outcome_type = "error_fixed"
+ elif result.get("error"):
+ outcome_type = "error"
+ success = False
+ else:
+ outcome_type = "completed" if success else "unknown"
+
+ confidence = 0.9 if success else 0.6
+
+ return Evaluation(
+ task_description=task,
+ success=success,
+ outcome_type=outcome_type,
+ details=result.get("details", ""),
+ confidence=confidence,
+ )
+
+ def extract_patterns(self, evaluation: Evaluation) -> List[str]:
+ """Extract learnable patterns from an evaluation.
+
+ Generates natural-language patterns that can be injected
+ into future agent system prompts.
+ """
+ patterns = []
+
+ if evaluation.success:
+ patterns.append(
+ f"Task '{evaluation.task_description}' succeeded "
+ f"(outcome: {evaluation.outcome_type})"
+ )
+ if evaluation.outcome_type == "tests_passed":
+ patterns.append("Tests are available and should be run after changes")
+ if evaluation.outcome_type == "pr_approved":
+ patterns.append("PR workflow is active; create PRs for review")
+ else:
+ patterns.append(
+ f"Task '{evaluation.task_description}' failed "
+ f"(outcome: {evaluation.outcome_type})"
+ )
+ if evaluation.details:
+ patterns.append(f"Error context: {evaluation.details[:200]}")
+
+ return patterns
+
+ def update_strategies(self, repo: str, patterns: List[str]) -> None:
+ """Store learned patterns for a repository."""
+ repo_file = self._repo_path(repo)
+ data = self._load_repo_data(repo)
+
+ existing = set(data.get("patterns", []))
+ for p in patterns:
+ existing.add(p)
+
+ data["patterns"] = list(existing)[-MAX_INSIGHTS_PER_REPO:]
+ data["updated_at"] = datetime.now(timezone.utc).isoformat()
+ data.setdefault("total_tasks", 0)
+ data["total_tasks"] += 1
+
+ # Update success rate
+ if any("succeeded" in p for p in patterns):
+ data.setdefault("successful_tasks", 0)
+ data["successful_tasks"] += 1
+
+ total = data.get("total_tasks", 1)
+ successful = data.get("successful_tasks", 0)
+ data["success_rate"] = round(successful / total, 3) if total > 0 else 0.0
+
+ repo_file.write_text(json.dumps(data, indent=2))
+
+ def get_repo_insights(self, repo: str) -> RepoInsights:
+ """Get accumulated insights for a repository."""
+ data = self._load_repo_data(repo)
+ return RepoInsights(
+ repo=repo,
+ patterns=data.get("patterns", []),
+ preferred_style=data.get("preferred_style", {}),
+ common_errors=data.get("common_errors", []),
+ success_rate=data.get("success_rate", 0.0),
+ total_tasks=data.get("total_tasks", 0),
+ successful_tasks=data.get("successful_tasks", 0),
+ )
+
+ def record_error(self, repo: str, error: str) -> None:
+ """Record a common error pattern for a repo."""
+ data = self._load_repo_data(repo)
+ errors = data.setdefault("common_errors", [])
+ if error not in errors:
+ errors.append(error)
+ data["common_errors"] = errors[-50:] # Keep last 50
+ self._repo_path(repo).write_text(json.dumps(data, indent=2))
+
+ def set_preferred_style(self, repo: str, key: str, value: str) -> None:
+ """Set a preferred code style for a repo (e.g., indent: 4spaces)."""
+ data = self._load_repo_data(repo)
+ data.setdefault("preferred_style", {})[key] = value
+ self._repo_path(repo).write_text(json.dumps(data, indent=2))
+
+ def _repo_path(self, repo: str) -> Path:
+ safe_name = repo.replace("/", "__")
+ return self._learning_dir / f"{safe_name}.json"
+
+ def _load_repo_data(self, repo: str) -> Dict[str, Any]:
+ path = self._repo_path(repo)
+ if path.exists():
+ try:
+ return json.loads(path.read_text())
+ except Exception:
+ return {}
+ return {}
diff --git a/gitpilot/llm_provider.py b/gitpilot/llm_provider.py
new file mode 100644
index 0000000000000000000000000000000000000000..9df0108821a646bc6bc8b4048d531be954ee1f13
--- /dev/null
+++ b/gitpilot/llm_provider.py
@@ -0,0 +1,380 @@
+from __future__ import annotations
+
+import logging
+import os
+from typing import TYPE_CHECKING, Any
+
+import httpx
+
+# LAZY IMPORT: `from crewai import LLM` pulls in litellm, chromadb, lancedb,
+# opentelemetry, onnxruntime, and ~180 other packages. Importing it at module
+# top-level adds 10-60s to every backend startup (especially on WSL).
+# We defer it into build_llm() so it only loads when a chat is actually sent.
+if TYPE_CHECKING:
+ from crewai import LLM # noqa: F401 — type hint only
+
+from gitpilot.models import ProviderHealth, ProviderSummary
+
+from .settings import LLMProvider, get_settings
+from .reasoning_normalizer import wrap_if_reasoning_model
+
+logger = logging.getLogger(__name__)
+
+
+def _wrap_llm(llm: Any, model: str) -> Any:
+    """Auto-wrap the LLM with ReasoningAwareLLM if the model is a reasoning
+    model (deepseek-r1, qwq, marco-o1, r1-distill, etc.).
+
+    This is the single point where reasoning-model normalization is applied.
+    For non-reasoning models this is a no-op — the original LLM is returned
+    unchanged with zero overhead.
+
+    The wrapper strips reasoning markup (think-tag blocks) from LLM
+    responses before CrewAI's ReAct parser sees them, preventing the common
+    "Invalid response from LLM call - None or empty" error.
+    """
+    return wrap_if_reasoning_model(llm, model)
+
+
+def build_llm() -> Any:
+    """Return an initialized CrewAI LLM using the active provider.
+
+    CrewAI is lazy-imported here to avoid loading ~180 packages (litellm,
+    chromadb, lancedb, opentelemetry, onnxruntime, etc.) at server startup.
+    First call adds 5-15s; subsequent calls are instant.
+
+    If the active model is a reasoning model (deepseek-r1, qwq, etc.),
+    the returned LLM is automatically wrapped with ReasoningAwareLLM
+    for CrewAI compatibility. For non-reasoning models, the original
+    LLM is returned unchanged.
+
+    Raises:
+        ValueError: If the active provider is missing required credentials
+            or configuration, or if the provider value is unsupported.
+    """
+    # LAZY IMPORT — see module-level comment for rationale
+    from crewai import LLM
+
+    settings = get_settings()
+    provider = settings.provider
+
+    if provider == LLMProvider.openai:
+        # Use settings config if available, otherwise fall back to env vars
+        api_key = settings.openai.api_key or os.getenv("OPENAI_API_KEY", "")
+        model = settings.openai.model or os.getenv("GITPILOT_OPENAI_MODEL", "gpt-4o-mini")
+        base_url = settings.openai.base_url or os.getenv("OPENAI_BASE_URL", "")
+
+        # Validate required credentials
+        if not api_key:
+            raise ValueError(
+                "OpenAI API key is required. "
+                "Configure it in Admin / LLM Settings or set OPENAI_API_KEY environment variable."
+            )
+
+        # Ensure model has provider prefix for CrewAI
+        if not model.startswith("openai/"):
+            model = f"openai/{model}"
+
+        return _wrap_llm(
+            LLM(
+                model=model,
+                api_key=api_key,
+                base_url=base_url if base_url else None,
+            ),
+            model,
+        )
+
+    if provider == LLMProvider.claude:
+        # Use settings config if available, otherwise fall back to env vars
+        api_key = settings.claude.api_key or os.getenv("ANTHROPIC_API_KEY", "")
+        model = settings.claude.model or os.getenv("GITPILOT_CLAUDE_MODEL", "claude-sonnet-4-5")
+        base_url = settings.claude.base_url or os.getenv("ANTHROPIC_BASE_URL", "")
+
+        # Validate required credentials
+        if not api_key:
+            raise ValueError(
+                "Claude API key is required. "
+                "Configure it in Admin / LLM Settings or set "
+                "ANTHROPIC_API_KEY environment variable."
+            )
+
+        # CRITICAL: Set API key as environment variable
+        # (required by CrewAI's native Anthropic provider)
+        # CrewAI's Anthropic integration checks for this env var internally.
+        # NOTE: this mutates process-wide state and persists after the call.
+        os.environ["ANTHROPIC_API_KEY"] = api_key
+
+        # Optional: Set base URL as environment variable if provided
+        if base_url:
+            os.environ["ANTHROPIC_BASE_URL"] = base_url
+
+        # Ensure model has provider prefix for CrewAI
+        if not model.startswith("anthropic/"):
+            model = f"anthropic/{model}"
+
+        return _wrap_llm(
+            LLM(
+                model=model,
+                api_key=api_key,
+                base_url=base_url if base_url else None,
+            ),
+            model,
+        )
+
+    if provider == LLMProvider.watsonx:
+        # FIXED: Use settings config with proper watsonx.ai integration
+        api_key = settings.watsonx.api_key or os.getenv("WATSONX_API_KEY", "")
+        project_id = settings.watsonx.project_id or os.getenv("WATSONX_PROJECT_ID", "")
+        model = settings.watsonx.model_id or os.getenv(
+            "GITPILOT_WATSONX_MODEL",
+            "ibm/granite-3-8b-instruct",  # Default model (without prefix)
+        )
+        base_url = settings.watsonx.base_url or os.getenv(
+            "WATSONX_BASE_URL",
+            "https://us-south.ml.cloud.ibm.com",  # Default to US South
+        )
+
+        # Validate required credentials
+        if not api_key:
+            raise ValueError(
+                "Watsonx API key is required. "
+                "Configure it in Admin / LLM Settings or set WATSONX_API_KEY environment variable."
+            )
+        if not project_id:
+            raise ValueError(
+                "Watsonx project ID is required. "
+                "Configure it in Admin / LLM Settings or set "
+                "WATSONX_PROJECT_ID environment variable."
+            )
+
+        # CRITICAL: Set project ID as environment variable (required by watsonx.ai SDK)
+        os.environ["WATSONX_PROJECT_ID"] = project_id
+
+        # CRITICAL: Also set the base URL as WATSONX_URL (some integrations use this)
+        os.environ["WATSONX_URL"] = base_url
+
+        # Ensure model has provider prefix for CrewAI (watsonx/provider/model)
+        # Format: watsonx/ibm/granite-3-8b-instruct
+        if not model.startswith("watsonx/"):
+            model = f"watsonx/{model}"
+
+        # FIXED: Create LLM with project_id parameter (CRITICAL!)
+        return _wrap_llm(
+            LLM(
+                model=model,
+                api_key=api_key,
+                base_url=base_url,
+                project_id=project_id,  # CRITICAL: watsonx requires project_id on the LLM itself
+                temperature=0.3,  # Default temperature
+                max_tokens=1024,  # Default max tokens
+            ),
+            model,
+        )
+
+    if provider == LLMProvider.ollama:
+        # Use settings config if available, otherwise fall back to env vars
+        model = settings.ollama.model or os.getenv("GITPILOT_OLLAMA_MODEL", "llama3")
+        base_url = settings.ollama.base_url or os.getenv("OLLAMA_BASE_URL", "http://localhost:11434")
+
+        # Validate required configuration
+        if not base_url:
+            raise ValueError(
+                "Ollama base URL is required. "
+                "Configure it in Admin / LLM Settings or set OLLAMA_BASE_URL environment variable."
+            )
+
+        # Ensure model has provider prefix for CrewAI
+        if not model.startswith("ollama/"):
+            model = f"ollama/{model}"
+
+        return _wrap_llm(LLM(model=model, base_url=base_url), model)
+
+    if provider == LLMProvider.ollabridge:
+        # OllaBridge / OllaBridge Cloud - OpenAI-compatible API
+        model = settings.ollabridge.model or os.getenv("GITPILOT_OLLABRIDGE_MODEL", "qwen2.5:1.5b")
+        base_url = settings.ollabridge.base_url or os.getenv("OLLABRIDGE_BASE_URL", "http://localhost:8000")
+        api_key = settings.ollabridge.api_key or os.getenv("OLLABRIDGE_API_KEY", "")
+
+        # Validate required configuration
+        if not base_url:
+            raise ValueError(
+                "OllaBridge base URL is required. "
+                "Configure it in Admin / LLM Settings or set "
+                "OLLABRIDGE_BASE_URL environment variable."
+            )
+
+        # OllaBridge exposes an OpenAI-compatible API at /v1/
+        # Use the openai/ prefix so CrewAI routes through the OpenAI adapter
+        if not model.startswith("openai/"):
+            model = f"openai/{model}"
+
+        ollabridge_api_base = f"{base_url.rstrip('/')}/v1"
+        # A placeholder key keeps the OpenAI client happy when no key is set.
+        ollabridge_key = api_key or "ollabridge"
+
+        # CRITICAL: Set environment variables so litellm/OpenAI client uses
+        # the remote OllaBridge URL instead of falling back to localhost.
+        # Without this, the openai/ prefix causes litellm to check OPENAI_API_BASE
+        # and default to localhost when it's not set.
+        # NOTE(review): this clobbers any real OPENAI_API_KEY/OPENAI_API_BASE
+        # in the process env — confirm this cannot race a concurrent openai run.
+        os.environ["OPENAI_API_KEY"] = ollabridge_key
+        os.environ["OPENAI_API_BASE"] = ollabridge_api_base
+
+        return _wrap_llm(
+            LLM(
+                model=model,
+                api_key=ollabridge_key,
+                base_url=ollabridge_api_base,
+            ),
+            model,
+        )
+
+    raise ValueError(f"Unsupported provider: {provider}")
+
+
+# ---------------------------------------------------------------------------
+# Batch P2-A — structured system-prompt builder.
+#
+# This helper is purely additive: it composes a :class:`SystemPayload` with
+# cacheable / non-cacheable segments via :mod:`gitpilot.prompt_cache`. The
+# legacy code path (callers that feed a flat ``system`` string into
+# ``build_llm()`` results) is untouched — they keep working with no behaviour
+# change. Callers that want the cache markers should adopt this helper
+# incrementally.
+# ---------------------------------------------------------------------------
+def build_system_blocks(
+ *,
+ base_system: str = "",
+ workspace: Any = None,
+ mode_slug: Any = None,
+ tool_defs: Any = None,
+ session_conventions: str = "",
+) -> Any:
+ """Return the structured system payload for the active provider.
+
+ The active provider is read from settings; the prompt-cache markers
+ are emitted only when both ``prompt_cache`` is on and the provider
+ is Anthropic. For every other provider the payload still carries
+ the same content and a stable ordering, just without cache markers.
+ """
+ from .prompt_cache import build_system_blocks as _build # local import
+
+ try:
+ provider = get_settings().provider.value # type: ignore[union-attr]
+ except Exception:
+ provider = None
+
+ return _build(
+ base_system=base_system,
+ workspace=workspace,
+ mode_slug=mode_slug,
+ tool_defs=tool_defs,
+ session_conventions=session_conventions,
+ provider=provider,
+ )
+
+
+def validate_provider_config(settings) -> tuple[bool, list[str]]:
+ """Validate provider configuration and return (is_valid, errors)."""
+ errors = []
+ provider = settings.provider
+
+ if provider == LLMProvider.openai:
+ if not settings.openai.api_key:
+ errors.append("OpenAI API key is required")
+ elif provider == LLMProvider.claude:
+ if not settings.claude.api_key:
+ errors.append("Anthropic API key is required")
+ elif provider == LLMProvider.watsonx:
+ if not settings.watsonx.api_key:
+ errors.append("Watsonx API key is required")
+ if not settings.watsonx.project_id:
+ errors.append("Watsonx project ID is required")
+ elif provider == LLMProvider.ollama:
+ pass # Local, always valid
+ elif provider == LLMProvider.ollabridge:
+ pass # Local default, always valid
+
+ return (len(errors) == 0, errors)
+
+
+def get_effective_model(settings) -> str | None:
+ """Get the active model name for the current provider."""
+ provider = settings.provider
+ if provider == LLMProvider.openai:
+ return settings.openai.model
+ if provider == LLMProvider.claude:
+ return settings.claude.model
+ if provider == LLMProvider.watsonx:
+ return settings.watsonx.model_id
+ if provider == LLMProvider.ollama:
+ return settings.ollama.model
+ if provider == LLMProvider.ollabridge:
+ return settings.ollabridge.model
+ return None
+
+
+def _apply_health(summary: ProviderSummary, status_code: int) -> None:
+ """Set health and models_available from HTTP status code."""
+ ok = status_code == 200
+ summary.health = ProviderHealth.ok if ok else ProviderHealth.error
+ summary.models_available = ok
+
+
+async def test_provider_connection(settings) -> ProviderSummary:
+    """Test the current provider connection and return status.
+
+    Performs a single lightweight GET against the provider's model-listing
+    (or equivalent) endpoint with a 10-second timeout, then records the
+    result on the summary via ``_apply_health``. Connection/timeout errors
+    are captured into ``summary.health`` / ``summary.warning`` rather than
+    raised.
+    """
+    summary = settings.get_provider_summary()
+    provider = settings.provider
+
+    try:
+        async with httpx.AsyncClient(timeout=10.0) as client:
+            if provider == LLMProvider.openai:
+                url = settings.openai.base_url or "https://api.openai.com"
+                resp = await client.get(
+                    f"{url}/v1/models",
+                    headers={"Authorization": f"Bearer {settings.openai.api_key}"},
+                )
+                _apply_health(summary, resp.status_code)
+
+            elif provider == LLMProvider.claude:
+                url = settings.claude.base_url or "https://api.anthropic.com"
+                headers = {
+                    "x-api-key": settings.claude.api_key,
+                    "anthropic-version": "2023-06-01",
+                }
+                resp = await client.get(f"{url}/v1/models", headers=headers)
+                _apply_health(summary, resp.status_code)
+
+            elif provider == LLMProvider.watsonx:
+                base = settings.watsonx.base_url or "https://us-south.ml.cloud.ibm.com"
+                # NOTE(review): this sends the raw API key as a Bearer token;
+                # IBM Cloud usually expects an IAM access token here — confirm
+                # this endpoint accepts API keys directly.
+                resp = await client.get(
+                    f"{base}/ml/v1/foundation_model_specs",
+                    params={"version": "2024-03-14", "limit": "1"},
+                    headers={"Authorization": f"Bearer {settings.watsonx.api_key}"},
+                )
+                _apply_health(summary, resp.status_code)
+
+            elif provider == LLMProvider.ollama:
+                base = settings.ollama.base_url or "http://127.0.0.1:11434"
+                resp = await client.get(f"{base}/api/tags")
+                _apply_health(summary, resp.status_code)
+
+            elif provider == LLMProvider.ollabridge:
+                base = settings.ollabridge.base_url or "http://127.0.0.1:8000"
+                # Tolerate a user-supplied trailing /v1 but warn, since the
+                # runtime path in build_llm() appends /v1 itself.
+                base = base.rstrip("/")
+                if base.endswith("/v1"):
+                    base = base[:-3]
+                    summary.warning = (
+                        "Do not include /v1; GitPilot adds it automatically."
+                    )
+                api_key = settings.ollabridge.api_key or "ollabridge"
+                resp = await client.get(
+                    f"{base}/v1/models",
+                    headers={"Authorization": f"Bearer {api_key}"},
+                )
+                _apply_health(summary, resp.status_code)
+
+    except httpx.ConnectError:
+        summary.health = ProviderHealth.error
+        summary.warning = f"Cannot connect to {provider.value} server"
+    except httpx.TimeoutException:
+        summary.health = ProviderHealth.warning
+        summary.warning = f"Connection to {provider.value} timed out"
+    except Exception as e:
+        summary.health = ProviderHealth.error
+        summary.warning = str(e)
+
+    return summary
diff --git a/gitpilot/local_tools.py b/gitpilot/local_tools.py
new file mode 100644
index 0000000000000000000000000000000000000000..05b18d216e76141222c79d655699ab0c1a09c3ef
--- /dev/null
+++ b/gitpilot/local_tools.py
@@ -0,0 +1,212 @@
+# gitpilot/local_tools.py
+"""CrewAI tools for local workspace file and shell operations.
+
+These tools give agents the ability to read, write, search, and navigate
+files on the local filesystem (within the sandboxed workspace directory),
+and to run shell commands like test suites, linters, and build scripts.
+"""
+import asyncio
+import concurrent.futures
+import json
+from typing import Optional
+
+from crewai.tools import tool
+
+from .workspace import WorkspaceManager, WorkspaceInfo
+from .terminal import TerminalExecutor, TerminalSession
+
+_ws_manager = WorkspaceManager()  # shared workspace/file-ops backend
+_executor = TerminalExecutor()  # shared shell-command runner
+# Workspace that all tools operate on; set via set_active_workspace().
+_current_workspace: Optional[WorkspaceInfo] = None
+
+
+def set_active_workspace(ws: WorkspaceInfo) -> None:
+    """Make ``ws`` the workspace all subsequent tool calls operate on."""
+    global _current_workspace
+    _current_workspace = ws
+
+
+def get_active_workspace() -> Optional[WorkspaceInfo]:
+    """Return the currently active workspace, or None if none is set."""
+    return _current_workspace
+
+
+def _require_workspace() -> WorkspaceInfo:
+    """Return the active workspace or raise RuntimeError if unset."""
+    if _current_workspace is None:
+        raise RuntimeError("No active workspace. Call set_active_workspace() first.")
+    return _current_workspace
+
+
+def _run_async(coro):
+ """Bridge sync CrewAI tools to async workspace/terminal calls."""
+ try:
+ asyncio.get_running_loop()
+ except RuntimeError:
+ return asyncio.run(coro)
+ # If a loop is already running (CrewAI thread), use a thread pool
+ with concurrent.futures.ThreadPoolExecutor(max_workers=1) as pool:
+ return pool.submit(asyncio.run, coro).result()
+
+
+# -----------------------------------------------------------------------
+# File operations
+# -----------------------------------------------------------------------
+
+@tool("Read local file")
+def read_local_file(file_path: str) -> str:
+ """Read a file from the local workspace. Returns the file content."""
+ ws = _require_workspace()
+ try:
+ content = _run_async(_ws_manager.read_file(ws, file_path))
+ return f"Content of {file_path}:\n---\n{content}\n---"
+ except Exception as e:
+ return f"Error reading {file_path}: {e}"
+
+
+@tool("Write local file")
+def write_local_file(file_path: str, content: str) -> str:
+ """Write content to a file in the local workspace. Creates parent directories."""
+ ws = _require_workspace()
+ try:
+ result = _run_async(_ws_manager.write_file(ws, file_path, content))
+ return f"Written {result['size']} bytes to {result['path']}"
+ except Exception as e:
+ return f"Error writing {file_path}: {e}"
+
+
+@tool("Delete local file")
+def delete_local_file(file_path: str) -> str:
+ """Delete a file from the local workspace."""
+ ws = _require_workspace()
+ try:
+ deleted = _run_async(_ws_manager.delete_file(ws, file_path))
+ return f"Deleted: {deleted}"
+ except Exception as e:
+ return f"Error deleting {file_path}: {e}"
+
+
+@tool("List local files")
+def list_local_files(directory: str = ".") -> str:
+ """List all tracked and untracked files in a directory."""
+ ws = _require_workspace()
+ try:
+ files = _run_async(_ws_manager.list_files(ws, directory))
+ return "\n".join(files) if files else "No files found."
+ except Exception as e:
+ return f"Error listing files: {e}"
+
+
+@tool("Search in files")
+def search_in_files(pattern: str, path: str = ".") -> str:
+ """Search for a text pattern across all files using git grep.
+ Returns matching lines with file paths and line numbers."""
+ ws = _require_workspace()
+ try:
+ matches = _run_async(_ws_manager.search_files(ws, pattern, path))
+ if not matches:
+ return "No matches found."
+ lines = [f"{m['file']}:{m['line']}: {m['content']}" for m in matches[:50]]
+ return "\n".join(lines)
+ except Exception as e:
+ return f"Error searching: {e}"
+
+
+# -----------------------------------------------------------------------
+# Git operations
+# -----------------------------------------------------------------------
+
+@tool("Git diff")
+def git_diff(staged: str = "false") -> str:
+ """Show the current git diff (unstaged changes by default)."""
+ ws = _require_workspace()
+ try:
+ return _run_async(_ws_manager.diff(ws, staged=staged.lower() == "true")) or "No changes."
+ except Exception as e:
+ return f"Error: {e}"
+
+
+@tool("Git status")
+def git_status() -> str:
+ """Show the current git status."""
+ ws = _require_workspace()
+ try:
+ status = _run_async(_ws_manager.status(ws))
+ return json.dumps(status, indent=2)
+ except Exception as e:
+ return f"Error: {e}"
+
+
+@tool("Git commit")
+def git_commit(message: str, files: str = "") -> str:
+ """Commit changes. Optionally specify files (comma-separated)."""
+ ws = _require_workspace()
+ try:
+ file_list = [f.strip() for f in files.split(",") if f.strip()] or None
+ result = _run_async(_ws_manager.commit(ws, message, file_list))
+ return json.dumps(result)
+ except Exception as e:
+ return f"Error: {e}"
+
+
+@tool("Git log")
+def git_log(count: str = "10") -> str:
+ """Show recent commit history."""
+ ws = _require_workspace()
+ try:
+ commits = _run_async(_ws_manager.log(ws, int(count)))
+ return json.dumps(commits, indent=2)
+ except Exception as e:
+ return f"Error: {e}"
+
+
+# -----------------------------------------------------------------------
+# Shell command execution
+# -----------------------------------------------------------------------
+
+@tool("Run shell command")
+def run_command(command: str, timeout: str = "120") -> str:
+ """Run a shell command in the workspace directory.
+ Returns stdout, stderr, and exit code.
+ Examples: 'npm test', 'python -m pytest', 'make build', 'ls -la'."""
+ ws = _require_workspace()
+ try:
+ session = TerminalSession(workspace_path=ws.path)
+ result = _run_async(_executor.execute(session, command, int(timeout)))
+ output = f"Exit code: {result.exit_code}\n"
+ if result.stdout:
+ output += f"--- stdout ---\n{result.stdout}\n"
+ if result.stderr:
+ output += f"--- stderr ---\n{result.stderr}\n"
+ if result.timed_out:
+ output += "WARNING: Command timed out\n"
+ if result.truncated:
+ output += "WARNING: Output was truncated\n"
+ return output
+ except PermissionError as e:
+ return f"Permission denied: {e}"
+ except Exception as e:
+ return f"Error: {e}"
+
+
+# -----------------------------------------------------------------------
+# Exports
+# -----------------------------------------------------------------------
+
+# File read/write/search tools.
+LOCAL_FILE_TOOLS = [
+    read_local_file,
+    write_local_file,
+    delete_local_file,
+    list_local_files,
+    search_in_files,
+]
+
+# Local git operations (diff/status/commit/log).
+LOCAL_GIT_TOOLS = [
+    git_diff,
+    git_status,
+    git_commit,
+    git_log,
+]
+
+# Shell execution (tests, linters, builds).
+LOCAL_SHELL_TOOLS = [
+    run_command,
+]
+
+# Convenience aggregate of every local tool defined in this module.
+LOCAL_TOOLS = LOCAL_FILE_TOOLS + LOCAL_GIT_TOOLS + LOCAL_SHELL_TOOLS
diff --git a/gitpilot/mcp_admin_api.py b/gitpilot/mcp_admin_api.py
new file mode 100644
index 0000000000000000000000000000000000000000..70bde48ab5b1be67bbaaac6187a518e8c01aadf8
--- /dev/null
+++ b/gitpilot/mcp_admin_api.py
@@ -0,0 +1,754 @@
+"""MCP Context Forge admin API.
+
+Powers the Settings → "MCP Servers" tab. Lets the user list, install,
+uninstall, enable/disable and test attached MCP servers, and toggle
+individual tools on a per-server basis.
+
+State is persisted to ``~/.gitpilot/mcp_servers.json`` so it survives
+restarts. The shape is intentionally small:
+
+.. code-block:: json
+
+ {
+ "version": 1,
+ "gateway_url": "http://localhost:4444/mcp",
+ "servers": {
+ "mcp-postgre-server": {
+ "installed": true,
+ "enabled": true,
+ "endpoint": "http://mcp-postgre-server:8080/mcp",
+ "auth_token_env": "MCP_POSTGRE_SERVER_TOKEN",
+ "tags": ["postgresql", "database"],
+ "tool_overrides": {"postgres.execute_write": false}
+ }
+ }
+ }
+
Catalog data lives in
``gitpilot/extensions/mcp_plugins/<server>/register.json`` so a release
artefact ships with the curated set.
+"""
+from __future__ import annotations
+
+import json
+import logging
+import os
+import time
+from dataclasses import dataclass, field
+from pathlib import Path
+from typing import Any, Iterable
+from urllib.parse import urlparse
+
+import httpx
+from fastapi import APIRouter, HTTPException
+from pydantic import BaseModel, Field
+
+from gitpilot.mcp_plugin import (
+ KNOWN_SERVERS,
+ ForgeClientError,
+ MCPContextForgeClient,
+ PolicyEngine,
+ PolicyViolation,
+ get_settings,
+)
+
+logger = logging.getLogger(__name__)
+
# Bump when the on-disk schema changes shape.
STATE_VERSION = 1
# Env var that relocates the state file (useful for tests / containers).
STATE_FILE_ENV = "GITPILOT_MCP_STATE_FILE"
DEFAULT_STATE_FILE = Path.home() / ".gitpilot" / "mcp_servers.json"

# Per-tool risk classification used by the UI to colour the badges.
RISK_HIGH = "high"
RISK_MEDIUM = "medium"
RISK_LOW = "low"

# Substring matches against lower-cased tool names (see classify_risk):
# destructive keywords map to high risk, mutation keywords to medium.
DESTRUCTIVE_KEYWORDS = ("drop", "delete", "truncate", "remove", "destroy")
MUTATION_KEYWORDS = ("insert", "update", "upsert", "create", "alter", "execute_write")

# Which agents call which tool, used by the UI's "Used by" chip.
AGENT_USES_TOOL: dict[str, tuple[str, ...]] = {
    "postgres.list_databases": ("explorer",),
    "postgres.list_schemas": ("explorer",),
    "postgres.list_tables": ("explorer",),
    "postgres.describe_table": ("coder", "test_runner", "reviewer"),
    "postgres.safe_select": ("coder",),
    "postgres.explain_query": ("reviewer",),
    "postgres.validate_migration": ("reviewer",),
    "postgres.generate_test_fixtures": ("test_runner",),
    "postgres.generate_repository_context": ("coder",),
    "milvus.list_collections": ("explorer",),
    "milvus.describe_collection": ("coder", "test_runner"),
    "milvus.describe_index": ("reviewer",),
    "milvus.search": ("coder",),
    "milvus.hybrid_search": ("coder",),
    "milvus.validate_index_config": ("reviewer",),
    "milvus.generate_ingestion_code": ("coder",),
    "milvus.generate_rag_pipeline_context": ("coder",),
    "milvus.generate_test_vectors": ("test_runner",),
    "inspector.ping_server": ("reviewer",),
    "inspector.list_capabilities": ("explorer",),
    "inspector.validate_tool_schema": ("reviewer",),
    "inspector.run_contract_tests": ("reviewer",),
    "inspector.batch_validate": ("reviewer",),
    "inspector.generate_report": ("reviewer",),
    "inspector.list_logs": ("reviewer",),
}
+
+
+# ---------------------------------------------------------------------------
+# State
+# ---------------------------------------------------------------------------
def _state_path() -> Path:
    """Resolve the on-disk state file, honouring the env-var override."""
    custom = os.environ.get(STATE_FILE_ENV)
    if custom:
        return Path(custom)
    return DEFAULT_STATE_FILE
+
+
@dataclass
class ServerState:
    """Persisted per-server record: install/enable flags plus metadata."""

    id: str
    installed: bool = True
    enabled: bool = True
    endpoint: str = ""
    auth_token_env: str = ""
    description: str = ""
    tags: list[str] = field(default_factory=list)
    tool_overrides: dict[str, bool] = field(default_factory=dict)
    # Sync metadata. Both fields are additive: pre-existing on-disk states
    # without these keys deserialise with the safe defaults.
    orphan: bool = False
    source: str = "manual"  # "manual" | "forge-sync" | "user" | "wizard"

    def to_dict(self) -> dict[str, Any]:
        """Serialise everything except ``id`` (the id is the mapping key)."""
        blob: dict[str, Any] = {
            "installed": self.installed,
            "enabled": self.enabled,
            "endpoint": self.endpoint,
            "auth_token_env": self.auth_token_env,
            "description": self.description,
        }
        # Copy the containers so later mutation can't alias the instance.
        blob["tags"] = list(self.tags)
        blob["tool_overrides"] = dict(self.tool_overrides)
        blob["orphan"] = self.orphan
        blob["source"] = self.source
        return blob

    @classmethod
    def from_dict(cls, server_id: str, blob: dict[str, Any]) -> "ServerState":
        """Rebuild from a JSON blob; missing or null keys get safe defaults."""
        raw_tags = blob.get("tags") or []
        raw_overrides = blob.get("tool_overrides") or {}
        return cls(
            id=server_id,
            installed=bool(blob.get("installed", True)),
            enabled=bool(blob.get("enabled", True)),
            endpoint=str(blob.get("endpoint", "")),
            auth_token_env=str(blob.get("auth_token_env", "")),
            description=str(blob.get("description", "")),
            tags=list(raw_tags),
            tool_overrides=dict(raw_overrides),
            orphan=bool(blob.get("orphan", False)),
            source=str(blob.get("source", "manual") or "manual"),
        )
+
+
@dataclass
class StoreSnapshot:
    """In-memory view of the state file: gateway URL plus all servers."""

    # Gateway MCP endpoint, e.g. "http://localhost:4444/mcp".
    gateway_url: str
    # Keyed by server id; values carry install/enable state and overrides.
    servers: dict[str, ServerState]
+
+
class MCPStore:
    """File-backed store with atomic writes."""

    def __init__(self, path: Path | None = None) -> None:
        # ``path`` is an override hook (tests); production falls back to
        # the default, env-overridable location from _state_path().
        self.path = path or _state_path()

    def load(self) -> StoreSnapshot:
        """Read the snapshot from disk, reseeding on a missing/corrupt file."""
        if not self.path.exists():
            return self._seed_default()
        try:
            with self.path.open() as fh:
                blob = json.load(fh)
        except (OSError, json.JSONDecodeError) as exc:
            # A corrupt state file is recoverable: warn and start fresh
            # rather than taking the whole admin API down.
            logger.warning("invalid MCP state file %s, reseeding (%s)", self.path, exc)
            return self._seed_default()

        servers = {
            sid: ServerState.from_dict(sid, payload)
            for sid, payload in (blob.get("servers") or {}).items()
        }
        return StoreSnapshot(
            # Fall back to the configured gateway when the stored value
            # is missing or empty.
            gateway_url=str(
                blob.get("gateway_url") or get_settings().gateway_url
            ),
            servers=servers,
        )

    def save(self, snapshot: StoreSnapshot) -> None:
        """Persist the snapshot atomically (write a temp file, then rename)."""
        self.path.parent.mkdir(parents=True, exist_ok=True)
        body = {
            "version": STATE_VERSION,
            "gateway_url": snapshot.gateway_url,
            "servers": {sid: s.to_dict() for sid, s in snapshot.servers.items()},
        }
        # e.g. mcp_servers.json -> mcp_servers.json.tmp in the same dir,
        # so the final replace() stays on one filesystem.
        tmp = self.path.with_suffix(self.path.suffix + ".tmp")
        with tmp.open("w") as fh:
            json.dump(body, fh, indent=2, sort_keys=True)
        # Atomic rename: readers never observe a half-written file.
        tmp.replace(self.path)

    def _seed_default(self) -> StoreSnapshot:
        """Seed with the three known servers, all installed but disabled.

        We do not auto-enable on first boot: the user must opt in from the
        UI so they see the consent prompt and configure auth tokens before
        any external tool is callable.
        """
        servers: dict[str, ServerState] = {}
        for sid, entry in KNOWN_SERVERS.items():
            servers[sid] = ServerState(
                id=sid,
                installed=True,
                enabled=False,
                endpoint=_default_endpoint(sid),
                auth_token_env=_default_token_env(sid),
                description=entry.description,
                tags=list(entry.tags),
            )
        snapshot = StoreSnapshot(
            gateway_url=get_settings().gateway_url, servers=servers
        )
        # Persist immediately so the next load() sees the same seed.
        self.save(snapshot)
        return snapshot
+
+
+def _default_endpoint(server_id: str) -> str:
+ port = {"mcp-postgre-server": 8080, "mcp-milvus-server": 8082, "mcp-inspector-server": 8081}
+ return f"http://{server_id}:{port.get(server_id, 8080)}/mcp"
+
+
+def _default_token_env(server_id: str) -> str:
+ return server_id.upper().replace("-", "_") + "_TOKEN"
+
+
+# ---------------------------------------------------------------------------
+# Catalog (read-only, packaged with GitPilot)
+# ---------------------------------------------------------------------------
+def _catalog_dir() -> Path:
+ # gitpilot/mcp_admin_api.py -> gitpilot/ ->
+ here = Path(__file__).resolve().parent.parent
+ return here / "extensions" / "mcp_plugins"
+
+
def load_catalog() -> list[dict[str, Any]]:
    """Read every packaged ``register.json`` manifest into a list of items."""
    base = _catalog_dir()
    if not base.exists():
        return []

    entries: list[dict[str, Any]] = []
    for child in sorted(base.iterdir()):
        if not child.is_dir():
            continue
        manifest = child / "register.json"
        if not manifest.exists():
            continue
        try:
            payload = json.loads(manifest.read_text())
        except (OSError, json.JSONDecodeError):
            # Unreadable or malformed manifests are skipped silently.
            continue
        entries.append(
            {
                "id": payload.get("name") or child.name,
                "slug": child.name,
                "description": payload.get("description", ""),
                "tags": payload.get("tags", []),
                "endpoint": payload.get("endpoint", ""),
                "auth": payload.get("auth", {}),
                "metadata": payload.get("metadata", {}),
                "tool_policy": payload.get("tool_policy", {}),
            }
        )
    return entries
+
+
+# ---------------------------------------------------------------------------
+# Risk + tool aggregation
+# ---------------------------------------------------------------------------
def classify_risk(tool_name: str) -> str:
    """Bucket a tool into high/medium/low risk by keyword substring match."""
    needle = tool_name.lower()
    # Destructive keywords win over mutation keywords; order matters.
    for keywords, level in (
        (DESTRUCTIVE_KEYWORDS, RISK_HIGH),
        (MUTATION_KEYWORDS, RISK_MEDIUM),
    ):
        if any(kw in needle for kw in keywords):
            return level
    return RISK_LOW
+
+
def _expected_tools(server_id: str) -> Iterable[str]:
    """Advertised tool names for a known server; empty for unknown ids."""
    entry = KNOWN_SERVERS.get(server_id)
    if not entry:
        return ()
    return entry.expected_tools
+
+
def _tool_payload(server_id: str, tool_name: str, override: bool | None) -> dict[str, Any]:
    """UI payload for one tool: risk badge, effective enabled flag, users.

    High-risk tools default to disabled; an explicit per-server override
    (when not ``None``) wins over the risk-based default.
    """
    risk = classify_risk(tool_name)
    default_on = risk != RISK_HIGH
    effective = default_on if override is None else override
    return {
        "name": tool_name,
        "risk": risk,
        "enabled_default": default_on,
        "enabled": effective,
        "used_by": list(AGENT_USES_TOOL.get(tool_name, ())),
        "destructive": risk == RISK_HIGH,
        "mutation": risk == RISK_MEDIUM,
    }
+
+
+# ---------------------------------------------------------------------------
+# Live status (tries to talk to the forge; degrades gracefully)
+# ---------------------------------------------------------------------------
def gateway_base_url(gateway_url: str) -> str:
    """Return the gateway origin with the ``/mcp`` transport suffix removed.

    ``gateway_url`` is the canonical MCP JSON-RPC address (conventionally
    ending in ``/mcp``), while admin endpoints such as ``/health`` and
    ``/gateways`` live at the origin. Trailing slashes are dropped first,
    then one ``/mcp`` suffix is stripped if present.
    """
    base = gateway_url.rstrip("/")
    suffix = "/mcp"
    if base.endswith(suffix):
        base = base[: -len(suffix)]
    return base
+
+
async def gateway_status(gateway_url: str, auth_token: str) -> dict[str, Any]:
    """Probe the forge's ``/health`` endpoint; degrades gracefully, never raises."""
    base = gateway_base_url(gateway_url)
    headers = {"Authorization": f"Bearer {auth_token}"} if auth_token else {}
    try:
        async with httpx.AsyncClient(timeout=3.0, headers=headers) as http:
            resp = await http.get(f"{base}/health")
    except httpx.HTTPError as exc:
        return {"reachable": False, "detail": str(exc)}

    if resp.status_code != 200:
        return {"reachable": False, "detail": f"status={resp.status_code}"}
    # Prefer the JSON body (reported version + tool count) when the
    # gateway sends one; otherwise surface a truncated raw text snippet.
    try:
        return {"reachable": True, "detail": resp.json()}
    except ValueError:
        return {"reachable": True, "detail": {"status": resp.text[:120]}}
+
+
+# ---------------------------------------------------------------------------
+# Pydantic request bodies
+# ---------------------------------------------------------------------------
class InstallRequest(BaseModel):
    """Body for ``POST /api/mcp/servers/install``."""

    server_id: str = Field(..., description="catalog slug or full server id")
    # The remaining fields override catalog values, or define a custom
    # server when the id is not found in the catalog.
    endpoint: str | None = None
    auth_token_env: str | None = None
    description: str | None = None
    tags: list[str] | None = None
+
+
class CustomInstallRequest(BaseModel):
    """Body for ``POST /api/mcp/servers/install-custom``: a raw register.json."""

    register_json: dict[str, Any] = Field(
        ..., description="A Context Forge register.json document."
    )
+
+
class ToolToggleRequest(BaseModel):
    """Body for the per-tool enable/disable endpoint."""

    enabled: bool
+
+
+# ---------------------------------------------------------------------------
+# Router
+# ---------------------------------------------------------------------------
+router = APIRouter(prefix="/api/mcp", tags=["mcp-admin"])
+
+
def _store() -> MCPStore:
    """Fresh store per call; MCPStore holds no state besides its path."""
    return MCPStore()
+
+
@router.get("/status")
async def get_status() -> dict[str, Any]:
    """Header summary: gateway reachability plus install/enable counts."""
    snapshot = _store().load()
    settings = get_settings()
    gateway = await gateway_status(snapshot.gateway_url, settings.auth_token)

    installed_servers = [s for s in snapshot.servers.values() if s.installed]
    active = [s for s in installed_servers if s.enabled]
    # Tools "advertised" = expected tools of every installed+enabled server.
    advertised = 0
    for server in active:
        advertised += len(list(_expected_tools(server.id)))

    return {
        "gateway_url": snapshot.gateway_url,
        "gateway_reachable": gateway["reachable"],
        "gateway_detail": gateway["detail"],
        "plugin_enabled": settings.enabled,
        "servers_installed": len(installed_servers),
        "servers_enabled": len(active),
        "tools_advertised": advertised,
    }
+
+
@router.get("/servers")
async def list_servers() -> dict[str, Any]:
    """All locally-known servers with their per-tool payloads, sorted by id."""
    snapshot = _store().load()
    payload: list[dict[str, Any]] = []
    for sid, state in snapshot.servers.items():
        tools = []
        for name in _expected_tools(sid):
            tools.append(_tool_payload(sid, name, state.tool_overrides.get(name)))
        payload.append(
            {
                "id": sid,
                "installed": state.installed,
                "enabled": state.enabled,
                "endpoint": state.endpoint,
                "auth_token_env": state.auth_token_env,
                "description": state.description,
                "tags": state.tags,
                "tool_count": len(tools),
                "tools": tools,
                "is_known": sid in KNOWN_SERVERS,
                "orphan": state.orphan,
                "source": state.source,
            }
        )
    payload.sort(key=lambda entry: entry["id"])
    return {"servers": payload}
+
+
@router.get("/catalog")
async def list_catalog() -> dict[str, Any]:
    """Packaged catalog, annotated with each item's local install state."""
    catalog = load_catalog()
    snapshot = _store().load()
    for item in catalog:
        state = snapshot.servers.get(item["id"])
        item["installed"] = state is not None and state.installed
    return {"items": catalog}
+
+
@router.post("/servers/install")
async def install_server(req: InstallRequest) -> dict[str, Any]:
    """Install from the catalog, or a custom server when an endpoint is given.

    New servers start disabled: the user must opt in from the UI before
    any tool becomes callable.
    """
    catalog_by_id = {item["id"]: item for item in load_catalog()}
    base = catalog_by_id.get(req.server_id)
    if base is None and req.endpoint is None:
        raise HTTPException(
            status_code=404,
            detail=f"server {req.server_id!r} not in catalog (provide endpoint to install custom)",
        )

    # Catalog entry supplies the fallbacks; request fields win when set.
    if base is not None:
        fallback_endpoint = base["endpoint"]
        fallback_token = base.get("auth", {}).get("env", "")
        fallback_desc = base["description"]
        fallback_tags = base["tags"]
    else:
        fallback_endpoint = ""
        fallback_token = ""
        fallback_desc = ""
        fallback_tags = []

    store = _store()
    snapshot = store.load()
    server = ServerState(
        id=req.server_id,
        installed=True,
        enabled=False,
        endpoint=req.endpoint or fallback_endpoint,
        auth_token_env=req.auth_token_env or fallback_token,
        description=req.description or fallback_desc,
        tags=list(req.tags or fallback_tags),
    )
    snapshot.servers[req.server_id] = server
    store.save(snapshot)
    return {"ok": True, "server": server.to_dict() | {"id": server.id}}
+
+
@router.post("/servers/install-custom")
async def install_custom(req: CustomInstallRequest) -> dict[str, Any]:
    """Install a server from a raw Context Forge ``register.json`` document.

    The document must carry non-empty ``name`` and ``endpoint`` fields;
    everything else is optional. The new server starts disabled (same
    consent-first policy as catalog installs).

    Raises:
        HTTPException: 422 when ``name`` or ``endpoint`` is missing/empty.
    """
    rj = req.register_json
    server_id = str(rj.get("name") or "")
    endpoint = str(rj.get("endpoint") or "")
    if not server_id or not endpoint:
        raise HTTPException(
            status_code=422, detail="register_json must include 'name' and 'endpoint'"
        )

    # Hand-written documents may carry present-but-null "auth"/"tags"/
    # "description" values; the `or` guards keep a null auth object from
    # raising AttributeError (a 500) and a null description from being
    # serialised as the literal string "None".
    auth = rj.get("auth") or {}

    store = _store()
    snapshot = store.load()
    snapshot.servers[server_id] = ServerState(
        id=server_id,
        installed=True,
        enabled=False,
        endpoint=endpoint,
        auth_token_env=str(auth.get("env", "")),
        description=str(rj.get("description") or ""),
        tags=list(rj.get("tags", []) or []),
    )
    store.save(snapshot)
    return {"ok": True, "server": snapshot.servers[server_id].to_dict() | {"id": server_id}}
+
+
@router.post("/servers/{server_id}/uninstall")
async def uninstall_server(server_id: str) -> dict[str, Any]:
    """Remove a server from the local store entirely (404 if not present)."""
    store = _store()
    snapshot = store.load()
    try:
        del snapshot.servers[server_id]
    except KeyError:
        raise HTTPException(status_code=404, detail=f"server {server_id!r} not installed")
    store.save(snapshot)
    return {"ok": True}
+
+
@router.post("/servers/{server_id}/enable")
async def enable_server(server_id: str) -> dict[str, Any]:
    """Enable an installed server (thin wrapper over ``_toggle_server``)."""
    return _toggle_server(server_id, enabled=True)
+
+
@router.post("/servers/{server_id}/disable")
async def disable_server(server_id: str) -> dict[str, Any]:
    """Disable an installed server (thin wrapper over ``_toggle_server``)."""
    return _toggle_server(server_id, enabled=False)
+
+
def _toggle_server(server_id: str, *, enabled: bool) -> dict[str, Any]:
    """Flip one server's enabled flag and persist; 404 for unknown servers."""
    store = _store()
    snapshot = store.load()
    target = snapshot.servers.get(server_id)
    if target is None:
        raise HTTPException(status_code=404, detail=f"server {server_id!r} not installed")
    target.enabled = enabled
    store.save(snapshot)
    return {"ok": True, "enabled": enabled}
+
+
@router.post("/servers/{server_id}/tools/{tool_name}/toggle")
async def toggle_tool(server_id: str, tool_name: str, req: ToolToggleRequest) -> dict[str, Any]:
    """Persist a per-tool enable/disable override for one server."""
    store = _store()
    snapshot = store.load()
    if server_id not in snapshot.servers:
        raise HTTPException(status_code=404, detail=f"server {server_id!r} not installed")

    if req.enabled:
        # Block client-side policy violations *before* persisting an enable.
        verdict = PolicyEngine().is_allowed(tool_name, {})
        if verdict[0] is False:
            raise HTTPException(
                status_code=409,
                detail=f"tool {tool_name!r} blocked by policy; cannot be enabled",
            )

    snapshot.servers[server_id].tool_overrides[tool_name] = req.enabled
    store.save(snapshot)
    return {"ok": True, "tool": tool_name, "enabled": req.enabled}
+
+
+# ---------------------------------------------------------------------------
+# Two-network-context endpoint model
+# ---------------------------------------------------------------------------
+# Forge stores server endpoints as docker-network URLs (e.g.
+# "http://mcp-postgre-server:8080/mcp") because that is the URL FORGE
+# uses to call back into the federated server. The GitPilot backend,
+# however, runs on the WSL/macOS/Linux *host* — it cannot resolve docker
+# service names. So the same canonical endpoint maps to a different
+# *probe* URL depending on which side of the network seam you call from.
+#
+# Single source of truth for the host-side ports: docker-compose.mcp.yml
+# publishes them via env vars in .mcp.env (MCP_FORGE_PORT etc.). We
+# read those same env vars here so there is no place the two views can
+# drift.
+#
+# This mirrors the k8s ClusterIP / NodePort split, Consul's
+# `service.address` vs `service.taggedAddresses`, and Linkerd's
+# in-cluster-vs-ingress URL distinction.
+
+# Compose service-name → (env var with published host port, default port).
DOCKER_SERVICE_TO_HOST_PORT: dict[str, tuple[str, int]] = {
    "mcp-context-forge": ("MCP_FORGE_PORT", 4444),
    # Default ports mirror the in-network ports used by _default_endpoint().
    "mcp-postgre-server": ("MCP_POSTGRE_PORT", 8080),
    "mcp-milvus-server": ("MCP_MILVUS_PORT", 8082),
    "mcp-inspector-server": ("MCP_INSPECTOR_PORT", 8081),
}
+
+
def to_host_url(in_network_url: str) -> str:
    """Translate a docker-network URL to its host-reachable form.

    ``http://mcp-postgre-server:8080/mcp`` becomes
    ``http://localhost:<published port>/mcp``. URLs whose host is not a
    known compose service name (``localhost``, real DNS names, IPs) pass
    through unchanged, so user-added custom servers are untouched — the
    translation is idempotent. Anything that fails to parse also passes
    through unchanged, so probes surface the original failure rather
    than a translation crash.
    """
    try:
        parts = urlparse(in_network_url)
    except (TypeError, ValueError):
        return in_network_url

    service = parts.hostname
    mapping = DOCKER_SERVICE_TO_HOST_PORT.get(service) if service else None
    if mapping is None:
        return in_network_url

    env_var, fallback_port = mapping
    # The published host port comes from the same env vars compose uses,
    # so the two network views cannot drift.
    raw = os.environ.get(env_var, str(fallback_port))
    try:
        published = int(raw)
    except (TypeError, ValueError):
        published = fallback_port
    return parts._replace(netloc=f"localhost:{published}").geturl()
+
+
async def _direct_health_probe(endpoint: str, timeout: float = 3.0) -> dict[str, Any]:
    """Hit ``/health`` directly, bypassing Forge.

    Stored endpoints end in ``/mcp`` (the JSON-RPC transport path); the
    liveness probe lives at ``/health`` on the same host:port, so the
    suffix is stripped before probing. The endpoint is first translated
    through :func:`to_host_url` so docker-network service names (which
    the backend running on the host cannot resolve) become
    ``localhost:<published port>``.

    Per-server diagnostics deliberately talk to the server itself, not
    through the federation gateway — gateway proxying is a separate
    failure surface with its own "Test Forge" concern.
    """
    probe_base = to_host_url(endpoint).rstrip("/")
    if probe_base.endswith("/mcp"):
        probe_base = probe_base[: -len("/mcp")]
    url = f"{probe_base}/health"

    started = time.monotonic()

    def _elapsed_ms() -> int:
        return int((time.monotonic() - started) * 1000)

    try:
        async with httpx.AsyncClient(timeout=timeout) as http:
            resp = await http.get(url)
    except httpx.HTTPError as exc:
        return {
            "ok": False,
            "response_time_ms": _elapsed_ms(),
            "reason": str(exc),
            "probed": url,
        }

    elapsed_ms = _elapsed_ms()
    if resp.status_code != 200:
        return {
            "ok": False,
            "status_code": resp.status_code,
            "response_time_ms": elapsed_ms,
            "reason": f"HTTP {resp.status_code}",
            "probed": url,
        }

    try:
        detail = resp.json()
    except ValueError:
        detail = {"raw": resp.text[:120]}
    return {
        "ok": True,
        "status_code": 200,
        "response_time_ms": elapsed_ms,
        "detail": detail,
        "probed": url,
    }
+
+
@router.post("/servers/{server_id}/test")
async def test_server(server_id: str) -> dict[str, Any]:
    """Direct health probe of one installed server, bypassing Forge.

    "Is this server alive" and "is Forge proxying it" are deliberately
    kept separate: Forge failures (auth, federation config, network)
    are surfaced by the gateway dot in the header and by the Sync flow,
    while this endpoint answers the simpler, more actionable question.
    """
    snapshot = _store().load()
    target = snapshot.servers.get(server_id)
    if target is None:
        raise HTTPException(status_code=404, detail="server not installed")
    return await _direct_health_probe(target.endpoint)
+
+
+# ---------------------------------------------------------------------------
+# Sync (pull-from-Forge)
+# ---------------------------------------------------------------------------
+# Last successful SyncReport, kept in process memory so the UI can
+# answer GET /api/mcp/sync/status without re-running the reconcile.
+_LAST_SYNC: dict[str, Any] | None = None
+
+
class SyncRequest(BaseModel):
    """Optional overrides for one-off syncs (CI, ad-hoc CLI)."""

    # None -> the sync implementation uses the configured Forge URL
    # (see mcp_forge_sync — TODO confirm the fallback source).
    forge_url: str | None = Field(
        default=None,
        description="Override the configured Forge URL for this call only.",
    )
    list_path_override: str = Field(
        default="",
        description="Override Forge's list endpoint (default: try a few common paths).",
    )
+
+
@router.post("/sync")
async def trigger_sync(req: SyncRequest | None = None) -> dict[str, Any]:
    """Pull the Forge registry and reconcile against the local store.

    Idempotent: re-running produces the same SyncReport for the same
    Forge state. Never raises; transport failures surface as
    ``forge_unreachable=true``.
    """
    global _LAST_SYNC
    # Imported lazily so this module stays importable even when the
    # sync machinery isn't needed at startup.
    from .mcp_forge_sync import sync as run_sync

    overrides = req if req is not None else SyncRequest()
    report = await run_sync(
        store=_store(),
        forge_url=overrides.forge_url,
        list_path_override=overrides.list_path_override,
    )
    # Cache the report so GET /sync/status can answer without re-running.
    _LAST_SYNC = report.to_dict()
    return _LAST_SYNC
+
+
@router.get("/agent_tools")
async def agent_tools(include_mutation: bool = False) -> dict[str, Any]:
    """Return the toolbox the GitPilot agents currently see.

    Backed by the same store the MCP Servers tab writes to, so the UI's
    "what can I use right now" view always matches what the planner /
    coder / reviewer receive when invoked.
    """
    from .mcp_tools_bridge import describe_available_tools

    available = describe_available_tools(include_mutation=include_mutation)
    return {"count": len(available), "tools": available}
+
+
@router.get("/sync/status")
async def sync_status() -> dict[str, Any]:
    """Return the last sync's report, or a placeholder when never synced."""
    if _LAST_SYNC is not None:
        return {"ran": True, **_LAST_SYNC}
    return {
        "ran": False,
        "added": [],
        "kept": [],
        "orphaned": [],
        "forge_unreachable": False,
    }
+
+
@router.post("/servers/{server_id}/forget")
async def forget_orphan(server_id: str) -> dict[str, Any]:
    """Drop an orphan-flagged server from the store (the "Forget" action).

    Refuses non-orphan servers with a 409 so an actively-syncing entry
    can never be deleted by accident; those go through Uninstall.
    """
    store = _store()
    snapshot = store.load()
    target = snapshot.servers.get(server_id)
    if target is None:
        raise HTTPException(status_code=404, detail="server not installed")
    if not target.orphan:
        raise HTTPException(
            status_code=409,
            detail="server is not flagged orphan; use the Uninstall action instead",
        )
    del snapshot.servers[server_id]
    store.save(snapshot)
    return {"ok": True, "forgotten": server_id}
diff --git a/gitpilot/mcp_client.py b/gitpilot/mcp_client.py
new file mode 100644
index 0000000000000000000000000000000000000000..3c248d9fb58c0e818e270bc35e30b63fecb6cc06
--- /dev/null
+++ b/gitpilot/mcp_client.py
@@ -0,0 +1,341 @@
+# gitpilot/mcp_client.py
+"""Model Context Protocol (MCP) client for GitPilot.
+
+Connects to MCP servers (databases, Slack, Figma, Sentry, etc.) and
+exposes their tools to GitPilot agents. Supports three transport types:
+
+- **stdio** — launch a local subprocess and communicate via stdin/stdout
+- **http** — send JSON-RPC requests over HTTP
+- **sse** — Server-Sent Events streaming connection
+
+Configuration lives in ``.gitpilot/mcp.json``.
+"""
+from __future__ import annotations
+
+import asyncio
+import json
+import logging
+import os
+from dataclasses import dataclass, field
+from enum import Enum
+from pathlib import Path
+from typing import Any, Dict, List, Optional
+
+import httpx
+
+logger = logging.getLogger(__name__)
+
# File name looked up under .gitpilot/ by MCPClient.load_config().
MCP_CONFIG_FILE = "mcp.json"
# JSON-RPC protocol version stamped on every outgoing request.
MCP_JSONRPC_VERSION = "2.0"
+
+
class TransportType(str, Enum):
    """Supported MCP transports (string-valued for easy JSON round-trips)."""

    STDIO = "stdio"  # local subprocess over stdin/stdout
    HTTP = "http"    # JSON-RPC over HTTP POST
    SSE = "sse"      # Server-Sent Events stream
+
+
@dataclass
class MCPServerConfig:
    """Configuration for a single MCP server."""

    name: str
    transport: TransportType
    # stdio transport
    command: Optional[str] = None
    args: List[str] = field(default_factory=list)
    env: Dict[str, str] = field(default_factory=dict)
    # http / sse transports
    url: Optional[str] = None
    headers: Dict[str, str] = field(default_factory=dict)
    # optional bearer token ($VAR references are expanded)
    auth_token: Optional[str] = None

    @classmethod
    def from_dict(cls, data: dict) -> "MCPServerConfig":
        """Build a config from one ``mcp.json`` entry.

        Accepts either a ``type`` key or the legacy ``transport`` key;
        ``$VAR`` references in env values, args and the auth token are
        expanded from the process environment.
        """
        kind = data.get("type", data.get("transport", "stdio"))
        expanded_env: Dict[str, str] = {}
        for key, value in data.get("env", {}).items():
            expanded_env[key] = os.path.expandvars(value) if isinstance(value, str) else value
        token = data.get("auth_token")
        return cls(
            name=data["name"],
            transport=TransportType(kind),
            command=data.get("command"),
            args=[os.path.expandvars(arg) for arg in data.get("args", [])],
            env=expanded_env,
            url=data.get("url"),
            headers=data.get("headers", {}),
            auth_token=os.path.expandvars(token) if token else None,
        )
+
+
@dataclass
class MCPTool:
    """A tool discovered from an MCP server."""

    name: str
    description: str
    # JSON Schema for the tool's arguments, as advertised by the server
    # (the "inputSchema" field of tools/list).
    input_schema: Dict[str, Any] = field(default_factory=dict)
    # Which configured server this tool came from.
    server_name: str = ""
+
+
@dataclass
class MCPConnection:
    """An active connection to an MCP server."""

    config: MCPServerConfig
    # Tools discovered during connect() via tools/list.
    tools: List[MCPTool] = field(default_factory=list)
    # stdio transport only: the spawned child process.
    _process: Optional[asyncio.subprocess.Process] = field(default=None, repr=False)
    # Monotonic JSON-RPC request-id counter (see next_id()).
    _request_id: int = field(default=0, repr=False)

    @property
    def is_alive(self) -> bool:
        """True while the transport can still accept requests."""
        if self.config.transport == TransportType.STDIO:
            return self._process is not None and self._process.returncode is None
        return True  # HTTP/SSE are stateless per-request

    def next_id(self) -> int:
        """Return the next JSON-RPC request id (strictly increasing)."""
        self._request_id += 1
        return self._request_id
+
+
class MCPClient:
    """Connect to MCP servers and call their tools.

    Usage::

        client = MCPClient()
        client.load_config(workspace / ".gitpilot")
        conn = await client.connect("postgres")
        tools = await client.list_tools(conn)
        result = await client.call_tool(conn, "query", {"sql": "SELECT 1"})
    """

    def __init__(self) -> None:
        # name -> declared config (from mcp.json or add_server()).
        self._configs: Dict[str, MCPServerConfig] = {}
        # name -> live connection (created lazily by connect()).
        self._connections: Dict[str, MCPConnection] = {}

    # ------------------------------------------------------------------
    # Configuration
    # ------------------------------------------------------------------

    def load_config(self, gitpilot_dir: Path) -> int:
        """Load MCP server configs from ``.gitpilot/mcp.json``. Returns count.

        Accepts either a bare list of server entries or an object with a
        ``servers`` key. A missing file returns 0; a malformed file logs
        a warning and returns 0 — startup must not crash on bad config.
        """
        config_path = gitpilot_dir / MCP_CONFIG_FILE
        if not config_path.exists():
            return 0
        try:
            data = json.loads(config_path.read_text())
            servers = data if isinstance(data, list) else data.get("servers", [])
            for entry in servers:
                cfg = MCPServerConfig.from_dict(entry)
                self._configs[cfg.name] = cfg
            logger.info("Loaded %d MCP server configs", len(servers))
            return len(servers)
        except Exception as e:
            # Deliberately best-effort (see docstring).
            logger.warning("Failed to load MCP config: %s", e)
            return 0

    def add_server(self, config: MCPServerConfig) -> None:
        """Register (or replace) a server config programmatically."""
        self._configs[config.name] = config

    def list_servers(self) -> List[str]:
        """Names of all configured servers (connected or not)."""
        return list(self._configs.keys())

    # ------------------------------------------------------------------
    # Connection management
    # ------------------------------------------------------------------

    async def connect(self, server_name: str) -> MCPConnection:
        """Connect to a named MCP server and discover its tools.

        Reuses an existing live connection when possible.

        Raises:
            ValueError: if ``server_name`` has no registered config.
        """
        if server_name in self._connections and self._connections[server_name].is_alive:
            return self._connections[server_name]

        config = self._configs.get(server_name)
        if not config:
            raise ValueError(f"Unknown MCP server: {server_name}")

        conn = MCPConnection(config=config)

        if config.transport == TransportType.STDIO:
            await self._connect_stdio(conn)

        # Discover tools via initialize + tools/list
        await self._initialize(conn)
        conn.tools = await self.list_tools(conn)

        self._connections[server_name] = conn
        logger.info("Connected to MCP server '%s' — %d tools", server_name, len(conn.tools))
        return conn

    async def disconnect(self, server_name: str) -> None:
        """Drop a connection; terminates the stdio subprocess if running."""
        conn = self._connections.pop(server_name, None)
        if conn and conn._process:
            # terminate() raises ProcessLookupError when the child already
            # exited, so only signal a live process; wait() is always safe.
            if conn._process.returncode is None:
                conn._process.terminate()
            await conn._process.wait()

    async def disconnect_all(self) -> None:
        """Disconnect every active connection."""
        for name in list(self._connections):
            await self.disconnect(name)

    # ------------------------------------------------------------------
    # Tool operations
    # ------------------------------------------------------------------

    async def list_tools(self, conn: MCPConnection) -> List[MCPTool]:
        """List tools available on the connected server."""
        result = await self._send_request(conn, "tools/list", {})
        tools = []
        for t in result.get("tools", []):
            tools.append(MCPTool(
                name=t["name"],
                description=t.get("description", ""),
                input_schema=t.get("inputSchema", {}),
                server_name=conn.config.name,
            ))
        return tools

    async def call_tool(
        self,
        conn: MCPConnection,
        tool_name: str,
        params: Optional[Dict[str, Any]] = None,
    ) -> Any:
        """Call a tool on the connected server.

        Returns the newline-joined text parts when the server replies
        with text content, otherwise the raw result dict.
        """
        result = await self._send_request(conn, "tools/call", {
            "name": tool_name,
            "arguments": params or {},
        })
        # MCP returns a content array; flatten the text parts.
        content = result.get("content", [])
        texts = [c.get("text", "") for c in content if c.get("type") == "text"]
        return "\n".join(texts) if texts else result

    def to_crewai_tools(self, conn: MCPConnection) -> list:
        """Wrap MCP tools as CrewAI-compatible tool functions.

        Returns a list of callables decorated with ``@tool``.

        Each wrapper is built by a factory function so the tool name,
        description and connection are bound per tool. The previous
        implementation assigned loop-local variables that the closure
        captured *by reference*, so at call time every wrapper invoked
        the LAST discovered tool (classic late-binding closure bug).
        """
        from crewai.tools import tool as crewai_tool

        def _make_wrapper(target: MCPConnection, name: str, desc: str):
            # Factory scope pins target/name/desc for this one wrapper.
            @crewai_tool(name)
            def _wrapper(params: str = "{}") -> str:
                import asyncio as _aio
                # Each call gets a private event loop so the sync CrewAI
                # interface can drive the async transport.
                loop = _aio.new_event_loop()
                try:
                    parsed = json.loads(params) if isinstance(params, str) else params
                    return str(loop.run_until_complete(
                        self.call_tool(target, name, parsed)
                    ))
                finally:
                    loop.close()

            _wrapper.__doc__ = desc
            return _wrapper

        wrapped = []
        for mcp_tool in conn.tools:
            desc = mcp_tool.description or f"MCP tool: {mcp_tool.name}"
            wrapped.append(_make_wrapper(conn, mcp_tool.name, desc))
        return wrapped

    # ------------------------------------------------------------------
    # Transport internals
    # ------------------------------------------------------------------

    async def _connect_stdio(self, conn: MCPConnection) -> None:
        """Spawn the stdio server subprocess with a merged environment."""
        config = conn.config
        if not config.command:
            raise ValueError(f"stdio server '{config.name}' requires a command")
        env = {**os.environ, **config.env}
        conn._process = await asyncio.create_subprocess_exec(
            config.command, *config.args,
            stdin=asyncio.subprocess.PIPE,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE,
            env=env,
        )

    async def _initialize(self, conn: MCPConnection) -> Dict[str, Any]:
        """Perform the MCP ``initialize`` handshake."""
        return await self._send_request(conn, "initialize", {
            "protocolVersion": "2024-11-05",
            "capabilities": {},
            "clientInfo": {"name": "gitpilot", "version": "1.0"},
        })

    async def _send_request(
        self,
        conn: MCPConnection,
        method: str,
        params: Dict[str, Any],
    ) -> Dict[str, Any]:
        """Send a JSON-RPC request via the appropriate transport."""
        msg = {
            "jsonrpc": MCP_JSONRPC_VERSION,
            "id": conn.next_id(),
            "method": method,
            "params": params,
        }

        if conn.config.transport == TransportType.STDIO:
            return await self._send_stdio(conn, msg)
        else:
            return await self._send_http(conn, msg)

    async def _send_stdio(
        self, conn: MCPConnection, msg: dict,
    ) -> Dict[str, Any]:
        """Write one JSON-RPC line to the child and read one line back."""
        proc = conn._process
        if not proc or not proc.stdin or not proc.stdout:
            raise RuntimeError(f"stdio process not running for '{conn.config.name}'")
        payload = json.dumps(msg) + "\n"
        proc.stdin.write(payload.encode())
        await proc.stdin.drain()
        line = await proc.stdout.readline()
        if not line:
            raise RuntimeError(f"No response from MCP server '{conn.config.name}'")
        resp = json.loads(line)
        if "error" in resp:
            raise RuntimeError(f"MCP error: {resp['error']}")
        return resp.get("result", {})

    async def _send_http(
        self, conn: MCPConnection, msg: dict,
    ) -> Dict[str, Any]:
        """POST one JSON-RPC message over HTTP (used for http and sse)."""
        url = conn.config.url
        if not url:
            raise ValueError(f"HTTP server '{conn.config.name}' requires a url")
        headers = {**conn.config.headers, "Content-Type": "application/json"}
        if conn.config.auth_token:
            headers["Authorization"] = f"Bearer {conn.config.auth_token}"
        async with httpx.AsyncClient(timeout=30) as client:
            resp = await client.post(url, json=msg, headers=headers)
            resp.raise_for_status()
            data = resp.json()
        if "error" in data:
            raise RuntimeError(f"MCP error: {data['error']}")
        return data.get("result", {})

    # ------------------------------------------------------------------
    # Serialisation
    # ------------------------------------------------------------------

    def to_dict(self) -> Dict[str, Any]:
        """JSON-friendly summary of configured servers and connection state."""
        return {
            "servers": [
                {
                    "name": c.name,
                    "transport": c.transport.value,
                    "command": c.command,
                    "url": c.url,
                    "tools_count": len(self._connections[c.name].tools)
                    if c.name in self._connections else 0,
                    "connected": c.name in self._connections,
                }
                for c in self._configs.values()
            ]
        }
diff --git a/gitpilot/mcp_forge_sync.py b/gitpilot/mcp_forge_sync.py
new file mode 100644
index 0000000000000000000000000000000000000000..3af4bce0b909b2a2b69c20c850a156b89ed0dc4a
--- /dev/null
+++ b/gitpilot/mcp_forge_sync.py
@@ -0,0 +1,313 @@
+"""Reconcile loop: pull the MCP server registry from MCP Context Forge.
+
+This is the canonical implementation for both ``POST /api/mcp/sync`` (UI
+button + ``make sync-mcp``) and any future scheduled background job.
+
+Design contract (industry / GitOps best practices applied)
+----------------------------------------------------------
+
+* **Pull-only.** GitPilot never writes back to Forge. Forge is the
+ source of truth for "what servers exist"; GitPilot keeps the local
+ user-state (`enabled`, `tool_overrides`, `orphan` flag) in
+ ``mcp_admin_api.MCPStore``.
+* **Idempotent.** ``sync()`` maps ``forge_state -> local_state``
+  deterministically: re-running it against the same Forge state
+  converges to the same local state. Tests rely on this property.
+* **Non-destructive.** Servers that disappear from Forge are flagged
+ ``orphan=True`` and kept locally; the user has a "Forget" action in
+ the UI but nothing is auto-deleted.
+* **User-state preservation.** Every reconcile re-applies
+ ``tool_overrides`` and ``enabled`` from the previous local state. A
+ sync never silently disables a server the user just turned on.
+* **Custom servers respected.** Servers added via the wizard's "Custom"
+ tab carry ``source="user"``. They never get the orphan badge even
+ when missing from Forge.
+* **Observable.** The :class:`SyncReport` returned is the full
+ structured result -- counts, ids, correlation id, timestamp -- so the
+ UI banner and audit logs share one source.
+"""
+from __future__ import annotations
+
+import logging
+import os
+from dataclasses import asdict, dataclass, field
+from datetime import datetime, timezone
+from typing import Any, Iterable, Mapping
+from uuid import uuid4
+
+import httpx
+
+from gitpilot.mcp_admin_api import MCPStore, ServerState, StoreSnapshot
+
+logger = logging.getLogger(__name__)
+
+# ---------------------------------------------------------------------------
+# Settings
+# ---------------------------------------------------------------------------
+ENV_FORGE_URL = "GITPILOT_MCP_FORGE_URL"
+ENV_FORGE_TOKEN = "GITPILOT_MCP_FORGE_TOKEN"
+ENV_FORGE_LIST_PATH = "GITPILOT_MCP_FORGE_LIST_PATH"
+DEFAULT_FORGE_URL = "http://localhost:4444"
+# Forge's admin API; configurable so users on a customised gateway can override.
+DEFAULT_LIST_PATHS: tuple[str, ...] = (
+ "/admin/servers",
+ "/registry/servers",
+ "/api/servers",
+)
+
+
+def _settings() -> dict[str, str]:
+ return {
+ "url": os.environ.get(ENV_FORGE_URL, DEFAULT_FORGE_URL).rstrip("/"),
+ "token": os.environ.get(ENV_FORGE_TOKEN, ""),
+ "list_path": os.environ.get(ENV_FORGE_LIST_PATH, ""),
+ }
+
+
+# ---------------------------------------------------------------------------
+# DTOs
+# ---------------------------------------------------------------------------
@dataclass(frozen=True)
class ForgeServer:
    """One server entry as advertised by Forge.

    Forge schemas vary slightly by version; we keep this dataclass as a
    forgiving normalised view. Unknown fields are dropped silently
    (forward-compat) -- see :func:`_normalise`.
    """
    id: str  # unique server id (Forge "name" / "id" / "server_name")
    endpoint: str  # URL (Forge "endpoint" / "url" / "uri")
    description: str = ""  # human-readable blurb; may be empty
    tags: tuple[str, ...] = ()  # capability tags; string entries only
    auth_token_env: str = ""  # env-var NAME holding the token — never the token itself
+
+
@dataclass
class SyncReport:
    """Structured outcome of one reconcile pass.

    Serves as the single source for the UI banner and audit logs:
    per-bucket server ids plus a correlation id and UTC timestamp.
    """

    added: list[str] = field(default_factory=list)
    kept: list[str] = field(default_factory=list)
    orphaned: list[str] = field(default_factory=list)
    forge_unreachable: bool = False
    error: str | None = None
    # Fresh per-report identifiers for log correlation.
    correlation_id: str = field(default_factory=lambda: uuid4().hex)
    timestamp: str = field(
        default_factory=lambda: datetime.now(timezone.utc).isoformat()
    )

    def to_dict(self) -> dict[str, Any]:
        """Plain-dict view suitable for JSON responses and logging."""
        return asdict(self)

    @property
    def total(self) -> int:
        """Total number of servers touched by this pass."""
        return sum(len(bucket) for bucket in (self.added, self.kept, self.orphaned))
+
+
+# ---------------------------------------------------------------------------
+# Normalisation
+# ---------------------------------------------------------------------------
def _normalise(payload: Mapping[str, Any]) -> ForgeServer | None:
    """Coerce one raw Forge registry row into a :class:`ForgeServer`.

    Accepts the id under ``name``/``id``/``server_name`` and the URL
    under ``endpoint``/``url``/``uri``. Returns ``None`` when either is
    missing: a malformed row is dropped rather than crashing the
    reconcile. Non-string tags are filtered out.
    """
    server_id = payload.get("name") or payload.get("id") or payload.get("server_name")
    endpoint = payload.get("endpoint") or payload.get("url") or payload.get("uri")
    if not server_id or not endpoint:
        return None
    tags = payload.get("tags") or []
    auth = payload.get("auth") or {}
    return ForgeServer(
        id=str(server_id),
        endpoint=str(endpoint),
        description=str(payload.get("description") or ""),
        tags=tuple(str(tag) for tag in tags if isinstance(tag, str)),
        auth_token_env=str(auth.get("env") or auth.get("token_env") or ""),
    )
+
+
def _extract_servers(body: Any) -> list[ForgeServer]:
    """Normalise a Forge list response into :class:`ForgeServer` objects.

    Handles both known response shapes — ``{"servers": [...]}`` (or
    ``{"items": [...]}``) and a bare JSON array; anything else yields an
    empty list. Rows that fail :func:`_normalise` are skipped.
    """
    if isinstance(body, list):
        items = body
    elif isinstance(body, dict):
        items = body.get("servers") or body.get("items") or []
    else:
        items = []
    servers: list[ForgeServer] = []
    for item in items:
        if not isinstance(item, dict):
            continue
        server = _normalise(item)
        if server is not None:
            servers.append(server)
    return servers
+
+
+# ---------------------------------------------------------------------------
+# HTTP fetch
+# ---------------------------------------------------------------------------
async def fetch_forge_registry(
    *,
    url: str,
    token: str,
    list_path_override: str = "",
    http_client: httpx.AsyncClient | None = None,
    timeout_seconds: float = 5.0,
) -> list[ForgeServer]:
    """Fetch + normalise the Forge server list.

    Tries ``list_path_override`` first when supplied; otherwise iterates
    :data:`DEFAULT_LIST_PATHS` until one returns 200. Re-raises the last
    :class:`httpx.HTTPError` only when some candidate failed with a
    transport error or a non-404 status; when *every* candidate merely
    returned 404, an empty list is returned instead of raising.

    Args:
        url: Base URL of the Forge gateway (trailing slash stripped).
        token: Bearer token; the Authorization header is sent only when
            non-empty.
        list_path_override: Single path to try instead of the defaults.
        http_client: Caller-supplied client (left open on return); when
            ``None`` a temporary client is created and closed here.
        timeout_seconds: Timeout for the temporary client only.
    """
    headers = {"Accept": "application/json"}
    if token:
        headers["Authorization"] = f"Bearer {token}"

    candidates: tuple[str, ...] = (
        (list_path_override,) if list_path_override else DEFAULT_LIST_PATHS
    )

    # Only a client we created ourselves gets closed in the finally block.
    own_client = http_client is None
    client = http_client or httpx.AsyncClient(timeout=timeout_seconds)
    last_exc: Exception | None = None
    try:
        for path in candidates:
            full = f"{url.rstrip('/')}{path}"
            try:
                resp = await client.get(full, headers=headers)
                if resp.status_code == 200:
                    return _extract_servers(resp.json())
                # 404 = wrong path; try the next candidate.
                if resp.status_code != 404:
                    last_exc = httpx.HTTPStatusError(
                        f"{full} -> HTTP {resp.status_code}",
                        request=resp.request,
                        response=resp,
                    )
            except httpx.HTTPError as exc:
                last_exc = exc
        if last_exc is not None:
            raise last_exc
        # Every candidate 404'd: treat as "no registry", not as an error.
        return []
    finally:
        if own_client:
            await client.aclose()
+
+
+# ---------------------------------------------------------------------------
+# Reconcile (pure)
+# ---------------------------------------------------------------------------
def reconcile(
    snapshot: StoreSnapshot,
    forge_servers: Iterable[ForgeServer],
) -> tuple[StoreSnapshot, SyncReport]:
    """Apply ``forge_servers`` onto ``snapshot``.

    Deterministic and idempotent over server state: re-running with the
    same inputs converges to the same snapshot. Note that ``snapshot``
    is mutated in place (the same object is returned) and the
    :class:`SyncReport` carries a fresh correlation id and timestamp on
    every call, so this is not pure in the strict sense. The caller is
    responsible for saving the returned snapshot.

    Returns:
        ``(snapshot, report)`` — the mutated snapshot plus a structured
        report of added / kept / orphaned server ids.
    """
    forge_by_id: dict[str, ForgeServer] = {fs.id: fs for fs in forge_servers}
    report = SyncReport()

    # --- additions / refreshes -------------------------------------------
    for fid, fserver in forge_by_id.items():
        if fid not in snapshot.servers:
            snapshot.servers[fid] = ServerState(
                id=fid,
                installed=True,
                enabled=False,  # opt-in; user toggles in UI
                endpoint=fserver.endpoint,
                auth_token_env=fserver.auth_token_env,
                description=fserver.description,
                tags=list(fserver.tags),
                source="forge-sync",
            )
            report.added.append(fid)
        else:
            # Refresh metadata Forge owns; preserve user-owned fields.
            # Empty Forge values never clobber existing local values.
            current = snapshot.servers[fid]
            current.endpoint = fserver.endpoint or current.endpoint
            current.description = fserver.description or current.description
            current.tags = list(fserver.tags) or current.tags
            if fserver.auth_token_env:
                current.auth_token_env = fserver.auth_token_env
            # Drop any orphan flag; the server is back in Forge.
            current.orphan = False
            report.kept.append(fid)

    # --- orphans (in local but not in forge) -----------------------------
    for sid, sstate in list(snapshot.servers.items()):
        if sid in forge_by_id:
            continue
        # Custom (user-added) servers are never orphaned.
        if sstate.source == "user":
            continue
        # Mark, never delete — removal is an explicit user action in the UI.
        sstate.orphan = True
        report.orphaned.append(sid)

    return snapshot, report
+
+
+# ---------------------------------------------------------------------------
+# End-to-end entry point
+# ---------------------------------------------------------------------------
async def sync(
    *,
    store: MCPStore | None = None,
    forge_url: str | None = None,
    forge_token: str | None = None,
    list_path_override: str = "",
    http_client: httpx.AsyncClient | None = None,
) -> SyncReport:
    """Top-level entry point used by the REST endpoint and the CLI.

    Always returns a :class:`SyncReport`; transport failures
    (:class:`httpx.HTTPError`) are captured as ``forge_unreachable=True``
    so the UI can render a single, deterministic error path. Local state
    is saved only after a successful fetch + reconcile.

    Args:
        store: Persistence backend; a fresh :class:`MCPStore` when None.
        forge_url: Overrides the ``GITPILOT_MCP_FORGE_URL`` setting.
        forge_token: Overrides the ``GITPILOT_MCP_FORGE_TOKEN`` setting.
        list_path_override: Overrides the list-endpoint path probing.
        http_client: Injected client (useful in tests); otherwise
            :func:`fetch_forge_registry` creates a temporary one.
    """
    settings = _settings()
    s = MCPStore() if store is None else store

    # Explicit keyword arguments win over environment-derived settings.
    url = forge_url or settings["url"]
    token = forge_token or settings["token"]
    path = list_path_override or settings["list_path"]

    snapshot = s.load()

    try:
        forge_servers = await fetch_forge_registry(
            url=url,
            token=token,
            list_path_override=path,
            http_client=http_client,
        )
    except httpx.HTTPError as exc:
        # Forge unreachable: report it and leave local state untouched.
        report = SyncReport(forge_unreachable=True, error=str(exc))
        logger.warning(
            "forge sync unreachable",
            extra={"url": url, "correlation_id": report.correlation_id},
        )
        return report

    new_snapshot, report = reconcile(snapshot, forge_servers)
    s.save(new_snapshot)

    logger.info(
        "mcp forge sync completed",
        extra={
            "correlation_id": report.correlation_id,
            "added": len(report.added),
            "kept": len(report.kept),
            "orphaned": len(report.orphaned),
        },
    )
    return report
diff --git a/gitpilot/mcp_plugin/__init__.py b/gitpilot/mcp_plugin/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..c7ec4c2960a8c9ce6c629a43bc64e3b683094287
--- /dev/null
+++ b/gitpilot/mcp_plugin/__init__.py
@@ -0,0 +1,59 @@
+"""MCP Context Forge plugin for GitPilot.
+
+This package wires GitPilot's agents to external MCP servers (PostgreSQL,
+Milvus, Inspector) through a single MCP Context Forge gateway. The
+existing :mod:`gitpilot.mcp_client` keeps providing low-level transport;
+this package adds:
+
+- a per-server registry that the agents consult by capability tag,
+- a policy layer that blocks destructive tool calls before they reach
+ Context Forge,
+- thin agent hooks (``coder_describe_table``, ``test_runner_fixtures``,
+ ``reviewer_validate_migration``) so agents don't need to know the
+ exact tool names.
+
+The plugin degrades gracefully: if ``GITPILOT_MCP_ENABLED`` is false or
+the gateway is unreachable, calls return ``None`` and the agents fall
+back to their previous behaviour.
+"""
+
+from .agent_hooks import (
+ AgentMCPHooks,
+ coder_describe_collection,
+ coder_describe_table,
+ coder_generate_repository_context,
+ reviewer_batch_validate,
+ reviewer_validate_migration,
+ test_runner_fixtures,
+ test_runner_test_vectors,
+)
+from .config import MCPPluginSettings, get_settings
+from .forge_client import (
+ ForgeClientError,
+ MCPContextForgeClient,
+ PolicyViolation,
+ ToolResult,
+)
+from .policies import PolicyEngine
+from .registry import KNOWN_SERVERS, ServerEntry, ToolRegistry
+
+__all__ = [
+ "AgentMCPHooks",
+ "ForgeClientError",
+ "KNOWN_SERVERS",
+ "MCPContextForgeClient",
+ "MCPPluginSettings",
+ "PolicyEngine",
+ "PolicyViolation",
+ "ServerEntry",
+ "ToolRegistry",
+ "ToolResult",
+ "coder_describe_collection",
+ "coder_describe_table",
+ "coder_generate_repository_context",
+ "get_settings",
+ "reviewer_batch_validate",
+ "reviewer_validate_migration",
+ "test_runner_fixtures",
+ "test_runner_test_vectors",
+]
diff --git a/gitpilot/mcp_plugin/agent_hooks.py b/gitpilot/mcp_plugin/agent_hooks.py
new file mode 100644
index 0000000000000000000000000000000000000000..e4ac59f96a6e19d4b16b36994b2fb9b44cbb69ff
--- /dev/null
+++ b/gitpilot/mcp_plugin/agent_hooks.py
@@ -0,0 +1,183 @@
+"""Convenience helpers used directly by GitPilot's agents.
+
+Each helper:
+
+* swallows transport / policy errors and returns ``None`` so the agent
+ can fall back to its previous behaviour;
+* takes only the inputs the agent already has (table name, collection
+ name, migration SQL);
+* returns the parsed JSON content of the tool response, not the
+ :class:`ToolResult` wrapper, so call sites stay short.
+"""
+from __future__ import annotations
+
+import logging
+from dataclasses import dataclass
+from typing import Any
+
+from .forge_client import ForgeClientError, MCPContextForgeClient, ToolResult
+
+logger = logging.getLogger(__name__)
+
+
+def _unwrap(result: ToolResult) -> dict[str, Any] | None:
+ if not result.success:
+ logger.info("mcp tool %s failed: %s", result.tool_name, result.error)
+ return None
+ return result.content
+
+
@dataclass
class AgentMCPHooks:
    """Bind a forge client + agent identity once, then call helpers many times."""

    client: MCPContextForgeClient
    agent_name: str
    repository_path: str | None = None

    async def call(
        self, tool_name: str, arguments: dict[str, Any]
    ) -> dict[str, Any] | None:
        """Invoke ``tool_name`` through the forge client.

        Returns the unwrapped content dict, or ``None`` when the call is
        blocked by policy, the gateway is unavailable, or the tool fails.
        """
        try:
            outcome = await self.client.call_tool(
                tool_name,
                arguments,
                agent_name=self.agent_name,
                repository_path=self.repository_path,
            )
        except ForgeClientError as exc:
            # Degrade gracefully: the agent falls back to its old behaviour.
            logger.info("mcp call %s skipped: %s", tool_name, exc)
            return None
        return _unwrap(outcome)
+
+
+# ---------------------------------------------------------------------------
+# Coder agent
+# ---------------------------------------------------------------------------
async def coder_describe_table(
    client: MCPContextForgeClient,
    table: str,
    schema: str = "public",
    *,
    agent_name: str = "coder",
    repository_path: str | None = None,
) -> dict[str, Any] | None:
    """Fetch column, index and constraint details for ``schema.table``."""
    hooks = AgentMCPHooks(client, agent_name, repository_path)
    arguments = {
        "schema": schema,
        "table": table,
        "include_indexes": True,
        "include_constraints": True,
    }
    return await hooks.call("postgres.describe_table", arguments)
+
+
async def coder_generate_repository_context(
    client: MCPContextForgeClient,
    table: str,
    schema: str = "public",
    language: str = "python",
    *,
    agent_name: str = "coder",
    repository_path: str | None = None,
) -> dict[str, Any] | None:
    """Ask the postgres server for repository-pattern codegen context."""
    hooks = AgentMCPHooks(client, agent_name, repository_path)
    return await hooks.call(
        "postgres.generate_repository_context",
        {"schema": schema, "table": table, "language": language, "async_mode": True},
    )
+
+
async def coder_describe_collection(
    client: MCPContextForgeClient,
    collection: str,
    *,
    agent_name: str = "coder",
    repository_path: str | None = None,
) -> dict[str, Any] | None:
    """Fetch schema and statistics for one Milvus collection."""
    hooks = AgentMCPHooks(client, agent_name, repository_path)
    return await hooks.call(
        "milvus.describe_collection",
        {"collection": collection, "include_statistics": True},
    )
+
+
+# ---------------------------------------------------------------------------
+# Test runner agent
+# ---------------------------------------------------------------------------
async def test_runner_fixtures(
    client: MCPContextForgeClient,
    table: str,
    schema: str = "public",
    num_rows: int = 10,
    seed: int = 42,
    *,
    agent_name: str = "test_runner",
    repository_path: str | None = None,
) -> dict[str, Any] | None:
    """Generate deterministic (seeded) test fixtures for ``schema.table``."""
    hooks = AgentMCPHooks(client, agent_name, repository_path)
    arguments = {
        "schema": schema,
        "table": table,
        "num_rows": num_rows,
        "format": "python_dict",
        "seed": seed,
    }
    return await hooks.call("postgres.generate_test_fixtures", arguments)
+
+
async def test_runner_test_vectors(
    client: MCPContextForgeClient,
    dimension: int,
    num_vectors: int = 20,
    *,
    seed: int = 42,
    agent_name: str = "test_runner",
    repository_path: str | None = None,
) -> dict[str, Any] | None:
    """Generate deterministic (seeded, normal-distributed) test vectors."""
    hooks = AgentMCPHooks(client, agent_name, repository_path)
    arguments = {
        "dimension": dimension,
        "num_vectors": num_vectors,
        "distribution": "normal",
        "seed": seed,
    }
    return await hooks.call("milvus.generate_test_vectors", arguments)
+
+
+# ---------------------------------------------------------------------------
+# Reviewer agent
+# ---------------------------------------------------------------------------
async def reviewer_validate_migration(
    client: MCPContextForgeClient,
    migration_sql: str,
    *,
    dry_run: bool = False,
    agent_name: str = "reviewer",
    repository_path: str | None = None,
) -> dict[str, Any] | None:
    """Validate a SQL migration's syntax and dependencies via MCP."""
    hooks = AgentMCPHooks(client, agent_name, repository_path)
    arguments = {
        "migration_sql": migration_sql,
        "check_syntax": True,
        "check_dependencies": True,
        "dry_run": dry_run,
    }
    return await hooks.call("postgres.validate_migration", arguments)
+
+
async def reviewer_batch_validate(
    client: MCPContextForgeClient,
    targets: list[dict[str, Any]],
    *,
    include_contract_tests: bool = True,
    agent_name: str = "reviewer",
    repository_path: str | None = None,
) -> dict[str, Any] | None:
    """Ask the inspector to roll up health for every attached MCP server."""
    hooks = AgentMCPHooks(client, agent_name, repository_path)
    return await hooks.call(
        "inspector.batch_validate",
        {"targets": targets, "includeContractTests": include_contract_tests},
    )
diff --git a/gitpilot/mcp_plugin/config.py b/gitpilot/mcp_plugin/config.py
new file mode 100644
index 0000000000000000000000000000000000000000..52d093d966614af3a80119d678a5e10a991be2b6
--- /dev/null
+++ b/gitpilot/mcp_plugin/config.py
@@ -0,0 +1,70 @@
+"""Settings for the Context Forge plugin.
+
+All values are read from environment variables prefixed with
+``GITPILOT_MCP_`` so they can be set without touching code, and so they
+don't interact with the existing :mod:`gitpilot.mcp_client` settings.
+"""
+from __future__ import annotations
+
+import os
+from dataclasses import dataclass, field
+from functools import lru_cache
+
+
+def _csv(value: str) -> list[str]:
+ return [item.strip() for item in value.split(",") if item.strip()]
+
+
+def _bool(value: str, default: bool) -> bool:
+ if not value:
+ return default
+ return value.strip().lower() in {"1", "true", "yes", "on"}
+
+
@dataclass(frozen=True)
class MCPPluginSettings:
    """Immutable snapshot of the Context Forge plugin configuration.

    Populated from ``GITPILOT_MCP_*`` environment variables by
    :func:`get_settings`; the defaults below apply when a variable is
    unset.
    """
    enabled: bool = True  # master switch; False makes the plugin a no-op
    gateway_url: str = "http://localhost:4444/mcp"  # Context Forge endpoint
    # NOTE(review): placeholder default — deployments must set
    # GITPILOT_MCP_AUTH_TOKEN; never ship "change-me" to production.
    auth_token: str = "change-me"
    # Client-side allowlist of Forge server ids (enforced by PolicyEngine).
    allowed_servers: tuple[str, ...] = (
        "mcp-postgre-server",
        "mcp-milvus-server",
        "mcp-inspector-server",
    )
    enable_during_code_generation: bool = True
    enable_during_test_generation: bool = True
    require_approval_for_mutations: bool = True  # blocks MUTATION_TOOLS client-side
    max_calls_per_request: int = 20  # per-request cap enforced by the forge client
    timeout_seconds: int = 30  # HTTP timeout for gateway calls, in seconds
    log_all_calls: bool = True  # emit one INFO line per tool invocation
+
+
@lru_cache
def get_settings() -> MCPPluginSettings:
    """Build the per-process settings singleton from the environment.

    Cached via ``lru_cache``, so the environment is read only once per
    process. ``allowed_servers`` keeps the dataclass default when the
    corresponding env var is unset or empty.
    """
    env = os.environ
    kwargs = {
        "enabled": _bool(env.get("GITPILOT_MCP_ENABLED", ""), True),
        "gateway_url": env.get(
            "GITPILOT_MCP_GATEWAY_URL", "http://localhost:4444/mcp"
        ),
        "auth_token": env.get("GITPILOT_MCP_AUTH_TOKEN", "change-me"),
        "enable_during_code_generation": _bool(
            env.get("GITPILOT_MCP_ENABLE_DURING_CODE_GENERATION", ""), True
        ),
        "enable_during_test_generation": _bool(
            env.get("GITPILOT_MCP_ENABLE_DURING_TEST_GENERATION", ""), True
        ),
        "require_approval_for_mutations": _bool(
            env.get("GITPILOT_MCP_REQUIRE_APPROVAL_FOR_MUTATIONS", ""), True
        ),
        "max_calls_per_request": int(
            env.get("GITPILOT_MCP_MAX_CALLS_PER_REQUEST", "20")
        ),
        "timeout_seconds": int(env.get("GITPILOT_MCP_TIMEOUT_SECONDS", "30")),
        "log_all_calls": _bool(env.get("GITPILOT_MCP_LOG_ALL_CALLS", ""), True),
    }
    allowed = env.get("GITPILOT_MCP_ALLOWED_SERVERS", "")
    if allowed:
        # Only override when the variable is set; otherwise the frozen
        # dataclass default tuple applies.
        kwargs["allowed_servers"] = tuple(_csv(allowed))
    return MCPPluginSettings(**kwargs)
diff --git a/gitpilot/mcp_plugin/forge_client.py b/gitpilot/mcp_plugin/forge_client.py
new file mode 100644
index 0000000000000000000000000000000000000000..906b7a81cd6afff4cd748bb9f84122465e14ab76
--- /dev/null
+++ b/gitpilot/mcp_plugin/forge_client.py
@@ -0,0 +1,203 @@
+"""Async HTTP client for the MCP Context Forge gateway.
+
+Built on :mod:`httpx`. The lower-level MCP transport (stdio/HTTP/SSE)
+already lives in :mod:`gitpilot.mcp_client`; this client targets the
+forge's REST surface (``/tools/list``, ``/tools/invoke``) which is what
+Context Forge exposes for orchestrated multi-server access.
+
+The client is intentionally narrow: discovery + invocation, with policy
+enforcement and per-request call counting. Error handling is structured:
+``ForgeClientError`` for transport/HTTP issues, ``PolicyViolation`` for
+client-side policy denials.
+"""
+from __future__ import annotations
+
+import logging
+import time
+from dataclasses import dataclass, field
+from datetime import datetime, timezone
+from typing import Any
+from uuid import uuid4
+
+import httpx
+
+from .config import MCPPluginSettings, get_settings
+from .policies import PolicyEngine
+from .registry import ToolRegistry
+
+logger = logging.getLogger(__name__)
+
+
class ForgeClientError(RuntimeError):
    """Raised when the forge gateway returns an error or is unreachable."""


class PolicyViolation(ForgeClientError):
    """Raised when a tool call is blocked by the local policy engine.

    Subclasses :class:`ForgeClientError` so callers that catch the broad
    client error also see policy denials.
    """
+
+
@dataclass
class ToolCall:
    """Audit record for a single outbound tool invocation."""

    id: str  # uuid4 correlation id; forwarded to the gateway in "context"
    tool_name: str  # dotted tool name, e.g. "postgres.describe_table"
    arguments: dict[str, Any]
    agent_name: str  # which GitPilot agent initiated the call
    repository_path: str | None  # repo context, when the agent has one
    timestamp: datetime  # UTC creation time
+
+
@dataclass
class ToolResult:
    """Normalised outcome of one gateway tool invocation."""

    tool_name: str
    success: bool  # False on transport failure or gateway-reported failure
    content: dict[str, Any] = field(default_factory=dict)  # parsed JSON payload
    error: str | None = None  # human-readable failure reason, if any
    execution_time_ms: int = 0  # gateway-reported, else locally measured
+
+
class MCPContextForgeClient:
    """Talk to MCP Context Forge over HTTP.

    Lifecycle: construct -> ``initialize()`` (opens the HTTP client and
    best-effort discovers tools) -> ``call_tool()`` N times ->
    ``close()``. Also usable as an async context manager. When an
    external ``http_client`` is injected, the caller keeps ownership and
    ``close()`` will not close it.
    """

    def __init__(
        self,
        settings: MCPPluginSettings | None = None,
        *,
        registry: ToolRegistry | None = None,
        policy: PolicyEngine | None = None,
        http_client: httpx.AsyncClient | None = None,
    ) -> None:
        # Fall back to env-derived defaults when collaborators are not
        # injected (tests pass explicit fakes).
        self.settings = settings or get_settings()
        self.registry = registry or ToolRegistry()
        self.policy = policy or PolicyEngine(self.settings)
        self._call_count = 0
        self._http: httpx.AsyncClient | None = http_client
        # Only a client we created ourselves is closed in close().
        self._owns_http = http_client is None

    async def __aenter__(self) -> "MCPContextForgeClient":
        await self.initialize()
        return self

    async def __aexit__(self, *exc_info: Any) -> None:
        await self.close()

    async def initialize(self) -> None:
        """Open the HTTP client and discover the tool list (best-effort).

        No-op when the plugin is disabled; discovery failures are logged
        and swallowed so the plugin degrades to a no-op instead of
        breaking startup.
        """
        if not self.settings.enabled:
            logger.info("mcp-plugin disabled via settings")
            return
        if self._http is None:
            self._http = httpx.AsyncClient(
                base_url=self.settings.gateway_url,
                headers={
                    "Authorization": f"Bearer {self.settings.auth_token}",
                    "Content-Type": "application/json",
                    "User-Agent": "gitpilot-mcp-plugin/1.0",
                },
                timeout=self.settings.timeout_seconds,
            )
        try:
            await self.discover_tools()
        except Exception:  # noqa: BLE001
            logger.exception("tool discovery failed; plugin will degrade to no-op")

    async def close(self) -> None:
        """Close the HTTP client, but only when this instance owns it."""
        if self._http is not None and self._owns_http:
            await self._http.aclose()
            self._http = None

    # ------------------------------------------------------------------
    async def discover_tools(self) -> list[dict]:
        """Fetch the gateway's tool list and replace the registry cache.

        Returns the raw tool dicts; ``[]`` when the client is not open
        or the response has an unexpected shape.
        """
        if self._http is None:
            return []
        resp = await self._http.get("/tools/list")
        resp.raise_for_status()
        body = resp.json()
        # Accept {"tools": [...]} or a plain object; anything non-list is dropped.
        tools = body.get("tools", body) if isinstance(body, dict) else []
        if not isinstance(tools, list):
            tools = []
        self.registry.replace(tools)
        return tools

    async def call_tool(
        self,
        tool_name: str,
        arguments: dict[str, Any],
        *,
        agent_name: str,
        repository_path: str | None = None,
    ) -> ToolResult:
        """Invoke one tool through the gateway, enforcing local policy.

        Raises:
            PolicyViolation: when the local policy engine denies the call.
            ForgeClientError: when the plugin is disabled, the per-request
                call cap is exceeded, or the client is not initialised.

        Transport failures do NOT raise; they come back as a failed
        :class:`ToolResult` so agents can degrade gracefully.
        """
        if not self.settings.enabled:
            raise ForgeClientError("mcp-plugin disabled")

        if self._call_count >= self.settings.max_calls_per_request:
            raise ForgeClientError(
                f"per-request call cap exceeded ({self.settings.max_calls_per_request})"
            )

        # Fail fast client-side so a blocked call never leaves the process.
        allowed, reason = self.policy.is_allowed(tool_name, arguments)
        if not allowed:
            raise PolicyViolation(f"tool {tool_name!r} blocked: {reason}")

        if self._http is None:
            raise ForgeClientError("client not initialised")

        # Audit record; its id is forwarded to the gateway for correlation.
        call = ToolCall(
            id=str(uuid4()),
            tool_name=tool_name,
            arguments=arguments,
            agent_name=agent_name,
            repository_path=repository_path,
            timestamp=datetime.now(timezone.utc),
        )

        started = time.monotonic()
        try:
            resp = await self._http.post(
                "/tools/invoke",
                json={
                    "tool": tool_name,
                    "arguments": arguments,
                    "context": {
                        "agent": agent_name,
                        "repository": repository_path,
                        "call_id": call.id,
                    },
                },
            )
            resp.raise_for_status()
        except httpx.HTTPError as exc:
            elapsed_ms = int((time.monotonic() - started) * 1000)
            logger.warning("forge invoke failed: %s", exc)
            return ToolResult(
                tool_name=tool_name,
                success=False,
                content={},
                error=f"forge transport error: {exc}",
                execution_time_ms=elapsed_ms,
            )

        # Only successful round-trips count against the per-request cap.
        self._call_count += 1
        body = resp.json()
        elapsed_ms = int((time.monotonic() - started) * 1000)
        if self.settings.log_all_calls:
            logger.info(
                "mcp tool=%s agent=%s success=%s elapsed_ms=%d",
                tool_name,
                agent_name,
                body.get("success", True),
                elapsed_ms,
            )
        return ToolResult(
            tool_name=tool_name,
            # A reply without an explicit "success" field counts as success.
            success=bool(body.get("success", True)),
            content=body.get("content") or body,
            error=body.get("error"),
            execution_time_ms=int(body.get("execution_time_ms", elapsed_ms)),
        )

    @property
    def call_count(self) -> int:
        """Number of successful tool invocations since the last reset."""
        return self._call_count

    def reset_call_count(self) -> None:
        """Reset the per-request cap counter (call at request boundaries)."""
        self._call_count = 0
diff --git a/gitpilot/mcp_plugin/policies.py b/gitpilot/mcp_plugin/policies.py
new file mode 100644
index 0000000000000000000000000000000000000000..d580bf11376e2df84ada53faea4a0768a25c7a45
--- /dev/null
+++ b/gitpilot/mcp_plugin/policies.py
@@ -0,0 +1,85 @@
+"""Policy gates for MCP tool calls.
+
+The Context Forge gateway enforces its own server-side policies, but we
+also enforce them client-side: failing fast keeps a destructive tool
+call from ever leaving the GitPilot process, and surfaces the policy
+decision in the agent transcript.
+"""
+from __future__ import annotations
+
+import logging
+from typing import Any, ClassVar
+
+from .config import MCPPluginSettings, get_settings
+
+logger = logging.getLogger(__name__)
+
+
class PolicyEngine:
    """Decides whether a given tool call is allowed.

    Three gates, checked in order:

    1. Server allowlist (``allowed_servers``).
    2. Hard-coded destructive keyword denylist (``drop``, ``delete``…).
    3. Mutation tools listed in :data:`MUTATION_TOOLS` need explicit
       approval when ``require_approval_for_mutations`` is set.
    """

    DESTRUCTIVE_KEYWORDS: tuple[str, ...] = (
        "drop",
        "delete",
        "truncate",
        "remove",
        "destroy",
        "clear",
        "purge",
    )

    # Tool-prefix → server-id mapping. Kept explicit because the server
    # name "mcp-postgre-server" does not match the tool prefix "postgres".
    PREFIX_TO_SERVER: ClassVar[dict[str, str]] = {
        "postgres": "mcp-postgre-server",
        "milvus": "mcp-milvus-server",
        "inspector": "mcp-inspector-server",
    }

    MUTATION_TOOLS: frozenset[str] = frozenset(
        {
            "postgres.execute_write",
            "postgres.unsafe_execute",
            "milvus.insert",
            "milvus.upsert",
            "milvus.create_collection",
            "milvus.drop_collection",
            "milvus.create_index",
            "milvus.drop_index",
        }
    )

    def __init__(self, settings: MCPPluginSettings | None = None) -> None:
        self.settings = settings or get_settings()

    def is_allowed(self, tool_name: str, arguments: dict[str, Any]) -> tuple[bool, str]:
        """Return ``(allowed, reason)`` for a proposed tool invocation."""
        # Gate 1: the tool's dotted prefix must map to an allowlisted server.
        prefix = tool_name.split(".", 1)[0] if "." in tool_name else ""
        server_id = self.PREFIX_TO_SERVER.get(prefix, "")
        allowlist = self.settings.allowed_servers
        if server_id and allowlist and server_id not in allowlist:
            return False, f"server {server_id!r} not in allowlist"

        # Gate 2: destructive keywords anywhere in the tool name.
        lowered = tool_name.lower()
        matched = next(
            (kw for kw in self.DESTRUCTIVE_KEYWORDS if kw in lowered), None
        )
        if matched is not None:
            return False, f"destructive keyword {matched!r} in tool name"

        # Gate 3: known mutation tools require an explicit approval flow.
        needs_approval = (
            self.settings.require_approval_for_mutations
            and tool_name in self.MUTATION_TOOLS
        )
        if needs_approval:
            return False, "mutation tool requires explicit approval"

        return True, "ok"
diff --git a/gitpilot/mcp_plugin/registry.py b/gitpilot/mcp_plugin/registry.py
new file mode 100644
index 0000000000000000000000000000000000000000..b8528d44b99b2992adc3f4017ab31aa32be3ca07
--- /dev/null
+++ b/gitpilot/mcp_plugin/registry.py
@@ -0,0 +1,96 @@
+"""Static catalog of MCP servers GitPilot ships with.
+
+The actual tool list per server is discovered at runtime from Context
+Forge (``ToolRegistry.discover``); this module just keeps the list of
+servers we *expect* to attach so policy decisions and health checks can
+reason about them without a network round-trip.
+"""
+from __future__ import annotations
+
+from dataclasses import dataclass, field
+from typing import ClassVar
+
+
@dataclass(frozen=True)
class ServerEntry:
    """Static description of one MCP server GitPilot expects to attach."""

    name: str  # Forge server id (matches policy allowlist entries)
    description: str  # human-readable summary for UI / health output
    tags: tuple[str, ...]  # capability tags for registry lookups
    expected_tools: tuple[str, ...]  # dotted tool names we expect Forge to advertise
+
+
# The servers GitPilot ships support for out of the box. Tool lists here
# are *expectations* used for policy decisions and health checks; the
# live tool list always comes from Context Forge discovery at runtime.
KNOWN_SERVERS: dict[str, ServerEntry] = {
    "mcp-postgre-server": ServerEntry(
        name="mcp-postgre-server",
        description="PostgreSQL schema discovery, safe SELECT, migration validation, fixtures.",
        tags=("postgresql", "database", "code-generation", "testing"),
        expected_tools=(
            "postgres.list_databases",
            "postgres.list_schemas",
            "postgres.list_tables",
            "postgres.describe_table",
            "postgres.safe_select",
            "postgres.validate_migration",
            "postgres.generate_test_fixtures",
            "postgres.generate_repository_context",
        ),
    ),
    "mcp-milvus-server": ServerEntry(
        name="mcp-milvus-server",
        description="Milvus collection discovery, vector search, RAG codegen, test vectors.",
        tags=("milvus", "vector-database", "rag", "code-generation"),
        expected_tools=(
            "milvus.list_collections",
            "milvus.describe_collection",
            "milvus.search",
            "milvus.validate_index_config",
            "milvus.generate_rag_pipeline_context",
            "milvus.generate_test_vectors",
        ),
    ),
    "mcp-inspector-server": ServerEntry(
        name="mcp-inspector-server",
        description="Validate, ping and contract-test other MCP servers.",
        tags=("mcp", "diagnostics"),
        expected_tools=(
            "inspector.ping_server",
            "inspector.list_capabilities",
            "inspector.validate_tool_schema",
            "inspector.run_contract_tests",
            "inspector.batch_validate",
            "inspector.generate_report",
        ),
    ),
}
+
+
@dataclass
class ToolRegistry:
    """Runtime cache of tools advertised by Context Forge."""

    tools: dict[str, dict] = field(default_factory=dict)

    # Server-id → tool-prefix mapping (mirrors PolicyEngine.PREFIX_TO_SERVER).
    SERVER_TO_PREFIX: ClassVar[dict[str, str]] = {
        "mcp-postgre-server": "postgres",
        "mcp-milvus-server": "milvus",
        "mcp-inspector-server": "inspector",
    }

    def replace(self, tools: list[dict]) -> None:
        """Swap the cached catalogue for a freshly discovered one.

        Entries without a ``name`` key are dropped silently.
        """
        indexed: dict[str, dict] = {}
        for tool in tools:
            if "name" in tool:
                indexed[tool["name"]] = tool
        self.tools = indexed

    def has(self, name: str) -> bool:
        """True when Forge advertised a tool with this exact name."""
        return name in self.tools

    def by_server(self, server_name: str) -> list[dict]:
        """All cached tools whose dotted prefix belongs to ``server_name``.

        Unknown server ids fall back to stripping the conventional
        ``mcp-…-server`` wrapping to derive a prefix.
        """
        fallback = server_name.removeprefix("mcp-").removesuffix("-server")
        prefix = self.SERVER_TO_PREFIX.get(server_name, fallback)
        wanted = f"{prefix}."
        return [tool for name, tool in self.tools.items() if name.startswith(wanted)]

    def by_tag(self, tag: str) -> list[dict]:
        """All cached tools carrying ``tag`` in their optional ``tags`` list."""
        matches = []
        for tool in self.tools.values():
            if tag in (tool.get("tags") or []):
                matches.append(tool)
        return matches
diff --git a/gitpilot/mcp_server.py b/gitpilot/mcp_server.py
new file mode 100644
index 0000000000000000000000000000000000000000..c8d545cf618487875b61baf36b16a18c41c3fca1
--- /dev/null
+++ b/gitpilot/mcp_server.py
@@ -0,0 +1,407 @@
+"""GitPilot MCP server — additive, env-gated, read-only by default.
+
+This module turns GitPilot itself into an MCP server so that other agents
+(notably HomePilot's Coder persona) can invoke a curated, scoped subset
+of GitPilot's functionality through the Model Context Protocol.
+
+Design contract
+---------------
+
+* **Off by default.** The Starlette app in :mod:`gitpilot.api` only mounts
+ this server when ``GITPILOT_EXPOSE_MCP_SERVER=true``. Existing
+ deployments are unaffected.
+* **No edits to existing routes.** This is a *facade* over the same
+ high-level functions the FastAPI routes already use; the routes
+ themselves stay untouched.
+* **Curated tool surface.** We expose ten tools: six read-only, two
+  plan-scoped, and two mutation. Mutation tools require a separate scope
+  token (``GITPILOT_MCP_SERVER_MUTATION_TOKEN``) AND
+  ``GITPILOT_MCP_SERVER_ALLOW_MUTATION=true``.
+* **Recursion guard.** If a tool call carries the
+ ``X-Gitpilot-Origin: self`` header, we reject it. This stops a runaway
+ loop where GitPilot's own agents reach back into the GitPilot MCP
+ server and trigger themselves.
+* **Stateless.** Per-call session id is generated and forwarded to
+ GitPilot's existing event channel; the server itself keeps no state.
+
+The companion ``register.json`` in ``extensions/mcp_plugins/gitpilot/``
+mirrors what the HomePilot wizard expects.
+"""
+from __future__ import annotations
+
+import asyncio
+import logging
+import os
+import time
+from dataclasses import dataclass, field
+from typing import Any, Awaitable, Callable
+from uuid import uuid4
+
+logger = logging.getLogger(__name__)
+
# ---------------------------------------------------------------------------
# Configuration
# ---------------------------------------------------------------------------
# Environment variable names — all read once in MCPServerConfig.from_env().
ENV_EXPOSE = "GITPILOT_EXPOSE_MCP_SERVER"            # master on/off switch
ENV_AUTH_TOKEN = "GITPILOT_MCP_SERVER_TOKEN"         # bearer token for all calls
ENV_MUTATION_TOKEN = "GITPILOT_MCP_SERVER_MUTATION_TOKEN"  # extra token for mutation scope
ENV_ALLOW_MUTATION = "GITPILOT_MCP_SERVER_ALLOW_MUTATION"  # opt-in flag for mutation tools
ENV_MOUNT_PATH = "GITPILOT_MCP_SERVER_MOUNT_PATH"
ENV_SERVER_NAME = "GITPILOT_MCP_SERVER_NAME"

# Defaults used when the corresponding env vars are unset.
DEFAULT_MOUNT_PATH = "/mcp-server/mcp"
DEFAULT_SERVER_NAME = "gitpilot-mcp-server"

# Header name (lower-cased) and value checked by authorize()'s recursion guard.
ORIGIN_HEADER = "x-gitpilot-origin"
ORIGIN_SELF_VALUE = "self"
+
+
+def _bool_env(key: str, default: bool = False) -> bool:
+ raw = os.environ.get(key)
+ if raw is None:
+ return default
+ return raw.strip().lower() in {"1", "true", "yes", "y", "on"}
+
+
@dataclass(frozen=True)
class MCPServerConfig:
    """Immutable runtime configuration for the GitPilot MCP server."""

    enabled: bool = False
    server_name: str = DEFAULT_SERVER_NAME
    mount_path: str = DEFAULT_MOUNT_PATH
    auth_token: str = ""
    mutation_token: str = ""
    allow_mutation: bool = False

    @classmethod
    def from_env(cls) -> "MCPServerConfig":
        """Snapshot the GITPILOT_MCP_SERVER_* environment into a config."""
        env = os.environ.get
        return cls(
            enabled=_bool_env(ENV_EXPOSE, default=False),
            server_name=env(ENV_SERVER_NAME, DEFAULT_SERVER_NAME),
            mount_path=env(ENV_MOUNT_PATH, DEFAULT_MOUNT_PATH),
            auth_token=env(ENV_AUTH_TOKEN, ""),
            mutation_token=env(ENV_MUTATION_TOKEN, ""),
            allow_mutation=_bool_env(ENV_ALLOW_MUTATION, default=False),
        )
+
+
# ---------------------------------------------------------------------------
# Errors
# ---------------------------------------------------------------------------
# The transport bridge maps these onto HTTP statuses:
# MCPServerError → 400, AuthError → 401, ScopeError → 403,
# RecursionGuardError → 409.
class MCPServerError(Exception):
    """Base class for GitPilot MCP server errors."""


class AuthError(MCPServerError):
    """Raised when the bearer token is missing/invalid for the requested scope."""


class ScopeError(MCPServerError):
    """Raised when a mutation tool is called without mutation scope."""


class RecursionGuardError(MCPServerError):
    """Raised when a tool call carries ``X-Gitpilot-Origin: self``."""
+
# ---------------------------------------------------------------------------
# Tool registry (data-only; the actual implementations live in
# mcp_server_tools.py to keep this module easy to read)
# ---------------------------------------------------------------------------
# Scope ladder. authorize() only special-cases SCOPE_MUTATION; read and
# plan scopes need just the base bearer token.
SCOPE_READ = "read"
SCOPE_PLAN = "plan"
SCOPE_MUTATION = "mutation"


@dataclass(frozen=True)
class ToolSpec:
    """Immutable description of one tool exposed through the MCP facade."""

    name: str          # public tool id, e.g. "gitpilot.list_repos"
    description: str   # human-readable blurb returned by tools/list
    scope: str         # one of SCOPE_READ / SCOPE_PLAN / SCOPE_MUTATION
    input_schema: dict[str, Any]  # JSON Schema for the tool's arguments
    handler_name: str  # attribute on mcp_server_tools.Tools
+
+
# Ten tools total: six SCOPE_READ, two SCOPE_PLAN, two SCOPE_MUTATION.
TOOL_CATALOG: tuple[ToolSpec, ...] = (
    # -- Read-only tools ----------------------------------------------------
    ToolSpec(
        name="gitpilot.healthz",
        description="Liveness probe; returns version + capabilities.",
        scope=SCOPE_READ,
        input_schema={"type": "object", "properties": {}},
        handler_name="healthz",
    ),
    ToolSpec(
        name="gitpilot.list_repos",
        description="List repositories visible to the authenticated GitPilot user.",
        scope=SCOPE_READ,
        input_schema={
            "type": "object",
            "properties": {
                "page": {"type": "integer", "minimum": 1, "default": 1},
                "per_page": {
                    "type": "integer",
                    "minimum": 1,
                    "maximum": 100,
                    "default": 30,
                },
                "query": {"type": "string"},
            },
        },
        handler_name="list_repos",
    ),
    ToolSpec(
        name="gitpilot.list_branches",
        description="List branches in a repository.",
        scope=SCOPE_READ,
        input_schema={
            "type": "object",
            "properties": {
                "owner": {"type": "string"},
                "repo": {"type": "string"},
            },
            "required": ["owner", "repo"],
        },
        handler_name="list_branches",
    ),
    ToolSpec(
        name="gitpilot.describe_repo",
        description="Return high-level repo metadata: tree summary, default branch, recent commits.",
        scope=SCOPE_READ,
        input_schema={
            "type": "object",
            "properties": {
                "owner": {"type": "string"},
                "repo": {"type": "string"},
                "branch": {"type": "string"},
                "max_files": {"type": "integer", "default": 50, "maximum": 500},
            },
            "required": ["owner", "repo"],
        },
        handler_name="describe_repo",
    ),
    ToolSpec(
        name="gitpilot.list_skills",
        description="List installed GitPilot skills with their slugs and descriptions.",
        scope=SCOPE_READ,
        input_schema={"type": "object", "properties": {}},
        handler_name="list_skills",
    ),
    ToolSpec(
        name="gitpilot.classify_topology",
        description="Classify a free-text request into a GitPilot topology id.",
        scope=SCOPE_READ,
        input_schema={
            "type": "object",
            "properties": {"prompt": {"type": "string"}},
            "required": ["prompt"],
        },
        handler_name="classify_topology",
    ),
    # -- Plan-scope tools (no file writes, no PRs) --------------------------
    ToolSpec(
        name="gitpilot.plan",
        description=(
            "Produce a multi-step plan for a code-generation request. Read-only: "
            "no files are written and no PRs are opened."
        ),
        scope=SCOPE_PLAN,
        input_schema={
            "type": "object",
            "properties": {
                "prompt": {"type": "string"},
                "owner": {"type": "string"},
                "repo": {"type": "string"},
                "branch": {"type": "string"},
                "workspace_root": {"type": "string"},
            },
            "required": ["prompt"],
        },
        handler_name="plan",
    ),
    ToolSpec(
        name="gitpilot.execute",
        description=(
            "Execute a previously-returned plan. Streaming. Honours the same scope "
            "checks as the equivalent /api/chat/execute route."
        ),
        scope=SCOPE_PLAN,
        input_schema={
            "type": "object",
            "properties": {
                "plan_id": {"type": "string"},
                "approval_token": {"type": "string"},
            },
            "required": ["plan_id"],
        },
        handler_name="execute",
    ),
    # -- Mutation-scope tools (token + allow flag required) -----------------
    ToolSpec(
        name="gitpilot.run_skill",
        description="Invoke a named GitPilot skill. Mutation scope required.",
        scope=SCOPE_MUTATION,
        input_schema={
            "type": "object",
            "properties": {
                "slug": {"type": "string"},
                "arguments": {"type": "object"},
            },
            "required": ["slug"],
        },
        handler_name="run_skill",
    ),
    ToolSpec(
        name="gitpilot.create_pr",
        description=(
            "Open a pull request from a branch produced by a previous execute step. "
            "Mutation scope required."
        ),
        scope=SCOPE_MUTATION,
        input_schema={
            "type": "object",
            "properties": {
                "owner": {"type": "string"},
                "repo": {"type": "string"},
                "head": {"type": "string"},
                "base": {"type": "string", "default": "main"},
                "title": {"type": "string"},
                "body": {"type": "string"},
            },
            "required": ["owner", "repo", "head", "title"],
        },
        handler_name="create_pr",
    ),
)
+
+
# ---------------------------------------------------------------------------
# Auth + scope enforcement
# ---------------------------------------------------------------------------
def authorize(
    config: MCPServerConfig,
    *,
    bearer: str | None,
    origin_header: str | None,
    scope: str,
) -> None:
    """Validate the request before dispatching to a tool handler.

    Checks run in order: recursion guard, bearer credential, mutation-scope
    gate. Raises one of :class:`AuthError`, :class:`ScopeError`,
    :class:`RecursionGuardError`. Returning normally means the call may
    proceed.

    Fixes over the first draft:

    * Token equality uses :func:`hmac.compare_digest`, so comparisons are
      constant-time and do not leak token prefixes via response timing.
    * A caller presenting the *mutation* token is also authenticated. The
      original required ``bearer == auth_token`` before checking
      ``bearer == mutation_token``, which made mutation tools unusable
      unless both tokens were configured to the same value.
    """
    import hmac  # local import keeps module import side-effect free

    if origin_header and origin_header.strip().lower() == ORIGIN_SELF_VALUE:
        raise RecursionGuardError(
            "Refusing self-call: X-Gitpilot-Origin: self detected"
        )

    if not config.auth_token:
        raise AuthError("Server has no auth token configured")
    if not bearer or not bearer.startswith("Bearer "):
        raise AuthError("Missing bearer token")
    presented = bearer.removeprefix("Bearer ").strip()

    def _matches(expected: str) -> bool:
        # Encode both sides: compare_digest raises TypeError on non-ASCII str.
        return bool(expected) and hmac.compare_digest(
            presented.encode("utf-8"), expected.encode("utf-8")
        )

    if not (_matches(config.auth_token) or _matches(config.mutation_token)):
        raise AuthError("Invalid bearer token")

    if scope == SCOPE_MUTATION:
        if not config.allow_mutation:
            raise ScopeError(
                "Mutation tools disabled (set GITPILOT_MCP_SERVER_ALLOW_MUTATION=true)"
            )
        if not _matches(config.mutation_token):
            raise ScopeError(
                "Mutation tools require GITPILOT_MCP_SERVER_MUTATION_TOKEN"
            )
+
+
# ---------------------------------------------------------------------------
# Per-call envelope
# ---------------------------------------------------------------------------
@dataclass
class CallContext:
    """Per-invocation bookkeeping handed to every tool handler."""

    call_id: str = field(default_factory=lambda: str(uuid4()))  # unique per call
    started_at: float = field(default_factory=time.monotonic)   # monotonic start stamp
    tool_name: str = ""
    scope: str = SCOPE_READ

    def elapsed_ms(self) -> int:
        """Milliseconds since construction (monotonic clock, truncated to int)."""
        return int((time.monotonic() - self.started_at) * 1000)


# ---------------------------------------------------------------------------
# Dispatch surface (sync + async friendly)
# ---------------------------------------------------------------------------
# Handler signature: (arguments, CallContext) -> awaitable result dict.
ToolHandler = Callable[[dict[str, Any], CallContext], Awaitable[dict[str, Any]]]
+
+
class GitPilotMCPServer:
    """Tool catalog + auth gate. Transport-agnostic.

    The actual MCP transport (FastMCP / streamable-http) is wired in
    :mod:`gitpilot.api` only when the server is enabled. This class is
    safe to import unconditionally: it does not touch the network or
    GitPilot internals at construction time.
    """

    #: Per-call timeout in seconds. Previously a magic ``120`` inside
    #: ``call_tool``; now overridable per call via the ``timeout`` keyword.
    DEFAULT_TIMEOUT: float = 120.0

    def __init__(
        self,
        config: MCPServerConfig | None = None,
        *,
        handlers: dict[str, ToolHandler] | None = None,
    ) -> None:
        self.config = config or MCPServerConfig.from_env()
        self._handlers: dict[str, ToolHandler] = handlers or {}

    def register_handler(self, name: str, handler: ToolHandler) -> None:
        """Attach (or replace) the coroutine implementing tool ``name``."""
        self._handlers[name] = handler

    def list_tools(self) -> list[dict[str, Any]]:
        """Return the public tool catalog as MCP tool definitions."""
        return [
            {
                "name": t.name,
                "description": t.description,
                "scope": t.scope,
                "inputSchema": t.input_schema,
            }
            for t in TOOL_CATALOG
        ]

    async def call_tool(
        self,
        name: str,
        arguments: dict[str, Any],
        *,
        bearer: str | None,
        origin_header: str | None,
        timeout: float | None = None,
    ) -> dict[str, Any]:
        """Authorize and run one tool call, returning a result envelope.

        Parameters
        ----------
        name, arguments:
            Tool name from :data:`TOOL_CATALOG` plus its JSON arguments.
        bearer, origin_header:
            Raw ``Authorization`` / ``X-Gitpilot-Origin`` header values,
            forwarded to :func:`authorize`.
        timeout:
            Seconds before the call is aborted; ``None`` means
            :attr:`DEFAULT_TIMEOUT`.

        Raises
        ------
        MCPServerError
            Unknown tool, missing handler, or timeout. Auth failures raise
            the narrower :class:`AuthError` / :class:`ScopeError` /
            :class:`RecursionGuardError` from :func:`authorize`.
        """
        spec = next((t for t in TOOL_CATALOG if t.name == name), None)
        if spec is None:
            raise MCPServerError(f"Unknown tool: {name!r}")

        # Authorize before even looking up a handler: unauthorized callers
        # learn nothing about which tools are wired up.
        authorize(
            self.config,
            bearer=bearer,
            origin_header=origin_header,
            scope=spec.scope,
        )

        handler = self._handlers.get(name)
        if handler is None:
            raise MCPServerError(
                f"No handler registered for {name!r}; did mcp_server_tools.bind run?"
            )

        limit = self.DEFAULT_TIMEOUT if timeout is None else timeout
        ctx = CallContext(tool_name=name, scope=spec.scope)
        try:
            result = await asyncio.wait_for(handler(arguments, ctx), timeout=limit)
        except asyncio.TimeoutError as exc:  # noqa: PERF203
            logger.warning("mcp tool %s timed out after %ss", name, limit)
            raise MCPServerError("Tool call timed out") from exc

        return {
            "tool": name,
            "scope": spec.scope,
            "call_id": ctx.call_id,
            "elapsed_ms": ctx.elapsed_ms(),
            "result": result,
        }
diff --git a/gitpilot/mcp_server_bridge.py b/gitpilot/mcp_server_bridge.py
new file mode 100644
index 0000000000000000000000000000000000000000..4a93dd9db6418ccb6920b9cc7e48643b0497c551
--- /dev/null
+++ b/gitpilot/mcp_server_bridge.py
@@ -0,0 +1,123 @@
+"""Transport bridge for the GitPilot MCP server.
+
+This module is loaded *only* when ``GITPILOT_EXPOSE_MCP_SERVER=true``.
+It exposes a single :func:`mount` function that wires the public tool
+catalog onto the existing FastAPI app at the configured mount path.
+
+Two routes are added:
+
+* ``GET {mount_path}/healthz`` — unauthenticated liveness probe.
+* ``POST {mount_path}`` — JSON envelope that emulates the
+ ``tools/list`` and ``tools/call`` MCP methods. We do *not* attempt to
+ re-implement the full MCP streamable-HTTP framing here; the JSON
+ envelope is intentionally simple so the HomePilot wizard can drive it
+ directly during the connection test, and a richer FastMCP transport
+ can be layered on later (Phase 6 in the plan).
+
+Keeping it additive
+-------------------
+
+* No existing route is modified.
+* No existing model is renamed.
+* All routes are scoped under the configurable ``mount_path``
+ (default ``/mcp-server/mcp``); collision with existing routes is
+ impossible in practice.
+* The entire module is importable on demand only — operators who never
+ set ``GITPILOT_EXPOSE_MCP_SERVER=true`` never load FastMCP, never
+ construct the tool registry, and pay zero startup cost.
+"""
+from __future__ import annotations
+
+import logging
+from typing import Any
+
+from fastapi import APIRouter, FastAPI, Header, HTTPException, Request
+
+from .mcp_server import (
+ AuthError,
+ GitPilotMCPServer,
+ MCPServerConfig,
+ MCPServerError,
+ RecursionGuardError,
+ ScopeError,
+)
+from .mcp_server_tools import bind
+
+logger = logging.getLogger(__name__)
+
+
def _build_router(server: GitPilotMCPServer, *, mount_path: str) -> APIRouter:
    """Build the router carrying the healthz + JSON-envelope endpoints.

    Fixes over the first draft: every ``raise HTTPException`` inside an
    ``except`` block now chains with ``from exc`` (preserves the original
    traceback for logging middleware), and a JSON body that is not an
    object (e.g. a bare list) is rejected with 422 instead of crashing
    with ``AttributeError`` on ``payload.get``.
    """
    router = APIRouter()

    @router.get(f"{mount_path}/healthz")
    async def healthz() -> dict[str, Any]:  # noqa: D401
        """Liveness probe used by the HomePilot wizard's step 1."""
        return {
            "status": "ok",
            "server": server.config.server_name,
            "tool_count": len(server.list_tools()),
        }

    @router.post(mount_path)
    async def mcp_endpoint(
        request: Request,
        authorization: str | None = Header(default=None),
        x_gitpilot_origin: str | None = Header(default=None, alias="x-gitpilot-origin"),
    ) -> dict[str, Any]:
        try:
            payload = await request.json()
        except Exception as exc:  # noqa: BLE001
            raise HTTPException(
                status_code=400, detail=f"invalid JSON body: {exc}"
            ) from exc
        if not isinstance(payload, dict):
            raise HTTPException(status_code=422, detail="body must be a JSON object")

        method = payload.get("method") or "tools/call"
        if method == "tools/list":
            # tools/list does not need auth: the server name + tool catalog
            # is public so the HomePilot wizard can show it during step 3.
            return {"tools": server.list_tools()}

        if method == "tools/call":
            tool_name = payload.get("tool") or payload.get("name")
            arguments = payload.get("arguments") or {}
            if not isinstance(tool_name, str) or not tool_name:
                raise HTTPException(status_code=422, detail="missing 'tool'")
            if not isinstance(arguments, dict):
                raise HTTPException(
                    status_code=422, detail="'arguments' must be an object"
                )
            try:
                result = await server.call_tool(
                    tool_name,
                    arguments,
                    bearer=authorization,
                    origin_header=x_gitpilot_origin,
                )
            except RecursionGuardError as exc:
                raise HTTPException(status_code=409, detail=str(exc)) from exc
            except AuthError as exc:
                raise HTTPException(status_code=401, detail=str(exc)) from exc
            except ScopeError as exc:
                raise HTTPException(status_code=403, detail=str(exc)) from exc
            except MCPServerError as exc:
                raise HTTPException(status_code=400, detail=str(exc)) from exc
            return {"success": True, **result}

        raise HTTPException(status_code=400, detail=f"unknown method: {method!r}")

    return router
+
+
def mount(app: FastAPI, config: MCPServerConfig) -> GitPilotMCPServer:
    """Mount the GitPilot MCP server onto an existing FastAPI app.

    Returns the configured :class:`GitPilotMCPServer` so callers can
    introspect the catalog (used in tests).
    """
    mcp_server = bind(GitPilotMCPServer(config))
    app.include_router(_build_router(mcp_server, mount_path=config.mount_path))
    logger.info(
        "gitpilot mcp server: %d tools mounted at %s",
        len(mcp_server.list_tools()),
        config.mount_path,
    )
    return mcp_server
diff --git a/gitpilot/mcp_server_tools.py b/gitpilot/mcp_server_tools.py
new file mode 100644
index 0000000000000000000000000000000000000000..bb14f471f43a9566cce9b69f5b3de9a8b6ad624a
--- /dev/null
+++ b/gitpilot/mcp_server_tools.py
@@ -0,0 +1,345 @@
+"""Tool handler implementations for the GitPilot MCP server.
+
+Each handler is a thin facade over an existing GitPilot module. The goal
+is *zero* duplication of business logic: handlers call the same code
+that the FastAPI routes call. Handlers are also defensive — if an
+optional GitPilot subsystem is missing (e.g. github_api in offline
+mode), they degrade to a clear error payload rather than crashing the
+MCP transport.
+
+This module is imported from :mod:`gitpilot.api` only when the MCP
+server is enabled, so its side-effect-free imports never run in the
+default off-by-default path.
+"""
+from __future__ import annotations
+
+import logging
+from typing import Any
+
+from .mcp_server import (
+ TOOL_CATALOG,
+ CallContext,
+ GitPilotMCPServer,
+ MCPServerError,
+)
+from .version import __version__
+
+logger = logging.getLogger(__name__)
+
+
+def _safe_import(modname: str):
+ """Import a module by name, returning None if not available."""
+ try:
+ return __import__(modname, fromlist=["_"])
+ except Exception: # noqa: BLE001
+ return None
+
+
# ---------------------------------------------------------------------------
# Read-only handlers
# ---------------------------------------------------------------------------
async def _healthz(args: dict[str, Any], ctx: CallContext) -> dict[str, Any]:
    """Liveness handler: report package version and the public tool names."""
    return {
        "status": "ok",
        "version": __version__,
        "tools": [t.name for t in TOOL_CATALOG],
    }
+
+
async def _list_repos(args: dict[str, Any], ctx: CallContext) -> dict[str, Any]:
    """List repositories via gitpilot.github_api, trimmed for the wizard.

    Degrades to ``{"available": False, ...}`` when the GitHub facade is
    missing or raises, mirroring the other read-only handlers.

    Fixes over the first draft: ``page`` is clamped to >= 1 (``per_page``
    already was), and the owner field tolerates the three shapes GitHub
    facades produce — object, bare string, or null. The old
    ``r.get("owner", {}).get("login")`` crashed on the latter two.
    """
    page = max(1, int(args.get("page", 1)))
    per_page = max(1, min(int(args.get("per_page", 30)), 100))
    query = args.get("query")

    gh = _safe_import("gitpilot.github_api")
    if gh is None or not hasattr(gh, "list_repositories"):
        return {
            "available": False,
            "reason": "github_api unavailable; configure GitHub auth in GitPilot",
            "repos": [],
        }
    try:
        repos = await gh.list_repositories(  # type: ignore[attr-defined]
            page=page, per_page=per_page, query=query
        )
    except Exception as exc:  # noqa: BLE001
        return {"available": False, "reason": str(exc), "repos": []}

    def _owner_of(record: dict[str, Any]) -> Any:
        # Owner may be a {"login": ...} object, a bare string, or null.
        owner = record.get("owner")
        if isinstance(owner, dict):
            return owner.get("login")
        return owner

    # Trim to what HomePilot's wizard step 4 needs.
    trimmed = [
        {
            "owner": _owner_of(r),
            "name": r.get("name"),
            "default_branch": r.get("default_branch", "main"),
            "private": bool(r.get("private", False)),
            "updated_at": r.get("updated_at"),
        }
        for r in (repos or [])
    ]
    return {"available": True, "page": page, "per_page": per_page, "repos": trimmed}
+
+
async def _list_branches(args: dict[str, Any], ctx: CallContext) -> dict[str, Any]:
    """Return branch names plus protection flags for one repository."""
    owner, repo = args["owner"], args["repo"]
    gh = _safe_import("gitpilot.github_api")
    if gh is None or not hasattr(gh, "list_branches"):
        return {"available": False, "branches": []}
    try:
        raw_branches = await gh.list_branches(owner, repo)  # type: ignore[attr-defined]
    except Exception as exc:  # noqa: BLE001
        return {"available": False, "reason": str(exc), "branches": []}
    slim = [
        {"name": b.get("name"), "protected": bool(b.get("protected", False))}
        for b in (raw_branches or [])
    ]
    return {"available": True, "owner": owner, "repo": repo, "branches": slim}
+
+
async def _describe_repo(args: dict[str, Any], ctx: CallContext) -> dict[str, Any]:
    """Collect high-level repo metadata, tolerating partial github_api surfaces."""
    owner, repo = args["owner"], args["repo"]
    branch = args.get("branch")
    max_files = max(1, min(int(args.get("max_files", 50)), 500))

    gh = _safe_import("gitpilot.github_api")
    if gh is None:
        return {"available": False, "reason": "github_api unavailable"}

    summary: dict[str, Any] = {"owner": owner, "repo": repo, "branch": branch}
    try:
        meta_fn = getattr(gh, "get_repo", None)
        if meta_fn is not None:
            meta = await meta_fn(owner, repo)
            summary["description"] = meta.get("description")
            summary["default_branch"] = meta.get("default_branch")
            summary["language"] = meta.get("language")
            summary["topics"] = meta.get("topics", [])
        tree_fn = getattr(gh, "get_tree", None)
        if tree_fn is not None:
            summary["tree"] = await tree_fn(
                owner, repo, branch=branch, max_items=max_files
            )
    except Exception as exc:  # noqa: BLE001
        # Best-effort: keep whatever was gathered and record the failure.
        summary["error"] = str(exc)
    return summary
+
+
async def _list_skills(args: dict[str, Any], ctx: CallContext) -> dict[str, Any]:
    """Enumerate installed skills via SkillManager, tolerating API drift."""
    skills_mod = _safe_import("gitpilot.skills")
    if skills_mod is None:
        return {"available": False, "skills": []}
    manager_cls = getattr(skills_mod, "SkillManager", None)
    if manager_cls is None:
        return {"available": False, "skills": []}
    try:
        manager = manager_cls()
        if hasattr(manager, "list"):
            entries = manager.list()
        elif hasattr(manager, "all"):
            entries = manager.all()
        else:
            entries = []
    except Exception as exc:  # noqa: BLE001
        return {"available": False, "reason": str(exc), "skills": []}

    def _as_dict(entry: Any) -> dict[str, Any]:
        # Dicts pass through untouched; objects are reduced to the three
        # attributes the UI cares about.
        if isinstance(entry, dict):
            return entry
        return {
            "slug": getattr(entry, "slug", None),
            "name": getattr(entry, "name", None),
            "description": getattr(entry, "description", None),
        }

    return {"available": True, "skills": [_as_dict(e) for e in (entries or [])]}
+
+
async def _classify_topology(args: dict[str, Any], ctx: CallContext) -> dict[str, Any]:
    """Map a free-text prompt onto a topology id via gitpilot.topology_registry."""
    prompt = args["prompt"]
    registry = _safe_import("gitpilot.topology_registry")
    if registry is None:
        return {"available": False, "topology_id": None}
    classifier = getattr(registry, "classify", None) or getattr(
        registry, "classify_prompt", None
    )
    if classifier is None:
        return {"available": False, "topology_id": None}
    try:
        outcome = classifier(prompt)
    except Exception as exc:  # noqa: BLE001
        return {"available": False, "reason": str(exc), "topology_id": None}
    if isinstance(outcome, dict):
        return {"available": True, **outcome}
    return {"available": True, "topology_id": str(outcome)}
+
+
# ---------------------------------------------------------------------------
# Plan / execute (read-mostly; exposed at SCOPE_PLAN)
# ---------------------------------------------------------------------------
async def _plan(args: dict[str, Any], ctx: CallContext) -> dict[str, Any]:
    """Build a multi-step plan via gitpilot.agentic.

    Entrypoint resolution: ``build_plan`` first, then ``plan``. The kwargs
    call is attempted first; a ``TypeError`` is treated as an older
    positional signature and triggers a single-argument retry.
    NOTE(review): a ``TypeError`` raised *inside* the plan function is
    indistinguishable here from a signature mismatch — confirm acceptable.
    """
    prompt = args["prompt"]
    agentic = _safe_import("gitpilot.agentic")
    if agentic is None:
        return {"available": False, "reason": "agentic engine not loaded"}

    plan_fn = getattr(agentic, "build_plan", None) or getattr(agentic, "plan", None)
    if plan_fn is None:
        return {"available": False, "reason": "no plan() entrypoint"}
    try:
        plan = await _maybe_await(
            plan_fn(
                prompt=prompt,
                owner=args.get("owner"),
                repo=args.get("repo"),
                branch=args.get("branch"),
                workspace_root=args.get("workspace_root"),
            )
        )
    except TypeError:
        # Older signatures may not accept all kwargs; fall back to positional.
        try:
            plan = await _maybe_await(plan_fn(prompt))
        except Exception as exc:  # noqa: BLE001
            return {"available": False, "reason": str(exc)}
    except Exception as exc:  # noqa: BLE001
        return {"available": False, "reason": str(exc)}

    return {"available": True, "plan": plan, "call_id": ctx.call_id}
+
+
async def _execute(args: dict[str, Any], ctx: CallContext) -> dict[str, Any]:
    """Execute a previously-built plan via gitpilot.agentic.

    Entrypoint resolution: ``execute_plan`` first, then ``execute``. Kwargs
    call first; a ``TypeError`` falls back to the older positional
    ``(plan_id)`` form. NOTE(review): as in ``_plan``, a ``TypeError``
    raised inside the entrypoint is treated as a signature mismatch —
    confirm acceptable.
    """
    plan_id = args["plan_id"]
    agentic = _safe_import("gitpilot.agentic")
    if agentic is None:
        return {"available": False, "reason": "agentic engine not loaded"}

    exec_fn = (
        getattr(agentic, "execute_plan", None)
        or getattr(agentic, "execute", None)
    )
    if exec_fn is None:
        return {"available": False, "reason": "no execute() entrypoint"}
    try:
        result = await _maybe_await(
            exec_fn(plan_id=plan_id, approval_token=args.get("approval_token"))
        )
    except TypeError:
        try:
            result = await _maybe_await(exec_fn(plan_id))
        except Exception as exc:  # noqa: BLE001
            return {"available": False, "reason": str(exc)}
    except Exception as exc:  # noqa: BLE001
        return {"available": False, "reason": str(exc)}

    return {"available": True, "result": result, "call_id": ctx.call_id}
+
+
# ---------------------------------------------------------------------------
# Mutation handlers
# ---------------------------------------------------------------------------
async def _run_skill(args: dict[str, Any], ctx: CallContext) -> dict[str, Any]:
    """Invoke one named skill through SkillManager.run()/invoke()."""
    slug = args["slug"]
    skill_args = dict(args.get("arguments", {}))
    skills_mod = _safe_import("gitpilot.skills")
    if skills_mod is None:
        return {"available": False, "reason": "skills module not loaded"}
    manager_cls = getattr(skills_mod, "SkillManager", None)
    if manager_cls is None:
        return {"available": False, "reason": "SkillManager unavailable"}
    try:
        manager = manager_cls()
        runner = getattr(manager, "run", None) or getattr(manager, "invoke", None)
        if runner is None:
            return {"available": False, "reason": "no run() on SkillManager"}
        outcome = await _maybe_await(runner(slug, skill_args))
    except Exception as exc:  # noqa: BLE001
        return {"available": False, "reason": str(exc)}
    return {"available": True, "slug": slug, "result": outcome}
+
+
async def _create_pr(args: dict[str, Any], ctx: CallContext) -> dict[str, Any]:
    """Open a pull request through whichever PR facade is importable.

    Tries ``gitpilot.pr_tools`` then ``gitpilot.github_pulls``; within the
    module, ``create_pull_request`` then ``create_pr``. The kwargs call is
    attempted first, with a ``TypeError`` falling back to the older
    positional order (owner, repo, head, base, title, body).
    """
    pr_mod = _safe_import("gitpilot.pr_tools") or _safe_import("gitpilot.github_pulls")
    if pr_mod is None:
        return {"available": False, "reason": "pr tools not loaded"}
    create_fn = getattr(pr_mod, "create_pull_request", None) or getattr(
        pr_mod, "create_pr", None
    )
    if create_fn is None:
        return {"available": False, "reason": "no create_pr() entrypoint"}
    try:
        result = await _maybe_await(
            create_fn(
                owner=args["owner"],
                repo=args["repo"],
                head=args["head"],
                base=args.get("base", "main"),
                title=args["title"],
                body=args.get("body", ""),
            )
        )
    except TypeError:
        # Older positional API.
        try:
            result = await _maybe_await(
                create_fn(
                    args["owner"],
                    args["repo"],
                    args["head"],
                    args.get("base", "main"),
                    args["title"],
                    args.get("body", ""),
                )
            )
        except Exception as exc:  # noqa: BLE001
            return {"available": False, "reason": str(exc)}
    except Exception as exc:  # noqa: BLE001
        return {"available": False, "reason": str(exc)}
    return {"available": True, "pull_request": result}
+
+
+# ---------------------------------------------------------------------------
+# Helpers
+# ---------------------------------------------------------------------------
+async def _maybe_await(value):
+ """Await ``value`` if it's a coroutine, otherwise return it."""
+ import inspect
+
+ if inspect.isawaitable(value):
+ return await value
+ return value
+
+
# ---------------------------------------------------------------------------
# Public binder
# ---------------------------------------------------------------------------
# Maps every public tool name from TOOL_CATALOG to its coroutine handler.
HANDLER_TABLE = {
    "gitpilot.healthz": _healthz,
    "gitpilot.list_repos": _list_repos,
    "gitpilot.list_branches": _list_branches,
    "gitpilot.describe_repo": _describe_repo,
    "gitpilot.list_skills": _list_skills,
    "gitpilot.classify_topology": _classify_topology,
    "gitpilot.plan": _plan,
    "gitpilot.execute": _execute,
    "gitpilot.run_skill": _run_skill,
    "gitpilot.create_pr": _create_pr,
}


def bind(server: GitPilotMCPServer) -> GitPilotMCPServer:
    """Wire all handlers into a :class:`GitPilotMCPServer` instance.

    Raises :class:`MCPServerError` if the table names a tool that is not
    in the public TOOL_CATALOG, keeping table and catalog in lock-step.
    """
    catalog_names = {t.name for t in TOOL_CATALOG}
    for name, handler in HANDLER_TABLE.items():
        if name not in catalog_names:
            raise MCPServerError(
                f"Handler {name!r} is not in the public TOOL_CATALOG"
            )
        server.register_handler(name, handler)
    return server
diff --git a/gitpilot/mcp_toggles.py b/gitpilot/mcp_toggles.py
new file mode 100644
index 0000000000000000000000000000000000000000..27614d0c797a6ac535b103c09ac277db03766277
--- /dev/null
+++ b/gitpilot/mcp_toggles.py
@@ -0,0 +1,184 @@
+# gitpilot/mcp_toggles.py
+"""Per-server MCP tool toggles and ``alwaysAllow`` semantics.
+
+Additive overlay on :mod:`gitpilot.mcp_client`. The existing client is
+left untouched; callers that want fine-grained control wrap their server
+configs with :class:`MCPServerToggles` and ask :meth:`filter_tools` /
+:meth:`is_always_allowed` before exposing a tool to the model.
+
+Project file::
+
+ .gitpilot/mcp.json
+
+ {
+ "servers": [
+ {
+ "name": "github",
+ "transport": "stdio",
+ "command": "uvx", "args": ["mcp-github"],
+ "enabledTools": ["search_code", "list_issues"],
+ "disabledTools": [],
+ "alwaysAllow": ["search_code"],
+ "disabled": false
+ }
+ ]
+ }
+
+User overrides at ``~/.gitpilot/mcp.json`` are merged underneath the
+project file, with the project taking precedence on name conflicts.
+"""
+from __future__ import annotations
+
+import fnmatch
+import json
+import logging
+from dataclasses import dataclass, field
+from pathlib import Path
+from typing import Any, Dict, Iterable, List, Optional, Set
+
+logger = logging.getLogger(__name__)
+
+GLOBAL_MCP_PATH = Path.home() / ".gitpilot" / "mcp.json"
+PROJECT_MCP_REL = Path(".gitpilot") / "mcp.json"
+
+
@dataclass
class MCPServerToggles:
    """Configurable visibility for an MCP server's tools."""

    name: str
    enabled_tools: Set[str] = field(default_factory=set)  # empty == all
    disabled_tools: Set[str] = field(default_factory=set)
    always_allow: Set[str] = field(default_factory=set)
    disabled: bool = False

    def is_tool_enabled(self, tool_name: str) -> bool:
        """Deny when the server is off or a disabled glob matches; otherwise
        allow when the enabled list is empty or matches."""
        if self.disabled or _glob_in_set(tool_name, self.disabled_tools):
            return False
        return not self.enabled_tools or _glob_in_set(tool_name, self.enabled_tools)

    def is_always_allowed(self, tool_name: str) -> bool:
        """True when the tool matches an ``alwaysAllow`` glob."""
        return _glob_in_set(tool_name, self.always_allow)

    def filter_tools(self, tools: Iterable[Any]) -> List[Any]:
        """Filter a list of tool descriptors by name.

        Each ``tool`` must expose a ``.name`` attribute (the
        :class:`gitpilot.mcp_client.MCPTool` dataclass already does).
        """
        kept: List[Any] = []
        for tool in tools:
            if self.is_tool_enabled(getattr(tool, "name", "")):
                kept.append(tool)
        return kept

    def to_dict(self) -> Dict[str, Any]:
        """Serialise back to the camelCase JSON shape used in mcp.json."""
        return {
            "name": self.name,
            "enabledTools": sorted(self.enabled_tools),
            "disabledTools": sorted(self.disabled_tools),
            "alwaysAllow": sorted(self.always_allow),
            "disabled": self.disabled,
        }

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "MCPServerToggles":
        """Parse one ``servers[]`` entry; missing keys get safe defaults."""
        return cls(
            name=data.get("name", ""),
            enabled_tools=set(data.get("enabledTools", [])),
            disabled_tools=set(data.get("disabledTools", [])),
            always_allow=set(data.get("alwaysAllow", [])),
            disabled=bool(data.get("disabled", False)),
        )
+
+
@dataclass
class MCPToggleRegistry:
    """Aggregate toggles loaded from global + project config files."""

    by_server: Dict[str, MCPServerToggles] = field(default_factory=dict)

    def get(self, server: str) -> MCPServerToggles:
        """Toggles for ``server``; unknown servers get permissive defaults."""
        return self.by_server.get(server) or MCPServerToggles(name=server)

    def is_tool_enabled(self, server: str, tool: str) -> bool:
        return self.get(server).is_tool_enabled(tool)

    def is_always_allowed(self, server: str, tool: str) -> bool:
        return self.get(server).is_always_allowed(tool)

    def register(self, toggles: MCPServerToggles) -> None:
        self.by_server[toggles.name] = toggles

    @classmethod
    def load(cls, workspace_path: Optional[Path] = None) -> "MCPToggleRegistry":
        """Load the global file first, then the project file (project wins)."""
        registry = cls()
        registry._merge_from(GLOBAL_MCP_PATH)
        if workspace_path is not None:
            registry._merge_from(workspace_path / PROJECT_MCP_REL)
        return registry

    def _merge_from(self, path: Path) -> None:
        """Merge server entries from one mcp.json; unreadable files are skipped."""
        if not path.exists():
            return
        try:
            parsed = json.loads(path.read_text(encoding="utf-8"))
        except Exception as e:  # noqa: BLE001
            logger.warning("could not read %s: %s", path, e)
            return
        # Accept either {"servers": [...]} or a bare top-level list.
        entries = parsed.get("servers", []) if isinstance(parsed, dict) else parsed
        if not isinstance(entries, list):
            return
        for item in entries:
            if isinstance(item, dict) and item.get("name"):
                self.register(MCPServerToggles.from_dict(item))
+
+
# ----------------------------------------------------------------------
# Output validator (defends against context poisoning via tool replies)
# ----------------------------------------------------------------------

@dataclass
class ToolOutputCheck:
    """Result of a tool-output sanity check."""

    ok: bool                         # False == caller must not inject this output
    reason: Optional[str] = None     # failure cause, or "truncated"
    sanitised: Optional[str] = None  # replacement text when output was rewritten


def validate_tool_output(
    raw: str,
    *,
    max_bytes: int = 256_000,
    forbid_control_chars: bool = True,
) -> ToolOutputCheck:
    """Validate the text a tool wants to inject into context history.

    The check is conservative: oversize outputs are truncated rather
    than rejected (truncation is recorded via ``sanitised``), but
    obviously contaminated payloads (NUL bytes, bell, etc.) are flagged
    so the caller can ask the user instead of poisoning the prompt.

    Fix over the first draft: truncation is now measured in UTF-8 *bytes*
    (the unit ``max_bytes`` is specified in). The old
    ``text[: max_bytes // 2]`` sliced by characters, so a multi-byte
    payload could come back still larger than ``max_bytes``.
    """
    if raw is None:
        return ToolOutputCheck(ok=True, sanitised="")
    text = str(raw)
    if forbid_control_chars:
        # \t (0x09) and \n (0x0A) fall outside both ranges; \r is excluded
        # explicitly. Everything else below 0x20 counts as contamination.
        bad = [c for c in text if ord(c) < 0x09 or (0x0B <= ord(c) <= 0x1F and c not in "\r")]
        if bad:
            return ToolOutputCheck(ok=False, reason=f"control characters ({len(bad)})")
    encoded = text.encode("utf-8", errors="replace")
    if len(encoded) > max_bytes:
        marker = "\n…\n[truncated]\n"
        # Reserve room for the marker so sanitised never exceeds max_bytes.
        budget = max(0, max_bytes - len(marker.encode("utf-8")))
        # Byte-slice, then decode leniently so a split code point is dropped
        # instead of raising UnicodeDecodeError.
        clipped = encoded[:budget].decode("utf-8", errors="ignore")
        return ToolOutputCheck(ok=True, reason="truncated", sanitised=clipped + marker)
    return ToolOutputCheck(ok=True)
+
+
+def _glob_in_set(name: str, patterns: Iterable[str]) -> bool:
+ return any(fnmatch.fnmatchcase(name, p) for p in patterns)
diff --git a/gitpilot/mcp_tools_bridge.py b/gitpilot/mcp_tools_bridge.py
new file mode 100644
index 0000000000000000000000000000000000000000..24d8df51774c94bd84ad451d0654a6b0bfd3e1b1
--- /dev/null
+++ b/gitpilot/mcp_tools_bridge.py
@@ -0,0 +1,330 @@
+"""Expose every enabled MCP tool as a CrewAI agent tool.
+
+This is the "Claude Code feel" bridge: when a server is enabled in the
+MCP Servers tab and individual tools are ticked on, those tools appear
+in the agents' toolbox so the planner / coder / reviewer can pick them
+mid-conversation.
+
+Design contract (best practices applied)
+----------------------------------------
+
+* **Read-only by default.** Mutation tools are excluded unless the
+ caller explicitly opts in via ``include_mutation=True``.
+* **Failure-safe.** Every tool catches exceptions and returns a
+ human-readable error string. Agents never see a Python traceback;
+ they see a sentence like ``MCP tool 'postgres.describe_table' failed:
+ HTTP 401 (check the bearer token)``.
+* **Cheap to rebuild.** The bridge reads the local
+ :class:`~gitpilot.mcp_admin_api.MCPStore` on every call to
+ :func:`build_mcp_agent_tools`, so toggle changes in the UI take
+ effect on the next agent task without a process restart.
+* **Discoverable.** :func:`describe_available_tools` returns a
+ serialisable summary the planner (and the UI's "what can I do"
+ pane) can both consume.
+* **Bounded.** A hard cap on tool count keeps the LLM context budget
+ predictable; default 32 (overridable via env var).
+* **No global state.** No imports of CrewAI at module top-level so
+ this module is testable without crewai installed; we import it
+ lazily inside :func:`build_mcp_agent_tools`.
+"""
+from __future__ import annotations
+
+import json
+import logging
+import os
+from dataclasses import dataclass, field
+from typing import Any, Callable
+
+import httpx
+
+from gitpilot.mcp_admin_api import MCPStore, ServerState
+
logger = logging.getLogger(__name__)

# Env var overriding the hard cap on the number of MCP tools exposed to
# agents; the cap keeps the LLM context budget predictable.
ENV_MAX_TOOLS = "GITPILOT_MCP_BRIDGE_MAX_TOOLS"
# Cap used when the env var is unset.
DEFAULT_MAX_TOOLS = 32
+
+
+# ---------------------------------------------------------------------------
+# Tool descriptor (data only)
+# ---------------------------------------------------------------------------
@dataclass(frozen=True)
class MCPToolDescriptor:
    """Immutable record describing one callable MCP tool.

    Carries everything needed both to advertise the tool in an LLM tool
    list and to register it as a CrewAI ``@tool``.
    """

    name: str
    description: str
    server_id: str
    server_endpoint: str
    auth_token: str
    scope: str  # "read" | "plan" | "mutation"
    risk: str  # "low" | "medium" | "high"

    def short(self) -> str:
        """One-line human-readable label for the tool."""
        summary = self.description or self.scope
        return f"[{self.server_id}] {self.name} — {summary}"
+
+
+# ---------------------------------------------------------------------------
+# Build pipeline
+# ---------------------------------------------------------------------------
def _select_enabled_tools(
    snapshot,
    *,
    include_mutation: bool,
) -> list[MCPToolDescriptor]:
    """Walk the local store snapshot and return one descriptor per enabled tool."""
    selected: list[MCPToolDescriptor] = []
    for server_id, server in snapshot.servers.items():
        # Skip servers that are uninstalled, disabled, or orphaned; an
        # orphaned server keeps its store entry but advertises no tools
        # until the user re-attaches it.
        if not server.installed or not server.enabled or server.orphan:
            continue
        token = _resolve_token(server)
        overrides = server.tool_overrides or {}
        for tool_name, override in overrides.items():
            # Honor explicit user toggle-off.
            if override is False:
                continue
            risk, scope = _classify(tool_name)
            # Mutation-scoped tools are gated behind include_mutation.
            if scope == "mutation" and not include_mutation:
                continue
            descriptor = MCPToolDescriptor(
                name=tool_name,
                description=_describe(tool_name),
                server_id=server_id,
                server_endpoint=server.endpoint,
                auth_token=token,
                scope=scope,
                risk=risk,
            )
            selected.append(descriptor)
    return selected
+
+
+def _resolve_token(server: ServerState) -> str:
+ """Pick the token a tool call should use.
+
+ Priority: per-server token env var → fallback ``MCP_AUTH_TOKEN``.
+ Empty token is acceptable for unauthenticated dev gateways; the
+ server itself enforces the auth requirement.
+ """
+ if server.auth_token_env:
+ v = os.environ.get(server.auth_token_env)
+ if v:
+ return v
+ return os.environ.get("MCP_AUTH_TOKEN", "")
+
+
def _classify(tool_name: str) -> tuple[str, str]:
    """Return (risk, scope) for a tool name.

    Risk scoring is delegated to
    :func:`gitpilot.mcp_admin_api.classify_risk` — the same function the
    UI uses to colour badges — so the agent toolbox and UI never
    disagree. Scope drives gating:

    * low risk → "read" (always exposed when enabled)
    * medium/high risk → "mutation" (gated behind ``include_mutation``)
    """
    from gitpilot.mcp_admin_api import classify_risk

    risk = classify_risk(tool_name)
    if risk not in ("high", "medium"):
        return ("low", "read")
    return (risk, "mutation")
+
+
+def _describe(tool_name: str) -> str:
+ """Cheap human-friendly description.
+
+ Could query Forge for the long-form description, but the planner
+ only needs a short hint. Falls back to the tool's own segments.
+ """
+ if "." in tool_name:
+ ns, leaf = tool_name.split(".", 1)
+ return f"{ns} server: {leaf.replace('_', ' ')}"
+ return tool_name
+
+
+# ---------------------------------------------------------------------------
+# HTTP invocation
+# ---------------------------------------------------------------------------
async def invoke_remote_tool(
    descriptor: MCPToolDescriptor,
    arguments: dict[str, Any],
    *,
    http_client: httpx.AsyncClient | None = None,
    timeout_seconds: float = 30.0,
) -> dict[str, Any]:
    """Issue one ``tools/call`` against the descriptor's server.

    Pure async; never raises -- returns ``{"ok": False, "error": "..."}``
    on failure so the agent-facing wrapper can present a string.

    Fix over the original: a 200 response carrying a non-JSON body used
    to escape as ``json.JSONDecodeError`` (which is NOT an
    ``httpx.HTTPError``), violating the never-raises contract; it is now
    reported as an error dict like every other failure.

    Args:
        descriptor: Tool + server endpoint + auth info to call.
        arguments: JSON-serialisable arguments for the tool.
        http_client: Optional shared client; when omitted a one-shot
            client is created here and closed in ``finally``.
        timeout_seconds: Timeout applied to the one-shot client only.
    """
    headers: dict[str, str] = {
        "content-type": "application/json",
        "x-gitpilot-origin": "agent",
    }
    if descriptor.auth_token:
        headers["authorization"] = f"Bearer {descriptor.auth_token}"

    body = {"method": "tools/call", "tool": descriptor.name, "arguments": arguments}

    # Only close the client if we created it ourselves.
    own = http_client is None
    client = http_client or httpx.AsyncClient(timeout=timeout_seconds)
    try:
        resp = await client.post(
            descriptor.server_endpoint, json=body, headers=headers
        )
        if resp.status_code != 200:
            text = resp.text[:300]
            return {"ok": False, "error": f"HTTP {resp.status_code}: {text}"}
        try:
            payload = resp.json()
        except ValueError as exc:  # json.JSONDecodeError is a ValueError
            return {"ok": False, "error": f"invalid JSON response: {exc}"}
        return {"ok": True, "result": payload}
    except httpx.HTTPError as exc:
        return {"ok": False, "error": str(exc)}
    finally:
        if own:
            await client.aclose()
+
+
+# ---------------------------------------------------------------------------
+# Sync wrapper used by CrewAI tools
+# ---------------------------------------------------------------------------
+def _run_async(coro):
+ """Run an async function from a sync CrewAI tool body.
+
+ Uses ``asyncio.run`` when no loop is running, otherwise schedules
+ on the current loop with ``asyncio.run_coroutine_threadsafe`` --
+ matches the pattern in :mod:`gitpilot.agent_tools`.
+ """
+ import asyncio
+
+ try:
+ loop = asyncio.get_running_loop()
+ except RuntimeError:
+ return asyncio.run(coro)
+ # Already in a loop (rare under CrewAI but possible) -- spawn a
+ # dedicated thread so we don't deadlock.
+ import concurrent.futures
+
+ with concurrent.futures.ThreadPoolExecutor(max_workers=1) as pool:
+ return pool.submit(asyncio.run, coro).result()
+
+
def _make_callable(descriptor: MCPToolDescriptor) -> Callable[..., str]:
    """Turn a descriptor into a sync callable an agent can invoke.

    The wrapper accepts ``**kwargs`` so it stays compatible with
    CrewAI's runtime tool dispatcher; argument validity is enforced by
    the server-side ``inputSchema``, not here. Failures come back as a
    human-readable sentence, never as a traceback.
    """

    async def _invoke(arguments: dict[str, Any]) -> dict[str, Any]:
        return await invoke_remote_tool(descriptor, arguments)

    def _tool_fn(**kwargs: Any) -> str:
        response = _run_async(_invoke(dict(kwargs)))
        if not response.get("ok"):
            return f"MCP tool {descriptor.name!r} failed: {response.get('error', 'unknown error')}"
        result = response["result"]
        try:
            return json.dumps(result, indent=2, default=str)
        except (TypeError, ValueError):
            return str(result)

    # CrewAI derives the registered name / description from these.
    _tool_fn.__name__ = descriptor.name.replace(".", "_")
    _tool_fn.__doc__ = (
        f"{descriptor.description}\n"
        f"Server: {descriptor.server_id} · Scope: {descriptor.scope} · "
        f"Risk: {descriptor.risk}"
    )
    return _tool_fn
+
+
+# ---------------------------------------------------------------------------
+# Public API
+# ---------------------------------------------------------------------------
def build_mcp_agent_tools(
    *,
    store: MCPStore | None = None,
    include_mutation: bool = False,
    max_tools: int | None = None,
    policy: Any = None,
) -> list[Any]:
    """Build the live list of CrewAI tools backed by enabled MCP tools.

    Returns an empty list if MCP is disabled, no servers are enabled,
    or CrewAI is not importable. Never raises.

    Fix over the original: the tool cap was computed as
    ``int(os.environ.get(...))``, so a garbage value in
    ``GITPILOT_MCP_BRIDGE_MAX_TOOLS`` raised ``ValueError`` — violating
    the "Never raises" contract. Invalid env values now fall back to
    ``DEFAULT_MAX_TOOLS`` with a warning, and a negative cap (which
    would silently drop tools from the end via negative slicing) is
    clamped to zero.

    Batch P2-B — accepts an optional ``policy`` (a
    :class:`gitpilot.tool_groups.ToolPolicy`). When the
    ``lazy_tool_defs`` flag is on and ``policy`` is supplied, the
    descriptors are filtered through
    :func:`gitpilot.tool_def_pruner.prune_descriptors` *before* they
    enter the LLM tool definitions; smaller tool list → smaller
    prompt. When ``policy`` is ``None`` or the flag is off, behaviour
    is identical to the legacy code path.
    """
    # Re-read the store every call so UI toggle changes apply without restart.
    s = store or MCPStore()
    snap = s.load()
    descriptors = _select_enabled_tools(snap, include_mutation=include_mutation)

    if not descriptors:
        return []

    if max_tools is not None:
        cap = int(max_tools)
    else:
        raw_cap = os.environ.get(ENV_MAX_TOOLS)
        try:
            cap = int(raw_cap) if raw_cap is not None else DEFAULT_MAX_TOOLS
        except ValueError:
            logger.warning(
                "ignoring invalid %s=%r; using default %d",
                ENV_MAX_TOOLS, raw_cap, DEFAULT_MAX_TOOLS,
            )
            cap = DEFAULT_MAX_TOOLS
    descriptors = descriptors[: max(cap, 0)]

    if policy is not None:
        # Lazy import to keep this module decoupled from the pruner.
        from .tool_def_pruner import prune_descriptors as _prune
        descriptors, report = _prune(descriptors, policy=policy)
        if report.dropped:
            logger.info(
                "mcp-bridge: lazy_tool_defs pruned %d/%d descriptor(s) (%s)",
                report.dropped, report.dropped + report.kept, report.reason_counts,
            )

    try:
        from crewai.tools import tool as crewai_tool
    except Exception:
        logger.info("crewai not installed; mcp tool bridge returning raw callables")
        return [_make_callable(d) for d in descriptors]

    wrapped: list[Any] = []
    for descriptor in descriptors:
        fn = _make_callable(descriptor)
        # Decorate with CrewAI's @tool so the planner sees the description.
        decorated = crewai_tool(descriptor.short())(fn)
        wrapped.append(decorated)
    return wrapped
+
+
def describe_available_tools(
    *,
    store: MCPStore | None = None,
    include_mutation: bool = False,
) -> list[dict[str, Any]]:
    """Serialisable summary; used by the planner and a future UI pane."""
    source = store or MCPStore()
    snapshot = source.load()
    descriptors = _select_enabled_tools(snapshot, include_mutation=include_mutation)
    summaries: list[dict[str, Any]] = []
    for d in descriptors:
        summaries.append(
            {
                "name": d.name,
                "description": d.description,
                "server_id": d.server_id,
                "scope": d.scope,
                "risk": d.risk,
            }
        )
    return summaries
diff --git a/gitpilot/memory.py b/gitpilot/memory.py
new file mode 100644
index 0000000000000000000000000000000000000000..552e01e9d46096749c4cec4307616ff087f28589
--- /dev/null
+++ b/gitpilot/memory.py
@@ -0,0 +1,137 @@
+# gitpilot/memory.py
+"""Project context memory — the GITPILOT.md system.
+
+Loads project-specific conventions, rules, and context from:
+
+1. ``.gitpilot/GITPILOT.md`` — project root (committed to repo)
+2. ``.gitpilot/rules/*.md`` — modular rule files
+3. ``.gitpilot/memory.json`` — auto-learned patterns (local only)
+
+The combined context is injected into agent system prompts so they
+follow project conventions automatically.
+"""
+from __future__ import annotations
+
+import json
+import logging
+from dataclasses import dataclass, field
+from pathlib import Path
+from typing import Any, Dict, List
+
+logger = logging.getLogger(__name__)
+
MEMORY_FILE = "GITPILOT.md"        # project conventions file, committed to the repo
RULES_DIR = "rules"                # modular rule files: .gitpilot/rules/*.md
AUTO_MEMORY_FILE = "memory.json"   # auto-learned patterns, local only
MAX_CONVENTIONS_CHARS = 10_000     # cap on GITPILOT.md text injected into prompts
MAX_RULE_CHARS = 5_000             # cap per individual rule file
MAX_PATTERNS = 100                 # cap on stored learned patterns
+
+
@dataclass
class ProjectContext:
    """Combined project context for agent injection."""

    conventions: str = ""
    rules: List[str] = field(default_factory=list)
    auto_memory: Dict[str, Any] = field(default_factory=dict)

    def to_system_prompt(self) -> str:
        """Format as a system-prompt section to prepend to agent backstory."""
        sections: List[str] = []
        if self.conventions:
            sections.append("## Project Conventions\n\n" + self.conventions)
        if self.rules:
            joined_rules = "\n\n---\n\n".join(self.rules)
            sections.append(f"## Project Rules\n\n{joined_rules}")
        learned = self.auto_memory.get("patterns", [])
        if learned:
            bullet_list = "\n".join(f"- {item}" for item in learned)
            sections.append(f"## Learned Patterns\n\n{bullet_list}")
        return "\n\n".join(sections)

    @property
    def is_empty(self) -> bool:
        """True when no conventions, rules, or auto-memory were loaded."""
        return not (self.conventions or self.rules or self.auto_memory)
+
+
class MemoryManager:
    """Load and manage project-level context and conventions.

    All files live under ``<workspace>/.gitpilot/``. Reads are
    best-effort (a corrupt memory.json never raises), and every file is
    now read/written explicitly as UTF-8 — the original mixed explicit
    UTF-8 reads with locale-default reads/writes, which corrupts
    non-ASCII content on platforms whose locale encoding is not UTF-8.
    """

    def __init__(self, workspace_path: Path):
        self.workspace_path = workspace_path
        self.gitpilot_dir = workspace_path / ".gitpilot"

    def load_context(self) -> ProjectContext:
        """Assemble a ProjectContext from GITPILOT.md, rules/*.md and memory.json."""
        ctx = ProjectContext()

        # 1. GITPILOT.md — size-capped so a huge file cannot blow the prompt budget.
        md_path = self.gitpilot_dir / MEMORY_FILE
        if md_path.exists():
            ctx.conventions = md_path.read_text(encoding="utf-8")[
                :MAX_CONVENTIONS_CHARS
            ]

        # 2. rules/*.md — sorted so prompt ordering is deterministic.
        rules_dir = self.gitpilot_dir / RULES_DIR
        if rules_dir.is_dir():
            for rule_file in sorted(rules_dir.glob("*.md")):
                content = rule_file.read_text(encoding="utf-8")[:MAX_RULE_CHARS]
                ctx.rules.append(f"### {rule_file.stem}\n\n{content}")

        # 3. auto-learned memory — best-effort; unreadable JSON is skipped
        #    (logged at debug instead of silently swallowed).
        auto_path = self.gitpilot_dir / AUTO_MEMORY_FILE
        if auto_path.exists():
            try:
                ctx.auto_memory = json.loads(auto_path.read_text(encoding="utf-8"))
            except Exception:
                logger.debug("ignoring unreadable %s", auto_path, exc_info=True)

        return ctx

    def save_auto_memory(self, memory: Dict[str, Any]) -> None:
        """Persist the auto-learned memory dict as pretty-printed JSON."""
        self.gitpilot_dir.mkdir(parents=True, exist_ok=True)
        auto_path = self.gitpilot_dir / AUTO_MEMORY_FILE
        auto_path.write_text(json.dumps(memory, indent=2), encoding="utf-8")

    def add_learned_pattern(self, pattern: str) -> None:
        """Record *pattern*, deduplicated and capped at MAX_PATTERNS entries."""
        auto_path = self.gitpilot_dir / AUTO_MEMORY_FILE
        memory: Dict[str, Any] = {}
        if auto_path.exists():
            try:
                memory = json.loads(auto_path.read_text(encoding="utf-8"))
            except Exception:
                logger.debug("resetting unreadable %s", auto_path, exc_info=True)
        patterns = memory.setdefault("patterns", [])
        if pattern not in patterns:
            patterns.append(pattern)
        # Keep only the newest MAX_PATTERNS entries.
        memory["patterns"] = patterns[-MAX_PATTERNS:]
        self.save_auto_memory(memory)

    def init_project(self) -> Path:
        """Create .gitpilot/ with template GITPILOT.md. Returns path."""
        self.gitpilot_dir.mkdir(parents=True, exist_ok=True)
        md_path = self.gitpilot_dir / MEMORY_FILE
        if not md_path.exists():
            md_path.write_text(
                "# GitPilot Project Conventions\n\n"
                "\n"
                "\n\n"
                "## Code Style\n\n\n"
                "## Testing\n\n\n"
                "## Commit Messages\n\n\n",
                encoding="utf-8",
            )
        (self.gitpilot_dir / RULES_DIR).mkdir(exist_ok=True)
        return md_path

    def get_conventions_text(self) -> str:
        """Return the raw GITPILOT.md text, or '' when not initialised."""
        md_path = self.gitpilot_dir / MEMORY_FILE
        if md_path.exists():
            return md_path.read_text(encoding="utf-8")
        return ""

    def set_conventions_text(self, text: str) -> None:
        """Overwrite GITPILOT.md with *text*, creating .gitpilot/ if needed."""
        self.gitpilot_dir.mkdir(parents=True, exist_ok=True)
        md_path = self.gitpilot_dir / MEMORY_FILE
        md_path.write_text(text, encoding="utf-8")
diff --git a/gitpilot/mentions.py b/gitpilot/mentions.py
new file mode 100644
index 0000000000000000000000000000000000000000..c119b9b4f1258dc0c4a5881f25f3ff6f1dafc6d3
--- /dev/null
+++ b/gitpilot/mentions.py
@@ -0,0 +1,232 @@
+# gitpilot/mentions.py
+"""@-mention parser for chat input — additive context expander.
+
+Recognised tokens (additive, non-destructive — unknown tokens are left as-is)::
+
+ @/abs/path — single file (path under workspace)
+ @./rel/path — relative path resolved against workspace
+ @glob:src/**/*.ts — file glob expanded under workspace
+ @problems — current diagnostics (read from .gitpilot/problems.json
+ if present, otherwise empty)
+ @commit: — `git show ` summary
+ @diff: — `git diff ` summary
+ @selection — selection sent from the editor (falls back to
+ the GITPILOT_SELECTION env var)
+ @pr: — placeholder block; resolved by API layer
+
+The parser is intentionally pure-Python and side-effect-free except for
+shelling out to git when a commit/diff mention is encountered. All output
+is size-capped so a noisy mention can never blow the prompt budget.
+"""
+from __future__ import annotations
+
+import json
+import logging
+import os
+import re
+import subprocess
+from dataclasses import dataclass, field
+from pathlib import Path
+from typing import List, Optional
+
logger = logging.getLogger(__name__)

MAX_FILE_BYTES = 16_000   # max bytes of one mentioned file inlined into the prompt
MAX_GLOB_FILES = 12       # max files listed per @glob: mention
MAX_DIAGNOSTICS = 50      # max entries read from .gitpilot/problems.json
MAX_GIT_OUTPUT = 8_000    # max chars of git show/diff output per mention
+
+# A mention starts with @ and runs until whitespace OR the next @ that is
+# clearly the start of a fresh token (preceded by whitespace). We scan
+# greedily on the leading @ then stop at whitespace.
+_MENTION_RE = re.compile(r"(? str:
+ """Render expansions as a single markdown block, or '' if none."""
+ if not self.expansions:
+ return ""
+ parts = ["## Mentions"]
+ for exp in self.expansions:
+ head = f"### `@{exp.token}` ({exp.kind})"
+ if exp.error:
+ parts.append(f"{head}\n\n_error: {exp.error}_")
+ else:
+ parts.append(f"{head}\n\n{exp.body}")
+ return "\n\n".join(parts)
+
+
class MentionParser:
    """Parse and expand @-mentions in a chat message.

    Every expander is size-capped and failure-safe: a bad token yields
    an :class:`ExpandedMention` carrying ``error`` rather than raising.
    """

    def __init__(
        self,
        workspace_path: Path,
        *,
        max_file_bytes: int = MAX_FILE_BYTES,
        max_glob_files: int = MAX_GLOB_FILES,
    ) -> None:
        self.workspace_path = workspace_path.resolve()
        self.max_file_bytes = max_file_bytes
        self.max_glob_files = max_glob_files

    # ------------------------------------------------------------------
    # Public API
    # ------------------------------------------------------------------
    def parse(self, message: str) -> MentionResult:
        """Expand every @-mention in *message*.

        The message text itself is returned unmodified — mentions are
        additive context, not placeholders to strip.
        """
        if not message:
            return MentionResult(cleaned_message=message)

        expansions: List[ExpandedMention] = []
        for match in _MENTION_RE.finditer(message):
            token = match.group(1)
            expansions.append(self._expand_token(token))

        return MentionResult(cleaned_message=message, expansions=expansions)

    # ------------------------------------------------------------------
    # Token dispatch
    # ------------------------------------------------------------------
    def _expand_token(self, token: str) -> ExpandedMention:
        """Route one token to its expander; never raises."""
        try:
            if token == "problems":
                return self._expand_problems(token)
            if token == "selection":
                return self._expand_selection(token)
            if token.startswith("glob:"):
                return self._expand_glob(token, token[5:])
            if token.startswith("commit:"):
                return self._expand_commit(token, token[7:])
            if token.startswith("diff:"):
                return self._expand_diff(token, token[5:])
            if token.startswith("pr:"):
                # PR lookups need GitHub access; deferred to the API layer.
                return ExpandedMention(
                    token=token,
                    kind="pr",
                    body=f"_PR reference `{token[3:]}` will be resolved by the API layer._",
                )
            # Path-like: @/..., @./..., @../..., @name/path, or a bare
            # filename with a known source extension.
            if token.startswith(("/", "./", "../")) or "/" in token or token.endswith(
                (".py", ".ts", ".tsx", ".js", ".md", ".json", ".yaml", ".yml")
            ):
                return self._expand_file(token, token)
            return ExpandedMention(token=token, kind="unknown", body="", error="unrecognised token")
        except Exception as exc:  # pragma: no cover - defensive
            logger.debug("mention %s failed", token, exc_info=True)
            return ExpandedMention(token=token, kind="unknown", body="", error=str(exc))

    # ------------------------------------------------------------------
    # Expanders
    # ------------------------------------------------------------------
    def _resolve_under_workspace(self, raw: str) -> Path:
        """Resolve *raw* to an absolute path, refusing anything outside the workspace.

        Fixes two defects of the original implementation:

        * the containment test used ``str.startswith``, which accepted
          sibling directories sharing a name prefix (``/ws-evil``
          passed for workspace ``/ws``) — now uses ``Path.relative_to``;
        * ``raw.lstrip("./")`` stripped *all* leading ``.`` and ``/``
          characters, mangling legitimate names like ``.github/...``
          and silently rewriting ``../x`` to ``x`` instead of rejecting
          the traversal — relative paths are now joined verbatim and
          the resolved result is checked.

        Raises:
            PermissionError: when the resolved path escapes the workspace.
        """
        candidate = Path(raw)
        if candidate.is_absolute():
            # Absolute paths are allowed only if they live under the workspace.
            resolved = candidate.resolve()
        else:
            resolved = (self.workspace_path / raw).resolve()
        try:
            resolved.relative_to(self.workspace_path)
        except ValueError:
            raise PermissionError(f"path escapes workspace: {raw}")
        return resolved

    def _expand_file(self, token: str, raw: str) -> ExpandedMention:
        """Inline one file's (byte-capped) contents as a fenced code block."""
        path = self._resolve_under_workspace(raw)
        if not path.exists() or not path.is_file():
            return ExpandedMention(token=token, kind="file", body="", error="not found")
        data = path.read_bytes()[: self.max_file_bytes]
        text = data.decode("utf-8", errors="replace")
        rel = path.relative_to(self.workspace_path)
        body = f"```{_guess_lang(path)} title={rel}\n{text}\n```"
        return ExpandedMention(token=token, kind="file", body=body)

    def _expand_glob(self, token: str, pattern: str) -> ExpandedMention:
        """List up to ``max_glob_files`` workspace files matching *pattern*."""
        files = sorted(self.workspace_path.glob(pattern))[: self.max_glob_files]
        if not files:
            return ExpandedMention(token=token, kind="glob", body="", error="no matches")
        rel = [str(p.relative_to(self.workspace_path)) for p in files]
        body = "Matched files:\n" + "\n".join(f"- `{r}`" for r in rel)
        return ExpandedMention(token=token, kind="glob", body=body)

    def _expand_problems(self, token: str) -> ExpandedMention:
        """Summarise diagnostics from .gitpilot/problems.json, if present."""
        path = self.workspace_path / ".gitpilot" / "problems.json"
        if not path.exists():
            return ExpandedMention(token=token, kind="problems", body="_no diagnostics file present_")
        try:
            items = json.loads(path.read_text(encoding="utf-8"))[:MAX_DIAGNOSTICS]
        except Exception as e:
            return ExpandedMention(token=token, kind="problems", body="", error=str(e))
        lines = []
        for it in items:
            sev = it.get("severity", "info")
            file_ = it.get("file", "?")
            line = it.get("line", "?")
            msg = it.get("message", "")
            lines.append(f"- [{sev}] {file_}:{line} — {msg}")
        return ExpandedMention(token=token, kind="problems", body="\n".join(lines) or "_no diagnostics_")

    def _expand_selection(self, token: str) -> ExpandedMention:
        """Inline the editor selection passed via the GITPILOT_SELECTION env var."""
        text = os.environ.get("GITPILOT_SELECTION", "")
        if not text:
            return ExpandedMention(token=token, kind="selection", body="", error="no selection")
        return ExpandedMention(token=token, kind="selection", body=f"```\n{text[:self.max_file_bytes]}\n```")

    def _expand_commit(self, token: str, sha: str) -> ExpandedMention:
        """Summarise one commit via ``git show --stat --patch``."""
        out = self._git("show", "--stat", "--patch", sha)
        if out is None:
            return ExpandedMention(token=token, kind="commit", body="", error="git failed")
        return ExpandedMention(token=token, kind="commit", body=f"```diff\n{out[:MAX_GIT_OUTPUT]}\n```")

    def _expand_diff(self, token: str, rng: str) -> ExpandedMention:
        """Summarise a revision range via ``git diff --stat --patch``."""
        out = self._git("diff", "--stat", "--patch", rng)
        if out is None:
            return ExpandedMention(token=token, kind="diff", body="", error="git failed")
        return ExpandedMention(token=token, kind="diff", body=f"```diff\n{out[:MAX_GIT_OUTPUT]}\n```")

    def _git(self, *args: str) -> Optional[str]:
        """Run git in the workspace; None on any failure (timeout, non-zero exit)."""
        try:
            proc = subprocess.run(
                ["git", *args],
                cwd=str(self.workspace_path),
                capture_output=True,
                text=True,
                timeout=15,
                check=False,
            )
            if proc.returncode != 0:
                return None
            return proc.stdout
        except Exception:
            return None
+
+
+_LANG_BY_EXT = {
+ ".py": "python", ".ts": "ts", ".tsx": "tsx", ".js": "js", ".jsx": "jsx",
+ ".rs": "rust", ".go": "go", ".java": "java", ".rb": "ruby",
+ ".md": "md", ".json": "json", ".yaml": "yaml", ".yml": "yaml",
+ ".toml": "toml", ".sql": "sql", ".sh": "bash",
+}
+
+
+def _guess_lang(path: Path) -> str:
+ return _LANG_BY_EXT.get(path.suffix.lower(), "")
+
+
def expand(message: str, workspace_path: Path) -> MentionResult:
    """Module-level convenience wrapper around :meth:`MentionParser.parse`."""
    parser = MentionParser(workspace_path)
    return parser.parse(message)
diff --git a/gitpilot/model_catalog.py b/gitpilot/model_catalog.py
new file mode 100644
index 0000000000000000000000000000000000000000..0656298644852fc50180f366fd0ae4e182b8f9f7
--- /dev/null
+++ b/gitpilot/model_catalog.py
@@ -0,0 +1,197 @@
+# gitpilot/model_catalog.py
+from __future__ import annotations
+
+import os
+from datetime import datetime
+from typing import List, Tuple, Optional, Dict, Any
+
+import requests
+
+from .settings import AppSettings, LLMProvider, get_settings
+
+# --- Watsonx.ai config (public endpoint, no key needed for IBM-managed models) ---
+
# Regional Watsonx endpoints queried for the union of available models.
WATSONX_BASE_URLS = [
    "https://us-south.ml.cloud.ibm.com",
    "https://eu-de.ml.cloud.ibm.com",
    "https://jp-tok.ml.cloud.ibm.com",
    "https://au-syd.ml.cloud.ibm.com",
]

# Public foundation-model-specs endpoint (no API key needed for IBM-managed models).
WATSONX_ENDPOINT = "/ml/v1/foundation_model_specs"
WATSONX_PARAMS = {
    "version": "2024-09-16",
    # Exclude embedding-only models and anything already withdrawn.
    "filters": "!function_embedding,!lifecycle_withdrawn",
}
# NOTE(review): evaluated once at import time — a long-running process
# keeps comparing lifecycle dates against a stale date.
TODAY = datetime.today().strftime("%Y-%m-%d")
+
+
+def _is_deprecated_or_withdrawn(lifecycle: List[Dict[str, Any]]) -> bool:
+ """Return True if a model lifecycle includes a deprecated/withdrawn item active today."""
+ for entry in lifecycle:
+ if entry.get("id") in {"deprecated", "withdrawn"} and entry.get("start_date", "") <= TODAY:
+ return True
+ return False
+
+
+# --- Provider-specific listing functions --------------------------------------
+
+
def _list_openai_models(settings: AppSettings) -> Tuple[List[str], Optional[str]]:
    """
    List model IDs visible to the configured key via OpenAI's GET /v1/models.
    Requires settings.openai.api_key or the OPENAI_API_KEY env var.
    Returns (sorted unique model ids, None) on success, ([], error) on failure.
    """
    api_key = settings.openai.api_key or os.getenv("OPENAI_API_KEY")
    if not api_key:
        return [], "OpenAI API key not configured"

    base_url = os.getenv("OPENAI_BASE_URL", "https://api.openai.com").rstrip("/")
    url = f"{base_url}/v1/models"
    headers = {"Authorization": f"Bearer {api_key}"}

    try:
        resp = requests.get(url, headers=headers, timeout=10)
        resp.raise_for_status()
        entries = resp.json().get("data", [])
        ids = {entry.get("id", "") for entry in entries if entry.get("id")}
        return sorted(ids), None
    except Exception as e:
        return [], f"Error listing OpenAI models: {e}"
+
+
def _list_claude_models(settings: AppSettings) -> Tuple[List[str], Optional[str]]:
    """
    List Claude models visible to the configured key via Anthropic's
    GET /v1/models. Requires settings.claude.api_key or ANTHROPIC_API_KEY.
    Returns (sorted unique model ids, None) on success, ([], error) on failure.
    """
    api_key = settings.claude.api_key or os.getenv("ANTHROPIC_API_KEY")
    if not api_key:
        return [], "Claude (Anthropic) API key not configured"

    base_url = os.getenv("ANTHROPIC_BASE_URL", "https://api.anthropic.com").rstrip("/")
    url = f"{base_url}/v1/models"
    headers = {
        "x-api-key": api_key,
        "anthropic-version": os.getenv("ANTHROPIC_VERSION", "2023-06-01"),
    }

    try:
        resp = requests.get(url, headers=headers, timeout=10)
        resp.raise_for_status()
        entries = resp.json().get("data", [])
        ids = {entry.get("id", "") for entry in entries if entry.get("id")}
        return sorted(ids), None
    except Exception as e:
        return [], f"Error listing Claude models: {e}"
+
+
def _list_watsonx_models(settings: AppSettings) -> Tuple[List[str], Optional[str]]:
    """
    List foundation models from the Watsonx public specs endpoint.
    No API key is required for IBM-managed models. Every major region is
    queried and the union is returned as a sorted list of model_id values.
    """
    discovered = set()

    for base in WATSONX_BASE_URLS:
        url = f"{base}{WATSONX_ENDPOINT}"
        try:
            resp = requests.get(url, params=WATSONX_PARAMS, timeout=10)
            resp.raise_for_status()
            for spec in resp.json().get("resources", []):
                if _is_deprecated_or_withdrawn(spec.get("lifecycle", [])):
                    continue
                model_id = spec.get("model_id")
                if model_id:
                    discovered.add(model_id)
        except Exception:
            # A region that is down or unreachable is simply skipped.
            continue

    if not discovered:
        return [], "No Watsonx models found (public specs call failed for all regions?)"

    return sorted(discovered), None
+
+
def _list_ollama_models(settings: AppSettings) -> Tuple[List[str], Optional[str]]:
    """
    List models from a local/remote Ollama server via GET /api/tags.
    Base URL comes from settings.ollama.base_url, then OLLAMA_BASE_URL,
    defaulting to http://localhost:11434.
    """
    base_url = getattr(settings.ollama, "base_url", None) or os.getenv(
        "OLLAMA_BASE_URL", "http://localhost:11434"
    )
    url = f"{base_url.rstrip('/')}/api/tags"

    try:
        resp = requests.get(url, timeout=5)
        resp.raise_for_status()
        entries = resp.json().get("models", [])
        names = {entry.get("name", "") for entry in entries if entry.get("name")}
        return sorted(names), None
    except Exception as e:
        return [], f"Error listing Ollama models from {url}: {e}"
+
+
def _list_ollabridge_models(settings: AppSettings) -> Tuple[List[str], Optional[str]]:
    """
    List models from an OllaBridge / OllaBridge Cloud instance via the
    OpenAI-compatible GET /v1/models endpoint. A bearer token is sent
    only when an API key is configured.
    """
    base_url = getattr(settings.ollabridge, "base_url", None) or os.getenv(
        "OLLABRIDGE_BASE_URL", "http://localhost:8000"
    )
    api_key = getattr(settings.ollabridge, "api_key", None) or os.getenv("OLLABRIDGE_API_KEY", "")
    url = f"{base_url.rstrip('/')}/v1/models"

    headers: Dict[str, str] = {}
    if api_key:
        headers["Authorization"] = f"Bearer {api_key}"

    try:
        resp = requests.get(url, headers=headers, timeout=10)
        resp.raise_for_status()
        entries = resp.json().get("data", [])
        ids = {entry.get("id", "") for entry in entries if entry.get("id")}
        return sorted(ids), None
    except Exception as e:
        return [], f"Error listing OllaBridge models from {url}: {e}"
+
+
+# --- Public helper ------------------------------------------------------------
+
+
def list_models_for_provider(
    provider: LLMProvider,
    settings: Optional[AppSettings] = None,
) -> Tuple[List[str], Optional[str]]:
    """
    Return (models, error) for a given provider.

    models: list of strings (model IDs / names)
    error: human-readable error if something went wrong, otherwise None
    """
    if settings is None:
        settings = get_settings()

    handlers = {
        LLMProvider.openai: _list_openai_models,
        LLMProvider.claude: _list_claude_models,
        LLMProvider.watsonx: _list_watsonx_models,
        LLMProvider.ollama: _list_ollama_models,
        LLMProvider.ollabridge: _list_ollabridge_models,
    }
    handler = handlers.get(provider)
    if handler is None:
        return [], f"Unsupported provider: {provider}"
    return handler(settings)
diff --git a/gitpilot/models.py b/gitpilot/models.py
new file mode 100644
index 0000000000000000000000000000000000000000..9c86d3db4ff1723b62aa3ad18f695719753c0a80
--- /dev/null
+++ b/gitpilot/models.py
@@ -0,0 +1,279 @@
+"""
+GitPilot Redesign — Shared Models & Schemas
+Centralized Pydantic models for the redesigned API contract.
+"""
+
+from enum import StrEnum
+from typing import Any, Literal
+
+from pydantic import BaseModel, Field
+
+
+# ─── Enums ────────────────────────────────────────────────
+
+class WorkspaceMode(StrEnum):
+    """How a workspace is anchored: plain folder, local git repo, or GitHub."""
+    folder = "folder"
+    local_git = "local_git"
+    github = "github"
+
+
+class ProviderName(StrEnum):
+    """Supported LLM provider identifiers."""
+    openai = "openai"
+    claude = "claude"
+    watsonx = "watsonx"
+    ollama = "ollama"
+    ollabridge = "ollabridge"
+
+
+class ProviderHealth(StrEnum):
+    """Coarse health state reported for a provider connection."""
+    ok = "ok"
+    warning = "warning"
+    error = "error"
+    unknown = "unknown"
+
+
+class ProviderConnectionType(StrEnum):
+    """How the backend reaches the provider (local process, API key, etc.)."""
+    local = "local"
+    api_key = "api_key"
+    pairing = "pairing"
+    cloud = "cloud"
+    managed = "managed"
+
+
+class SessionMode(StrEnum):
+    """Session anchoring mode.
+
+    NOTE(review): member-for-member duplicate of WorkspaceMode — presumably
+    kept separate for API clarity; consider aliasing to avoid drift.
+    """
+    folder = "folder"
+    local_git = "local_git"
+    github = "github"
+
+
+# ─── Provider Models ─────────────────────────────────────
+
+class ProviderSummary(BaseModel):
+    """Snapshot of the active provider's configuration and health.
+
+    NOTE(review): field-for-field identical to ProviderStatusResponse except
+    that ``configured`` / ``name`` have defaults here — consider sharing a base.
+    """
+    configured: bool = False
+    name: ProviderName = ProviderName.ollama
+    source: Literal[".env", "settings", "unknown"] = "unknown"
+    model: str | None = None
+    base_url: str | None = None
+    connection_type: ProviderConnectionType | None = None
+    has_api_key: bool = False
+    health: ProviderHealth | None = ProviderHealth.unknown
+    models_available: bool | None = None
+    warning: str | None = None
+
+
+class ProviderStatusResponse(BaseModel):
+    """API response body describing the current provider status.
+
+    ``configured`` and ``name`` are required; everything else is optional
+    detail (model, endpoint, auth presence, health, and a warning string).
+    """
+    configured: bool
+    name: ProviderName
+    source: Literal[".env", "settings", "unknown"] = "unknown"
+    model: str | None = None
+    base_url: str | None = None
+    connection_type: ProviderConnectionType | None = None
+    has_api_key: bool = False
+    health: ProviderHealth | None = ProviderHealth.unknown
+    models_available: bool | None = None
+    warning: str | None = None
+
+
+# ─── Workspace Models ────────────────────────────────────
+
+class WorkspaceCapabilitySummary(BaseModel):
+    """Which workspace modes are currently available to the client."""
+    folder_mode_available: bool = False
+    local_git_available: bool = False
+    github_mode_available: bool = False
+
+
+class WorkspaceSummary(BaseModel):
+    """State of the currently open workspace (folder and optional git repo)."""
+    folder_open: bool = False
+    folder_path: str | None = None
+    folder_name: str | None = None
+    git_detected: bool = False
+    repo_root: str | None = None
+    repo_name: str | None = None
+    branch: str | None = None
+    remotes: list[str] = Field(default_factory=list)
+
+
+# ─── GitHub Models ────────────────────────────────────────
+
+class GithubStatusSummary(BaseModel):
+    """GitHub connectivity: token presence, connection state, and user."""
+    connected: bool = False
+    token_configured: bool = False
+    username: str | None = None
+
+
+# ─── Status Response ─────────────────────────────────────
+
+class StatusResponse(BaseModel):
+    """Top-level server status: provider, workspace capabilities, GitHub."""
+    server_ready: bool = True
+    provider: ProviderStatusResponse
+    workspace: WorkspaceCapabilitySummary
+    github: GithubStatusSummary
+
+
+# ─── Session Models ──────────────────────────────────────
+
+class StartSessionRequest(BaseModel):
+    """Request to start a session; only the fields relevant to ``mode`` apply
+    (folder_path for folder mode, repo_root for local git, repo_full_name /
+    branch for GitHub)."""
+    mode: WorkspaceMode
+    folder_path: str | None = None
+    repo_root: str | None = None
+    repo_full_name: str | None = None
+    branch: str | None = None
+
+
+class StartSessionResponse(BaseModel):
+    """Newly created session descriptor; ``status`` is always ``"active"``."""
+    session_id: str
+    mode: WorkspaceMode
+    title: str
+    status: Literal["active"] = "active"
+    folder_path: str | None = None
+    repo_root: str | None = None
+    repo_full_name: str | None = None
+    branch: str | None = None
+
+
+# ─── Chat / Task Models ──────────────────────────────────
+
+class PlanStepSummary(BaseModel):
+    """One numbered step of a generated plan, with its lifecycle status."""
+    step: int
+    title: str
+    action: str
+    file: str | None = None
+    description: str
+    status: Literal["pending", "ready", "applied", "failed"] | None = "pending"
+
+
+class PlanSummary(BaseModel):
+    """A goal plus its ordered steps, as proposed to the user."""
+    goal: str
+    summary: str
+    steps: list[PlanStepSummary]
+
+
+class FileReference(BaseModel):
+    """A pointer to a file, optionally to a specific line."""
+    path: str
+    line: int | None = None
+
+
+class FileTreeEntry(BaseModel):
+    """One entry in a workspace tree listing: a file or a directory."""
+    path: str
+    type: Literal["file", "dir"]
+
+
+class FileInScope(BaseModel):
+    """A file the assistant considers relevant, with why and how confident."""
+    path: str
+    reason: str | None = None
+    confidence: Literal["low", "medium", "high"] | None = None
+
+
+class ProposedEdit(BaseModel):
+    """A proposed change to one file.
+
+    ``kind`` selects the payload: ``content`` for create/replace,
+    ``diff`` for patch — presumably; neither field is enforced here.
+    """
+    file: str
+    kind: Literal["create", "replace", "patch"]
+    summary: str | None = None
+    diff: str | None = None
+    content: str | None = None
+
+
+class StructuredProjectContext(BaseModel):
+    """Client-supplied project snapshot attached to a chat message.
+
+    Field names are camelCase to match the frontend JSON contract.
+    """
+    mode: str | None = None
+    workspaceRoot: str | None = None
+    repoRoot: str | None = None
+    repoName: str | None = None
+    branch: str | None = None
+    languages: list[str] = Field(default_factory=list)
+    manifests: list[str] = Field(default_factory=list)
+    keyFiles: list[str] = Field(default_factory=list)
+    readmePreview: str | None = None
+    treeSummary: list[FileTreeEntry] = Field(default_factory=list)
+    indexedAt: str | None = None
+
+
+class StructuredWorkingSet(BaseModel):
+    """Editor working set (current file/selection, open tabs, related files);
+    camelCase matches the frontend JSON contract."""
+    currentFile: str | None = None
+    languageId: str | None = None
+    currentSelection: str | None = None
+    openTabs: list[str] = Field(default_factory=list)
+    recentFiles: list[str] = Field(default_factory=list)
+    relatedFiles: list[str] = Field(default_factory=list)
+
+
+class StructuredTaskContext(BaseModel):
+    """Optional task framing: what the user intends and at what scope."""
+    intent: str | None = None
+    scope: Literal["workspace", "selection", "file"] | None = None
+    summary: str | None = None
+
+
+class ChatMessageRequest(BaseModel):
+    """Inbound chat message plus optional structured context blocks."""
+    session_id: str
+    message: str
+    scope: Literal["workspace", "selection", "file"] = "workspace"
+    topology_id: str | None = None
+    intent: str | None = None
+    project_context: StructuredProjectContext | None = None
+    working_set: StructuredWorkingSet | None = None
+    task_context: StructuredTaskContext | None = None
+
+
+class ChatMessageResponse(BaseModel):
+    """Assistant reply: answer text plus optional plan, scoped files, edits,
+    and file references (camelCase fields mirror the frontend contract)."""
+    session_id: str
+    answer: str
+    message_id: str | None = None
+    plan: PlanSummary | None = None
+    filesInScope: list[FileInScope] = Field(default_factory=list)
+    edits: list[ProposedEdit] = Field(default_factory=list)
+    references: list[FileReference] = Field(default_factory=list)
+
+
+# ─── Provider Test Models ────────────────────────────────
+
+class OpenAIProviderInput(BaseModel):
+    """OpenAI connection overrides for a provider test."""
+    api_key: str | None = None
+    base_url: str | None = None
+    model: str | None = None
+
+
+class ClaudeProviderInput(BaseModel):
+    """Anthropic Claude connection overrides for a provider test."""
+    api_key: str | None = None
+    base_url: str | None = None
+    model: str | None = None
+
+
+class WatsonxProviderInput(BaseModel):
+    """IBM watsonx connection overrides; note ``model_id`` (not ``model``)."""
+    api_key: str | None = None
+    project_id: str | None = None
+    base_url: str | None = None
+    model_id: str | None = None
+
+
+class OllamaProviderInput(BaseModel):
+    """Ollama connection overrides for a provider test."""
+    base_url: str | None = None
+    model: str | None = None
+
+
+class OllaBridgeProviderInput(BaseModel):
+    """OllaBridge connection overrides, including the connection type."""
+    base_url: str | None = None
+    model: str | None = None
+    api_key: str | None = None
+    connection_type: ProviderConnectionType | None = None
+
+
+class ProviderTestRequest(BaseModel):
+    """Provider test request: ``provider`` selects which input block applies;
+    the other per-provider blocks may be omitted."""
+    provider: ProviderName
+    openai: OpenAIProviderInput | None = None
+    claude: ClaudeProviderInput | None = None
+    watsonx: WatsonxProviderInput | None = None
+    ollama: OllamaProviderInput | None = None
+    ollabridge: OllaBridgeProviderInput | None = None
+
+
+class ProviderTestResponse(ProviderStatusResponse):
+    """Provider status plus a free-form ``details`` string from the test."""
+    details: str | None = None
+
+
+# ─── OllaBridge Health ────────────────────────────────────
+
+class OllaBridgeHealthResponse(BaseModel):
+    """Health probe result for an OllaBridge endpoint: reachability,
+    the resolved API base, model availability, and auth mode."""
+    status: Literal["ok", "error"]
+    base_url: str
+    effective_api_base: str
+    models_available: bool = False
+    auth_mode: str = "unknown"
+    warning: str | None = None
\ No newline at end of file
diff --git a/gitpilot/modes.py b/gitpilot/modes.py
new file mode 100644
index 0000000000000000000000000000000000000000..7328dc17503ef267707448e5142046301e5e0574
--- /dev/null
+++ b/gitpilot/modes.py
@@ -0,0 +1,493 @@
+# gitpilot/modes.py
+"""Custom modes — declarative YAML personas with bound tool policies.
+
+A mode is a YAML record describing GitPilot's behaviour for a session.
+Schema is intentionally minimal so a developer can add a new mode (and
+attach new MCP servers to it) in a few lines.
+
+Files searched, in this order::
+
+ ~/.gitpilot/modes.yaml — user-global modes
+ .gitpilot/modes.yaml — project modes (project wins on slug clash)
+
+Example::
+
+ customModes:
+ - slug: db-pilot
+ name: "DB Pilot"
+ description: "Natural-language queries against staging Postgres"
+ roleDefinition: |
+ You are a senior DBA. Always EXPLAIN before mutating.
+ whenToUse: |
+ User asks about schema, queries, or migrations.
+ groups:
+ - read
+ - mcp:
+ allow: ["postgres.query", "postgres.explain"]
+ alwaysAllow: ["postgres.explain"]
+ - edit:
+ fileRegex: "^migrations/.*\\.sql$"
+ customInstructions: |
+ Refuse DROP / TRUNCATE without explicit confirmation.
+ mcpServers:
+ postgres:
+ command: uvx
+ args: [mcp-postgres-server]
+ env: { PG_URL: "${STAGING_PG_URL}" }
+ alwaysAllow: [postgres.explain]
+
+Nothing in :mod:`gitpilot.modes` mutates the legacy code path — callers
+opt in by instantiating :class:`ModeRegistry` and asking for the
+:class:`Mode` they want to activate.
+"""
+from __future__ import annotations
+
+import json
+import logging
+import os
+import re
+from dataclasses import dataclass, field
+from pathlib import Path
+from typing import Any, Dict, List, Optional, Tuple
+
+from .tool_groups import ToolPolicy
+
+logger = logging.getLogger(__name__)
+
+# Mode definition files. The user-global file is loaded first, then the
+# project-local file, so project modes win on a slug clash (see ModeRegistry).
+USER_MODES_FILE = Path.home() / ".gitpilot" / "modes.yaml"
+PROJECT_MODES_REL = Path(".gitpilot") / "modes.yaml"
+
+
+# ----------------------------------------------------------------------
+# Data
+# ----------------------------------------------------------------------
+
+@dataclass
+class ModeMCPServer:
+    """An MCP server declared inline by a mode."""
+
+    name: str
+    command: Optional[str] = None  # stdio transport: executable to spawn
+    args: List[str] = field(default_factory=list)
+    env: Dict[str, str] = field(default_factory=dict)
+    url: Optional[str] = None  # SSE endpoint URL
+    http_url: Optional[str] = None  # HTTP endpoint URL (takes precedence over url)
+    headers: Dict[str, str] = field(default_factory=dict)
+    always_allow: List[str] = field(default_factory=list)  # tools auto-approved without prompting
+    enabled_tools: List[str] = field(default_factory=list)  # allow-list of tools exposed
+
+    def to_mcp_client_dict(self) -> Dict[str, Any]:
+        """Render as the dict shape :class:`MCPServerConfig` accepts.
+
+        Transport is inferred from which fields are set: ``http`` when
+        ``http_url`` is present, ``sse`` when only ``url`` is, ``stdio``
+        otherwise.
+        """
+        transport = "stdio"
+        if self.http_url:
+            transport = "http"
+        elif self.url:
+            transport = "sse"
+        return {
+            "name": self.name,
+            "transport": transport,
+            "command": self.command,
+            "args": self.args,
+            "env": self.env,
+            "url": self.url or self.http_url,
+            "headers": self.headers,
+        }
+
+
+@dataclass
+class Mode:
+    """A declarative GitPilot mode: persona text, tool groups, MCP servers."""
+
+    slug: str
+    name: str
+    description: str = ""
+    role_definition: str = ""
+    when_to_use: str = ""
+    custom_instructions: str = ""
+    groups: List[Any] = field(default_factory=list)  # raw group entries; interpreted by ToolPolicy
+    mcp_servers: Dict[str, ModeMCPServer] = field(default_factory=dict)
+    source: str = ""  # "user" | "project"
+
+    def tool_policy(self) -> ToolPolicy:
+        """Derive the tool policy from this mode's raw ``groups`` entries."""
+        return ToolPolicy.from_mode_groups(self.groups)
+
+    def system_prompt_block(self) -> str:
+        """Build the markdown block injected into the agent system prompt.
+
+        Only non-empty sections are emitted; sections are joined with a
+        blank line between them.
+        """
+        parts: List[str] = []
+        if self.role_definition:
+            parts.append(f"## Role\n{self.role_definition.strip()}")
+        if self.when_to_use:
+            parts.append(f"## When to use this mode\n{self.when_to_use.strip()}")
+        if self.custom_instructions:
+            parts.append(f"## Mode instructions\n{self.custom_instructions.strip()}")
+        return "\n\n".join(parts)
+
+
+# ----------------------------------------------------------------------
+# Registry / loader
+# ----------------------------------------------------------------------
+
+class ModeRegistry:
+    """Discover modes from user + project YAML files.
+
+    Modes are keyed by slug. Because the project file is loaded after the
+    user file in :meth:`load`, a project mode with the same slug replaces
+    the user-global one.
+    """
+
+    def __init__(self) -> None:
+        self._modes: Dict[str, Mode] = {}
+
+    # ----- public ---------------------------------------------------
+    def load(self, workspace_path: Optional[Path] = None) -> int:
+        """Load user-global modes, then project modes; return count loaded."""
+        count = 0
+        count += self._load_file(USER_MODES_FILE, source="user")
+        if workspace_path is not None:
+            count += self._load_file(workspace_path / PROJECT_MODES_REL, source="project")
+        return count
+
+    def register(self, mode: Mode) -> None:
+        """Register (or replace) a mode programmatically."""
+        self._modes[mode.slug] = mode
+
+    def get(self, slug: str) -> Optional[Mode]:
+        """Return the mode registered under ``slug``, or None if unknown."""
+        return self._modes.get(slug)
+
+    def all(self) -> List[Mode]:
+        """Return all registered modes."""
+        return list(self._modes.values())
+
+    def listing(self) -> List[Dict[str, str]]:
+        """Return a lightweight slug/name/description/source summary per mode."""
+        return [
+            {
+                "slug": m.slug,
+                "name": m.name,
+                "description": m.description,
+                "source": m.source,
+            }
+            for m in self._modes.values()
+        ]
+
+    # ----- loading --------------------------------------------------
+    def _load_file(self, path: Path, *, source: str) -> int:
+        """Parse one modes file and register its entries; return the count.
+
+        Missing, unparseable, or malformed files contribute zero — parse
+        failures are logged as warnings rather than raised.
+        """
+        if not path.exists():
+            return 0
+        try:
+            data = _load_yaml_or_json(path.read_text(encoding="utf-8"))
+        except Exception as e:
+            logger.warning("could not parse modes file %s: %s", path, e)
+            return 0
+        modes = data.get("customModes") if isinstance(data, dict) else None
+        if not isinstance(modes, list):
+            return 0
+        count = 0
+        for entry in modes:
+            # Skip malformed entries: non-dict items and records without a slug.
+            if not isinstance(entry, dict):
+                continue
+            slug = entry.get("slug")
+            if not slug:
+                continue
+            mode = _build_mode(entry, source=source)
+            self._modes[slug] = mode  # project loaded second, wins
+            count += 1
+        return count
+
+
+def _build_mode(entry: Dict[str, Any], *, source: str) -> Mode:
+    """Convert one raw ``customModes`` entry into a :class:`Mode`.
+
+    Missing keys fall back to empty values, so a minimal entry needs only
+    a ``slug`` (the caller has already verified it is present). ``source``
+    records which file ("user" or "project") the entry came from.
+    """
+    mcp_servers: Dict[str, ModeMCPServer] = {}
+    raw_servers = entry.get("mcpServers") or {}
+    if isinstance(raw_servers, dict):
+        for name, cfg in raw_servers.items():
+            # Ignore malformed server entries that aren't mappings.
+            if not isinstance(cfg, dict):
+                continue
+            mcp_servers[name] = ModeMCPServer(
+                name=name,
+                command=cfg.get("command"),
+                args=list(cfg.get("args", [])),
+                # Env values may reference host variables, e.g. "${STAGING_PG_URL}".
+                env={k: _expand_env(v) for k, v in (cfg.get("env") or {}).items()},
+                url=cfg.get("url"),
+                # Accept both camelCase and snake_case spellings.
+                http_url=cfg.get("httpURL") or cfg.get("http_url"),
+                headers={**(cfg.get("headers") or {})},
+                always_allow=list(cfg.get("alwaysAllow", [])),
+                enabled_tools=list(cfg.get("enabledTools", [])),
+            )
+    return Mode(
+        slug=str(entry["slug"]),
+        name=str(entry.get("name", entry["slug"])),
+        description=str(entry.get("description", "")),
+        role_definition=str(entry.get("roleDefinition", "")),
+        when_to_use=str(entry.get("whenToUse", "")),
+        custom_instructions=str(entry.get("customInstructions", "")),
+        groups=list(entry.get("groups", [])),
+        mcp_servers=mcp_servers,
+        source=source,
+    )
+
+
+# ----------------------------------------------------------------------
+# Session lifecycle helper
+# ----------------------------------------------------------------------
+
+@dataclass
+class ActiveModeContext:
+    """Bundle of artefacts derived from the active mode for a session.
+
+    Returned by :func:`activate_mode` so the caller can:
+
+    * inject ``system_prompt_block`` into the agent system prompt
+    * pass ``tool_policy`` to the executor
+    * spin up the MCP servers listed in ``mcp_server_configs``
+      (each dict is ready for :class:`gitpilot.mcp_client.MCPServerConfig.from_dict`)
+    """
+
+    mode: Mode
+    system_prompt_block: str  # rendered markdown from Mode.system_prompt_block()
+    tool_policy: ToolPolicy
+    mcp_server_configs: List[Dict[str, Any]]
+    extra_mcp_toggles: List[Tuple[str, List[str], List[str]]]  # (server, allow, alwaysAllow)
+
+
+def activate_mode(registry: ModeRegistry, slug: str) -> Optional[ActiveModeContext]:
+    """Resolve a mode by slug and return the bundle to apply.
+
+    Returns ``None`` for an unknown slug — callers should fall back to
+    the legacy unconfigured behaviour.
+    """
+    mode = registry.get(slug)
+    if mode is None:
+        return None
+    # One ready-to-use MCPServerConfig dict per server declared by the mode.
+    server_configs = [s.to_mcp_client_dict() for s in mode.mcp_servers.values()]
+    # Per-server tool toggles: (server name, enabled tools, always-allowed tools).
+    extras = [
+        (s.name, list(s.enabled_tools), list(s.always_allow))
+        for s in mode.mcp_servers.values()
+    ]
+    return ActiveModeContext(
+        mode=mode,
+        system_prompt_block=mode.system_prompt_block(),
+        tool_policy=mode.tool_policy(),
+        mcp_server_configs=server_configs,
+        extra_mcp_toggles=extras,
+    )
+
+
+# ----------------------------------------------------------------------
+# Minimal YAML loader (no PyYAML dependency)
+# ----------------------------------------------------------------------
+
+def _expand_env(value: Any) -> str:
+    """Expand ``$VAR`` / ``${VAR}`` references in string values.
+
+    Non-string values are coerced to ``str`` without expansion; unknown
+    variables are left as-is (os.path.expandvars behaviour).
+    """
+    if isinstance(value, str):
+        return os.path.expandvars(value)
+    return str(value)
+
+
+def _load_yaml_or_json(text: str) -> Dict[str, Any]:
+    """Parse YAML or JSON text. Prefers ``yaml`` when installed.
+
+    Falls back to ``json`` for ``.yaml`` files that happen to be JSON
+    and to a tiny in-tree YAML subset otherwise. The subset supports
+    the shape used by ``modes.yaml``: nested mappings, lists, and
+    folded/block scalars.
+
+    Non-mapping top-level documents are reported as an empty dict.
+    NOTE(review): only ImportError is caught here, so with PyYAML
+    installed a malformed document raises YAMLError to the caller
+    (ModeRegistry._load_file catches and logs it).
+    """
+    try:
+        import yaml
+
+        loaded = yaml.safe_load(text)
+        if isinstance(loaded, dict):
+            return loaded
+        return {}
+    except ImportError:
+        # PyYAML not installed — try the fallbacks below.
+        pass
+    # Fast path: JSON masquerading as YAML.
+    stripped = text.strip()
+    if stripped.startswith("{"):
+        try:
+            parsed_json = json.loads(stripped)
+            if isinstance(parsed_json, dict):
+                return parsed_json
+        except Exception:
+            # Not valid JSON after all — fall through to the YAML subset.
+            pass
+    return _tiny_yaml(text)
+
+
+# --- in-tree minimal YAML parser ---------------------------------------
+# Supports: scalars, lists ("- foo"), nested maps via indentation, block
+# scalars ("|" and ">-"), and inline ``{a: 1, b: 2}`` / ``[a, b]`` flows.
+# Sufficient for ``modes.yaml`` examples shipped with GitPilot.
+
+_BLOCK_SCALAR_RE = re.compile(r"^(?P[^:#\s][^:]*):\s*(?P